diff --git a/.github/workflows/external.yml b/.github/workflows/external.yml index bd35fe1d22..09191f5477 100644 --- a/.github/workflows/external.yml +++ b/.github/workflows/external.yml @@ -9,6 +9,8 @@ name: External on: schedule: - cron: '1 15 1,15 * *' # 15:01 UTC on 1st and 15th of month + # for testing: + workflow_dispatch: jobs: proofs: diff --git a/.github/workflows/proof-deploy.yml b/.github/workflows/proof-deploy.yml index 4c73139350..bf2006f651 100644 --- a/.github/workflows/proof-deploy.yml +++ b/.github/workflows/proof-deploy.yml @@ -13,6 +13,8 @@ on: repository_dispatch: types: - manifest-update + # for testing: + workflow_dispatch: jobs: code: @@ -74,3 +76,32 @@ jobs: with: token: ${{ secrets.PRIV_REPO_TOKEN }} tag: "l4v/proof-deploy/${{ github.event_name }}" + + rebase: + name: Rebase platform branches + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + branch: [imx8-fpu-ver, exynos5-ver] + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + path: l4v-${{ matrix.branch }} + fetch-depth: 0 + # needed to trigger push actions on the -rebased branch + # (implict GITHUB_TOKEN does not trigger further push actions). + token: ${{ secrets.PRIV_REPO_TOKEN }} + - name: Rebase + run: | + cd l4v-${{ matrix.branch }} + git config --global user.name "seL4 CI" + git config --global user.email "ci@sel4.systems" + git rebase origin/master + git status + - name: Push + run: | + cd l4v-${{ matrix.branch }} + git push -f origin HEAD:${{ matrix.branch }}-rebased diff --git a/.github/workflows/proof.yml b/.github/workflows/proof.yml index 19299fc561..48a96ca0ce 100644 --- a/.github/workflows/proof.yml +++ b/.github/workflows/proof.yml @@ -2,7 +2,7 @@ # # SPDX-License-Identifier: BSD-2-Clause -name: Proofs +name: Proof PR on: push: diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 355bede7ba..a3a3317ec9 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -12,6 +12,8 @@ on: - rt - aarch64 pull_request: + # for testing: + workflow_dispatch: jobs: check: diff --git a/.github/workflows/weekly-clean.yml b/.github/workflows/weekly-clean.yml index 2c1ff80a2a..4bc06134ce 100644 --- a/.github/workflows/weekly-clean.yml +++ b/.github/workflows/weekly-clean.yml @@ -7,6 +7,8 @@ name: Weekly Clean on: schedule: - cron: '1 15 * * 6' # 15:01 UTC every Sat = 1:01 am Syd every Sun + # for testing: + workflow_dispatch: jobs: proofs: diff --git a/camkes/cdl-refine/Eval_CAMKES_CDL.thy b/camkes/cdl-refine/Eval_CAMKES_CDL.thy index a02b4ad5e5..1a1347223d 100644 --- a/camkes/cdl-refine/Eval_CAMKES_CDL.thy +++ b/camkes/cdl-refine/Eval_CAMKES_CDL.thy @@ -211,7 +211,7 @@ lemma Collect_asid_high__eval_helper: section \Assorted helpers\ lemma fun_upds_to_map_of[THEN eq_reflection]: "Map.empty = map_of []" - "(map_of xs(k \ v)) = map_of ((k, v) # xs)" + "((map_of xs)(k \ v)) = map_of ((k, v) # xs)" by auto lemma subst_eqn_helper: diff --git a/docs/Style.thy b/docs/Style.thy index ca65849ec8..0dfcde2b12 100644 --- a/docs/Style.thy +++ b/docs/Style.thy @@ -219,6 +219,138 @@ lemma test_lemma3: case_tac h; simp) done +section \Right vs left operator-wrapping\ + +text \ + When a term is too long, there is a general consensus to wrap it at operators. However, consensus + has never emerged on whether the operator should then end up at the end of the line (right + operator wrapping), or start of the next one (left operator wrapping). + Some people have a tendency towards right-wrapping, others towards left-wrapping. 
They + each have advantages in specific contexts, thus both appear in the l4v proofs and are permitted + style.\ + +term \A \ B \ C \ D\ \ \no wrapping when A..D are small terms\ + +term \A \ + B \ + C \ + D\ \ \right-wrapping when A..D are large terms\ + +term \A + \ B + \ C + \ D\ \ \left-wrapping when A..D are large terms\ + +text \ + While both styles are permitted, do not mix them in the same lemma. If a lemma already uses + one style and you aren't doing a major rewrite, stick to the existing style.\ + +lemma + shows "\ A; B; C\ \ + D" \ \right-wrapping OK\ + and "\ A; B; C\ + \ D" \ \left-wrapping OK\ + oops \ \mixing styles: NOT OK\ + +text \ + Some operators and syntax only have ONE style. As seen in other sections: + * the `|` in `case` never appears on the right + * `;` is always on the right when wrapping lists of assumptions + * `shows .. and ... and` wraps with `and` on the left + * `|` in method invocations always goes on the left + * commas and (semi)colons, owing to our natural language origins, always end up on the right\ + +lemma + shows + "\ A + ; B \ \ \wrong: always on right\ + \ \ok: \ can be either left or right\ + \ C" and \ \wrong: `shows/and` only on left!\ + "D" + and "E" \ \ok: on left\ +proof - + have "True \ True" + by (rule conjI, + blast, + blast) \ \ok\ + have "True \ True" + by (rule conjI + , blast + , blast) \ \NOT OK: commas go on right\ + have "True \ True" + by (rule conjI; + blast) \ \ok\ + have "True \ True" + by (rule conjI + ; blast) \ \NOT OK: semicolons go on right\ + have "True \ True" + by (rule conjI + | blast)+ \ \ok\ + have "True \ True" + by (rule conjI | + blast)+ \ \NOT OK: `|` goes on the left\ + oops + +text \ + The general principle of "nothing indented less than what it belongs to" is in effect for both + wrapping styles. Remember, the goal of the exercise is to make it as clear to read for others as + you can. Sometimes, scanning the left side of the screen to see the overall term can help, + while other times putting @{text \} on the right will save space and prevent subsequent lines + from wrapping.\ + +text \ + Inner-syntax indentation is not automatable in the general case, so our goal is to help + ease of comprehension as much as possible, i.e. 
+ @{term "A \ B \ C \ D \ E \ F"} is bearable if A..F are short, but if they are large terms, + please avoid doing either of these:\ + +term " + A \ + B \ + C \ + D \ + E \ + F" \ \avoid: requires building a parse tree in one's head\ + +term " + A + \ B + \ C + \ D + \ E + \ F" \ \can be marginally easier to scan, but still avoid due to mental parsing difficulty\ + +text \Instead, help out the reader like this:\ + +term " + A \ + B \ + C \ + D \ + E \ + F" + +term " + A + \ B + \ C + \ D + \ E + \ F" + +text \AVOID indentation that misrepresents the parse tree and confuses the reader:\ + +term " + A + \ B + \ C" \ \NOT OK: implies this parses as @{text "A \ (B \ C)"}\ + +term " + A \ + B \ + B \ + A" \ \NOT OK: implies this parses as @{text "((A \ B) \ B) \ A"}\ + section \Other\ text \ diff --git a/docs/setup.md b/docs/setup.md index d477d7b9ab..29abf3de03 100644 --- a/docs/setup.md +++ b/docs/setup.md @@ -91,12 +91,13 @@ pip3 install --user sel4-deps After installing [haskell-stack](https://docs.haskellstack.org/en/stable/) (already included in the packages above on Mac and Ubuntu), make sure you've -adjusted your `PATH` to include `$HOME/.local/bin`, and that you're running an -up-to-date version: +adjusted your `PATH` to include `$HOME/.local/bin`, that you're running an +up-to-date version, and that you have installed cabal: ```bash stack upgrade --binary-only which stack # should be $HOME/.local/bin/stack +stack install cabal-install ``` ## Checking out the repository collection diff --git a/lib/BCorres_UL.thy b/lib/BCorres_UL.thy index 6d62a32b0c..003df6db63 100644 --- a/lib/BCorres_UL.thy +++ b/lib/BCorres_UL.thy @@ -6,7 +6,7 @@ theory BCorres_UL imports - Monads.NonDetMonadVCG + Monads.Nondet_VCG Crunch_Instances_NonDet begin @@ -17,12 +17,12 @@ definition bcorres_underlying where "bcorres_underlying t f g \ \s. s_bcorres_underlying t f g s" lemma wpc_helper_bcorres: - "bcorres_underlying t f g \ wpc_helper (P, P') (Q, Q') (bcorres_underlying t f g)" - by (simp add: wpc_helper_def) + "bcorres_underlying t f g \ wpc_helper P Q (bcorres_underlying t f g)" + by (simp add: wpc_helper_def split: prod.split) lemma wpc_helper_s_bcorres: - "s_bcorres_underlying t f g s \ wpc_helper (P, P') (Q, Q') (s_bcorres_underlying t f g s)" - by (simp add: wpc_helper_def) + "s_bcorres_underlying t f g s \ wpc_helper P Q (s_bcorres_underlying t f g s)" + by (simp add: wpc_helper_def split: prod.split) wpc_setup "\f. bcorres_underlying t f g" wpc_helper_bcorres wpc_setup "\f. s_bcorres_underlying t f g s" wpc_helper_bcorres diff --git a/lib/Bisim_UL.thy b/lib/Bisim_UL.thy index bc4b981eea..4eb36999f6 100644 --- a/lib/Bisim_UL.thy +++ b/lib/Bisim_UL.thy @@ -8,9 +8,9 @@ theory Bisim_UL imports - Monads.NonDetMonadVCG + Monads.Nondet_VCG Corres_UL - Monads.Empty_Fail + Monads.Nondet_Empty_Fail begin (* This still looks a bit wrong to me, although it is more or less what I want \ we want to be @@ -159,7 +159,7 @@ lemma bisim_split_handle: (* Set up wpc *) lemma wpc_helper_bisim: - "bisim_underlying SR r Q Q' f f' \ wpc_helper (P, P') (Q, {s. Q' s}) (bisim_underlying SR r P (\s. 
s \ P') f f')" + "bisim_underlying SR r Q Q' f f' \ wpc_helper (P, P', P'') (Q, Q', Q'') (bisim_underlying SR r P P' f f')" apply (clarsimp simp: wpc_helper_def) apply (erule bisim_guard_imp) apply simp @@ -342,7 +342,7 @@ lemmas dets_to_det_on [wp] = det_det_on [OF det_gets] det_det_on [OF return_det] (* Set up wpc *) lemma wpc_helper_det_on: - "det_on Q f \ wpc_helper (P, P') (Q, Q') (det_on P f)" + "det_on Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (det_on P f)" apply (clarsimp simp: wpc_helper_def det_on_def) done @@ -426,7 +426,7 @@ lemma not_empty_gets [wp]: (* Set up wpc *) lemma wpc_helper_not_empty: - "not_empty Q f \ wpc_helper (P, P') (Q, Q') (not_empty P f)" + "not_empty Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (not_empty P f)" apply (clarsimp simp: wpc_helper_def not_empty_def) done diff --git a/lib/CorresK/CorresK_Lemmas.thy b/lib/CorresK/CorresK_Lemmas.thy index 1588afd6b9..df0a11085f 100644 --- a/lib/CorresK/CorresK_Lemmas.thy +++ b/lib/CorresK/CorresK_Lemmas.thy @@ -7,12 +7,12 @@ theory CorresK_Lemmas imports - "Lib.Corres_Method" + "Lib.CorresK_Method" "ExecSpec.Syscall_H" "ASpec.Syscall_A" begin -lemma corres_throwError_str [corres_concrete_rER]: +lemma corres_throwError_str [corresK_concrete_rER]: "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throw b)" "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throwError b)" by (simp add: corres_underlyingK_def)+ @@ -41,7 +41,7 @@ lemma mapME_x_corresK_inv: show ?case apply (simp add: mapME_x_def sequenceE_x_def) apply (fold mapME_x_def sequenceE_x_def dc_def) - apply (corressimp corresK: x IH wp: y) + apply (corresKsimp corresK: x IH wp: y) done qed done @@ -141,7 +141,7 @@ lemma corresK_mapM_list_all2: lemma corresK_discard_rv: assumes A[corresK]: "corres_underlyingK sr nf nf' F r' P P' a c" shows "corres_underlyingK sr nf nf' F dc P P' (do x \ a; return () od) (do x \ c; return () od)" - by corressimp + by corresKsimp lemma corresK_mapM_mapM_x: assumes "corres_underlyingK sr nf nf' F r' P P' (mapM f as) (mapM f' cs)" @@ -163,12 +163,12 @@ lemma corresK_subst_both: "g' = f' \ g = f \ lemma if_fun_true: "(if A then B else (\_. True)) = (\s. (A \ B s))" by simp -lemmas corresK_whenE [corres_splits] = +lemmas corresK_whenE [corresK_splits] = corresK_if[THEN corresK_subst_both[OF whenE_def[THEN meta_eq_to_obj_eq] whenE_def[THEN meta_eq_to_obj_eq]], OF _ corresK_returnOk[where r="f \ dc" for f], simplified, simplified if_fun_true] -lemmas corresK_head_splits[corres_splits] = +lemmas corresK_head_splits[corresK_splits] = corresK_split[where d="return", simplified] corresK_splitE[where d="returnOk", simplified] corresK_split[where b="return", simplified] @@ -192,7 +192,7 @@ lemmas [corresK] = corresK_Id[where nf'=True and r="(=)", simplified] corresK_Id[where nf'=True, simplified] -lemma corresK_unit_rv_eq_any[corres_concrete_r]: +lemma corresK_unit_rv_eq_any[corresK_concrete_r]: "corres_underlyingK sr nf nf' F r P P' f f' \ corres_underlyingK sr nf nf' F (\(x :: unit) (y :: unit). x = y) P P' f f'" @@ -201,7 +201,7 @@ lemma corresK_unit_rv_eq_any[corres_concrete_r]: apply simp done -lemma corresK_unit_rv_dc_any[corres_concrete_r]: +lemma corresK_unit_rv_dc_any[corresK_concrete_r]: "corres_underlyingK sr nf nf' F r P P' f f' \ corres_underlyingK sr nf nf' F (\(x :: unit) (y :: unit). 
dc x y) P P' f f'" diff --git a/lib/CorresK_Method.thy b/lib/CorresK_Method.thy new file mode 100644 index 0000000000..37e1219d49 --- /dev/null +++ b/lib/CorresK_Method.thy @@ -0,0 +1,1133 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory CorresK_Method +imports Corres_Cases SpecValid_R +begin + +(* Advanced Eisbach example for automating corres proofs via a new corresK calculus that improves + on some of properties that are problematic for automation in the original corres calculus. + + See also section 7.3 in + + Daniel Matichuk: Automation for proof engineering: Machine-checked proofs at scale, + PhD thesis, UNSW 2018. https://trustworthy.systems/publications/papers/Matichuk%3Aphd.abstract +*) + +chapter \CorresK Methods\ + +section \Boilerplate\ + +context begin + +private definition "my_true \ True" + +private lemma my_true: "my_true" by (simp add: my_true_def) + +method no_schematic_concl = (fails \rule my_true\) + +end + +definition + "corres_underlyingK sr nf nf' F r Q Q' f g \ + F \ corres_underlying sr nf nf' r Q Q' f g" + +lemma corresK_name_pre: + "\ \s s'. \ P s; P' s'; F; (s, s') \ sr \ + \ corres_underlyingK sr nf nf' F r ((=) s) ((=) s') f g \ + \ corres_underlyingK sr nf nf' F r P P' f g" + apply (clarsimp simp add: corres_underlyingK_def) + apply (rule corres_name_pre) + apply blast + done + +lemma corresK_assume_pre: + "\ \s s'. \ P s; P' s'; F; (s, s') \ sr \ + \ corres_underlyingK sr nf nf' F r P P' f g \ + \ corres_underlyingK sr nf nf' F r P P' f g" + apply (clarsimp simp add: corres_underlyingK_def) + apply (rule corres_assume_pre) + apply blast + done + +lemma corresK_drop_any_guard: + "corres_underlying sr nf nf' r Q Q' f g \ corres_underlyingK sr nf nf' F r Q Q' f g" + by (simp add: corres_underlyingK_def) + +lemma corresK_assume_guard: + "(F \ corres_underlying sr nf nf' r Q Q' f g) \ corres_underlyingK sr nf nf' F r Q Q' f g" + by (simp add: corres_underlyingK_def) + +lemma corresK_unlift: + "corres_underlyingK sr nf nf' F r Q Q' f g \ (F \ corres_underlying sr nf nf' r Q Q' f g)" + by (simp add: corres_underlyingK_def) + +lemma corresK_lift: + "corres_underlying sr nf nf' r Q Q' f g \ corres_underlyingK sr nf nf' F r Q Q' f g" + by (simp add: corres_underlyingK_def) + +lemma corresK_lift_rule: + "corres_underlying sr nf nf' r Q Q' f g \ corres_underlying sra nfa nfa' ra Qa Qa' fa ga + \ corres_underlyingK sr nf nf' F r Q Q' f g \ corres_underlyingK sra nfa nfa' F ra Qa Qa' fa ga" + by (simp add: corres_underlyingK_def) + +lemmas corresK_drop = corresK_drop_any_guard[where F=True] + +context begin + +lemma corresK_start: + assumes x: "corres_underlyingK sr nf nf' F r Q Q' f g" + assumes y: "\s s'. \ P s; P' s'; (s, s') \ sr \ \ F \ Q s \ Q' s'" + shows "corres_underlying sr nf nf' r P P' f g" + using x by (auto simp: y corres_underlying_def corres_underlyingK_def) + +lemma corresK_weaken: + assumes x: "corres_underlyingK sr nf nf' F' r Q Q' f g" + assumes y: "\s s'. 
\ P s; P' s'; F; (s, s') \ sr \ \ F' \ Q s \ Q' s'" + shows "corres_underlyingK sr nf nf' F r P P' f g" + using x by (auto simp: y corres_underlying_def corres_underlyingK_def) + +private lemma corres_trivial: + "False \ corres_underlying sr nf nf' r P P' f f'" + by simp + +method check_corres = + (succeeds \rule corres_trivial\, fails \rule TrueI\) + +private lemma corresK_trivial: + "False \ corres_underlyingK sr nf nf' F r P P' f f'" + by simp + +(* Ensure we don't apply calculational rules if either function is schematic *) + +private definition "dummy_fun \ undefined" + +private lemma corresK_dummy_left: + "False \ corres_underlyingK sr nf nf' F r P P' dummy_fun f'" + by simp + +private lemma corresK_dummy_right: + "False \ corres_underlyingK sr nf nf' F r P P' f dummy_fun" + by simp + +method check_corresK = + (succeeds \rule corresK_trivial\, fails \rule corresK_dummy_left corresK_dummy_right\) + +private definition "my_false s \ False" + +private + lemma corres_my_falseE: "my_false x \ P" by (simp add: my_false_def) + +private method no_schematic_prems = (fails \erule corres_my_falseE\) + +private lemma hoare_pre: "\my_false\ f \Q\" by (simp add: valid_def my_false_def) +private lemma hoareE_pre: "\my_false\ f \Q\,\Q'\" by (simp add: validE_def valid_def my_false_def) +private lemma hoare_E_E_pre: "\my_false\ f -,\Q\" by (simp add: validE_E_def validE_def valid_def my_false_def) +private lemma hoare_E_R_pre: "\my_false\ f \Q\,-" by (simp add: validE_R_def validE_def valid_def my_false_def) + +private lemmas hoare_pres = hoare_pre hoare_pre hoare_E_E_pre hoare_E_R_pre + +method schematic_hoare_pre = (succeeds \rule hoare_pres\) + +private + lemma corres_my_false: "corres_underlying sr nf nf' r my_false P f f'" + "corres_underlying sr nf nf' r P' my_false f f'" + by (auto simp add: my_false_def[abs_def] corres_underlying_def) + +private + lemma corresK_my_false: "corres_underlyingK sr nf nf' F r my_false P f f'" + "corres_underlyingK sr nf nf' F r P' my_false f f'" + by (auto simp add: corres_my_false corres_underlyingK_def) + + +method corresK_raw_pre = + (check_corres, (fails \rule corres_my_false\, rule corresK_start)?) + +lemma corresK_weaken_states: + "corres_underlyingK sr nf nf' F r Q Q' f g \ + corres_underlyingK sr nf nf' (F \ (\s s'. P s \ P' s' \ (s, s') \ sr \ Q s \ Q' s')) + r P P' f g" + apply (erule corresK_weaken) + apply simp + done + +private lemma + corresK_my_falseF: + "corres_underlyingK sr nf nf' (my_false undefined) r P P' f f'" + by (simp add: corres_underlyingK_def my_false_def) + +method corresK_pre = + (check_corresK, + (fails \rule corresK_my_false\, + ((succeeds \rule corresK_my_falseF\, rule corresK_weaken_states) | + rule corresK_weaken))) + +method corresK_pre' = (corresK_raw_pre | corresK_pre)? + +lemma corresK_weakenK: + "corres_underlyingK sr nf nf' F' r P P' f f' \ (F \ F') \ corres_underlyingK sr nf nf' F r P P' f f'" + by (simp add: corres_underlyingK_def) + +(* Special corres rules which should only be applied when the return value relation is + concrete, to avoid bare schematics. *) + +named_theorems corresK_concrete_r and corresK_concrete_rER + +private lemma corres_r_False: + "False \ corres_underlyingK sr nf nf' F (\_. my_false) P P' f f'" + by simp + +private lemma corres_r_FalseE: + "False \ corres_underlyingK sr nf nf' F ((\_. my_false) \ r) P P' f f'" + by simp + +private lemma corres_r_FalseE': + "False \ corres_underlyingK sr nf nf' F (r \ (\_. 
my_false)) P P' f f'" + by simp + +method corresK_concrete_r declares corresK_concrete_r corresK_concrete_rER = + (fails \rule corres_r_False corres_r_FalseE corres_r_FalseE'\, determ \rule corresK_concrete_r\) + | (fails \rule corres_r_FalseE\, determ \rule corresK_concrete_rER\) + + +end + + +section \CorresKc - Corres over case statements\ + +text + \Based on wpc, corresKc examines the split rule for top-level case statements on the left + and right hand sides, propagating backwards the stateless and left/right preconditions.\ + +definition + wpc2_helper :: "(('a \ bool) \ 'b set) + \ (('a \ bool) \ 'b set) \ (('a \ bool) \ 'b set) + \ (('a \ bool) \ 'b set) \ bool \ bool" where + "wpc2_helper \ \(P, P') (Q, Q') (PP, PP') (QQ,QQ') R. + ((\s. P s \ Q s) \ P' \ Q') \ ((\s. PP s \ QQ s) \ PP' \ QQ') \ R" + +definition + "wpc2_protect B Q \ (Q :: bool)" + +lemma wpc2_helperI: + "wpc2_helper (P, P') (P, P') (PP, PP') (PP, PP') Q \ Q" + by (simp add: wpc2_helper_def) + +lemma wpc2_conj_process: + "\ wpc2_helper (P, P') (A, A') (PP, PP') (AA, AA') C; wpc2_helper (P, P') (B, B') (PP, PP') (BB, BB') D \ + \ wpc2_helper (P, P') (\s. A s \ B s, A' \ B') (PP, PP') (\s. AA s \ BB s, AA' \ BB') (C \ D)" + by (clarsimp simp add: wpc2_helper_def) + +lemma wpc2_all_process: + "\ \x. wpc2_helper (P, P') (Q x, Q' x) (PP, PP') (QQ x, QQ' x) (R x) \ + \ wpc2_helper (P, P') (\s. \x. Q x s, {s. \x. s \ Q' x}) (PP, PP') (\s. \x. QQ x s, {s. \x. s \ QQ' x}) (\x. R x)" + by (clarsimp simp: wpc2_helper_def subset_iff) + +lemma wpc2_imp_process: + "\ wpc2_protect B Q \ wpc2_helper (P, P') (R, R') (PP, PP') (RR, RR') S \ + \ wpc2_helper (P, P') (\s. Q \ R s, {s. Q \ s \ R'}) (PP, PP') (\s. Q \ RR s, {s. Q \ s \ RR'}) (Q \ S)" + by (clarsimp simp add: wpc2_helper_def subset_iff wpc2_protect_def) + + + +text \ + Generate quadratic blowup of the case statements on either side of refinement. + Attempt to discharge resulting contradictions. +\ + +context +begin + +private method corresKc_body for B :: bool uses helper = + determ \(rule wpc2_helperI, + repeat_new \rule wpc2_conj_process wpc2_all_process wpc2_imp_process[where B=B]\ ; (rule helper))\ + +lemma wpc2_helper_corres_left: + "corres_underlyingK sr nf nf' QQ r Q A f f' \ + wpc2_helper (P, P') (Q, Q') (\_. PP,PP') (\_. QQ,QQ') (corres_underlyingK sr nf nf' PP r P A f f')" + by (clarsimp simp: wpc2_helper_def corres_underlyingK_def elim!: corres_guard_imp) + +private method corresKc_left_raw = + determ \(match conclusion in "corres_underlyingK sr nf nf' F r P P' f f'" for sr nf nf' F r P P' f f' + \ \apply_split f "\f. corres_underlyingK sr nf nf' F r P P' f f'"\, + corresKc_body False helper: wpc2_helper_corres_left)\ + +lemma wpc2_helper_corres_right: + "corres_underlyingK sr nf nf' QQ r A Q f f' \ + wpc2_helper (P, P') (Q, Q') (\_. PP,PP') (\_. QQ,QQ') (corres_underlyingK sr nf nf' PP r A P f f')" + by (clarsimp simp: wpc2_helper_def corres_underlyingK_def elim!: corres_guard_imp) + +private method corresKc_right_raw = + determ \(match conclusion in "corres_underlyingK sr nf nf' F r P P' f f'" for sr nf nf' F r P P' f f' + \ \apply_split f' "\f'. 
corres_underlyingK sr nf nf' F r P P' f f'"\, + corresKc_body True helper: wpc2_helper_corres_right)\ + +definition + "corres_protect r = (r :: bool)" + +lemma corres_protect_conj_elim[simp]: + "corres_protect (a \ b) = (corres_protect a \ corres_protect b)" + by (simp add: corres_protect_def) + +lemma wpc2_corres_protect: + "wpc2_protect B Q \ corres_protect Q" + by (simp add: wpc2_protect_def corres_protect_def) + +method corresKc_left = (corresKc_left_raw; (drule wpc2_corres_protect[where B=False])) +method corresKc_right = (corresKc_right_raw; (drule wpc2_corres_protect[where B=True])) + +named_theorems corresKc_simp + +declare wpc2_protect_def[corresKc_simp] +declare corres_protect_def[corresKc_simp] + +lemma corresK_false_guard_instantiate: + "False \ corres_underlyingK sr nf nf' True r P P' f f'" + by (simp add: corres_underlyingK_def) + +lemma + wpc_contr_helper: + "wpc2_protect False (A = B) \ wpc2_protect True (A = C) \ B \ C \ P" + by (auto simp: wpc2_protect_def) + +method corresKc declares corresKc_simp = + (check_corresK, corresKc_left_raw; corresKc_right_raw; + ((solves \rule corresK_false_guard_instantiate, + determ \(erule (1) wpc_contr_helper)?\, simp add: corresKc_simp\) + | (drule wpc2_corres_protect[where B=False], drule wpc2_corres_protect[where B=True])))[1] + +end + +section \CorresK_rv\ + +text \CorresK_rv is used to propagate backwards the stateless precondition (F) from corres_underlyingK. + Its main purpose is to defer the decision of where each condition should go: either continue + through the stateless precondition, or be pushed into the left/right side as a hoare triple.\ + + +(*Don't unfold the definition. Use corresK_rv method or associated rules. *) +definition corres_rv :: "bool \ ('a \ 'b \ bool) \ ('s \ bool) \ ('t \ bool) + \ ('s, 'a) nondet_monad \ ('t, 'b) nondet_monad \ + ('a \ 'b \ bool) \ bool" + where + "corres_rv F r P P' f f' Q \ + F \ (\s s'. P s \ P' s' \ + (\sa rv. (rv, sa) \ fst (f s) \ (\sa' rv'. (rv', sa') \ fst (f' s') \ r rv rv' \ Q rv rv')))" + +(*Don't unfold the definition. Use corresK_rv method or associated rules. *) +definition "corres_rvE_R F r P P' f f' Q \ + corres_rv F (\_ _. True) P P' f f' + (\rvE rvE'. case (rvE,rvE') of (Inr rv, Inr rv') \ r rv rv' \ Q rv rv' | _ \ True)" + +lemma corres_rvD: + "corres_rv F r P P' f f' Q \ + F \ P s \ P' s' \ (rv,sa) \ fst (f s) \ + (rv',sa') \ fst (f' s') \ r rv rv' \ Q rv rv'" + by (auto simp add: corres_rv_def) + +lemma corres_rvE_RD: + "corres_rvE_R F r P P' f f' Q \ + F \ P s \ P' s' \ (Inr rv,sa) \ fst (f s) \ + (Inr rv',sa') \ fst (f' s') \ r rv rv' \ Q rv rv'" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemma corres_rv_prove: + "(\s s' sa sa' rv rv'. F \ + (rv,sa) \ fst (f s) \ (rv',sa') \ fst (f' s') \ P s \ P' s' \ r rv rv' \ Q rv rv') \ + corres_rv F r P P' f f' Q" + by (auto simp add: corres_rv_def) + +lemma corres_rvE_R_prove: + "(\s s' sa sa' rv rv'. F \ + (Inr rv,sa) \ fst (f s) \ (Inr rv',sa') \ fst (f' s') \ P s \ P' s' \ r rv rv' \ Q rv rv') \ + corres_rvE_R F r P P' f f' Q" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemma corres_rv_wp_left: + "\P\ f \\rv s. \rv'. r rv rv' \ Q rv rv'\ \ corres_rv True r P \ f f' Q" + by (fastforce simp add: corres_rv_def valid_def) + +lemma corres_rvE_R_wp_left: + "\P\ f \\rv s. \rv'. 
r rv rv' \ Q rv rv'\, - \ corres_rvE_R True r P \ f f' Q" + apply (simp add: corres_rvE_R_def validE_def validE_R_def) + apply (rule corres_rv_wp_left) + apply (erule hoare_strengthen_post) + apply (auto split: sum.splits) + done + +lemma corres_rv_wp_right: + "\P'\ f' \\rv' s. \rv. r rv rv' \ Q rv rv'\ \ corres_rv True r \ P' f f' Q" + by (fastforce simp add: corres_rv_def valid_def) + +lemma corres_rvE_R_wp_right: + "\P'\ f' \\rv' s. \rv. r rv rv' \ Q rv rv'\, - \ corres_rvE_R True r \ P' f f' Q" + apply (simp add: corres_rvE_R_def validE_def validE_R_def) + apply (rule corres_rv_wp_right) + apply (erule hoare_strengthen_post) + apply (auto split: sum.splits) + done + +lemma corres_rv_weaken: + "(\rv rv'. Q rv rv' \ Q' rv rv') \ corres_rv F r P P' f f' Q \ corres_rv F r P P' f f' Q'" + by (auto simp add: corres_rv_def) + +lemma corres_rvE_R_weaken: + "(\rv rv'. Q rv rv' \ Q' rv rv') \ corres_rvE_R F r P P' f f' Q \ corres_rvE_R F r P P' f f' Q'" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemma corres_rv_defer_no_args: + "corres_rv (\rv rv'. r rv rv' \ F) r (\_. True) (\_. True) f f' (\_ _. F)" + by (auto simp add: corres_rv_def) + +lemma corres_rvE_R_defer_no_args: + "corres_rvE_R (\rv rv'. r rv rv' \ F) r (\_. True) (\_. True) f f' (\_ _. F)" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +(*UNSAFE*) +lemma corres_rv_defer: + "corres_rv (\rv rv'. r rv rv' \ Q rv rv') r (\_. True) (\_. True) f f' Q" + by (auto simp add: corres_rv_def) + +(*UNSAFE*) +lemma corres_rvE_R_defer: + "corres_rvE_R (\rv rv'. r rv rv' \ Q rv rv') r (\_. True) (\_. True) f f' Q" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemmas corres_rv_proveT = + corres_rv_prove[where P=\ and P'=\ and F=True, simplified] + +lemmas corres_rvE_R_proveT = + corres_rvE_R_prove[where P=\ and P'=\ and F=True,simplified] + +lemma corres_rv_conj_lift: + "corres_rv F r P PP f g Q \ corres_rv F' r P' PP' f g Q' \ + corres_rv (F \ F') r (\s. P s \ P' s) (\s'. PP s' \ PP' s') f g (\rv rv'. Q rv rv' \ Q' rv rv')" + by (clarsimp simp add: corres_rv_def) + +lemma corres_rvE_R_conj_lift: + "corres_rvE_R F r P PP f g Q \ corres_rvE_R F' r P' PP' f g Q' \ + corres_rvE_R (F \ F') r (\s. P s \ P' s) (\s'. PP s' \ PP' s') f g (\rv rv'. Q rv rv' \ Q' rv rv')" + by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +subsection \CorresK_rv method\ + +text \This method propagate corresK_rv obligations into each precondition according to the following +heuristic: + For each conjunct in the obligation: + + 1) Try to solve trivially (to handle schematic conditions) + 2) If it does not depend on function return values, propagate it as a stateless precondition + 3) If either side is a corres_noop (used by symbolic execution), propagate as hoare triple + for other side. + 4) If it can be phrased entirely with variables accessible to the left side, propagate it as + a left hoare triple. + 5) As in 3) but on the right. + + Fail if any of 1-5 are unsuccessful for any conjunct. + +In the case where corres_rv fails, the user will need to intervene, either +by specifying where to defer the obligation or solving the goal in-place. +\ + +definition "corres_noop = return undefined" + +context begin + +private lemma corres_rv_defer_left: + "corres_rv F r (\_. \rv rv'. Q rv rv') P' f f' Q" + by (simp add: corres_rv_def) + +private lemma corres_rvE_R_defer_left: + "corres_rvE_R F r (\_. \rv rv'. 
Q rv rv') P' f f' Q" + by (simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +private lemma corres_rv_defer_right: + "corres_rv F r P (\_. \rv rv'. Q rv rv') f f' Q" + by (simp add: corres_rv_def) + +private lemma corres_rvE_R_defer_right: + "corres_rvE_R F r P (\_. \rv rv'. Q rv rv') f f' Q" + by (simp add: corres_rv_def corres_rvE_R_def split: sum.splits) + +lemmas corres_rv_proves = + corres_rv_proveT corres_rvE_R_proveT + +(* Try to handle cases where corres_rv obligations have been left schematic *) +lemmas corres_rv_trivials = + corres_rv_proves[where Q="\_ _. True", OF TrueI] + corres_rv_proves[where Q="\rv rv'. F rv rv' \ True" for F, # \simp\] + corres_rv_proves[where Q=r and r=r for r, # \simp\] + +lemmas corres_rv_defers = + corres_rv_defer_no_args corres_rvE_R_defer_no_args + +lemmas corres_rv_wp_lefts = + corres_rv_wp_left corres_rvE_R_wp_left + +lemmas corres_rv_wp_rights = + corres_rv_wp_right corres_rvE_R_wp_right + +lemmas corres_rv_noops = + corres_rv_wp_lefts[where f'=corres_noop] corres_rv_wp_rights[where f=corres_noop] + +lemmas corres_rv_lifts' = + corres_rv_conj_lift corres_rvE_R_conj_lift + +lemmas corres_rv_lifts = + corres_rv_lifts' + corres_rv_lifts'[where P="\_. True" and P'="\_. True" and f="corres_noop", simplified] + corres_rv_lifts'[where PP="\_. True" and PP'="\_. True" and g="corres_noop", simplified] + +lemmas corres_rv_prove_simple = + corres_rv_proveT[# \thin_tac _, thin_tac _\, simplified] + +method corresK_rv = + (((repeat_new \rule corres_rv_trivials corres_rv_lifts\)?); + ((rule corres_rv_trivials corres_rv_defers corres_rv_noops | + (succeeds \rule corres_rv_defer_left corres_rvE_R_defer_left\, + rule corres_rv_wp_lefts) | + (succeeds \rule corres_rv_defer_right corres_rvE_R_defer_right\, + rule corres_rv_wp_rights)))) + +end + + +section \CorresK Split rules\ + +text \ + The corresK split allows preconditions to be propagated backward via the extra stateless precondition + (here given as @{term F}. The head function is propagated backward directly, while the tail + is propagated via corres_rv. Using the corresK_rv method, this condition is then decomposed and + pushed into the stateless, left, and right preconditions as appropriate. + + The return value relation is now almost never needed directly, and so it is wrapped in corres_protect + to prevent it from being used during simplification. + \ + +lemma corresK_split: + assumes x: "corres_underlyingK sr nf nf' F r' P P' a c" + assumes y: "\rv rv'. corres_protect (r' rv rv') \ corres_underlyingK sr nf nf' (F' rv rv') r (R rv) (R' rv') (b rv) (d rv')" + assumes c: "corres_rv F'' r' PP PP' a c F'" + assumes z: "\Q\ a \R\" "\Q'\ c \R'\" + shows "corres_underlyingK sr nf nf' (F \ F'') r (PP and P and Q) (PP' and P' and Q') (a >>= (\rv. b rv)) (c >>= (\rv'. 
d rv'))" + apply (clarsimp simp: corres_underlying_def corres_underlyingK_def bind_def) + apply (rule conjI) + apply (frule (3) x[simplified corres_underlyingK_def, rule_format, THEN corres_underlyingD],simp) + apply clarsimp + apply (drule(1) bspec,clarsimp) + apply (drule (5) corres_rvD[OF c]) + apply (rule_tac x="(ac,bc)" in bexI,clarsimp) + apply (frule_tac s'=baa in y[simplified corres_underlyingK_def corres_protect_def, rule_format, THEN corres_underlyingD]) + apply assumption+ + apply (erule(1) use_valid[OF _ z(1)]) + apply (erule(1) use_valid[OF _ z(2)]) + apply fastforce + apply clarsimp + apply (drule(1) bspec,clarsimp) + apply simp + apply (frule (3) x[simplified corres_underlyingK_def, rule_format, THEN corres_underlyingD],simp) + apply clarsimp + apply (drule(1) bspec,clarsimp) + apply (drule (5) corres_rvD[OF c]) + apply (frule_tac s'=baa in y[simplified corres_underlyingK_def corres_protect_def, rule_format, THEN corres_underlyingD]) + apply simp+ + apply (erule(1) use_valid[OF _ z(1)]) + apply (erule(1) use_valid[OF _ z(2)]) + apply fastforce + apply clarsimp + done + +section \CorresK_inst\ + +text \Handles rare in-place subgoals generated by corres rules which need to be solved immediately + in order to instantiate a schematic. + We peek into the generated return-value relation to see if we can solve the instantiation. +\ + +definition "corres_inst_eq x y \ x = y" + +lemma corres_inst_eqI[wp]: "corres_inst_eq x x" by (simp add: corres_inst_eq_def) + +lemma corres_inst_test: "False \ corres_inst_eq x y" by simp + +method corresK_inst = + (succeeds \rule corres_inst_test\, fails \rule TrueI\, + (rule corres_inst_eqI | + (clarsimp simp: corres_protect_def split del: if_split, rule corres_inst_eqI) + | (clarsimp simp: corres_protect_def split del: if_split, + fastforce intro!: corres_inst_eqI)))[1] + +section \CorresK Method\ + +text \Handles structured decomposition of corres goals\ + +named_theorems + corresK_splits and (* rules that, one applied, must + eventually yield a successful corres or corresK rule application*) + corresK (* calculational rules that are phrased as corresK rules *) + +context begin + +lemma corresK_fold_dc: + "corres_underlyingK sr nf nf' F dc P P' f f' \ corres_underlyingK sr nf nf' F (\_ _. True) P P' f f'" + by (simp add: dc_def[abs_def]) + +private method corresK_fold_dc = + (match conclusion in + "corres_underlyingK _ _ _ _ (\_ _. True) _ _ _ _" \ \rule corresK_fold_dc\) + +section \CorresK_apply method\ + +text \This is a private method that performs an in-place rewrite of corres rules into + corresK rules. This is primarily for backwards-compatibility with the existing corres proofs. + Works by trying to apply a corres rule, then folding the resulting subgoal state into a single + conjunct and atomizing it, then propagating the result into the stateless precondition. 
+\ + +private definition "guard_collect (F :: bool) \ F" +private definition "maybe_guard F \ True" + +private lemma corresK_assume_guard_guarded: + "(guard_collect F \ corres_underlying sr nf nf' r Q Q' f g) \ + maybe_guard F \ corres_underlyingK sr nf nf' F r Q Q' f g" + by (simp add: corres_underlyingK_def guard_collect_def) + +private lemma guard_collect: "guard_collect F \ F" + by (simp add: guard_collect_def) + +private lemma has_guard: "maybe_guard F" by (simp add: maybe_guard_def) +private lemma no_guard: "maybe_guard True" by (simp add: maybe_guard_def) + +private method corresK_apply = + (rule corresK_assume_guard_guarded, + (determ \rule corres\, safe_fold_subgoals)[1], + #break "corres_apply", + ((focus_concl \(atomize (full))?\, erule guard_collect, rule has_guard) | rule no_guard))[1] + +private method corresK_alternate = corresK_inst | corresK_rv + +(* Corres_Method and CorresK_Method share the [corres] set. Corres_Method is more resilient against + unsafe terminal rules that set, so we list those [corres] rules here that might be problematic + for corresK. Users shouldn't need to interact with this set, but if you have declared something + [corres] and want it to be used by the corres method only (not corresK), then additionally + declare it [corres_unsafeK]. *) +named_theorems corres_unsafeK +lemmas [corres_unsafeK] = + whenE_throwError_corres + corres_if2 + corres_when + corres_whenE + corres_split_handle + corres_split_catch + corres_mapM_x + +method corresK_once declares corresK_splits corres corresK corresKc_simp = + use corres_unsafeK[corres del] in \use in \ + (no_schematic_concl, + (corresK_alternate | + (corresK_fold_dc?, + (corresK_pre', + #break "corres", + ( (check_corresK, determ \rule corresK\) + | corresK_apply + | corresK_concrete_r + | corresKc + | (rule corresK_splits, corresK_once) + )))))\\ + + +method corresK declares corresK_splits corres corresK corresKc_simp = + (corresK_once+)[1] + +text \Unconditionally try applying split rules. Useful for determining why corres is not applying + in a given proof.\ + +method corresK_unsafe_split declares corresK_splits corres corresK corresKc_simp = + ((rule corresK_splits | corresK_pre' | corresK_once)+)[1] + +end + +lemmas [corresK_splits] = + corresK_split + +lemma corresK_when [corresK_splits]: + "\corres_protect G \ corres_protect G' \ corres_underlyingK sr nf nf' F dc P P' a c\ +\ corres_underlyingK sr nf nf' ((G = G') \ F) dc ((\x. G \ P x)) (\x. G' \ P' x) (when G a) (when G' c)" + apply (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) + apply (cases "G = G'"; cases G; simp) + by (clarsimp simp: return_def) + +lemma corresK_return_trivial: + "corres_underlyingK sr nf nf' True dc (\_. True) (\_. True) (return ()) (return ())" + by (simp add: corres_underlyingK_def) + +lemma corresK_return_eq: + "corres_underlyingK sr nf nf' True (=) (\_. True) (\_. 
True) (return x) (return x)" + by (simp add: corres_underlyingK_def) + +lemma corres_lift_to_K: + "corres_underlying sra nfa nf'a ra Pa P'a fa f'a \ corres_underlying sr nf nf' r P P' f f' \ + corres_underlyingK sra nfa nf'a F ra Pa P'a fa f'a \ corres_underlyingK sr nf nf' F r P P' f f'" + by (simp add: corres_underlyingK_def) + +lemmas [THEN iffD2, atomized, THEN corresK_lift_rule, rule_format, simplified o_def, corresK_splits] = + corres_liftE_rel_sum + corres_liftM_simp + corres_liftM2_simp + + +lemmas [corresK] = + corresK_return_trivial + corresK_return_eq + +lemma corresK_subst_left: "g = f \ + corres_underlyingK sr nf nf' F r P P' f f' \ + corres_underlyingK sr nf nf' F r P P' g f'" by simp + +lemma corresK_subst_right: "g' = f' \ + corres_underlyingK sr nf nf' F r P P' f f' \ + corres_underlyingK sr nf nf' F r P P' f g'" by simp + +lemmas corresK_fun_app_left[corresK_splits] = corresK_subst_left[OF fun_app_def[THEN meta_eq_to_obj_eq]] +lemmas corresK_fun_app_right[corresK_splits] = corresK_subst_right[OF fun_app_def[THEN meta_eq_to_obj_eq]] + +lemmas corresK_Let_left[corresK_splits] = corresK_subst_left[OF Let_def[THEN meta_eq_to_obj_eq]] +lemmas corresK_Let_right[corresK_splits] = corresK_subst_right[OF Let_def[THEN meta_eq_to_obj_eq]] + +lemmas corresK_return_bind_left[corresK_splits] = corresK_subst_left[OF return_bind] +lemmas corresK_return_bind_right[corresK_splits] = corresK_subst_right[OF return_bind] + +lemmas corresK_liftE_bindE_left[corresK_splits] = corresK_subst_left[OF liftE_bindE] +lemmas corresK_liftE_bindE_right[corresK_splits] = corresK_subst_right[OF liftE_bindE] + +lemmas corresK_K_bind_left[corresK_splits] = + corresK_subst_left[where g="K_bind f rv" and f="f" for f rv, # \simp\] + +lemmas corresK_K_bind_right[corresK_splits] = + corresK_subst_right[where g'="K_bind f' rv" and f'="f'" for f' rv, # \simp\] + + +section \CorresK Search - find symbolic execution path that allows a given rule to be applied\ + +lemma corresK_if [corresK_splits]: + "\(corres_protect G \ corres_protect G' \ corres_underlyingK sr nf nf' F r P P' a c); + (corres_protect (\G) \ corres_protect (\G') \ corres_underlyingK sr nf nf' F' r Q Q' b d)\ +\ corres_underlyingK sr nf nf' ((G = G') \ (G \ F) \ (\G \ F')) r (if G then P else Q) (if G' then P' else Q') (if G then a else b) + (if G' then c else d)" + by (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) + +lemma corresK_if_rev: + "\(corres_protect (\ G) \ corres_protect G' \ corres_underlyingK sr nf nf' F r P P' a c); + (corres_protect G \ corres_protect (\G') \ corres_underlyingK sr nf nf' F' r Q Q' b d)\ +\ corres_underlyingK sr nf nf' ((\ G = G') \ (\G \ F) \ (G \ F')) r (if G then Q else P) (if G' then P' else Q') (if G then b else a) + (if G' then c else d)" + by (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) + + + +named_theorems corresK_symb_exec_ls and corresK_symb_exec_rs + +lemma corresK_symb_exec_l_search[corresK_symb_exec_ls]: + fixes x :: "'b \ 'a \ ('d \ 'a) set \ bool" + notes [simp] = corres_noop_def + shows + "\\s. \PP s\ m \\_. (=) s\; \rv. corres_underlyingK sr nf True (F rv) r (Q rv) P' (x rv) y; + corres_rv F' dc RR (\_. True) m (corres_noop) (\rv _. F rv); + empty_fail m; no_fail P m; \R\ m \Q\\ +\ corres_underlyingK sr nf True F' r (RR and P and R and (\s. \s'. 
s = s' \ PP s' s)) P' (m >>= x) y" + apply (clarsimp simp add: corres_underlyingK_def) + apply (rule corres_name_pre) + apply (clarsimp simp: corres_underlying_def corres_underlyingK_def + bind_def valid_def empty_fail_def no_fail_def) + apply (drule_tac x=a in meta_spec)+ + apply (drule_tac x=a in spec)+ + apply (drule mp, assumption)+ + apply (clarsimp simp: not_empty_eq) + apply (drule corres_rvD; (assumption | simp add: return_def)?) + apply (drule_tac x="(aa,ba)" in bspec,simp)+ + apply clarsimp + apply (drule_tac x=aa in meta_spec) + apply clarsimp + apply (drule_tac x="(ba,b)" in bspec,simp) + apply clarsimp + apply (drule mp, fastforce) + apply clarsimp + apply (drule_tac x="(a,bb)" in bspec,simp) + apply clarsimp + apply (rule_tac x="(aa,ba)" in bexI) + apply (clarsimp) + apply (rule_tac x="(ab,bc)" in bexI) + apply (clarsimp)+ + done + + +lemmas corresK_symb_exec_liftME_l_search[corresK_symb_exec_ls] = + corresK_symb_exec_l_search[where 'd="'x + 'y", folded liftE_bindE] + +lemma corresK_symb_exec_r_search[corresK_symb_exec_rs]: + fixes y :: "'b \ 'a \ ('e \ 'a) set \ bool" + assumes X: "\s. \PP' s\ m \\r. (=) s\" + assumes corres: "\rv. corres_underlyingK sr nf nf' (F rv) r P (Q' rv) x (y rv)" + assumes Y: "corres_rv F' dc (\_. True) RR (corres_noop) m (\_ rv'. F rv')" + assumes nf: "nf' \ no_fail P' m" + assumes Z: "\R\ m \Q'\" + notes [simp] = corres_noop_def + shows + "corres_underlyingK sr nf nf' F' r P (RR and P' and R and (\s. \s'. s = s' \ PP' s' s)) x (m >>= y)" + apply (insert corres) + apply (simp add: corres_underlyingK_def) + apply (rule impI) + apply (rule corres_name_pre) + apply (clarsimp simp: corres_underlying_def corres_underlyingK_def + bind_def valid_def empty_fail_def no_fail_def) + apply (intro impI conjI ballI) + apply clarsimp + apply (frule(1) use_valid[OF _ X]) + apply (drule corres_rvD[OF Y]; (assumption | simp add: return_def)?) + apply (frule(1) use_valid[OF _ Z]) + apply (drule_tac x=aa in meta_spec) + apply clarsimp + apply (drule_tac x="(a, ba)" in bspec,simp) + apply (clarsimp) + apply (drule(1) bspec) + apply clarsimp + apply clarsimp + apply (frule(1) use_valid[OF _ X]) + apply (drule corres_rvD[OF Y]; (assumption | simp add: return_def)?) + apply (frule(1) use_valid[OF _ Z]) + apply fastforce + apply (rule no_failD[OF nf],simp+) + done + +lemmas corresK_symb_exec_liftME_r_search[corresK_symb_exec_rs] = + corresK_symb_exec_r_search[where 'e="'x + 'y", folded liftE_bindE] + +context begin + +private method corresK_search_wp = solves \((wp | wpc | simp)+)[1]\ + +text \ + Depth-first search via symbolic execution of both left and right hand + sides, handling case statements and + potentially mismatched if statements. The find_goal + method handles searching each branch of case or if statements, while + we rely on backtracking to guess the order of left/right executions. + + According to the above rules, a symbolic execution step can be taken + when the function can be shown to not modify the state. Questions + of wellformedness (i.e. empty_fail or no_fail) are deferred to the user + after the search concludes. 
+\ + + +private method corresK_search_frame methods m uses search = + (#break "corresK_search", + ((corresK?, corresK_once corres: search corresK:search) + | (corresKc, find_goal \m\)[1] + | (rule corresK_if, find_goal \m\)[1] + | (rule corresK_if_rev, find_goal \m\)[1] + | (rule corresK_symb_exec_ls, corresK_search_wp, m) + | (rule corresK_symb_exec_rs, corresK_search_wp, m))) + +text \ + Set up local context where we make sure we don't know how to + corres our given rule. The search is finished when we can only + make corres progress once we add our rule back in +\ + +method corresK_search uses search + declares corres corresK_symb_exec_ls corresK_symb_exec_rs = + (corresK_pre', + use search[corres del] search[corresK del] search[corresK_splits del] in + \use in \corresK_search_frame \corresK_search search: search\ search: search\\)[1] + +end + +chapter \Misc Helper Lemmas\ + + +lemma corresK_assert[corresK]: + "corres_underlyingK sr nf nf' ((nf' \ Q) \ P) dc \ \ (assert P) (assert Q)" + by (auto simp add: corres_underlyingK_def corres_underlying_def return_def assert_def fail_def) + +lemma corres_stateAssert_implied_frame: + assumes A: "\s s'. (s, s') \ sr \ F' \ P' s \ Q' s' \ A s'" + assumes C: "\x. corres_underlyingK sr nf nf' F r P Q f (g x)" + shows + "corres_underlyingK sr nf nf' (F \ F') r (P and P') (Q and Q') f (stateAssert A [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply (corresK_search search: C[THEN corresK_unlift]) + apply (wp corres_rv_defer | simp add: A)+ + done + +lemma corresK_return [corresK_concrete_r]: + "corres_underlyingK sr nf nf' (r a b) r \ \ (return a) (return b)" + by (simp add: corres_underlyingK_def) + +lemma corres_throwError_str [corresK_concrete_rER]: + "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throwError b)" + by (simp add: corres_underlyingK_def)+ + +section \Error Monad\ + + + +lemma corresK_splitE [corresK_splits]: + assumes x: "corres_underlyingK sr nf nf' F (f \ r') P P' a c" + assumes y: "\rv rv'. corres_protect (r' rv rv') \ corres_underlyingK sr nf nf' (F' rv rv') (f \ r) (R rv) (R' rv') (b rv) (d rv')" + assumes c: "corres_rvE_R F'' r' PP PP' a c F'" + assumes z: "\Q\ a \R\, -" "\Q'\ c \R'\, -" + shows "corres_underlyingK sr nf nf' (F \ F'') (f \ r) (PP and P and Q) (PP' and P' and Q') (a >>=E (\rv. b rv)) (c >>=E (\rv'. d rv'))" + unfolding bindE_def + apply (rule corresK_weakenK) + apply (rule corresK_split[OF x, where F'="\rv rv'. case (rv,rv') of (Inr rva, Inr rva') \ F' rva rva' | _ \ True"]) + apply (simp add: corres_protect_def) + prefer 2 + apply simp + apply (rule corres_rv_prove[where F=F'']) + apply (case_tac rv; case_tac rv'; simp) + apply (rule corres_rvE_RD[OF c]; assumption) + apply (case_tac rv; case_tac rv'; simp) + apply (simp add: corres_underlyingK_def corres_protect_def) + apply (rule corresK_weaken) + apply (rule y) + apply (simp add: corres_protect_def) + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + apply (rule conjI) + apply (subgoal_tac "(case (Inr b) of (Inr b) \ R b s + | _ \ True)"; assumption?) + apply (subgoal_tac "(case (Inr ba) of (Inr ba) \ R' ba s' + | _ \ True)"; assumption?) 
+ apply clarsimp+ + apply (insert z) + by ((fastforce simp: valid_def validE_def validE_R_def split: sum.splits)+) + +lemma corresK_returnOk [corresK_concrete_r]: + "corres_underlyingK sr nf nf' (r (Inr a) (Inr b)) r \ \ (returnOk a) (returnOk b)" + by (simp add: returnOk_def corres_underlyingK_def) + +lemma corres_assertE_str[corresK]: + "corres_underlyingK sr nf nf' ((nf' \ Q) \ P) (f \ dc) \ \ (assertE P) (assertE Q)" + by (auto simp add: corres_underlying_def corres_underlyingK_def returnOk_def return_def assertE_def fail_def) + +lemmas corres_symb_exec_whenE_l_search[corresK_symb_exec_ls] = + corresK_symb_exec_l_search[where 'd="'x + 'y", folded liftE_bindE] + +lemmas corres_returnOk_liftEs + [folded returnOk_liftE, THEN iffD2, atomized, THEN corresK_lift_rule, rule_format, corresK] = + corres_liftE_rel_sum[where m="return x" for x] + corres_liftE_rel_sum[where m'="return x" for x] + + +(* Failure *) + +lemma corresK_fail[corresK]: + "corres_underlyingK sr nf True False r P P' f fail" + by (simp add: corres_underlyingK_def) + +lemma corresK_fail_no_fail'[corresK]: + "corres_underlyingK sr nf False True (\_ _. False) (\_. True) (\_. True) f fail" + apply (simp add: corres_underlyingK_def) + by (fastforce intro!: corres_fail) + +section \CorresKwp\ + +text + \This method wraps up wp and wpc to ensure that they don't accidentally generate schematic + assumptions when handling hoare triples that emerge from corres proofs. + This is partially due to wp not being smart enough to avoid applying certain wp_comb rules + when the precondition is schematic, and arises when the schematic precondition doesn't have + access to some meta-variables in the postcondition. + + To solve this, instead of meta-implication in the wp_comb rules we use corres_inst_eq, which + can only be solved by reflexivity. In most cases these comb rules are either never applied or + solved trivially. If users manually apply corres_rv rules to create postconditions with + inaccessible meta-variables (@{method corresK_rv} will never do this), then these rules will + be used. Since @{method corresK_inst} has access to the protected return-value relation, it has a chance + to unify the generated precondition with the original schematic one.\ + +named_theorems corresKwp_wp_comb and corresKwp_wp_comb_del + +lemma corres_inst_eq_imp: + "corres_inst_eq A B \ A \ B" by (simp add: corres_inst_eq_def) + +lemmas corres_hoare_pre = hoare_pre[# \-\ \atomize (full), rule allI, rule corres_inst_eq_imp\] + +method corresKwp uses wp = + (determ \ + (fails \schematic_hoare_pre\, (wp add: wp | wpc)) + | (schematic_hoare_pre, + (use corresKwp_wp_comb [wp_comb] + corresKwp_wp_comb_del[wp_comb del] + hoare_pre[wp_pre del] + corres_hoare_pre[wp_pre] + in + \use in \wp add: wp | wpc\\))\) + +lemmas [corresKwp_wp_comb_del] = + hoare_vcg_precond_imp + hoare_vcg_precond_impE + hoare_vcg_precond_impE_R + +lemma corres_inst_conj_lift[corresKwp_wp_comb]: + "\\R\ f \Q\; \P'\ f \Q'\; \s. corres_inst_eq (R s) (P s)\ \ + \\s. P s \ P' s\ f \\rv s. 
Q rv s \ Q' rv s\" + by (rule hoare_vcg_conj_lift; simp add: valid_def corres_inst_eq_def) + +lemmas [corresKwp_wp_comb] = + corresKwp_wp_comb_del[# \-\ \atomize (full), rule allI, rule corres_inst_eq_imp\] + valid_validE_R + hoare_vcg_R_conj[OF valid_validE_R] + hoare_vcg_E_elim[OF valid_validE_E] + hoare_vcg_E_conj[OF valid_validE_E] + validE_validE_R + hoare_vcg_R_conj + hoare_vcg_E_elim + hoare_vcg_E_conj + hoare_vcg_conj_lift + +declare hoare_post_comb_imp_conj[corresKwp_wp_comb_del] + +section \CorresKsimp\ +text \Combines corresK, wp and clarsimp\ + +text +\If clarsimp solves a terminal subgoal, its preconditions are left uninstantiated. We can +try to catch this by first attempting a trivial instantiation before invoking clarsimp, but +only keeping the result if clarsimp solves the goal,\ + +lemmas hoare_True_inst = + hoare_pre[where P="\_. True", of "\_. True", # \-\ \simp\] + asm_rl[of "\\_. True\ f \E\,\R\" for f E R] + +lemmas corres_rv_True_inst = + asm_rl[of "corres_rv True r (\_. True) (\_. True) f f' Q" for r f f' Q] + asm_rl[of "corres_rvE_R True r (\_. True) (\_. True) f f' Q" for r f f' Q] + +lemmas corresK_True_inst = + asm_rl[of "corres_underlyingK sr nf nf' True dc (\_. True) (\_. True) f g" for sr nf nf' f g] + asm_rl[of "corres_underlyingK sr nf nf' True r (\_. True) (\_. True) f g" for sr nf nf' r f g] + asm_rl[of "corres_underlying sr nf nf' dc (\_. True) (\_. True) f g" for sr nf nf' f g] + asm_rl[of "corres_underlying sr nf nf' r (\_. True) (\_. True) f g" for sr nf nf' r f g] + +lemmas calculus_True_insts = hoare_True_inst corres_rv_True_inst corresK_True_inst + +method corresKsimp uses simp cong search wp + declares corres corresK corresK_splits corresKc_simp = + ((no_schematic_concl, + (corresK corresKc_simp: simp + | corresKwp wp: wp + | (rule calculus_True_insts, solves \clarsimp cong: cong simp: simp corres_protect_def\) + | clarsimp cong: cong simp: simp simp del: corres_no_simp split del: if_split + | (match search in _ \ \corresK_search search: search\)))+)[1] + +section \Normalize corres rule into corresK rule\ + +lemma corresK_convert: + "A \ corres_underlying sr nf nf' r P Q f f' \ + corres_underlyingK sr nf nf' A r P Q f f'" + by (auto simp add: corres_underlyingK_def) + +method corresK_convert = (((drule uncurry)+)?, drule corresK_convert corresK_drop) + +section \Lifting corres results into wp proofs\ + +lemma use_corresK': + "corres_underlyingK sr False nf' F r PP PP' f f' \ \P\ f \Q\ \ + \K F and PP' and ex_abs_underlying sr (PP and P)\ f' \\rv' s'. \rv. r rv rv' \ ex_abs_underlying sr (Q rv) s'\" + by (fastforce simp: corres_underlying_def corres_underlyingK_def valid_def ex_abs_underlying_def) + +lemma use_corresK [wp]: + "corres_underlyingK sr False nf' F r PP PP' f f' \ \P\ f \\rv s. \rv'. r rv rv' \ Q rv' s\ \ + \K F and PP' and ex_abs_underlying sr (PP and P)\ f' \\rv'. ex_abs_underlying sr (Q rv')\" + apply (fastforce simp: corres_underlying_def corres_underlyingK_def valid_def ex_abs_underlying_def) + done + +lemma hoare_add_post': + "\\P'\ f \Q'\; \P''\ f \\rv s. Q' rv s \ Q rv s\\ \ \P' and P''\ f \Q\" + by (fastforce simp add: valid_def) + +lemma use_corresK_frame: + assumes corres: "corres_underlyingK sr False nf' F r PP P' f f'" + assumes frame: "(\s s' rv rv'. 
(s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s')" + assumes valid: "\P\ f \Q\" + assumes valid': "\PP'\ f' \Q'\" + shows "\K F and P' and PP' and ex_abs_underlying sr (PP and P)\ f' \QQ'\" + apply (rule hoare_pre) + apply (rule hoare_add_post'[OF valid']) + apply (rule hoare_strengthen_post) + apply (rule use_corresK'[OF corres valid]) + apply (insert frame)[1] + apply (clarsimp simp: ex_abs_underlying_def) + apply clarsimp + done + +lemma use_corresK_frame_E_R: + assumes corres: "corres_underlyingK sr False nf' F (lf \ r) PP P' f f'" + assumes frame: "(\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s')" + assumes valid: "\P\ f \Q\, -" + assumes valid': "\PP'\ f' \Q'\, -" + shows "\K F and P' and PP' and ex_abs_underlying sr (PP and P)\ f' \QQ'\, -" + apply (simp only: validE_R_def validE_def) + apply (rule use_corresK_frame[OF corres _ valid[simplified validE_R_def validE_def] valid'[simplified validE_R_def validE_def]]) + by (auto simp: frame split: sum.splits) + +lemma K_True: "K True = (\_. True)" by simp +lemma True_And: "((\_. True) and P) = P" by simp + +method use_corresK uses frame = + (corresK_convert?, drule use_corresK_frame use_corresK_frame_E_R, rule frame, + (solves \wp\ | defer_tac), (solves \wp\ | defer_tac), (simp only: True_And K_True)?) + +experiment + fixes sr nf' r P P' f f' F G Q Q' QQ' PP PP' g g' + assumes f_corres[corres]: "G \ F \ corres_underlying sr False True r P P' f f'" and + g_corres[corres]: "corres_underlying sr False True dc \ \ g g'" and + wpl [wp]: "\PP\ f \Q\" and wpr [wp]: "\PP'\ f' \Q'\" + and [wp]: "\P\ g \\_. P\" "\PP\ g \\_. PP\" "\P'\ g' \\_. P'\" "\PP'\ g' \\_. PP'\" and + frameA: "\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s'" + begin + + lemmas f_Q' = f_corres[atomized, @\use_corresK frame: frameA\] + + lemma "G \ F \ corres_underlying sr False True dc (P and PP) (P' and PP') + (g >>= (K (f >>= K (assert True)))) (g' >>= (K (f' >>= (\rv'. (stateAssert (QQ' rv') [])))))" + apply (simp only: stateAssert_def K_def) + apply corresK + apply (corresK_search search: corresK_assert) + apply corresK_rv + apply (corresKwp | simp)+ + apply corresK_rv + apply (corresKwp wp: f_Q' | simp)+ + apply corresKsimp+ + by auto + +end + +section \Corres Argument lifting\ + +text \Used for rewriting corres rules with syntactically equivalent arguments\ + +lemma lift_args_corres: + "corres_underlying sr nf nf' r (P x) (P' x) (f x) (f' x) \ x = x' \ + corres_underlying sr nf nf' r (P x) (P' x') (f x) (f' x')" by simp + +method lift_corres_args = + (match premises in + H[thin]:"corres_underlying _ _ _ _ (P x) (P' x) (f x) (f' x)" (cut 5) for P P' f f' x \ + \match (f) in "\_. g" for g \ \fail\ \ _ \ + \match (f') in "\_. g'" for g' \ \fail\ \ _ \ + \cut_tac lift_args_corres[where f=f and f'=f' and P=P and P'=P', OF H]\\\)+ + +(* Use calculational rules. Don't unfold the definition! *) +lemmas corres_rv_def_I_know_what_I'm_doing = corres_rv_def +lemmas corres_rvE_R_def_I_know_what_I'm_doing = corres_rvE_R_def + +hide_fact corres_rv_def +hide_fact corres_rvE_R_def + +end diff --git a/lib/Corres_Cases.thy b/lib/Corres_Cases.thy new file mode 100644 index 0000000000..c6fea429ef --- /dev/null +++ b/lib/Corres_Cases.thy @@ -0,0 +1,323 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Corres_Cases +imports Corres_UL +begin + +text \ + This file defines the following main methods for safe data type case distinctions on + corres/corres_underlying predicates. 
+ + \<^item> corres_cases_left: case distinction on abstract monad + \<^item> corres_cases_right: case distinction on concrete monad + \<^item> corres_cases: try corres_cases_left, then corres_cases_right + \<^item> corres_cases_both: simultaneous (quadratic) case distinction on both sides, with safe + elimination of trivially contradictory cases. + + The first 3 methods take no arguments, corres_cases_both takes an optional simp argument to, + for example, unfold relations that synchronise cases between the abstract and concrete side. + + The case distinctions work if the entire monad is a "case" statement, or if the monad is a + @{const bind} or @{const bindE} term with a "case" statement in the head position. + + There is an existing method for case distinctions (@{method wpc}), but this method is not + flexible enough for @{term corres}: consider the goal + @{text "\x. corres r (?G x) ?G' (case x of None \ a | Some y \ b y) m"} -- if we perform + case distinction on @{term x}, then we can transform @{text "?G x"} into + @{text "\x s. (x = None \ ?Q1 x s) \ (\y. x = Some y \ ?Q2 x y s)"}, + but we cannot do the same with @{text "?G'"}, because @{text "?G'"} does not depend on @{text x}. + The best we can do is @{text "?G' = \s. ?A s \ ?B s"}, which so far seems to be good enough + in our manual proofs. + + The @{method wpc} method will try to treat both preconditions uniformly and fail on @{text "?G'"}. + Extending @{method wpc} to deal with guards in a non-uniform way would be possible, but would make + setup for new constants even more messy than it already is. Instead we re-use the general idea + here (in Eisbach instead of ML), and leave the @{method wpc} setup clean for other uses. +\ + +section \Helper functions and definitions\ + +(* The following three definitions are originally by Dan Matichuck from the Eisbach + CorresK_Method example *) + +(* Retrieve a split rule for a target term that is expected to be a case statement. *) +ML \ +fun get_split_rule ctxt target = +let + val (hdTarget, args) = strip_comb (Envir.eta_contract target) + val (constNm, _) = dest_Const hdTarget + val constNm_fds = String.fields (fn c => c = #".") constNm + + val _ = if String.isPrefix "case_" (List.last constNm_fds) then () + else raise TERM ("Not a case statement", [target]) + + val typeNm = (String.concatWith "." o rev o tl o rev) constNm_fds + val split = Proof_Context.get_thm ctxt (typeNm ^ ".split") + val vars = Term.add_vars (Thm.prop_of split) [] + + val datatype_name = List.nth (rev constNm_fds, 1) + + fun T_is_datatype (Type (nm, _)) = (Long_Name.base_name nm = Long_Name.base_name datatype_name) + | T_is_datatype _ = false + + val datatype_var = + case find_first (fn ((_, _), T') => T_is_datatype T') vars of + SOME (ix, _) => ix + | NONE => error ("Couldn't find datatype in thm: " ^ datatype_name) + + val split' = Drule.infer_instantiate ctxt + [(datatype_var, Thm.cterm_of ctxt (List.last args))] split + +in SOME split' end +handle TERM _ => NONE; +\ + +(* The above function as an attribute. The term argument is expected to be a case statement. *) +attribute_setup get_split_rule = \Args.term >> + (fn t => Thm.rule_attribute [] (fn context => fn _ => + case get_split_rule (Context.proof_of context) t of + SOME thm => thm + | NONE => Drule.free_dummy_thm))\ + +(* Apply a split rule to a goal. Example usage: + + apply_split f "\f. corres_underlying sr nf nf' r P P' f f'" + + The first (free) f is expected to be a case statement and is used to extract the split rule. 
+ The second term is expected to take this f as a parameter and provide the term context of the + case statement in the goal so the split rule is applied to the correct occurrence of the case + statement. +*) +method apply_split for f :: 'a and R :: "'a \ bool" = + (match [[get_split_rule f]] in U: "(?x :: bool) = ?y" \ + \match U[THEN iffD2] in U': "\H. ?A \ H (?z :: 'c)" \ + \match (R) in "R' :: 'c \ bool" for R' \ + \rule U'[where H=R']\\\) + +context +begin + +(* This predicate provides an abstraction for guard/precondition terms for transformations + on those guards. + + P and P' are the abstract and concrete preconditions before the transformation + Q and Q' are the abstract and concrete preconditions after the transformation + + R is the predicate to be transformed. +*) +private definition corres_case_helper :: + "(('a \ bool) \ ('b \ bool)) \ (('a \ bool) \ ('b \ bool)) \ bool \ bool" where + "corres_case_helper \ \(P, P') (Q, Q') R. (\s. P s \ Q s) \ (\s. P' s \ Q' s) \ R" + + +(* The following lemmas enable us to lift preconditions of corres_case_helper over conjunction, + universal quantifiers, and implication. Note that there are strong versions for forall/implies + where both guards are treated uniformly, and weak versions, where forall/implies is dropped + in one guard, but not the other. + + The collection of the lemmas below is used to process the term R in corres_case_helper and + create appropriately lifted guard/preconditions during that procedure. The names and general + idea are from the WPC theory. +*) + +private lemma corres_case_helperI: + "corres_case_helper (P, P') (P, P') R \ R" + by (simp add: corres_case_helper_def) + +private lemma corres_case_conj_process: + "\ corres_case_helper (P, P') (A, A') R; corres_case_helper (P, P') (B, B') S \ + \ corres_case_helper (P, P') (\s. A s \ B s, \s. A' s \ B' s) (R \ S)" + by (clarsimp simp add: corres_case_helper_def) + +private lemma corres_case_all_process: + "\ \x. corres_case_helper (P, P') (Q x, Q' x) (R x) \ + \ corres_case_helper (P, P') (\s. \x. Q x s, \s. \x. Q' x s) (\x. R x)" + by (clarsimp simp: corres_case_helper_def subset_iff) + +private lemma corres_case_all_process_weak: + "\ \x. corres_case_helper (P, P') (Q x, Q') (R x) \ + \ corres_case_helper (P, P') (\s. \x. Q x s, Q') (\x. R x)" + by (clarsimp simp: corres_case_helper_def subset_iff) + +private lemma corres_case_imp_process: + "\ S \ corres_case_helper (P, P') (Q, Q') R \ + \ corres_case_helper (P, P') (\s. S \ Q s, \s. S \ Q' s) (S \ R)" + by (clarsimp simp add: corres_case_helper_def subset_iff) + +private lemma corres_case_imp_process_weak: + "\ S \ corres_case_helper (P, P') (Q, Q') R \ + \ corres_case_helper (P, P') (\s. S \ Q s, Q') (S \ R)" + by (clarsimp simp add: corres_case_helper_def subset_iff) + +private lemmas corres_case_process = + corres_case_conj_process corres_case_all_process corres_case_imp_process + +private lemmas corres_case_process_weak = + corres_case_conj_process corres_case_all_process_weak corres_case_imp_process_weak + +(* Turn goals of the form + + (\y. x = SomeConstr y \ corres (?P x) P' (SomeConstr y) g) \ + (\y. x = OtherConstr y \ corres (?P x) P' (OtherConstr y) g) \ + ... + + into multiple goals of the form + + \y. x = SomeConstr y \ corres (?P1 x y) ?P'1 (SomeConstr y) g) + \y. x = OtherConstr y \ corres (?P2 x y) ?P'2 (OtherConstr y) g) + + with instantiations + + ?P x = \s. (\y. x = SomeConstr y \ ?P1 x y s) \ (\y. x = OtherConstr y \ ?P2 x y s) + ?P' = \s. 
?P'1 s \ ?P'2 s + + We do this by first transforming the goal into a corres_case_helper goal, and then applying + the corresponding lifting rules. We first try to get both sides (?P and ?P') to have + quantifiers and implications to get a stronger statement, and fall back to the weaker \ for ?P' + shown above when that doesn't work (e.g. because ?P' might not depend on x). + + When all lifting rules have applied, we transform the goal back into a corres goal using the + provided helper rule (e.g. corres_case_helper_corres_left below). +*) +private method corres_cases_body uses helper = + determ \rule corres_case_helperI, repeat_new \rule corres_case_process\; rule helper + | rule corres_case_helperI, repeat_new \rule corres_case_process_weak\; rule helper\ + + +(* Instances of corres_case_helper for left and right side of the corres predicate. + These lemmas bind the corres guards to the corres_case_helper guards. *) +private lemma corres_case_helper_corres_left: + "corres_underlying sr nf nf' r Q Q' f f' \ + corres_case_helper (P, P') (Q, Q') (corres_underlying sr nf nf' r P P' f f')" + by (auto simp: corres_case_helper_def elim!: corres_guard_imp) + +private lemma corres_case_helper_corres_right: + "corres_underlying sr nf nf' r Q' Q f f' \ + corres_case_helper (P, P') (Q, Q') (corres_underlying sr nf nf' r P' P f f')" + by (auto simp: corres_case_helper_def elim!: corres_guard_imp) + + +section \Main method definitions\ + +(* Case distinction on abstract side *) +method corres_cases_left = + determ \ + corres_pre, + (match conclusion in + "corres_underlying sr nf nf' r P P' (f >>= g) f'" for sr nf nf' r P P' f g f' + \ \apply_split f "\f. corres_underlying sr nf nf' r P P' (f >>= g) f'"\ + \ "corres_underlying sr nf nf' r P P' (f >>=E g) f'" for sr nf nf' r P P' f g f' + \ \apply_split f "\f. corres_underlying sr nf nf' r P P' (f >>=E g) f'"\ + \ "corres_underlying sr nf nf' r P P' f f'" for sr nf nf' r P P' f f' + \ \apply_split f "\f. corres_underlying sr nf nf' r P P' f f'"\), + corres_cases_body helper: corres_case_helper_corres_left\ + +(* case distinction on concrete side *) +method corres_cases_right = + determ \ + corres_pre, + (match conclusion in + "corres_underlying sr nf nf' r P P' f (f' >>= g)" for sr nf nf' r P P' f g f' + \ \apply_split f' "\f'. corres_underlying sr nf nf' r P P' f (f' >>= g)"\ + \ "corres_underlying sr nf nf' r P P' f (f' >>=E g)" for sr nf nf' r P P' f g f' + \ \apply_split f' "\f'. corres_underlying sr nf nf' r P P' f (f' >>=E g)"\ + \ "corres_underlying sr nf nf' r P P' f f'" for sr nf nf' r P P' f f' + \ \apply_split f' "\f'. corres_underlying sr nf nf' r P P' f f'"\), + corres_cases_body helper: corres_case_helper_corres_right\ + +(* single case distinction on either left or right, whichever works first *) +method corres_cases = corres_cases_left | corres_cases_right + +(* Case distinction on abstract and concrete side with quadractic blowup, but attempt to solve + contradictory side conditions by simp. Cases that are solved by simp will produce \ as guard + so that no free schematics are introduced into later goals. 
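+
+   For example, with the none_rel relation from the examples section at the end of this
+   theory, a typical invocation is
+
+     apply (corres_cases_both simp: none_rel_def)
+
+   which unfolds none_rel_def only to discharge the contradictory case combinations and
+   leaves the consistent cases for the user.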
*) +method corres_cases_both uses simp = + (* corres_pre first, so that the ";" later only refers to corres goals, not the final implications *) + determ \ + corres_pre, + (corres_cases_left; corres_cases_right; + (solves \rule corres_inst[where P=\ and P'=\], simp add: simp\)?)\ + +end + + +section \Examples and tests\ + +experiment +begin + +(* abstract side *) +lemma "corres_underlying srel nf nf' rrel (G x) G' (case x of None \ a | Some y \ b y) m" + (* produces strong (forall, implies) guard conditions in the final implications for both sides *) + apply corres_cases + oops + +schematic_goal + "\x. corres_underlying srel nf nf' rrel (?G x) ?G' (case x of None \ a | Some y \ b y) m" + (* produces weak (just ?A \ ?B) guard conditions for concrete side, because ?G' does not + depend on "x", on which we do the case distinction *) + apply corres_cases + oops + +(* abstract side, with bind *) +lemma "corres_underlying srel nf nf' rrel G G' ((case x of None \ a | Some y \ b y) >>= g) m" + apply corres_cases + oops + +(* abstract side, with bindE *) +lemma "corres_underlying srel nf nf' rrel G G' ((case x of None \ a | Some y \ b y) >>=E g) m" + apply corres_cases + oops + +(* concrete side: *) +lemma "corres_underlying srel nf nf' rrel G G' m (case x of None \ a | Some y \ b y)" + apply corres_cases + oops + +schematic_goal + "\x. corres_underlying srel nf nf' rrel ?G (?G' x) m (case x of None \ a | Some y \ b y)" + apply corres_cases + oops + +lemma "corres_underlying srel nf nf' rrel G G' m ((case x of None \ a | Some y \ b y) >>= g)" + apply corres_cases + oops + +lemma "corres_underlying srel nf nf' rrel G G' m ((case x of None \ a | Some y \ b y) >>=E g)" + apply corres_cases + oops + +(* both sides: *) +lemma "corres_underlying srel nf nf' rrel G G' (case x of None \ a | Some y \ b) + (case x of None \ a' | Some y \ b' y)" + (* two cases remain (both None, both Some); eliminated cases have guard \ in final implication *) + apply corres_cases_both + oops + +schematic_goal + "\x y. 
corres_underlying srel nf nf' rrel (?G x) (?G' y) (case x of None \ a | Some y \ b) + (case y of None \ a' | Some y \ b' y)" + (* 4 cases remain, because none are contradictory *) + apply corres_cases_both + oops + +(* some example relation between abstract and concrete values *) +definition + "none_rel x y \ (x = None) = (y = None)" + +lemma + "none_rel x y \ + corres_underlying srel nf nf' rrel G G' (case x of None \ a | Some y \ b) + (case y of None \ a' | Some y \ b' y)" + (* two cases remain, none_rel is untouched in the cases that remain, but unfolded in the + ones that were eliminated *) + apply (corres_cases_both simp: none_rel_def) + oops + +end + +end \ No newline at end of file diff --git a/lib/Corres_Method.thy b/lib/Corres_Method.thy index 89c39d5ba9..f6b9b52ff5 100644 --- a/lib/Corres_Method.thy +++ b/lib/Corres_Method.thy @@ -1,1168 +1,223 @@ (* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2023, Proofcraft Pty Ltd * * SPDX-License-Identifier: BSD-2-Clause *) theory Corres_Method -imports Corres_UL SpecValid_R +imports Corres_Cases ExtraCorres begin -(*TODO move this *) - -method_setup repeat_new = - \Method.text_closure >> (fn m => fn ctxt => fn facts => - let - fun tac i st' = - Goal.restrict i 1 st' - |> method_evaluate m ctxt facts - |> Seq.map (Goal.unrestrict i) - - in SIMPLE_METHOD (SUBGOAL (fn (_,i) => REPEAT_ALL_NEW tac i) 1) facts end) -\ - -chapter \Corres Methods\ - -section \Boilerplate\ - -context begin - -private definition "my_true \ True" - -private lemma my_true: "my_true" by (simp add: my_true_def) - -method no_schematic_concl = (fails \rule my_true\) - -end - -definition - "corres_underlyingK sr nf nf' F r Q Q' f g \ - F \ corres_underlying sr nf nf' r Q Q' f g" - -lemma corresK_name_pre: - "\ \s s'. \ P s; P' s'; F; (s, s') \ sr \ - \ corres_underlyingK sr nf nf' F r ((=) s) ((=) s') f g \ - \ corres_underlyingK sr nf nf' F r P P' f g" - apply (clarsimp simp add: corres_underlyingK_def) - apply (rule corres_name_pre) - apply blast - done - -lemma corresK_assume_pre: - "\ \s s'. \ P s; P' s'; F; (s, s') \ sr \ - \ corres_underlyingK sr nf nf' F r P P' f g \ - \ corres_underlyingK sr nf nf' F r P P' f g" - apply (clarsimp simp add: corres_underlyingK_def) - apply (rule corres_assume_pre) - apply blast - done - -lemma corresK_drop_any_guard: - "corres_underlying sr nf nf' r Q Q' f g \ corres_underlyingK sr nf nf' F r Q Q' f g" - by (simp add: corres_underlyingK_def) - -lemma corresK_assume_guard: - "(F \ corres_underlying sr nf nf' r Q Q' f g) \ corres_underlyingK sr nf nf' F r Q Q' f g" - by (simp add: corres_underlyingK_def) - -lemma corresK_unlift: - "corres_underlyingK sr nf nf' F r Q Q' f g \ (F \ corres_underlying sr nf nf' r Q Q' f g)" - by (simp add: corres_underlyingK_def) - -lemma corresK_lift: - "corres_underlying sr nf nf' r Q Q' f g \ corres_underlyingK sr nf nf' F r Q Q' f g" - by (simp add: corres_underlyingK_def) - -lemma corresK_lift_rule: - "corres_underlying sr nf nf' r Q Q' f g \ corres_underlying sra nfa nfa' ra Qa Qa' fa ga - \ corres_underlyingK sr nf nf' F r Q Q' f g \ corres_underlyingK sra nfa nfa' F ra Qa Qa' fa ga" - by (simp add: corres_underlyingK_def) - -lemmas corresK_drop = corresK_drop_any_guard[where F=True] - -context begin - -lemma corresK_start: - assumes x: "corres_underlyingK sr nf nf' F r Q Q' f g" - assumes y: "\s s'. 
\ P s; P' s'; (s, s') \ sr \ \ F \ Q s \ Q' s'" - shows "corres_underlying sr nf nf' r P P' f g" - using x by (auto simp: y corres_underlying_def corres_underlyingK_def) - -lemma corresK_weaken: - assumes x: "corres_underlyingK sr nf nf' F' r Q Q' f g" - assumes y: "\s s'. \ P s; P' s'; F; (s, s') \ sr \ \ F' \ Q s \ Q' s'" - shows "corres_underlyingK sr nf nf' F r P P' f g" - using x by (auto simp: y corres_underlying_def corres_underlyingK_def) - -private lemma corres_trivial: - "False \ corres_underlying sr nf nf' r P P' f f'" - by simp - -method check_corres = - (succeeds \rule corres_trivial\, fails \rule TrueI\) - -private lemma corresK_trivial: - "False \ corres_underlyingK sr nf nf' F r P P' f f'" - by simp - -(* Ensure we don't apply calculational rules if either function is schematic *) - -private definition "dummy_fun \ undefined" - -private lemma corresK_dummy_left: - "False \ corres_underlyingK sr nf nf' F r P P' dummy_fun f'" - by simp - -private lemma corresK_dummy_right: - "False \ corres_underlyingK sr nf nf' F r P P' f dummy_fun" - by simp - -method check_corresK = - (succeeds \rule corresK_trivial\, fails \rule corresK_dummy_left corresK_dummy_right\) - -private definition "my_false s \ False" - -private - lemma corres_my_falseE: "my_false x \ P" by (simp add: my_false_def) - -method no_schematic_prems = (fails \erule corres_my_falseE\) - -private lemma hoare_pre: "\my_false\ f \Q\" by (simp add: valid_def my_false_def) -private lemma hoareE_pre: "\my_false\ f \Q\,\Q'\" by (simp add: validE_def valid_def my_false_def) -private lemma hoare_E_E_pre: "\my_false\ f -,\Q\" by (simp add: validE_E_def validE_def valid_def my_false_def) -private lemma hoare_E_R_pre: "\my_false\ f \Q\,-" by (simp add: validE_R_def validE_def valid_def my_false_def) - -private lemmas hoare_pres = hoare_pre hoare_pre hoare_E_E_pre hoare_E_R_pre - -method schematic_hoare_pre = (succeeds \rule hoare_pres\) - -private - lemma corres_my_false: "corres_underlying sr nf nf' r my_false P f f'" - "corres_underlying sr nf nf' r P' my_false f f'" - by (auto simp add: my_false_def[abs_def] corres_underlying_def) - -private - lemma corresK_my_false: "corres_underlyingK sr nf nf' F r my_false P f f'" - "corres_underlyingK sr nf nf' F r P' my_false f f'" - by (auto simp add: corres_my_false corres_underlyingK_def) - - -method corres_raw_pre = - (check_corres, (fails \rule corres_my_false\, rule corresK_start)?) - -lemma corresK_weaken_states: - "corres_underlyingK sr nf nf' F r Q Q' f g \ - corres_underlyingK sr nf nf' (F \ (\s s'. P s \ P' s' \ (s, s') \ sr \ Q s \ Q' s')) - r P P' f g" - apply (erule corresK_weaken) - apply simp - done - -private lemma - corresK_my_falseF: - "corres_underlyingK sr nf nf' (my_false undefined) r P P' f f'" - by (simp add: corres_underlyingK_def my_false_def) - -method corresK_pre = - (check_corresK, - (fails \rule corresK_my_false\, - ((succeeds \rule corresK_my_falseF\, rule corresK_weaken_states) | - rule corresK_weaken))) - -method corres_pre = (corres_raw_pre | corresK_pre)? - -lemma corresK_weakenK: - "corres_underlyingK sr nf nf' F' r P P' f f' \ (F \ F') \ corres_underlyingK sr nf nf' F r P P' f f'" - by (simp add: corres_underlyingK_def) - -(* Special corres rules which should only be applied when the return value relation is - concrete, to avoid bare schematics. *) - -named_theorems corres_concrete_r and corres_concrete_rER - -private lemma corres_r_False: - "False \ corres_underlyingK sr nf nf' F (\_. 
my_false) P P' f f'" - by simp - -private lemma corres_r_FalseE: - "False \ corres_underlyingK sr nf nf' F ((\_. my_false) \ r) P P' f f'" - by simp - -private lemma corres_r_FalseE': - "False \ corres_underlyingK sr nf nf' F (r \ (\_. my_false)) P P' f f'" - by simp - -method corres_concrete_r declares corres_concrete_r corres_concrete_rER = - (fails \rule corres_r_False corres_r_FalseE corres_r_FalseE'\, determ \rule corres_concrete_r\) - | (fails \rule corres_r_FalseE\, determ \rule corres_concrete_rER\) - - -end - - -section \Corresc - Corres over case statements\ - -text - \Based on wpc, corresc examines the split rule for top-level case statements on the left - and right hand sides, propagating backwards the stateless and left/right preconditions.\ - -ML \ - -fun get_split_rule ctxt target = -let - val (hdTarget,args) = strip_comb (Envir.eta_contract target) - val (constNm, _) = dest_Const hdTarget - val constNm_fds = (String.fields (fn c => c = #".") constNm) - - val _ = if String.isPrefix "case_" (List.last constNm_fds) then () - else raise TERM ("Not a case statement",[target]) - - val typeNm = (String.concatWith "." o rev o tl o rev) constNm_fds; - val split = Proof_Context.get_thm ctxt (typeNm ^ ".split"); - val vars = Term.add_vars (Thm.prop_of split) [] - - val datatype_name = List.nth (rev constNm_fds,1) - - fun T_is_datatype (Type (nm,_)) = (Long_Name.base_name nm) = (Long_Name.base_name datatype_name) - | T_is_datatype _ = false - - val datatype_var = - case (find_first (fn ((_,_),T') => (T_is_datatype T')) vars) of - SOME (ix,_) => ix - | NONE => error ("Couldn't find datatype in thm: " ^ datatype_name) - - val split' = Drule.infer_instantiate ctxt - [(datatype_var, Thm.cterm_of ctxt (List.last args))] split - -in - SOME split' end - handle TERM _ => NONE; -\ - -attribute_setup get_split_rule = \Args.term >> - (fn t => Thm.rule_attribute [] (fn context => fn _ => - case (get_split_rule (Context.proof_of context) t) of - SOME thm => thm - | NONE => Drule.free_dummy_thm))\ - -method apply_split for f :: 'a and R :: "'a \ bool"= - (match [[get_split_rule f]] in U: "(?x :: bool) = ?y" \ - \match U[THEN iffD2] in U': "\H. ?A \ H (?z :: 'c)" \ - \match (R) in "R' :: 'c \ bool" for R' \ - \rule U'[where H=R']\\\) - -definition - wpc2_helper :: "(('a \ bool) \ 'b set) - \ (('a \ bool) \ 'b set) \ (('a \ bool) \ 'b set) - \ (('a \ bool) \ 'b set) \ bool \ bool" where - "wpc2_helper \ \(P, P') (Q, Q') (PP, PP') (QQ,QQ') R. - ((\s. P s \ Q s) \ P' \ Q') \ ((\s. PP s \ QQ s) \ PP' \ QQ') \ R" - -definition - "wpc2_protect B Q \ (Q :: bool)" - -lemma wpc2_helperI: - "wpc2_helper (P, P') (P, P') (PP, PP') (PP, PP') Q \ Q" - by (simp add: wpc2_helper_def) - -lemma wpc2_conj_process: - "\ wpc2_helper (P, P') (A, A') (PP, PP') (AA, AA') C; wpc2_helper (P, P') (B, B') (PP, PP') (BB, BB') D \ - \ wpc2_helper (P, P') (\s. A s \ B s, A' \ B') (PP, PP') (\s. AA s \ BB s, AA' \ BB') (C \ D)" - by (clarsimp simp add: wpc2_helper_def) - -lemma wpc2_all_process: - "\ \x. wpc2_helper (P, P') (Q x, Q' x) (PP, PP') (QQ x, QQ' x) (R x) \ - \ wpc2_helper (P, P') (\s. \x. Q x s, {s. \x. s \ Q' x}) (PP, PP') (\s. \x. QQ x s, {s. \x. s \ QQ' x}) (\x. R x)" - by (clarsimp simp: wpc2_helper_def subset_iff) - -lemma wpc2_imp_process: - "\ wpc2_protect B Q \ wpc2_helper (P, P') (R, R') (PP, PP') (RR, RR') S \ - \ wpc2_helper (P, P') (\s. Q \ R s, {s. Q \ s \ R'}) (PP, PP') (\s. Q \ RR s, {s. 
Q \ s \ RR'}) (Q \ S)" - by (clarsimp simp add: wpc2_helper_def subset_iff wpc2_protect_def) - - - -text \ - Generate quadratic blowup of the case statements on either side of refinement. - Attempt to discharge resulting contradictions. -\ - - -method corresc_body for B :: bool uses helper = - determ \(rule wpc2_helperI, - repeat_new \rule wpc2_conj_process wpc2_all_process wpc2_imp_process[where B=B]\ ; (rule helper))\ - -lemma wpc2_helper_corres_left: - "corres_underlyingK sr nf nf' QQ r Q A f f' \ - wpc2_helper (P, P') (Q, Q') (\_. PP,PP') (\_. QQ,QQ') (corres_underlyingK sr nf nf' PP r P A f f')" - by (clarsimp simp: wpc2_helper_def corres_underlyingK_def elim!: corres_guard_imp) - -method corresc_left_raw = - determ \(match conclusion in "corres_underlyingK sr nf nf' F r P P' f f'" for sr nf nf' F r P P' f f' - \ \apply_split f "\f. corres_underlyingK sr nf nf' F r P P' f f'"\, - corresc_body False helper: wpc2_helper_corres_left)\ - -lemma wpc2_helper_corres_right: - "corres_underlyingK sr nf nf' QQ r A Q f f' \ - wpc2_helper (P, P') (Q, Q') (\_. PP,PP') (\_. QQ,QQ') (corres_underlyingK sr nf nf' PP r A P f f')" - by (clarsimp simp: wpc2_helper_def corres_underlyingK_def elim!: corres_guard_imp) - -method corresc_right_raw = - determ \(match conclusion in "corres_underlyingK sr nf nf' F r P P' f f'" for sr nf nf' F r P P' f f' - \ \apply_split f' "\f'. corres_underlyingK sr nf nf' F r P P' f f'"\, - corresc_body True helper: wpc2_helper_corres_right)\ - -definition - "corres_protect r = (r :: bool)" - -lemma corres_protect_conj_elim[simp]: - "corres_protect (a \ b) = (corres_protect a \ corres_protect b)" - by (simp add: corres_protect_def) - -lemma wpc2_corres_protect: - "wpc2_protect B Q \ corres_protect Q" - by (simp add: wpc2_protect_def corres_protect_def) - -method corresc_left = (corresc_left_raw; (drule wpc2_corres_protect[where B=False])) -method corresc_right = (corresc_right_raw; (drule wpc2_corres_protect[where B=True])) - -named_theorems corresc_simp - -declare wpc2_protect_def[corresc_simp] -declare corres_protect_def[corresc_simp] - -lemma corresK_false_guard_instantiate: - "False \ corres_underlyingK sr nf nf' True r P P' f f'" - by (simp add: corres_underlyingK_def) - -lemma - wpc_contr_helper: - "wpc2_protect False (A = B) \ wpc2_protect True (A = C) \ B \ C \ P" - by (auto simp: wpc2_protect_def) - -method corresc declares corresc_simp = - (check_corresK, corresc_left_raw; corresc_right_raw; - ((solves \rule corresK_false_guard_instantiate, - determ \(erule (1) wpc_contr_helper)?\, simp add: corresc_simp\) - | (drule wpc2_corres_protect[where B=False], drule wpc2_corres_protect[where B=True])))[1] - -section \Corres_rv\ - -text \Corres_rv is used to propagate backwards the stateless precondition (F) from corres_underlyingK. - It's main purpose is to defer the decision of where each condition should go: either continue - through the stateless precondition, or be pushed into the left/right side as a hoare triple.\ - - -(*Don't unfold the definition. Use corres_rv method or associated rules. *) -definition corres_rv :: "bool \ ('a \ 'b \ bool) \ ('s \ bool) \ ('t \ bool) - \ ('s, 'a) nondet_monad \ ('t, 'b) nondet_monad \ - ('a \ 'b \ bool) \ bool" - where - "corres_rv F r P P' f f' Q \ - F \ (\s s'. P s \ P' s' \ - (\sa rv. (rv, sa) \ fst (f s) \ (\sa' rv'. (rv', sa') \ fst (f' s') \ r rv rv' \ Q rv rv')))" - -(*Don't unfold the definition. Use corres_rv method or associated rules. *) -definition "corres_rvE_R F r P P' f f' Q \ - corres_rv F (\_ _. 
True) P P' f f' - (\rvE rvE'. case (rvE,rvE') of (Inr rv, Inr rv') \ r rv rv' \ Q rv rv' | _ \ True)" - -lemma corres_rvD: - "corres_rv F r P P' f f' Q \ - F \ P s \ P' s' \ (rv,sa) \ fst (f s) \ - (rv',sa') \ fst (f' s') \ r rv rv' \ Q rv rv'" - by (auto simp add: corres_rv_def) - -lemma corres_rvE_RD: - "corres_rvE_R F r P P' f f' Q \ - F \ P s \ P' s' \ (Inr rv,sa) \ fst (f s) \ - (Inr rv',sa') \ fst (f' s') \ r rv rv' \ Q rv rv'" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemma corres_rv_prove: - "(\s s' sa sa' rv rv'. F \ - (rv,sa) \ fst (f s) \ (rv',sa') \ fst (f' s') \ P s \ P' s' \ r rv rv' \ Q rv rv') \ - corres_rv F r P P' f f' Q" - by (auto simp add: corres_rv_def) - -lemma corres_rvE_R_prove: - "(\s s' sa sa' rv rv'. F \ - (Inr rv,sa) \ fst (f s) \ (Inr rv',sa') \ fst (f' s') \ P s \ P' s' \ r rv rv' \ Q rv rv') \ - corres_rvE_R F r P P' f f' Q" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemma corres_rv_wp_left: - "\P\ f \\rv s. \rv'. r rv rv' \ Q rv rv'\ \ corres_rv True r P \ f f' Q" - by (fastforce simp add: corres_rv_def valid_def) - -lemma corres_rvE_R_wp_left: - "\P\ f \\rv s. \rv'. r rv rv' \ Q rv rv'\, - \ corres_rvE_R True r P \ f f' Q" - apply (simp add: corres_rvE_R_def validE_def validE_R_def) - apply (rule corres_rv_wp_left) - apply (erule hoare_strengthen_post) - apply (auto split: sum.splits) - done - -lemma corres_rv_wp_right: - "\P'\ f' \\rv' s. \rv. r rv rv' \ Q rv rv'\ \ corres_rv True r \ P' f f' Q" - by (fastforce simp add: corres_rv_def valid_def) - -lemma corres_rvE_R_wp_right: - "\P'\ f' \\rv' s. \rv. r rv rv' \ Q rv rv'\, - \ corres_rvE_R True r \ P' f f' Q" - apply (simp add: corres_rvE_R_def validE_def validE_R_def) - apply (rule corres_rv_wp_right) - apply (erule hoare_strengthen_post) - apply (auto split: sum.splits) - done - -lemma corres_rv_weaken: - "(\rv rv'. Q rv rv' \ Q' rv rv') \ corres_rv F r P P' f f' Q \ corres_rv F r P P' f f' Q'" - by (auto simp add: corres_rv_def) - -lemma corres_rvE_R_weaken: - "(\rv rv'. Q rv rv' \ Q' rv rv') \ corres_rvE_R F r P P' f f' Q \ corres_rvE_R F r P P' f f' Q'" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemma corres_rv_defer_no_args: - "corres_rv (\rv rv'. r rv rv' \ F) r (\_. True) (\_. True) f f' (\_ _. F)" - by (auto simp add: corres_rv_def) - -lemma corres_rvE_R_defer_no_args: - "corres_rvE_R (\rv rv'. r rv rv' \ F) r (\_. True) (\_. True) f f' (\_ _. F)" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -(*UNSAFE*) -lemma corres_rv_defer: - "corres_rv (\rv rv'. r rv rv' \ Q rv rv') r (\_. True) (\_. True) f f' Q" - by (auto simp add: corres_rv_def) - -(*UNSAFE*) -lemma corres_rvE_R_defer: - "corres_rvE_R (\rv rv'. r rv rv' \ Q rv rv') r (\_. True) (\_. True) f f' Q" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemmas corres_rv_proveT = - corres_rv_prove[where P=\ and P'=\ and F=True, simplified] - -lemmas corres_rvE_R_proveT = - corres_rvE_R_prove[where P=\ and P'=\ and F=True,simplified] - -lemma corres_rv_conj_lift: - "corres_rv F r P PP f g Q \ corres_rv F' r P' PP' f g Q' \ - corres_rv (F \ F') r (\s. P s \ P' s) (\s'. PP s' \ PP' s') f g (\rv rv'. Q rv rv' \ Q' rv rv')" - by (clarsimp simp add: corres_rv_def) - -lemma corres_rvE_R_conj_lift: - "corres_rvE_R F r P PP f g Q \ corres_rvE_R F' r P' PP' f g Q' \ - corres_rvE_R (F \ F') r (\s. P s \ P' s) (\s'. PP s' \ PP' s') f g (\rv rv'. 
Q rv rv' \ Q' rv rv')" - by (auto simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -subsection \Corres_rv method\ - -text \This method propagate corres_rv obligations into each precondition according to the following -heuristic: - For each conjunct in the obligation: - - 1) Try to solve trivially (to handle schematic conditions) - 2) If it does not depend on function return values, propagate it as a stateless precondition - 3) If either side is a corres_noop (used by symbolic execution), propagate as hoare triple - for other side. - 4) If it can be phrased entirely with variables accessible to the left side, propagate it as - a left hoare triple. - 5) As in 3) but on the right. - - Fail if any of 1-5 are unsuccessful for any conjunct. - -In the case where corres_rv fails, the user will need to intervene, either -by specifying where to defer the obligation or solving the goal in-place. -\ - -definition "corres_noop = return undefined" - -context begin - -private lemma corres_rv_defer_left: - "corres_rv F r (\_. \rv rv'. Q rv rv') P' f f' Q" - by (simp add: corres_rv_def) - -private lemma corres_rvE_R_defer_left: - "corres_rvE_R F r (\_. \rv rv'. Q rv rv') P' f f' Q" - by (simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -private lemma corres_rv_defer_right: - "corres_rv F r P (\_. \rv rv'. Q rv rv') f f' Q" - by (simp add: corres_rv_def) - -private lemma corres_rvE_R_defer_right: - "corres_rvE_R F r P (\_. \rv rv'. Q rv rv') f f' Q" - by (simp add: corres_rv_def corres_rvE_R_def split: sum.splits) - -lemmas corres_rv_proves = - corres_rv_proveT corres_rvE_R_proveT - -(* Try to handle cases where corres_rv obligations have been left schematic *) -lemmas corres_rv_trivials = - corres_rv_proves[where Q="\_ _. True", OF TrueI] - corres_rv_proves[where Q="\rv rv'. F rv rv' \ True" for F, # \simp\] - corres_rv_proves[where Q=r and r=r for r, # \simp\] - -lemmas corres_rv_defers = - corres_rv_defer_no_args corres_rvE_R_defer_no_args - -lemmas corres_rv_wp_lefts = - corres_rv_wp_left corres_rvE_R_wp_left - -lemmas corres_rv_wp_rights = - corres_rv_wp_right corres_rvE_R_wp_right - -lemmas corres_rv_noops = - corres_rv_wp_lefts[where f'=corres_noop] corres_rv_wp_rights[where f=corres_noop] - -lemmas corres_rv_lifts' = - corres_rv_conj_lift corres_rvE_R_conj_lift - -lemmas corres_rv_lifts = - corres_rv_lifts' - corres_rv_lifts'[where P="\_. True" and P'="\_. True" and f="corres_noop", simplified] - corres_rv_lifts'[where PP="\_. True" and PP'="\_. True" and g="corres_noop", simplified] - -lemmas corres_rv_prove_simple = - corres_rv_proveT[# \thin_tac _, thin_tac _\, simplified] - -method corres_rv = - (((repeat_new \rule corres_rv_trivials corres_rv_lifts\)?); - ((rule corres_rv_trivials corres_rv_defers corres_rv_noops | - (succeeds \rule corres_rv_defer_left corres_rvE_R_defer_left\, - rule corres_rv_wp_lefts) | - (succeeds \rule corres_rv_defer_right corres_rvE_R_defer_right\, - rule corres_rv_wp_rights)))) - -end - - -section \CorresK Split rules\ - -text \ - The corresK split allows preconditions to be propagated backward via the extra stateless precondition - (here given as @{term F}. The head function is propagated backward directly, while the tail - is propagated via corres_rv. Using the corres_rv method, this condition is then decomposed and - pushed into the stateless, left, and right preconditions as appropriate. 
- - The return value relation is now almost never needed directly, and so it is wrapped in corres_protect - to prevent it from being used during simplification. - \ - -lemma corresK_split: - assumes x: "corres_underlyingK sr nf nf' F r' P P' a c" - assumes y: "\rv rv'. corres_protect (r' rv rv') \ corres_underlyingK sr nf nf' (F' rv rv') r (R rv) (R' rv') (b rv) (d rv')" - assumes c: "corres_rv F'' r' PP PP' a c F'" - assumes z: "\Q\ a \R\" "\Q'\ c \R'\" - shows "corres_underlyingK sr nf nf' (F \ F'') r (PP and P and Q) (PP' and P' and Q') (a >>= (\rv. b rv)) (c >>= (\rv'. d rv'))" - apply (clarsimp simp: corres_underlying_def corres_underlyingK_def bind_def) - apply (rule conjI) - apply (frule (3) x[simplified corres_underlyingK_def, rule_format, THEN corres_underlyingD],simp) - apply clarsimp - apply (drule(1) bspec,clarsimp) - apply (drule (5) corres_rvD[OF c]) - apply (rule_tac x="(ac,bc)" in bexI,clarsimp) - apply (frule_tac s'=baa in y[simplified corres_underlyingK_def corres_protect_def, rule_format, THEN corres_underlyingD]) - apply assumption+ - apply (erule(1) use_valid[OF _ z(1)]) - apply (erule(1) use_valid[OF _ z(2)]) - apply fastforce - apply clarsimp - apply (drule(1) bspec,clarsimp) - apply simp - apply (frule (3) x[simplified corres_underlyingK_def, rule_format, THEN corres_underlyingD],simp) - apply clarsimp - apply (drule(1) bspec,clarsimp) - apply (drule (5) corres_rvD[OF c]) - apply (frule_tac s'=baa in y[simplified corres_underlyingK_def corres_protect_def, rule_format, THEN corres_underlyingD]) - apply simp+ - apply (erule(1) use_valid[OF _ z(1)]) - apply (erule(1) use_valid[OF _ z(2)]) - apply fastforce - apply clarsimp - done - -section \Corres_inst\ - -text \Handles rare in-place subgoals generated by corres rules which need to be solved immediately - in order to instantiate a schematic. - We peek into the generated return-value relation to see if we can solve the instantiation. -\ - -definition "corres_inst_eq x y \ x = y" - -lemma corres_inst_eqI[wp]: "corres_inst_eq x x" by (simp add: corres_inst_eq_def) - -lemma corres_inst_test: "False \ corres_inst_eq x y" by simp - -method corres_inst = - (succeeds \rule corres_inst_test\, fails \rule TrueI\, - (rule corres_inst_eqI | - (clarsimp simp: corres_protect_def split del: if_split, rule corres_inst_eqI) - | (clarsimp simp: corres_protect_def split del: if_split, - fastforce intro!: corres_inst_eqI)))[1] - -section \Corres Method\ - -text \Handles structured decomposition of corres goals\ - -named_theorems - corres_splits and (* rules that, one applied, must - eventually yield a successful corres or corresK rule application*) - corres_simp_del and (* bad simp rules that break everything *) - corres and (* solving terminal corres subgoals *) - corresK (* calculational rules that are phrased as corresK rules *) - -context begin - -lemma corres_fold_dc: - "corres_underlyingK sr nf nf' F dc P P' f f' \ corres_underlyingK sr nf nf' F (\_ _. True) P P' f f'" - by (simp add: dc_def[abs_def]) - -private method corres_fold_dc = - (match conclusion in - "corres_underlyingK _ _ _ _ (\_ _. True) _ _ _ _" \ \rule corres_fold_dc\) - -section \Corres_apply method\ - -text \This is a private method that performs an in-place rewrite of corres rules into - corresK rules. This is primarily for backwards-compatibility with the existing corres proofs. - Works by trying to apply a corres rule, then folding the resulting subgoal state into a single - conjunct and atomizing it, then propagating the result into the stateless precondition. 
-\ - -private definition "guard_collect (F :: bool) \ F" -private definition "maybe_guard F \ True" - -private lemma corresK_assume_guard_guarded: - "(guard_collect F \ corres_underlying sr nf nf' r Q Q' f g) \ - maybe_guard F \ corres_underlyingK sr nf nf' F r Q Q' f g" - by (simp add: corres_underlyingK_def guard_collect_def) - -private lemma guard_collect: "guard_collect F \ F" - by (simp add: guard_collect_def) - -private lemma has_guard: "maybe_guard F" by (simp add: maybe_guard_def) -private lemma no_guard: "maybe_guard True" by (simp add: maybe_guard_def) - -private method corres_apply = - (rule corresK_assume_guard_guarded, - (determ \rule corres\, safe_fold_subgoals)[1], - #break "corres_apply", - ((focus_concl \(atomize (full))?\, erule guard_collect, rule has_guard) | rule no_guard))[1] - -private method corres_alternate = corres_inst | corres_rv - - - -method corres_once declares corres_splits corres corresK corresc_simp = - (no_schematic_concl, - (corres_alternate | - (corres_fold_dc?, - (corres_pre, - #break "corres", - ( (check_corresK, determ \rule corresK\) - | corres_apply - | corres_concrete_r - | corresc - | (rule corres_splits, corres_once) - ))))) - - -method corres declares corres_splits corres corresK corresc_simp = - (corres_once+)[1] - -text \Unconditionally try applying split rules. Useful for determining why corres is not applying - in a given proof.\ - -method corres_unsafe_split declares corres_splits corres corresK corresc_simp = - ((rule corres_splits | corres_pre | corres_once)+)[1] - -end +(* A proof method for automating simple steps in corres proofs. + + While the method might solve some corres proofs completely, the purpose is to make simple + things more automatic, remove boilerplate, and to leave a proof state in which the user can make + more progress. The goal is not to provide full automation or deeper search. + + The main idea is to repeatedly try to apply terminal [corres] rules after splitting off the head + of a bind/bindE statement on both sides of a corres goal. The method provides options for less + safe rules such as moving asserts to guards etc when the user knows that this is safe to do in + a particular instance. + + See description at corres' method below for all parameters and options. +*) + +section \Goal predicates\ + +(* Succeed if the conclusion is a corres goal and also not purely schematic *) +method is_corres = succeeds \rule corres_inst\, fails \rule TrueI\ + +lemma no_fail_triv: "no_fail P f \ no_fail P f" . +lemmas hoare_trivs = hoare_triv hoare_trivE hoare_trivE_R hoare_trivR_R no_fail_triv + +(* Succeed if the conclusion is a wp/no_fail goal and also not purely schematic*) +method is_wp = succeeds \rule hoare_trivs\, fails \rule TrueI\ + +lemmas hoare_post_False = hoare_pre_cont[where P="\_. \"] +lemmas hoareE_post_False = hoare_FalseE[where Q="\_. \" and E="\_. \"] + +(* Succeed if the conclusion has a schematic post condition (assuming a wp goal). 
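+   Together with is_wp this gives is_safe_wp below, which makes sure wpsimp is only run
+   once all corres goals are solved and the post conditions are no longer schematic.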
*) +method is_hoare_schematic_post = + (* If the post condition matches both \ and \, it must be schematic *) + succeeds \wp_pre, rule hoare_post_False hoareE_post_False\, + succeeds \wp_pre, rule wp_post_taut wp_post_tautE\ + +(* Succeed if wpsimp or wp can safely be applied *) +method is_safe_wp = is_wp, fails \is_hoare_schematic_post\ + +section \Main corres method\ + +named_theorems corres_splits +method corres_split declares corres_splits = no_name_eta, rule corres_splits + +(* This method is called on non-corres, non-wp side conditions after a corres rule has been + applied. At that point, there should be no schematic variables in those side condition goals. + Despite that, we are still careful with simp etc here, in case the user does provide a corres + rule that generates a schematic in those side condition goals. *) +method corres_cleanup methods m uses simp simp_del split split_del cong intro = + #break "corres_cleanup", + ( m + | assumption + | rule refl TrueI + | clarsimp simp del: corres_no_simp simp_del simp: simp split: split split del: split_del + cong: cong intro!: intro + (* enables passing in conjI for terminal goals: *) + | (rule intro; + corres_cleanup m simp: simp simp_del: simp_del split: split split_del: split_del + cong: cong intro: intro)) + +(* Apply a single corres rule and attempt to solve non-corres and non-wp side conditions. + We don't expect any wp side conditions, but check anyway for safety. If the rule is declared + as terminal rule, all side conditions must be solved and no corres or wp side conditions are + allowed. If the rule is declared as a regular corres rule, unsolved side conditions as well as + corres and wp side conditions will be left over unchanged. *) +method corres_rule + methods m uses simp simp_del split split_del cong intro declares corres corres_term = + determ \solves \((rule corres_term | corres_rrel_pre, rule corres_term); + solves \corres_cleanup m simp: simp simp_del: simp_del split: split + split_del: split_del cong: cong\)\ + | (rule corres | corres_rrel_pre, rule corres); + ((fails \is_corres\, fails \is_wp\, + solves \corres_cleanup m simp: simp simp_del: simp_del split: split + split_del: split_del cong: cong\)?)\ + +(* For normalisation of corres terms, e.g. liftE *) +named_theorems corres_simp + +(* The main method: + + After preliminaries such as wpfix and corres_pre, repeatedly try to either solve the goal + outright or split off the head from a bind/bindE statement and apply a corres rule (only + split when a corres rule applies). If none of these works, try a corres rule from the "fallback" + argument. (These are for things like moving asserts to a guard, which we only want to do if no + other corres rule applies). + + Attempt to solve side conditions with the corres_cleanup method. The cleanup method uses the + simp and term_simp arguments. + + Attempt simp on the head corres goal without rewriting guards or return relation when + none of these make progress (to process things such as liftM). Does not use the term_simp + argument. + + Attempt clarsimp on the head side condition and final implications. Does not use the term_simp + argument. + + Attempt wpsimp+ when the head goal is a wp goal (usually present when all corres goals have been + solved). Fail if we somehow ended up with a schematic post condition despite all safety measures. 
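+
+   As a rough usage sketch (f_corres, foo_def and f_inv are placeholder names, not part of
+   this theory):
+
+     apply (corres corres: f_corres simp: foo_def wp: f_inv)
+
+   repeatedly splits off the head of both monads, applies the declared and supplied [corres]
+   rules, and finally runs wpsimp with the supplied wp rules on the remaining hoare goals.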
+*) +method corres' + methods m + uses simp term_simp simp_del split split_del cong intro wp wp_del fallback + declares corres corres_term corres_splits = + (((* debug breakpoint *) + #break "corres", + (* introduce schematic guards if they don't exist *) + corres_pre0 + (* fix up schematic guards if they contain constructor parameters *) + | wpfix + (* apply a single corres rule if possible *) + | corres_rule m simp: term_simp simp simp_del: simp_del split_del: split_del split: split + cong: cong corres: corres corres_term: corres_term + (* only split if we can apply a single corres rule afterwards *) + | corres_split corres_splits: corres_splits, + corres_rule m simp: simp term_simp simp_del: simp_del split_del: split_del split: split + cong: cong corres: corres corres_term: corres_term + (* apply potentially unsafe fallback rules if any are provided *) + | corres_rule m simp: simp term_simp simp_del: simp_del split_del: split_del split: split + cong: cong corres: fallback + (* simplify head corres goal, e.g. for things like liftM unfolding if the user provides such + a rule as "simp". Not clarsimp, because clarsimp will still perform hypsubst on assumptions + and might through that rewrite guards *) + | is_corres, + simp (no_asm_use) cong: corres_weaker_cong cong split: split split del: if_split split_del + add: simp corres_simp del: corres_no_simp simp_del + (* simplify any remaining side condition head goal (non-corres, non-wp). This is either a side + condition that was not solved by corres_cleanup, or it is one of the two terminal implication + goals. It is very likely that the method will stop after this and not have solved the goal, + but it also very likely that the first thing we want to do for such a goal is clarsimp. That + means, overall we might solve a few more goals, and not be detrimental to interactive proof + either. *) + | fails \is_corres\, fails \is_wp\, + clarsimp cong: cong simp del: simp_del simp: simp split del: if_split split_del split: split + intro!: intro + (* if (and only if) we get to the state where all corres goals and side conditions are solved, + attempt to solve all wp goals that were generated in order. We are not using then_all_new_fwd + here, because we should only start solving wp goals once *all* corres goals are solved -- + otherwise the goal will still have schematic post conditions. Fail if there is a + free schematic postcondition despite all these measures. + *) + | is_safe_wp, + (wpsimp wp: wp wp_del: wp_del simp: simp simp_del: simp_del split: split split_del: split_del + cong: cong)+ + )+)[1] + +(* Instance of the corres' method with default cleanup tactic. We provide "fail" as default to let + the other cleanup tactis run. "succeed" would stop without progress (useful for debugging). *) +method corres + uses simp term_simp simp_del split split_del cong intro wp wp_del fallback + declares corres corres_term corres_splits = + corres' \fail\ simp: simp term_simp: term_simp simp_del: simp_del split: split + split_del: split_del cong: cong intro: intro wp: wp wp_del: wp_del + fallback: fallback + corres: corres corres_term: corres_term corres_splits: corres_splits + + +section \Corres rule setup\ + +(* Avoid using equations in the assumptions. subst_all gets around (no_asm_use) in some cases, + which we don't want. 
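+   The simp step in corres' above deliberately uses (no_asm_use) precisely so that
+   assumptions cannot rewrite guards or the return relation.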
*) +lemmas [corres_no_simp] = subst_all lemmas [corres_splits] = - corresK_split - -lemma corresK_when [corres_splits]: - "\corres_protect G \ corres_protect G' \ corres_underlyingK sr nf nf' F dc P P' a c\ -\ corres_underlyingK sr nf nf' ((G = G') \ F) dc ((\x. G \ P x)) (\x. G' \ P' x) (when G a) (when G' c)" - apply (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) - apply (cases "G = G'"; cases G; simp) - by (clarsimp simp: return_def) - -lemma corresK_return_trivial: - "corres_underlyingK sr nf nf' True dc (\_. True) (\_. True) (return ()) (return ())" - by (simp add: corres_underlyingK_def) - -lemma corresK_return_eq: - "corres_underlyingK sr nf nf' True (=) (\_. True) (\_. True) (return x) (return x)" - by (simp add: corres_underlyingK_def) - -lemma corres_lift_to_K: - "corres_underlying sra nfa nf'a ra Pa P'a fa f'a \ corres_underlying sr nf nf' r P P' f f' \ - corres_underlyingK sra nfa nf'a F ra Pa P'a fa f'a \ corres_underlyingK sr nf nf' F r P P' f f'" - by (simp add: corres_underlyingK_def) - -lemmas [THEN iffD2, atomized, THEN corresK_lift_rule, rule_format, simplified o_def, corres_splits] = - corres_liftE_rel_sum - corres_liftM_simp - corres_liftM2_simp - - -lemmas [corresK] = - corresK_return_trivial - corresK_return_eq - -lemma corresK_subst_left: "g = f \ - corres_underlyingK sr nf nf' F r P P' f f' \ - corres_underlyingK sr nf nf' F r P P' g f'" by simp - -lemma corresK_subst_right: "g' = f' \ - corres_underlyingK sr nf nf' F r P P' f f' \ - corres_underlyingK sr nf nf' F r P P' f g'" by simp - -lemmas corresK_fun_app_left[corres_splits] = corresK_subst_left[OF fun_app_def[THEN meta_eq_to_obj_eq]] -lemmas corresK_fun_app_right[corres_splits] = corresK_subst_right[OF fun_app_def[THEN meta_eq_to_obj_eq]] - -lemmas corresK_Let_left[corres_splits] = corresK_subst_left[OF Let_def[THEN meta_eq_to_obj_eq]] -lemmas corresK_Let_right[corres_splits] = corresK_subst_right[OF Let_def[THEN meta_eq_to_obj_eq]] - -lemmas corresK_return_bind_left[corres_splits] = corresK_subst_left[OF return_bind] -lemmas corresK_return_bind_right[corres_splits] = corresK_subst_right[OF return_bind] - -lemmas corresK_liftE_bindE_left[corres_splits] = corresK_subst_left[OF liftE_bindE] -lemmas corresK_liftE_bindE_right[corres_splits] = corresK_subst_right[OF liftE_bindE] - -lemmas corresK_K_bind_left[corres_splits] = - corresK_subst_left[where g="K_bind f rv" and f="f" for f rv, # \simp\] - -lemmas corresK_K_bind_right[corres_splits] = - corresK_subst_right[where g'="K_bind f' rv" and f'="f'" for f' rv, # \simp\] - - -section \Corres Search - find symbolic execution path that allows a given rule to be applied\ - -lemma corresK_if [corres_splits]: - "\(corres_protect G \ corres_protect G' \ corres_underlyingK sr nf nf' F r P P' a c); - (corres_protect (\G) \ corres_protect (\G') \ corres_underlyingK sr nf nf' F' r Q Q' b d)\ -\ corres_underlyingK sr nf nf' ((G = G') \ (G \ F) \ (\G \ F')) r (if G then P else Q) (if G' then P' else Q') (if G then a else b) - (if G' then c else d)" - by (simp add: corres_underlying_def corres_underlyingK_def corres_protect_def) - -lemma corresK_if_rev: - "\(corres_protect (\ G) \ corres_protect G' \ corres_underlyingK sr nf nf' F r P P' a c); - (corres_protect G \ corres_protect (\G') \ corres_underlyingK sr nf nf' F' r Q Q' b d)\ -\ corres_underlyingK sr nf nf' ((\ G = G') \ (\G \ F) \ (G \ F')) r (if G then Q else P) (if G' then P' else Q') (if G then b else a) - (if G' then c else d)" - by (simp add: corres_underlying_def corres_underlyingK_def 
corres_protect_def) - - - -named_theorems corres_symb_exec_ls and corres_symb_exec_rs - -lemma corresK_symb_exec_l_search[corres_symb_exec_ls]: - fixes x :: "'b \ 'a \ ('d \ 'a) set \ bool" - notes [simp] = corres_noop_def - shows - "\\s. \PP s\ m \\_. (=) s\; \rv. corres_underlyingK sr nf True (F rv) r (Q rv) P' (x rv) y; - corres_rv F' dc RR (\_. True) m (corres_noop) (\rv _. F rv); - empty_fail m; no_fail P m; \R\ m \Q\\ -\ corres_underlyingK sr nf True F' r (RR and P and R and (\s. \s'. s = s' \ PP s' s)) P' (m >>= x) y" - apply (clarsimp simp add: corres_underlyingK_def) - apply (rule corres_name_pre) - apply (clarsimp simp: corres_underlying_def corres_underlyingK_def - bind_def valid_def empty_fail_def no_fail_def) - apply (drule_tac x=a in meta_spec)+ - apply (drule_tac x=a in spec)+ - apply (drule mp, assumption)+ - apply (clarsimp simp: not_empty_eq) - apply (drule corres_rvD; (assumption | simp add: return_def)?) - apply (drule_tac x="(aa,ba)" in bspec,simp)+ - apply clarsimp - apply (drule_tac x=aa in meta_spec) - apply clarsimp - apply (drule_tac x="(ba,b)" in bspec,simp) - apply clarsimp - apply (drule mp, fastforce) - apply clarsimp - apply (drule_tac x="(a,bb)" in bspec,simp) - apply clarsimp - apply (rule_tac x="(aa,ba)" in bexI) - apply (clarsimp) - apply (rule_tac x="(ab,bc)" in bexI) - apply (clarsimp)+ - done - - -lemmas corresK_symb_exec_liftME_l_search[corres_symb_exec_ls] = - corresK_symb_exec_l_search[where 'd="'x + 'y", folded liftE_bindE] + corres_split + corres_splitEE -lemma corresK_symb_exec_r_search[corres_symb_exec_rs]: - fixes y :: "'b \ 'a \ ('e \ 'a) set \ bool" - assumes X: "\s. \PP' s\ m \\r. (=) s\" - assumes corres: "\rv. corres_underlyingK sr nf nf' (F rv) r P (Q' rv) x (y rv)" - assumes Y: "corres_rv F' dc (\_. True) RR (corres_noop) m (\_ rv'. F rv')" - assumes nf: "nf' \ no_fail P' m" - assumes Z: "\R\ m \Q'\" - notes [simp] = corres_noop_def - shows - "corres_underlyingK sr nf nf' F' r P (RR and P' and R and (\s. \s'. s = s' \ PP' s' s)) x (m >>= y)" - apply (insert corres) - apply (simp add: corres_underlyingK_def) - apply (rule impI) - apply (rule corres_name_pre) - apply (clarsimp simp: corres_underlying_def corres_underlyingK_def - bind_def valid_def empty_fail_def no_fail_def) - apply (intro impI conjI ballI) - apply clarsimp - apply (frule(1) use_valid[OF _ X]) - apply (drule corres_rvD[OF Y]; (assumption | simp add: return_def)?) - apply (frule(1) use_valid[OF _ Z]) - apply (drule_tac x=aa in meta_spec) - apply clarsimp - apply (drule_tac x="(a, ba)" in bspec,simp) - apply (clarsimp) - apply (drule(1) bspec) - apply clarsimp - apply clarsimp - apply (frule(1) use_valid[OF _ X]) - apply (drule corres_rvD[OF Y]; (assumption | simp add: return_def)?) - apply (frule(1) use_valid[OF _ Z]) - apply fastforce - apply (rule no_failD[OF nf],simp+) - done +lemmas corres_split_liftE_bindE [corres_splits] = + corres_splitEE[OF corres_liftE_rel_sum[THEN iffD2], simplified] -lemmas corresK_symb_exec_liftME_r_search[corres_symb_exec_rs] = - corresK_symb_exec_r_search[where 'e="'x + 'y", folded liftE_bindE] +(* corres_term are rules that are safe when all side conditions can be solved immediately -- they + might have guards like \ that are too weak in general, but if the goal can be solved with + that weak guard, the rule is safe. This enables us to solve trivial cases without adding + unsafe rules to the [corres] set. 
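+
+   For example, corres_return_eq_same below relates two return statements under (=) with
+   trivial guards; it is safe exactly when the returned values can be shown equal on the
+   spot, which is the situation the corres_term set is meant for.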
*) +lemmas [corres_term] = + corres_return_eq_same corres_gets_trivial select_corres_eq + corres_underlying_assert_assert -context begin +lemmas corres_returnOk_eq_same[corres_term] = corres_returnOkTT[of "(=)"] +lemmas corres_throwError_eq_same[corres_term] = corres_throwErrorTT[of "(=)"] -private method corres_search_wp = solves \((wp | wpc | simp)+)[1]\ - -text \ - Depth-first search via symbolic execution of both left and right hand - sides, handling case statements and - potentially mismatched if statements. The find_goal - method handles searching each branch of case or if statements, while - we rely on backtracking to guess the order of left/right executions. - - According to the above rules, a symbolic execution step can be taken - when the function can be shown to not modify the state. Questions - of wellformedness (i.e. empty_fail or no_fail) are deferred to the user - after the search concludes. -\ - - -private method corres_search_frame methods m uses search = - (#break "corres_search", - ((corres?, corres_once corres: search corresK:search) - | (corresc, find_goal \m\)[1] - | (rule corresK_if, find_goal \m\)[1] - | (rule corresK_if_rev, find_goal \m\)[1] - | (rule corres_symb_exec_ls, corres_search_wp, m) - | (rule corres_symb_exec_rs, corres_search_wp, m))) - -text \ - Set up local context where we make sure we don't know how to - corres our given rule. The search is finished when we can only - make corres progress once we add our rule back in -\ - -method corres_search uses search - declares corres corres_symb_exec_ls corres_symb_exec_rs = - (corres_pre, - use search[corres del] search[corresK del] search[corres_splits del] in - \use in \corres_search_frame \corres_search search: search\ search: search\\)[1] - -end - -chapter \Misc Helper Lemmas\ - - -lemma corresK_assert[corresK]: - "corres_underlyingK sr nf nf' ((nf' \ Q) \ P) dc \ \ (assert P) (assert Q)" - by (auto simp add: corres_underlyingK_def corres_underlying_def return_def assert_def fail_def) - -lemma corres_stateAssert_implied_frame: - assumes A: "\s s'. (s, s') \ sr \ F' \ P' s \ Q' s' \ A s'" - assumes C: "\x. corres_underlyingK sr nf nf' F r P Q f (g x)" - shows - "corres_underlyingK sr nf nf' (F \ F') r (P and P') (Q and Q') f (stateAssert A [] >>= g)" - apply (clarsimp simp: bind_assoc stateAssert_def) - apply (corres_search search: C[THEN corresK_unlift]) - apply (wp corres_rv_defer | simp add: A)+ - done - -lemma corresK_return [corres_concrete_r]: - "corres_underlyingK sr nf nf' (r a b) r \ \ (return a) (return b)" - by (simp add: corres_underlyingK_def) - -lemma corres_throwError_str [corres_concrete_rER]: - "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throwError b)" - by (simp add: corres_underlyingK_def)+ - -section \Error Monad\ - - - -lemma corresK_splitE [corres_splits]: - assumes x: "corres_underlyingK sr nf nf' F (f \ r') P P' a c" - assumes y: "\rv rv'. corres_protect (r' rv rv') \ corres_underlyingK sr nf nf' (F' rv rv') (f \ r) (R rv) (R' rv') (b rv) (d rv')" - assumes c: "corres_rvE_R F'' r' PP PP' a c F'" - assumes z: "\Q\ a \R\, -" "\Q'\ c \R'\, -" - shows "corres_underlyingK sr nf nf' (F \ F'') (f \ r) (PP and P and Q) (PP' and P' and Q') (a >>=E (\rv. b rv)) (c >>=E (\rv'. d rv'))" - unfolding bindE_def - apply (rule corresK_weakenK) - apply (rule corresK_split[OF x, where F'="\rv rv'. 
case (rv,rv') of (Inr rva, Inr rva') \ F' rva rva' | _ \ True"]) - apply (simp add: corres_protect_def) - prefer 2 - apply simp - apply (rule corres_rv_prove[where F=F'']) - apply (case_tac rv; case_tac rv'; simp) - apply (rule corres_rvE_RD[OF c]; assumption) - apply (case_tac rv; case_tac rv'; simp) - apply (simp add: corres_underlyingK_def corres_protect_def) - apply (rule corresK_weaken) - apply (rule y) - apply (simp add: corres_protect_def) - apply (subst conj_assoc[symmetric]) - apply (rule conjI) - apply (rule conjI) - apply (subgoal_tac "(case (Inr b) of (Inr b) \ R b s - | _ \ True)"; assumption?) - apply (subgoal_tac "(case (Inr ba) of (Inr ba) \ R' ba s' - | _ \ True)"; assumption?) - apply clarsimp+ - apply (insert z) - by ((fastforce simp: valid_def validE_def validE_R_def split: sum.splits)+) - -lemma corresK_returnOk [corres_concrete_r]: - "corres_underlyingK sr nf nf' (r (Inr a) (Inr b)) r \ \ (returnOk a) (returnOk b)" - by (simp add: returnOk_def corres_underlyingK_def) - -lemma corres_assertE_str[corresK]: - "corres_underlyingK sr nf nf' ((nf' \ Q) \ P) (f \ dc) \ \ (assertE P) (assertE Q)" - by (auto simp add: corres_underlying_def corres_underlyingK_def returnOk_def return_def assertE_def fail_def) - -lemmas corres_symb_exec_whenE_l_search[corres_symb_exec_ls] = - corresK_symb_exec_l_search[where 'd="'x + 'y", folded liftE_bindE] - -lemmas corres_returnOk_liftEs - [folded returnOk_liftE, THEN iffD2, atomized, THEN corresK_lift_rule, rule_format, corresK] = - corres_liftE_rel_sum[where m="return x" for x] - corres_liftE_rel_sum[where m'="return x" for x] - - -(* Failure *) - -lemma corresK_fail[corresK]: - "corres_underlyingK sr nf True False r P P' f fail" - by (simp add: corres_underlyingK_def) - -lemma corresK_fail_no_fail'[corresK]: - "corres_underlyingK sr nf False True (\_ _. False) (\_. True) (\_. True) f fail" - apply (simp add: corres_underlyingK_def) - by (fastforce intro!: corres_fail) - -section \Correswp\ - -text - \This method wraps up wp and wpc to ensure that they don't accidentally generate schematic - assumptions when handling hoare triples that emerge from corres proofs. - This is partially due to wp not being smart enough to avoid applying certain wp_comb rules - when the precondition is schematic, and arises when the schematic precondition doesn't have - access to some meta-variables in the postcondition. - - To solve this, instead of meta-implication in the wp_comb rules we use corres_inst_eq, which - can only be solved by reflexivity. In most cases these comb rules are either never applied or - solved trivially. If users manually apply corres_rv rules to create postconditions with - inaccessible meta-variables (@{method corres_rv} will never do this), then these rules will - be used. 
Since @{method corres_inst} has access to the protected return-value relation, it has a chance - to unify the generated precondition with the original schematic one.\ - -named_theorems correswp_wp_comb and correswp_wp_comb_del - -lemma corres_inst_eq_imp: - "corres_inst_eq A B \ A \ B" by (simp add: corres_inst_eq_def) - -lemmas corres_hoare_pre = hoare_pre[# \-\ \atomize (full), rule allI, rule corres_inst_eq_imp\] - -method correswp uses wp = - (determ \ - (fails \schematic_hoare_pre\, (wp add: wp | wpc)) - | (schematic_hoare_pre, - (use correswp_wp_comb [wp_comb] - correswp_wp_comb_del[wp_comb del] - hoare_pre[wp_pre del] - corres_hoare_pre[wp_pre] - in - \use in \wp add: wp | wpc\\))\) - -lemmas [correswp_wp_comb_del] = - hoare_vcg_precond_imp - hoare_vcg_precond_impE - hoare_vcg_precond_impE_R - -lemma corres_inst_conj_lift[correswp_wp_comb]: - "\\R\ f \Q\; \P'\ f \Q'\; \s. corres_inst_eq (R s) (P s)\ \ - \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - by (rule hoare_vcg_conj_lift; simp add: valid_def corres_inst_eq_def) - -lemmas [correswp_wp_comb] = - correswp_wp_comb_del[# \-\ \atomize (full), rule allI, rule corres_inst_eq_imp\] - valid_validE_R - hoare_vcg_R_conj[OF valid_validE_R] - hoare_vcg_E_elim[OF valid_validE_E] - hoare_vcg_E_conj[OF valid_validE_E] - validE_validE_R - hoare_vcg_R_conj - hoare_vcg_E_elim - hoare_vcg_E_conj - hoare_vcg_conj_lift - -declare hoare_post_comb_imp_conj[correswp_wp_comb_del] - -section \Corressimp\ -text \Combines corres, wp and clarsimp\ - -text -\If clarsimp solves a terminal subgoal, its preconditions are left uninstantiated. We can -try to catch this by first attempting a trivial instantiation before invoking clarsimp, but -only keeping the result if clarsimp solves the goal,\ - -lemmas hoare_True_inst = - hoare_pre[where P="\_. True", of "\_. True", # \-\ \simp\] - asm_rl[of "\\_. True\ f \E\,\R\" for f E R] - -lemmas corres_rv_True_inst = - asm_rl[of "corres_rv True r (\_. True) (\_. True) f f' Q" for r f f' Q] - asm_rl[of "corres_rvE_R True r (\_. True) (\_. True) f f' Q" for r f f' Q] - -lemmas corresK_True_inst = - asm_rl[of "corres_underlyingK sr nf nf' True dc (\_. True) (\_. True) f g" for sr nf nf' f g] - asm_rl[of "corres_underlyingK sr nf nf' True r (\_. True) (\_. True) f g" for sr nf nf' r f g] - asm_rl[of "corres_underlying sr nf nf' dc (\_. True) (\_. True) f g" for sr nf nf' f g] - asm_rl[of "corres_underlying sr nf nf' r (\_. True) (\_. True) f g" for sr nf nf' r f g] - -lemmas calculus_True_insts = hoare_True_inst corres_rv_True_inst corresK_True_inst - -method corressimp uses simp cong search wp - declares corres corresK corres_splits corresc_simp = - ((no_schematic_concl, - (corres corresc_simp: simp - | correswp wp: wp - | (rule calculus_True_insts, solves \clarsimp cong: cong simp: simp corres_protect_def\) - | clarsimp cong: cong simp: simp simp del: corres_simp_del split del: if_split - | (match search in _ \ \corres_search search: search\)))+)[1] - -declare corres_return[corres_simp_del] - -section \Normalize corres rule into corresK rule\ - -lemma corresK_convert: - "A \ corres_underlying sr nf nf' r P Q f f' \ - corres_underlyingK sr nf nf' A r P Q f f'" - by (auto simp add: corres_underlyingK_def) - -method corresK_convert = (((drule uncurry)+)?, drule corresK_convert corresK_drop) - -section \Lifting corres results into wp proofs\ - -lemma use_corresK': - "corres_underlyingK sr False nf' F r PP PP' f f' \ \P\ f \Q\ \ - \K F and PP' and ex_abs_underlying sr (PP and P)\ f' \\rv' s'. \rv. 
r rv rv' \ ex_abs_underlying sr (Q rv) s'\" - by (fastforce simp: corres_underlying_def corres_underlyingK_def valid_def ex_abs_underlying_def) - -lemma use_corresK [wp]: - "corres_underlyingK sr False nf' F r PP PP' f f' \ \P\ f \\rv s. \rv'. r rv rv' \ Q rv' s\ \ - \K F and PP' and ex_abs_underlying sr (PP and P)\ f' \\rv'. ex_abs_underlying sr (Q rv')\" - apply (fastforce simp: corres_underlying_def corres_underlyingK_def valid_def ex_abs_underlying_def) - done - -lemma hoare_add_post': - "\\P'\ f \Q'\; \P''\ f \\rv s. Q' rv s \ Q rv s\\ \ \P' and P''\ f \Q\" - by (fastforce simp add: valid_def) - -lemma use_corresK_frame: - assumes corres: "corres_underlyingK sr False nf' F r PP P' f f'" - assumes frame: "(\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s')" - assumes valid: "\P\ f \Q\" - assumes valid': "\PP'\ f' \Q'\" - shows "\K F and P' and PP' and ex_abs_underlying sr (PP and P)\ f' \QQ'\" - apply (rule hoare_pre) - apply (rule hoare_add_post'[OF valid']) - apply (rule hoare_strengthen_post) - apply (rule use_corresK'[OF corres valid]) - apply (insert frame)[1] - apply (clarsimp simp: ex_abs_underlying_def) - apply clarsimp - done - -lemma use_corresK_frame_E_R: - assumes corres: "corres_underlyingK sr False nf' F (lf \ r) PP P' f f'" - assumes frame: "(\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s')" - assumes valid: "\P\ f \Q\, -" - assumes valid': "\PP'\ f' \Q'\, -" - shows "\K F and P' and PP' and ex_abs_underlying sr (PP and P)\ f' \QQ'\, -" - apply (simp only: validE_R_def validE_def) - apply (rule use_corresK_frame[OF corres _ valid[simplified validE_R_def validE_def] valid'[simplified validE_R_def validE_def]]) - by (auto simp: frame split: sum.splits) - -lemma K_True: "K True = (\_. True)" by simp -lemma True_And: "((\_. True) and P) = P" by simp - -method use_corres uses frame = - (corresK_convert?, drule use_corresK_frame use_corresK_frame_E_R, rule frame, - (solves \wp\ | defer_tac), (solves \wp\ | defer_tac), (simp only: True_And K_True)?) - -experiment - fixes sr nf' r P P' f f' F G Q Q' QQ' PP PP' g g' - assumes f_corres[corres]: "G \ F \ corres_underlying sr False True r P P' f f'" and - g_corres[corres]: "corres_underlying sr False True dc \ \ g g'" and - wpl [wp]: "\PP\ f \Q\" and wpr [wp]: "\PP'\ f' \Q'\" - and [wp]: "\P\ g \\_. P\" "\PP\ g \\_. PP\" "\P'\ g' \\_. P'\" "\PP'\ g' \\_. PP'\" and - frameA: "\s s' rv rv'. (s,s') \ sr \ r rv rv' \ Q rv s \ Q' rv' s' \ QQ' rv' s'" - begin - - lemmas f_Q' = f_corres[atomized, @\use_corres frame: frameA\] - - lemma "G \ F \ corres_underlying sr False True dc (P and PP) (P' and PP') - (g >>= (K (f >>= K (assert True)))) (g' >>= (K (f' >>= (\rv'. (stateAssert (QQ' rv') [])))))" - apply (simp only: stateAssert_def K_def) - apply corres - apply (corres_search search: corresK_assert) - apply corres_rv - apply (correswp | simp)+ - apply corres_rv - apply (correswp wp: f_Q' | simp)+ - apply corressimp+ - by auto - -end - -section \Corres Argument lifting\ - -text \Used for rewriting corres rules with syntactically equivalent arguments\ - -lemma lift_args_corres: - "corres_underlying sr nf nf' r (P x) (P' x) (f x) (f' x) \ x = x' \ - corres_underlying sr nf nf' r (P x) (P' x') (f x) (f' x')" by simp - -method lift_corres_args = - (match premises in - H[thin]:"corres_underlying _ _ _ _ (P x) (P' x) (f x) (f' x)" (cut 5) for P P' f f' x \ - \match (f) in "\_. g" for g \ \fail\ \ _ \ - \match (f') in "\_. 
g'" for g' \ \fail\ \ _ \ - \cut_tac lift_args_corres[where f=f and f'=f' and P=P and P'=P', OF H]\\\)+ - -(* Use calculational rules. Don't unfold the definition! *) -lemmas corres_rv_def_I_know_what_I'm_doing = corres_rv_def -lemmas corres_rvE_R_def_I_know_what_I'm_doing = corres_rvE_R_def - -hide_fact corres_rv_def -hide_fact corres_rvE_R_def +lemma corres_get_trivial[corres_term]: + "corres_underlying sr nf nf' (\s s'. (s,s') \ sr) \ \ get get" + by simp -end +lemmas corres_underlying_stateAssert_stateAssert_trivial[corres_term] = + corres_underlying_stateAssert_stateAssert[where P=\ and P'=\, simplified] + +lemma corres_modify_tivial[corres_term]: + "(\s s'. (s, s') \ sr \ (f s, g s') \ sr) \ + corres_underlying sr nf nf' dc \ \ (modify f) (modify g)" + by (simp add: corres_modify) + +(* Regular corres rules are rules where we expect side conditions to be solvable once the rule + matches, but those side conditions might be too hard for automation, so they must be safe to + leave over for later manual proof. *) +lemmas [corres] = + corres_underlying_fail_fail + corres_fail + corres_assert + whenE_throwError_corres (* match this before corres_whenE *) + corres_whenE + corres_when + + (* not in corres_split, because head is usually not solvable by single rule: *) + corres_split_handle + corres_split_catch + corres_if2 + +(* Transform corres terms when no other rules match: *) +lemmas [corres_simp] = + liftE_bindE + unless_when + unlessE_whenE + +end \ No newline at end of file diff --git a/lib/Corres_UL.thy b/lib/Corres_UL.thy index 8aa7c79596..8bd12610f1 100644 --- a/lib/Corres_UL.thy +++ b/lib/Corres_UL.thy @@ -72,39 +72,39 @@ lemma corres_underlying_serial: apply auto done -(* FIXME: duplicated with HOL.iff_allI *) -lemma All_eqI: - assumes ass: "\x. A x = B x" - shows "(\x. A x) = (\x. B x)" - apply (subst ass) - apply (rule refl) - done - lemma corres_singleton: "corres_underlying sr nf nf' r P P' (\s. ({(R s, S s)},x)) (\s. ({(R' s, S' s)},False)) = (\s s'. P s \ P' s' \ (s, s') \ sr \ (nf \ \ x) \ ((S s, S' s') \ sr \ r (R s) (R' s')))" by (auto simp: corres_underlying_def) -lemma corres_return[simp]: +(* Lemmas that should not be [simp] inside automated corres methods. + Shared between Corres_Method and CorresK_Method. *) +named_theorems corres_no_simp + +(* Safe terminal corres rules that instantiate return relation and guards. + Shared between Corres_Method and CorresK_Method. *) +named_theorems corres + +(* Terminal corres rules that instantiate return relation and guards and that are safe if side + conditions case be solved immediately. Used in Corres_Method. *) +named_theorems corres_term + +lemma corres_return[simp, corres_no_simp]: "corres_underlying sr nf nf' r P P' (return a) (return b) = ((\s s'. P s \ P' s' \ (s, s') \ sr) \ r a b)" by (simp add: return_def corres_singleton) -lemma corres_get[simp]: - "corres_underlying sr nf nf' r P P' get get = - (\ s s'. (s, s') \ sr \ P s \ P' s' \ r s s')" - apply (simp add: get_def corres_singleton) - apply (rule All_eqI)+ - apply safe - done +lemma corres_get[simp, corres_no_simp]: + "corres_underlying sr nf nf' r P P' get get = (\ s s'. (s, s') \ sr \ P s \ P' s' \ r s s')" + by (fastforce simp: get_def corres_singleton) -lemma corres_gets[simp]: +lemma corres_gets[simp, corres_no_simp]: "corres_underlying sr nf nf' r P P' (gets a) (gets b) = (\ s s'. 
P s \ P' s' \ (s, s') \ sr \ r (a s) (b s'))" by (simp add: simpler_gets_def corres_singleton) -lemma corres_throwError[simp]: +lemma corres_throwError[simp, corres_no_simp]: "corres_underlying sr nf nf' r P P' (throwError a) (throwError b) = ((\s s'. P s \ P' s' \ (s, s') \ sr) \ r (Inl a) (Inl b))" by (simp add: throwError_def) @@ -301,7 +301,13 @@ end text \The guard weakening rule\ -lemma stronger_corres_guard_imp: +named_theorems corres_pre +(* Introduce schematic corres guards; fail if already schematic *) +method corres_pre0 = WP_Pre.pre_tac corres_pre +(* Optionally introduce schematic corres guards *) +method corres_pre = corres_pre0? + +lemma stronger_corres_guard_imp[corres_pre]: assumes x: "corres_underlying sr nf nf' r Q Q' f g" assumes y: "\s s'. \ P s; P' s'; (s, s') \ sr \ \ Q s" assumes z: "\s s'. \ P s; P' s'; (s, s') \ sr \ \ Q' s'" @@ -312,12 +318,28 @@ lemma corres_guard_imp: assumes x: "corres_underlying sr nf nf' r Q Q' f g" assumes y: "\s. P s \ Q s" "\s. P' s \ Q' s" shows "corres_underlying sr nf nf' r P P' f g" - apply (rule stronger_corres_guard_imp) + apply corres_pre apply (rule x) apply (simp add: y)+ done -lemma corres_rel_imp: +lemma corres_guard_imp2: + "\corres_underlying sr nf nf' r Q P' f g; \s. P s \ Q s\ + \ corres_underlying sr nf nf' r P P' f g" + by corres_pre +(* FIXME: names\ (cf. corres_guard2_imp below) *) +lemmas corres_guard1_imp = corres_guard_imp2 + +lemma corres_guard2_imp: + "\corres_underlying sr nf nf' r P Q' f g; \s. P' s \ Q' s\ + \ corres_underlying sr nf nf' r P P' f g" + by corres_pre + +named_theorems corres_rrel_pre +(* Introduce schematic return relation, fail if already schematic *) +method corres_rrel_pre = WP_Pre.pre_tac corres_rrel_pre + +lemma corres_rel_imp[corres_rrel_pre]: assumes x: "corres_underlying sr nf nf' r' P P' f g" assumes y: "\x y. r' x y \ r x y" shows "corres_underlying sr nf nf' r P P' f g" @@ -436,6 +458,18 @@ lemma corres_splitEE_forwards: lemmas corres_splitEE_forwards' = corres_splitEE_forwards[where P=P and Q=P and P'=P' and Q'=P' and R=Q and R'=Q' for P P' Q Q', simplified] +lemma corres_splitEE_prod: + assumes x: "corres_underlying sr nf nf' (f \ r') P P' a c" + assumes y: "\x y x' y'. r' (x, y) (x', y') + \ corres_underlying sr nf nf' (f \ r) (R x y) (R' x' y') (b x y) (d x' y')" + assumes z: "\Q\ a \\(x, y). R x y \,\\\\" "\Q'\ c \\(x, y). R' x y\,\\\\" + shows "corres_underlying sr nf nf' (f \ r) (P and Q) (P' and Q') (a >>=E (\(x, y). b x y)) (c >>=E (\(x, y). d x y))" + using assms + apply (unfold bindE_def validE_def) + apply (rule corres_split[rotated 2], assumption+) + apply (fastforce simp: lift_def y split: sum.splits) + done + lemma corres_split_handle: assumes "corres_underlying sr nf nf' (f' \ r) P P' a c" assumes y: "\ft ft'. f' ft ft' @@ -585,32 +619,30 @@ lemma abs_ex_lift_corres: text \Some equivalences about liftM and other useful simps\ -lemma snd_liftM [simp]: - "snd (liftM t f s) = snd (f s)" - by (auto simp: liftM_def bind_def return_def) +(* These rules are declared [simp], which in hindsight was not a good decision, because they + change the return relation which often is schematic when these rules apply in the goal. + In those circumstances it is usually safer to unfold liftM_def and proceed with the resulting + substituted term. 
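   An added illustration (not part of the original note; f, g, t and ?r are placeholders): with a
   schematic return relation ?r, simp rewrites a goal
     corres_underlying sr nf nf' ?r P P' (liftM t f) g
   into
     corres_underlying sr nf nf' (?r \<circ> t) P P' f g
   so every later instantiation of the return relation is forced through the composition with t.
   Unfolding liftM_def instead leaves ?r untouched and merely exposes the underlying bind and return.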
+ (We leave the [simp] attribute here, because too many proofs now depend on it) +*) lemma corres_liftM_simp[simp]: - "(corres_underlying sr nf nf' r P P' (liftM t f) g) - = (corres_underlying sr nf nf' (r \ t) P P' f g)" - apply (simp add: corres_underlying_def - handy_liftM_lemma Ball_def Bex_def) - apply (rule All_eqI)+ - apply blast - done + "corres_underlying sr nf nf' r P P' (liftM t f) g = + corres_underlying sr nf nf' (r \ t) P P' f g" + by (fastforce simp add: corres_underlying_def in_liftM) lemma corres_liftM2_simp[simp]: - "corres_underlying sr nf nf' r P P' f (liftM t g) = - corres_underlying sr nf nf' (\x. r x \ t) P P' f g" - apply (simp add: corres_underlying_def - handy_liftM_lemma Ball_def) - apply (rule All_eqI)+ - apply blast - done + "corres_underlying sr nf nf' r P P' f (liftM t g) = + corres_underlying sr nf nf' (\x. r x \ t) P P' f g" + by (fastforce simp add: corres_underlying_def in_liftM) lemma corres_liftE_rel_sum[simp]: - "corres_underlying sr nf nf' (f \ r) P P' (liftE m) (liftE m') = corres_underlying sr nf nf' r P P' m m'" + "corres_underlying sr nf nf' (f \ r) P P' (liftE m) (liftE m') = + corres_underlying sr nf nf' r P P' m m'" by (simp add: liftE_liftM o_def) +lemmas corres_liftE_lift = corres_liftE_rel_sum[THEN iffD2] + text \Support for proving correspondence to noop with hoare triples\ lemma corres_noop: @@ -674,24 +706,12 @@ text \Support for dividing correspondence along lemma corres_disj_division: "\ P \ Q; P \ corres_underlying sr nf nf' r R S x y; Q \ corres_underlying sr nf nf' r T U x y \ \ corres_underlying sr nf nf' r (\s. (P \ R s) \ (Q \ T s)) (\s. (P \ S s) \ (Q \ U s)) x y" - apply safe - apply (rule corres_guard_imp) - apply simp - apply simp - apply simp - apply (rule corres_guard_imp) - apply simp - apply simp - apply simp - done + by (safe; corres_pre, simp+) lemma corres_weaker_disj_division: "\ P \ Q; P \ corres_underlying sr nf nf' r R S x y; Q \ corres_underlying sr nf nf' r T U x y \ \ corres_underlying sr nf nf' r (R and T) (S and U) x y" - apply (rule corres_guard_imp) - apply (rule corres_disj_division) - apply simp+ - done + by (corres_pre, rule corres_disj_division, simp+) lemma corres_symmetric_bool_cases: "\ P = P'; \ P; P' \ \ corres_underlying srel nf nf' r Q Q' f g; @@ -710,7 +730,7 @@ lemma corres_symb_exec_l: assumes y: "\P\ m \Q\" assumes nf: "nf' \ no_fail P m" shows "corres_underlying sr nf nf' r P P' (m >>= (\rv. x rv)) y" - apply (rule corres_guard_imp) + apply corres_pre apply (subst gets_bind_ign[symmetric], rule corres_split[OF _ z]) apply (rule corres_noop2) apply (erule x) @@ -728,7 +748,7 @@ lemma corres_symb_exec_r: assumes x: "\s. P' s \ \(=) s\ m \\r. (=) s\" assumes nf: "nf' \ no_fail P' m" shows "corres_underlying sr nf nf' r P P' x (m >>= (\rv. y rv))" - apply (rule corres_guard_imp) + apply corres_pre apply (subst gets_bind_ign[symmetric], rule corres_split[OF _ z]) apply (rule corres_noop2) apply (simp add: simpler_gets_def exs_valid_def) @@ -753,7 +773,7 @@ proof - apply (erule nf) done show ?thesis - apply (rule corres_guard_imp) + apply corres_pre apply (subst return_bind[symmetric], rule corres_split [OF P]) apply (rule z) @@ -840,6 +860,22 @@ lemma corres_trivial: "corres_underlying sr nf nf' r \ \ f g \ corres_underlying sr nf nf' r \ \ f g" by assumption +lemma corres_underlying_trivial_gen: + "\ nf' \ no_fail P' f; \x. 
rr x x \ \ + corres_underlying Id nf nf' rr P P' f f" + by (auto simp add: corres_underlying_def Id_def no_fail_def) + +lemma corres_underlying_trivial[corres]: + "\ nf' \ no_fail P' f \ \ corres_underlying Id nf nf' (=) \ P' f f" + by (erule corres_underlying_trivial_gen, simp) + +(* Instance of corres_underlying_trivial for unit type with dc instead of (=) as return relation, + for nicer return relation instantiation. *) +lemma corres_underlying_trivial_dc[corres]: + "(nf' \ no_fail P' f) \ corres_underlying Id nf nf' dc (\_. True) P' f f" + for f :: "('s, unit) nondet_monad" + by (fastforce intro: corres_underlying_trivial corres_rrel_pre) + lemma corres_assume_pre: assumes R: "\s s'. \ P s; Q s'; (s,s') \ sr \ \ corres_underlying sr nf nf' r P Q f g" shows "corres_underlying sr nf nf' r P Q f g" @@ -849,24 +885,13 @@ lemma corres_assume_pre: apply blast done -lemma corres_guard_imp2: - "\corres_underlying sr nf nf' r Q P' f g; \s. P s \ Q s\ \ corres_underlying sr nf nf' r P P' f g" - by (blast intro: corres_guard_imp) -(* FIXME: names\ (cf. corres_guard2_imp below) *) -lemmas corres_guard1_imp = corres_guard_imp2 - -lemma corres_guard2_imp: - "\corres_underlying sr nf nf' r P Q' f g; \s. P' s \ Q' s\ - \ corres_underlying sr nf nf' r P P' f g" - by (drule (1) corres_guard_imp[where P'=P' and Q=P], assumption+) - lemma corres_initial_splitE: "\ corres_underlying sr nf nf' (f \ r') P P' a c; \rv rv'. r' rv rv' \ corres_underlying sr nf nf' (f \ r) (Q rv) (Q' rv') (b rv) (d rv'); \P\ a \Q\, \\r s. True\; \P'\ c \Q'\, \\r s. True\\ \ corres_underlying sr nf nf' (f \ r) P P' (a >>=E b) (c >>=E d)" - apply (rule corres_guard_imp) + apply corres_pre apply (erule corres_splitEE) apply fastforce+ done @@ -928,6 +953,17 @@ lemma corres_stateAssert_implied: apply (wp | rule no_fail_pre)+ done +lemma corres_stateAssert_r: + "corres_underlying sr nf nf' r P Q f (g ()) \ + corres_underlying sr nf nf' r P (Q and P') f (stateAssert P' [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply (rule corres_symb_exec_r [OF _ get_sp]) + apply (rule corres_assert_assume) + apply (rule corres_assume_pre) + apply (erule corres_guard_imp, clarsimp+) + apply (wp | rule no_fail_pre)+ + done + lemma corres_assert: "corres_underlying sr nf nf' dc (%_. P) (%_. Q) (assert P) (assert Q)" by (clarsimp simp add: corres_underlying_def return_def) @@ -1026,6 +1062,31 @@ lemma corres_assert_opt_assume: by (auto simp: bind_def assert_opt_def assert_def fail_def return_def corres_underlying_def split: option.splits) +lemma corres_assert_opt[corres]: + "r x x' \ + corres_underlying sr nf nf' (\x x'. r (Some x) x') (\s. x \ None) \ (assert_opt x) (return x')" + unfolding corres_underlying_def + by (clarsimp simp: assert_opt_def return_def split: option.splits) + +lemma assert_opt_assert_corres[corres]: + "(x = None) = (x' = None) \ + corres_underlying sr nf nf' (\y _. x = Some y) (K (x \ None)) \ + (assert_opt x) (assert (\y. x' = Some y))" + by (simp add: corres_underlying_def assert_opt_def return_def split: option.splits) + +lemma corres_assert_opt_l: + assumes "\x. P' = Some x \ corres_underlying sr nf nf' r (P x) Q (f x) g" + shows "corres_underlying sr nf nf' r (\s. \x. P' = Some x \ P x s) Q (assert_opt P' >>= f) g" + using assms + by (auto simp: bind_def assert_opt_def assert_def fail_def return_def corres_underlying_def + split: option.splits) + +lemma corres_gets_the_gets: + "corres_underlying sr False nf' r P P' (gets_the f) f' \ + corres_underlying sr nf nf' (\x x'. 
x \ None \ r (the x) x') P P' (gets f) f'" + apply (simp add: gets_the_def bind_def simpler_gets_def assert_opt_def) + apply (fastforce simp: corres_underlying_def in_monad split: option.splits) + done text \Support for proving correspondance by decomposing the state relation\ @@ -1077,9 +1138,15 @@ lemma corres_returnOk: apply wp done -lemmas corres_returnOkTT = corres_trivial [OF corres_returnOk] +lemma corres_returnOkTT: + "r x y \ corres_underlying sr nf nf' (r' \ r) \ \ (returnOk x) (returnOk y)" + by (simp add: corres_returnOk) + +lemma corres_throwErrorTT: + "r x y \ corres_underlying sr nf nf' (r \ r') \ \ (throwError x) (throwError y)" + by simp -lemma corres_False [simp]: +lemma corres_False [simp, corres_no_simp]: "corres_underlying sr nf nf' r P \ f f'" by (simp add: corres_underlying_def) @@ -1135,7 +1202,7 @@ next show ?case apply (simp add: mapME_x_def sequenceE_x_def) apply (fold mapME_x_def sequenceE_x_def dc_def) - apply (rule corres_guard_imp) + apply corres_pre apply (rule corres_splitEE) apply (rule x) apply (rule IH) @@ -1365,7 +1432,7 @@ lemma corres_stateAssert_implied2: assumes g: "\Q\ g \\_. R'\" shows "corres_underlying sr nf nf' dc P Q f (g >>= (\_. stateAssert Q' []))" apply (subst bind_return[symmetric]) - apply (rule corres_guard_imp) + apply corres_pre apply (rule corres_split) apply (rule c) apply (clarsimp simp: corres_underlying_def return_def @@ -1433,16 +1500,15 @@ lemma corres_symb_exec_catch_r: apply (simp split: sum.split_asm) done -lemma corres_return_eq_same: - "a = b \ corres_underlying srel nf' nf (=) \ \ (return a) (return b)" - apply (simp add: corres_underlying_def return_def) - done +lemma corres_returnTT: + "r a b \ corres_underlying sr nf nf' r \ \ (return a) (return b)" + by simp + +lemmas corres_return_eq_same = corres_returnTT[of "(=)"] lemmas corres_discard_r = corres_symb_exec_r [where P'=P' and Q'="\_. P'" for P', simplified] -lemmas corres_returnTT = corres_return[where P=\ and P'=\, THEN iffD2] - lemma corres_assert_gen_asm: "\ F \ corres_underlying sr nf nf' r P Q f (g ()) \ \ corres_underlying sr nf nf' r (P and (\_. F)) Q f (assert F >>= g)" @@ -1469,6 +1535,19 @@ lemma corres_add_guard: corres_underlying sr nf nf' r Q Q' f g" by (auto simp: corres_underlying_def) +lemma corres_stateAssert_r_cross: + assumes A: "\s s'. (s, s') \ sr \ P' s \ Q' s' \ A s'" + assumes C: "corres_underlying sr nf nf' r P Q f (g ())" + shows + "corres_underlying sr nf nf' r (P and P') (Q and Q') f (stateAssert A [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply corres_pre + apply (rule corres_symb_exec_r) + apply (rule corres_assert_gen_asm2, rule C) + apply wpsimp+ + apply (simp add: A) + done + (* safer non-rewrite version of corres_gets *) lemma corres_gets_trivial: "\\s s'. (s,s') \ sr \ f s = f' s' \ @@ -1490,7 +1569,7 @@ lemma corres_underlying_assert_assert: lemma corres_underlying_stateAssert_stateAssert: assumes "\s s'. 
\ (s,s') \ rf_sr; P s; P' s' \ \ Q' s' = Q s" shows "corres_underlying rf_sr nf False dc P P' (stateAssert Q []) (stateAssert Q' [])" - by (auto simp: stateAssert_def get_def NonDetMonad.bind_def corres_underlying_def + by (auto simp: stateAssert_def get_def Nondet_Monad.bind_def corres_underlying_def assert_def return_def fail_def assms) (* We can ignore a stateAssert in the middle of a computation even if we don't ignore abstract @@ -1500,7 +1579,7 @@ lemma corres_stateAssert_no_fail: corres_underlying S False nf' r P Q (do v \ g; h v od) f \ \ corres_underlying S False nf' r P Q (do v \ g; _ \ stateAssert X []; h v od) f" apply (simp add: corres_underlying_def stateAssert_def get_def assert_def return_def no_fail_def fail_def cong: if_cong) - apply (clarsimp simp: split_def NonDetMonad.bind_def split: if_splits) + apply (clarsimp simp: split_def Nondet_Monad.bind_def split: if_splits) apply (erule allE, erule (1) impE) apply (drule (1) bspec, clarsimp)+ done diff --git a/lib/Crunch_Instances_NonDet.thy b/lib/Crunch_Instances_NonDet.thy index 81ea30b377..8ba1160844 100644 --- a/lib/Crunch_Instances_NonDet.thy +++ b/lib/Crunch_Instances_NonDet.thy @@ -8,8 +8,8 @@ theory Crunch_Instances_NonDet imports Crunch Monads.WPEx - Monads.Empty_Fail - Monads.No_Fail + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail begin lemmas [crunch_param_rules] = Let_def return_bind returnOk_bindE diff --git a/lib/Crunch_Instances_Trace.thy b/lib/Crunch_Instances_Trace.thy index 49ce6f403c..bf8acb44b1 100644 --- a/lib/Crunch_Instances_Trace.thy +++ b/lib/Crunch_Instances_Trace.thy @@ -7,7 +7,8 @@ theory Crunch_Instances_Trace imports Crunch - Monads.TraceMonadVCG + Monads.Trace_No_Fail + Monads.Trace_RG begin lemmas [crunch_param_rules] = Let_def return_bind returnOk_bindE diff --git a/lib/CutMon.thy b/lib/CutMon.thy index 774d3ba34a..e94097e38e 100644 --- a/lib/CutMon.thy +++ b/lib/CutMon.thy @@ -10,8 +10,8 @@ theory CutMon imports - Monads.Empty_Fail - Monads.NonDetMonadVCG + Monads.Nondet_Empty_Fail + Monads.Nondet_VCG begin definition diff --git a/lib/EVTutorial/EquivValidTutorial.thy b/lib/EVTutorial/EquivValidTutorial.thy index 55723d0054..afb87cfd49 100644 --- a/lib/EVTutorial/EquivValidTutorial.thy +++ b/lib/EVTutorial/EquivValidTutorial.thy @@ -55,7 +55,7 @@ In this sense, EquivValid statements could be thought of as \<^emph>\relat text \ This tutorial will introduce some syntactic sugar to emphasise the similarity between Hoare triples and EquivValid statements. -(Here, \\\\ is an alias provided by Monads.NonDetMonad for the trivial binary predicate, +(Here, \\\\ is an alias provided by Monads.Nondet\_Monad for the trivial binary predicate, which always returns \True\; similarly, there is also \\\\ for \False\.) 
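(A small added example, not from the original tutorial text: writing R for this trivial binary predicate, R a b = True holds for all a and b, so using R as the result relation of an EquivValid statement places no constraint at all on the return values of the two executions.)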
\ abbreviation diff --git a/lib/Eisbach_Tools/Apply_Debug.thy b/lib/Eisbach_Tools/Apply_Debug.thy index ac9104eea0..3a13e03dc1 100644 --- a/lib/Eisbach_Tools/Apply_Debug.thy +++ b/lib/Eisbach_Tools/Apply_Debug.thy @@ -484,14 +484,14 @@ fun maybe_bind st (_,[tok]) ctxt = val local_facts = Facts.dest_static true [(Proof_Context.facts_of target)] local_facts; - val _ = Token.assign (SOME (Token.Declaration (fn phi => - Data.put (SOME (phi,ctxt, {private_dyn_facts = private_dyns, local_facts = local_facts}))))) tok; + val _ = Token.assign (SOME (Token.Declaration (Morphism.entity (fn phi => + Data.put (SOME (phi,ctxt, {private_dyn_facts = private_dyns, local_facts = local_facts})))))) tok; in ctxt end else let val SOME (Token.Declaration decl) = Token.get_value tok; - val dummy_ctxt = decl Morphism.identity (Context.Proof ctxt); + val dummy_ctxt = Morphism.form decl (Context.Proof ctxt); val SOME (phi,static_ctxt,{private_dyn_facts, local_facts}) = Data.get dummy_ctxt; val old_facts = Proof_Context.facts_of static_ctxt; diff --git a/lib/Eisbach_Tools/Apply_Trace.thy b/lib/Eisbach_Tools/Apply_Trace.thy index dba9ed5052..3e7a0943e8 100644 --- a/lib/Eisbach_Tools/Apply_Trace.thy +++ b/lib/Eisbach_Tools/Apply_Trace.thy @@ -225,7 +225,7 @@ let val deps = case query of SOME (raw_query,pos) => let - val pos' = perhaps (try (Position.advance_offsets 1)) pos; + val pos' = perhaps (try (Position.shift_offsets {remove_id = false} 1)) pos; val q = Find_Theorems.read_query pos' raw_query; val results = Find_Theorems.find_theorems_cmd ctxt (SOME thm) (SOME 1000000000) false q |> snd diff --git a/lib/EquivValid.thy b/lib/EquivValid.thy index 9db7065021..3ab513ee3f 100644 --- a/lib/EquivValid.thy +++ b/lib/EquivValid.thy @@ -572,10 +572,10 @@ lemmas pre_ev = hoare_pre equiv_valid_guard_imp -subsection\Tom instantiates wpc\ +subsection\wpc setup\ lemma wpc_helper_equiv_valid: - "equiv_valid D A B Q f \ wpc_helper (P, P') (Q, Q') (equiv_valid D A B P f)" + "equiv_valid D A B Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (equiv_valid D A B P f)" using equiv_valid_guard_imp apply (simp add: wpc_helper_def) apply (blast) diff --git a/lib/ExtraCorres.thy b/lib/ExtraCorres.thy index 69ad87e9d5..8bf21db947 100644 --- a/lib/ExtraCorres.thy +++ b/lib/ExtraCorres.thy @@ -5,9 +5,18 @@ *) theory ExtraCorres -imports Corres_UL Monads.OptionMonadWP +imports Corres_UL\ \ Monads.OptionMonadWP\ begin +(* FIXME: the S in this rule is mainly to make the induction work, we don't actually need it in + application. This means, this form should be hidden and the main form should be resolving the + last assumption with order_refl. *) + +(* The lemma looks weaker than in it could be -- the guards P and P' are not allowed to depend on + list elements. This is fine, because P/P' are a loop invariants that need to be supplied + manually anyway, and we want these to be true for all loop iterations. An instance such as + "\s. \x \ set xs. P x s" is possible and covers the cases the (not really) stronger formulation + would cover. *) lemma corres_mapM: assumes x: "r [] []" assumes y: "\x xs y ys. \ r xs ys; r' x y \ \ r (x # xs) (y # ys)" @@ -69,6 +78,7 @@ next done qed +(* FIXME: see comment for mapM rule. Same applies for lemma strength *) lemma corres_mapM_x: assumes x: "\x y. (x, y) \ S \ corres_underlying sr nf nf' dc P P' (f x) (f' y)" assumes y: "\x y. (x, y) \ S \ \P\ f x \\rv. 
P\" @@ -83,6 +93,8 @@ lemma corres_mapM_x: apply (simp | wp)+ done +lemmas corres_mapM_x' = corres_mapM_x[OF _ _ _ _ order_refl] + lemma corres_mapM_scheme: assumes x: "r [] []" assumes z: "\x y. (x, y) \ S @@ -131,6 +143,7 @@ lemma corres_mapM_x_scheme: apply (wpsimp wp: y y' simp: z w v)+ done +(* FIXME: see comment for mapM rule. Same applies for lemma strength *) lemma corres_mapME: assumes x: "r [] []" assumes y: "\x xs y ys. \ r xs ys; r' x y \ \ r (x # xs) (y # ys)" @@ -317,17 +330,20 @@ lemma hoare_from_abs_inv: lemma in_whileLoop_corres: assumes body_corres: "\r r'. rrel r r' \ - corres_underlying srel False nf' rrel (P and C r) (P' and C' r') (B r) (B' r')" - and body_inv: "\r. \P and C r\ B r \\_. P\" - "\r'. \P' and C' r'\ B' r' \\_. P'\" - and cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P s; P' s'\ \ C r s = C' r' s'" - and result: "(rv', t') \ fst (whileLoop C' B' r' s')" - shows "\s r. (s, s') \ srel \ rrel r r' \ P s \ P' s' + corres_underlying srel nf nf' rrel (P r and C r) (P' r' and C' r') (B r) (B' r')" + assumes body_inv: + "\r. \P r and C r\ B r \P\" + "\r'. \P' r' and C' r'\ B' r' \P'\" + assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P r s; P' r' s'\ \ C r s = C' r' s'" + assumes result: "(rv', t') \ fst (whileLoop C' B' r' s')" + assumes nf: "\r. nf \ no_fail (P r and C r) (B r)" + shows "\s r. (s, s') \ srel \ rrel r r' \ P r s \ P' r' s' \ (\rv t. (rv, t) \ fst (whileLoop C B r s) \ (t, t') \ srel \ rrel rv rv')" apply (rule in_whileLoop_induct[OF result]) apply (force simp: cond whileLoop_def) apply clarsimp - apply (frule (1) corres_underlyingD2[OF body_corres]; (fastforce simp: cond)?) + apply (frule (1) corres_underlyingD2[OF body_corres]; + (fastforce dest: nf simp: cond no_fail_def)?) apply clarsimp apply (frule use_valid[OF _ body_inv(1)]) apply (fastforce dest: cond) @@ -336,21 +352,22 @@ lemma in_whileLoop_corres: apply (fastforce simp: whileLoop_def intro: whileLoop_results.intros(3) dest: cond) done -lemma corres_whileLoop: - assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P s; P' s'\ \ C r s = C' r' s'" - and body_corres: +lemma corres_whileLoop_ret: + assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P r s; P' r' s'\ \ C r s = C' r' s'" + assumes body_corres: "\r r'. rrel r r' \ - corres_underlying srel False nf' rrel (P and C r) (P' and C' r') (B r) (B' r')" - and body_inv: "\r. \P and C r\ B r \\_. P\" - "\r'. \P' and C' r'\ B' r' \\_. P'\" - and rel: "rrel r r'" - and nf': "\r'. no_fail (P' and C' r') (B' r')" - and termin: "\r' s'. \P' s'; C' r' s'\ \ whileLoop_terminates C' B' r' s'" - shows "corres_underlying srel False nf' rrel P P' (whileLoop C B r) (whileLoop C' B' r')" + corres_underlying srel False nf' rrel (P r and C r) (P' r' and C' r') (B r) (B' r')" + assumes body_inv: + "\r. \P r and C r\ B r \P\" + "\r'. \P' r' and C' r'\ B' r' \P'\" + assumes rel: "rrel r r'" + assumes nf': "\r'. no_fail (P' r' and C' r') (B' r')" + assumes termin: "\r' s'. \P' r' s'; C' r' s'\ \ whileLoop_terminates C' B' r' s'" + shows "corres_underlying srel False nf' rrel (P r) (P' r') (whileLoop C B r) (whileLoop C' B' r')" apply (rule corres_no_failI) apply (simp add: no_fail_def) apply (intro impI allI) - apply (erule_tac I="\_ s. P' s" + apply (erule_tac I="\r' s'. P' r' s'" and R="{((r', s'), r, s). C' r s \ (r', s') \ fst (B' r s) \ whileLoop_terminates C' B' r s}" in not_snd_whileLoop) @@ -369,82 +386,98 @@ lemma corres_whileLoop: apply (fastforce intro: assms) done +lemmas corres_whileLoop = + corres_whileLoop_ret[where P="\_. 
P" for P, where P'="\_. P'" for P', simplified] + lemma whileLoop_terminates_cross: assumes body_corres: "\r r'. rrel r r' \ - corres_underlying srel False nf' rrel (P and C r) (P' and C' r') (B r) (B' r')" - and cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P s; P' s'\ \ C r s = C' r' s'" - and body_inv: "\r. \P and C r\ B r \\_. P\" - "\r'. \P' and C' r'\ B' r' \\_. P'\" - and abs_termination: "\r s. P s \ whileLoop_terminates C B r s" - and ex_abs: "ex_abs_underlying srel P s'" - and rrel: "rrel r r'" - and P': "P' s'" + corres_underlying srel nf nf' rrel (P r and C r) (P' r' and C' r') (B r) (B' r')" + assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P r s; P' r' s'\ \ C r s = C' r' s'" + assumes body_inv: + "\r. \P r and C r\ B r \P\" + "\r'. \P' r' and C' r'\ B' r' \P'\" + assumes abs_termination: "\r s. \P r s; C r s\ \ whileLoop_terminates C B r s" + assumes ex_abs: "ex_abs_underlying srel (P r) s'" + assumes rrel: "rrel r r'" + assumes P': "P' r' s'" + assumes nf: "\r. nf \ no_fail (P r and C r) (B r)" shows "whileLoop_terminates C' B' r' s'" proof - - have helper: "\s. P s \ \r' s'. rrel r r' \ (s, s') \ srel \ P s \ P' s' - \ whileLoop_terminates C' B' r' s'" + have helper: "\s. P r s \ C r s \ \r' s'. rrel r r' \ (s, s') \ srel \ P r s \ P' r' s' + \ whileLoop_terminates C' B' r' s'" (is "\s. _ \ ?I r s") apply (rule_tac P="?I" in whileLoop_terminates.induct) apply (fastforce intro: abs_termination) apply (fastforce simp: whileLoop_terminates.intros dest: cond) apply (subst whileLoop_terminates.simps) apply clarsimp - apply (frule (1) corres_underlyingD2[OF body_corres], fastforce+) + apply (frule (1) corres_underlyingD2[OF body_corres], (fastforce dest: nf simp: no_fail_def)+) apply (fastforce dest: use_valid intro: body_inv) done show ?thesis apply (insert assms helper) - apply (clarsimp simp: ex_abs_underlying_def) + apply (cases "C' r' s'") + apply (fastforce simp: ex_abs_underlying_def) + apply (simp add: whileLoop_terminates.intros(1)) done qed -lemma corres_whileLoop_abs: - assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P s; P' s'\ \ C r s = C' r' s'" - and body_corres: +lemma corres_whileLoop_abs_ret: + assumes cond: "\r r' s s'. \rrel r r'; (s, s') \ srel; P r s; P' r' s'\ \ C r s = C' r' s'" + assumes body_corres: "\r r'. rrel r r' \ - corres_underlying srel False nf' rrel (P and C r) (P' and C' r') (B r) (B' r')" - and nf: "\r. no_fail (P and C r) (B r)" - and rrel: "rrel r r'" - and rrel2: "\r'. \r. rrel r r'" - and body_inv: "\r. \P and C r\ B r \\_. P\" - "\r'. \P' and C' r'\ B' r' \\_. P'\" - and abs_termination: "\r s. P s \ whileLoop_terminates C B r s" - shows "corres_underlying srel False nf' rrel P P' (whileLoop C B r) (whileLoop C' B' r')" + corres_underlying srel nf nf' rrel (P r and C r) (P' r' and C' r') (B r) (B' r')" + assumes rrel: "rrel r r'" + assumes body_inv: + "\r. \P r and C r\ B r \P\" + "\r'. \P' r' and C' r'\ B' r' \P'\" + assumes abs_termination: "\r s. \P r s; C r s\ \ whileLoop_terminates C B r s" + assumes nf: "\r. nf \ no_fail (P r and C r) (B r)" + shows "corres_underlying srel nf nf' rrel (P r) (P' r') (whileLoop C B r) (whileLoop C' B' r')" apply (rule corres_underlyingI) apply (frule in_whileLoop_corres[OF body_corres body_inv]; - (fastforce intro: body_corres body_inv rrel dest: cond)) - apply (rule_tac I="\rv' s'. \rv s. (s, s') \ srel \ rrel rv rv' \ P s \ P' s'" - and R="{((r', s'), r, s). 
C' r s \ (r', s') \ fst (B' r s) - \ whileLoop_terminates C' B' r s}" - in not_snd_whileLoop) + (fastforce intro: body_corres body_inv rrel dest: nf cond)) + apply (rule_tac I="\rv' s'. \rv s. (s, s') \ srel \ rrel rv rv' \ P rv s \ P' rv' s'" + and R="{((r', s'), r, s). C' r s \ (r', s') \ fst (B' r s) + \ whileLoop_terminates C' B' r s}" + in not_snd_whileLoop) apply (fastforce intro: rrel) - apply (rename_tac conc_r s) + apply (rename_tac s s' conc_r new_s) apply (clarsimp simp: validNF_def) apply (rule conjI) apply (intro hoare_vcg_conj_lift_pre_fix; (solves wpsimp)?) - apply (prop_tac "\abs_r. rrel abs_r conc_r") - apply (fastforce simp: rrel2) - apply clarsimp + apply (rule_tac Q="\s'. \rv s. (s, s') \ srel \ rrel rv conc_r + \ P rv s \ (P' conc_r s' \ C' conc_r s') \ s' = new_s" + in hoare_weaken_pre[rotated]) + apply clarsimp + apply (rule hoare_ex_pre) + apply (rename_tac abs_r) apply (rule hoare_weaken_pre) - apply (fastforce intro!: wp_from_corres_u body_inv body_corres) + apply (rule_tac G="rrel abs_r conc_r" in hoare_grab_asm) + apply (wpsimp wp: wp_from_corres_u[OF body_corres] body_inv) + apply (fastforce dest: nf) apply (fastforce dest: cond) apply (fastforce simp: valid_def) apply wpsimp apply (rule whileLoop_terminates_cross[OF body_corres]; - (fastforce dest: cond intro: body_inv abs_termination)) - apply (prop_tac "\abs_r. rrel abs_r conc_r") - apply (fastforce simp: rrel2) - apply clarsimp - apply (rule_tac P="\s'. \s. (s, s') \ srel \ (P and C abs_r) s \ P' s' \ C' conc_r s'" - in no_fail_pre) - apply (insert cond body_corres) - apply (fastforce intro: corres_u_nofail simp: pred_conj_def) - apply fastforce + (fastforce dest: nf cond intro: body_inv abs_termination)) + apply (rule_tac P="\s'. \rv s. (s, s') \ srel \ rrel rv conc_r + \ P rv s \ (P' conc_r s' \ C' conc_r s') \ s' = new_s" + in no_fail_pre[rotated]) + apply fastforce + apply (rule no_fail_ex_lift) + apply (rename_tac abs_r) + apply (rule no_fail_pre) + apply (rule_tac G="rrel abs_r conc_r" in no_fail_grab_asm) + apply (fastforce intro: corres_u_nofail dest: body_corres nf) + apply (fastforce simp: cond) apply (fastforce intro: wf_subset[OF whileLoop_terminates_wf[where C=C']]) done +lemmas corres_whileLoop_abs = + corres_whileLoop_abs_ret[where P="\_. P" for P, where P'="\_. P'" for P', simplified] text \Some corres_underlying rules for monadic combinators\ diff --git a/lib/HaskellLib_H.thy b/lib/HaskellLib_H.thy index dec46a4ac4..47965b00c0 100644 --- a/lib/HaskellLib_H.thy +++ b/lib/HaskellLib_H.thy @@ -13,15 +13,15 @@ theory HaskellLib_H imports Lib More_Numeral_Type - Monads.NonDetMonadVCG - Monads.OptionMonad + Monads.Nondet_VCG + Monads.Reader_Option_Monad begin abbreviation (input) "flip \ swp" abbreviation(input) bind_drop :: "('a, 'c) nondet_monad \ ('a, 'b) nondet_monad \ ('a, 'b) nondet_monad" (infixl ">>'_" 60) - where "bind_drop \ (\x y. NonDetMonad.bind x (K_bind y))" + where "bind_drop \ (\x y. 
Nondet_Monad.bind x (K_bind y))" lemma bind_drop_test: "foldr bind_drop x (return ()) = sequence_x x" diff --git a/lib/Hoare_Sep_Tactics/Hoare_Sep_Tactics.thy b/lib/Hoare_Sep_Tactics/Hoare_Sep_Tactics.thy index 34cb78aec1..1cda1fce7d 100644 --- a/lib/Hoare_Sep_Tactics/Hoare_Sep_Tactics.thy +++ b/lib/Hoare_Sep_Tactics/Hoare_Sep_Tactics.thy @@ -6,7 +6,7 @@ theory Hoare_Sep_Tactics imports - Monads.NonDetMonadVCG + Monads.Nondet_VCG Sep_Algebra.Sep_Algebra_L4v begin diff --git a/lib/Injection_Handler.thy b/lib/Injection_Handler.thy index c89431f06c..fdae11064b 100644 --- a/lib/Injection_Handler.thy +++ b/lib/Injection_Handler.thy @@ -7,7 +7,7 @@ (* Definition of injection_handler and supporting lemmas. *) theory Injection_Handler - imports Monads.NonDetMonadVCG + imports Monads.Nondet_VCG begin definition injection_handler :: diff --git a/lib/Insulin.thy b/lib/Insulin.thy index 46dfda9c78..aa147b319f 100644 --- a/lib/Insulin.thy +++ b/lib/Insulin.thy @@ -34,11 +34,11 @@ * * Another example (l4v libraries): * > desugar_term "\ P and Q \ f \ \r _. r \ {0..<5} \!" "\" - * NonDetMonad_Total.validNF (P and Q) f (\r _. r \ {0\'b..<5\'b}) + * Nondet_Total.validNF (P and Q) f (\r _. r \ {0\'b..<5\'b}) * * Desugar multiple operators: * > desugar_term "\ P and Q \ f \ \r _. r \ {0..<5} \!" "\" "and" ".." - * NonDetMonad.validNF (Lib.pred_conj P Q) f + * Nondet_Monad.validNF (Lib.pred_conj P Q) f * (\r _. r \ Set_Interval.ord_class.atLeastLessThan (0\'b) (5\'b)) * * diff --git a/lib/LemmaBucket.thy b/lib/LemmaBucket.thy index df1de659e7..c56be98b08 100644 --- a/lib/LemmaBucket.thy +++ b/lib/LemmaBucket.thy @@ -11,15 +11,6 @@ imports SubMonadLib begin -lemma corres_underlying_trivial_gen: - "\ nf' \ no_fail P' f; \x. rr x x \ \ - corres_underlying Id nf nf' rr P P' f f" - by (auto simp add: corres_underlying_def Id_def no_fail_def) - -lemma corres_underlying_trivial: - "\ nf' \ no_fail P' f \ \ corres_underlying Id nf nf' (=) \ P' f f" - by (erule corres_underlying_trivial_gen, simp) - lemma hoare_spec_gen_asm: "\ F \ s \ \P\ f \Q\ \ \ s \ \P and K F\ f \Q\" "\ F \ s \ \P\ f' \Q\,\E\ \ \ s \ \P and K F\ f' \Q\,\E\" diff --git a/lib/Lib.thy b/lib/Lib.thy index 4804ce35af..786df16bfb 100644 --- a/lib/Lib.thy +++ b/lib/Lib.thy @@ -2239,7 +2239,7 @@ lemma map_of_zip_is_index: lemma map_of_zip_take_update: "\i < length xs; length xs \ length ys; distinct xs\ - \ map_of (zip (take i xs) ys)(xs ! i \ (ys ! i)) = map_of (zip (take (Suc i) xs) ys)" + \ (map_of (zip (take i xs) ys)) (xs ! i \ ys ! i) = map_of (zip (take (Suc i) xs) ys)" apply (rule ext, rename_tac x) apply (case_tac "x=xs ! i"; clarsimp) apply (rule map_of_is_SomeI[symmetric]) @@ -2653,6 +2653,14 @@ lemma if_option_None_eq: "((if P then Some x else None) = None) = (\P)" by simp+ +lemma option_case_all_conv: + "(case x of None \ True | Some v \ P v) = (\v. 
x = Some v \ P v)" + by (auto split: option.split) + +lemma prod_o_comp: + "(case x of (a, b) \ f a b) \ g = (case x of (a, b) \ f a b \ g)" + by (auto simp: split_def) + lemma lhs_sym_eq: "(a = b) = x \ (b = a) = x" by auto diff --git a/lib/ML_Goal.thy b/lib/ML_Goal.thy index 3c9f499177..2121edb5d7 100644 --- a/lib/ML_Goal.thy +++ b/lib/ML_Goal.thy @@ -109,7 +109,7 @@ fun begin_proof ((name, attrs): Attrib.binding, ml_block: Input.source) ctxt = val ((res_name, res), ctxt') = Local_Theory.note (binding, thms) ctxt; val _ = - Proof_Display.print_results true start_pos ctxt' + Proof_Display.print_results { interactive = true, pos = start_pos, proof_state = true } ctxt' (("theorem", res_name), [("", res)]) in ctxt' end in diff --git a/lib/Monad_Commute.thy b/lib/Monad_Commute.thy index 5179e71475..a5bbf317bf 100644 --- a/lib/Monad_Commute.thy +++ b/lib/Monad_Commute.thy @@ -9,7 +9,7 @@ theory Monad_Commute imports - Monads.Monad_Equations + Monads.Nondet_Monad_Equations Monad_Lists (* for mapM_x *) begin diff --git a/lib/Monad_Lists.thy b/lib/Monad_Lists.thy index 151442fdbb..d44fc0f4f7 100644 --- a/lib/Monad_Lists.thy +++ b/lib/Monad_Lists.thy @@ -10,10 +10,10 @@ theory Monad_Lists imports - Monads.In_Monad - Monads.Det - Monads.Empty_Fail - Monads.No_Fail + Monads.Nondet_In_Monad + Monads.Nondet_Det + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail begin lemma mapME_Cons: diff --git a/lib/MonadicRewrite.thy b/lib/MonadicRewrite.thy index 0b869889a7..3058fd921b 100644 --- a/lib/MonadicRewrite.thy +++ b/lib/MonadicRewrite.thy @@ -9,9 +9,9 @@ theory MonadicRewrite imports - Monads.NonDetMonadVCG + Monads.Nondet_VCG Corres_UL - Monads.Empty_Fail + Monads.Nondet_Empty_Fail Rules_Tac begin @@ -40,7 +40,7 @@ lemma monadic_rewrite_impossible: "monadic_rewrite F E \ f g" by (clarsimp simp: monadic_rewrite_def) -lemma monadic_rewrite_guard_imp[wp_pre]: +lemma monadic_rewrite_guard_imp: "\ monadic_rewrite F E Q f g; \s. P s \ Q s \ \ monadic_rewrite F E P f g" by (auto simp add: monadic_rewrite_def) @@ -146,18 +146,23 @@ lemma monadic_rewrite_bindE: apply (case_tac x; simp add: lift_def monadic_rewrite_refl) done +(* in order to preserve bound names in the tail, bind_head must avoid eta on both sides *) +lemma monadic_rewrite_bind_head: + "monadic_rewrite F E P f g \ monadic_rewrite F E P (f >>= h) (g >>= h)" + by (rule monadic_rewrite_bind[OF _ monadic_rewrite_refl hoare_vcg_prop, + simplified pred_top_right_neutral]) + +(* in order to preserve bound names in the tail, bindE_head must avoid eta on both sides *) +lemma monadic_rewrite_bindE_head: + "monadic_rewrite F E P f g \ monadic_rewrite F E (P and (\s. True)) (f >>=E h) (g >>=E h)" + by (rule monadic_rewrite_bindE[OF _ monadic_rewrite_refl hoare_vcg_propE_R]) + lemmas monadic_rewrite_bind_tail = monadic_rewrite_bind[OF monadic_rewrite_refl, simplified pred_top_left_neutral] -lemmas monadic_rewrite_bind_head - = monadic_rewrite_bind[OF _ monadic_rewrite_refl hoare_vcg_prop, simplified pred_top_right_neutral] - lemmas monadic_rewrite_bindE_tail = monadic_rewrite_bindE[OF monadic_rewrite_refl, simplified pred_top_left_neutral] -lemmas monadic_rewrite_bindE_head - = monadic_rewrite_bindE[OF _ monadic_rewrite_refl hoare_vcg_propE_R] - (* Same as monadic_rewrite_bind, but prove hoare triple over head of LHS instead of RHS. *) lemma monadic_rewrite_bind_l: "\ monadic_rewrite F E P f g; \x. 
monadic_rewrite F E (Q x) (h x) (j x); \R\ f \Q\ \ @@ -194,6 +199,36 @@ lemma monadic_rewrite_do_flip: apply (simp add: bind_assoc) done +text \control of lambda abstractions, bound variables and eta form\ + +(* Preserving bound names while iterating using bind*_tail-style rules is more complicated than + for a head-style binding: + we need an eta on the non-schematic side, and must not have an eta on the schematic side, + otherwise unification can't pick a side for name preservation automatically. + It therefore appears a generic name-preserving tail rule is not possible. + The following rules can eliminate an eta from either the LHS or RHS of a monadic_rewrite, + e.g. monadic_rewrite_bind_tail[THEN monadic_rewrite_bind_eta_r] will remove the RHS eta *) + +lemma monadic_rewrite_bind_eta_r: + "monadic_rewrite F E P f (do x <- g; h x od) + \ monadic_rewrite F E P f (g >>= h)" + by simp + +lemma monadic_rewrite_bind_eta_l: + "monadic_rewrite F E P (do x <- f; h x od) g + \ monadic_rewrite F E P (f >>= h) g" + by simp + +lemma monadic_rewrite_bindE_eta_r: + "monadic_rewrite F E P f (doE x <- g; h x odE) + \ monadic_rewrite F E P f (g >>=E h)" + by simp + +lemma monadic_rewrite_bindE_eta_l: + "monadic_rewrite F E P (doE x <- f; h x odE) g + \ monadic_rewrite F E P (f >>=E h) g" + by simp + text \catch\ lemma monadic_rewrite_catch: @@ -618,6 +653,13 @@ lemma monadic_rewrite_gets_the_gets: apply (auto simp: simpler_gets_def return_def) done +lemma gets_oapply_liftM_rewrite: + "monadic_rewrite False True (\s. f s p \ None) + (gets (oapply p \ f)) (liftM Some (gets_map f p))" + unfolding monadic_rewrite_def + by (simp add: liftM_def simpler_gets_def bind_def gets_map_def assert_opt_def return_def + split: option.splits) + text \Option cases\ lemma monadic_rewrite_case_option: @@ -710,17 +752,27 @@ lemmas corres_gets_the_bind text \Tool integration\ lemma wpc_helper_monadic_rewrite: - "monadic_rewrite F E Q' m m' - \ wpc_helper (P, P') (Q, {s. Q' s}) (monadic_rewrite F E (\s. s \ P') m m')" + "monadic_rewrite F E Q m m' + \ wpc_helper (P, P', P'') (Q, Q', Q'') (monadic_rewrite F E P m m')" by (auto simp: wpc_helper_def elim!: monadic_rewrite_guard_imp) wpc_setup "\m. monadic_rewrite F E Q' m m'" wpc_helper_monadic_rewrite wpc_setup "\m. monadic_rewrite F E Q' (m >>= c) m'" wpc_helper_monadic_rewrite +wpc_setup "\m. monadic_rewrite F E Q' (m >>=E c) m'" wpc_helper_monadic_rewrite text \Tactics\ -method monadic_rewrite_step = - determ \rule monadic_rewrite_bind_tail monadic_rewrite_bindE_tail\ +named_theorems monadic_rewrite_pre +declare monadic_rewrite_guard_imp[monadic_rewrite_pre] +method monadic_rewrite_pre = (WP_Pre.pre_tac monadic_rewrite_pre)? + +lemmas monadic_rewrite_step_l = + monadic_rewrite_bind_tail[THEN monadic_rewrite_bind_eta_r] + monadic_rewrite_bindE_tail[THEN monadic_rewrite_bindE_eta_r] + +lemmas monadic_rewrite_step_r = + monadic_rewrite_bind_tail[THEN monadic_rewrite_bind_eta_l] + monadic_rewrite_bindE_tail[THEN monadic_rewrite_bindE_eta_l] method monadic_rewrite_solve_head methods m = (rule monadic_rewrite_bind_head monadic_rewrite_bindE_head)?, @@ -756,15 +808,15 @@ method monadic_rewrite_single_pass methods start step action finalise = (* Step over LHS until action applies, then finalise. 
*) method monadic_rewrite_l_method methods action finalise = - monadic_rewrite_single_pass \wp_pre, rule monadic_rewrite_trans\ - monadic_rewrite_step + monadic_rewrite_single_pass \monadic_rewrite_pre, rule monadic_rewrite_trans\ + \determ \rule monadic_rewrite_step_l\\ action finalise (* Step over RHS until action applies, then finalise. *) method monadic_rewrite_r_method methods action finalise = - monadic_rewrite_single_pass \wp_pre, rule monadic_rewrite_trans[rotated]\ - monadic_rewrite_step + monadic_rewrite_single_pass \monadic_rewrite_pre, rule monadic_rewrite_trans[rotated]\ + \determ \rule monadic_rewrite_step_r\\ action finalise @@ -783,7 +835,7 @@ method monadic_rewrite_symb_exec_resolutions methods m = conditions should be solvable by wpsimp, but the _m versions allow specifying a method or wpsimp options. *) method monadic_rewrite_symb_exec methods r m = - (wp_pre, no_name_eta, r; (monadic_rewrite_symb_exec_resolutions m)?) + (monadic_rewrite_pre, no_name_eta, r; (monadic_rewrite_symb_exec_resolutions m)?) ML \ structure Monadic_Rewrite = struct diff --git a/lib/Monads/Fun_Pred_Syntax.thy b/lib/Monads/Fun_Pred_Syntax.thy index 6675f35f29..691b929717 100644 --- a/lib/Monads/Fun_Pred_Syntax.thy +++ b/lib/Monads/Fun_Pred_Syntax.thy @@ -173,6 +173,17 @@ lemmas pred_neg_bot_eq[simp] = entirely in the future *) +subsection "Simplification Rules for Lifted And/Or" + +lemma bipred_disj_op_eq[simp]: + "reflp R \ ((=) or R) = R" + "reflp R \ (R or (=)) = R" + by (auto simp: reflp_def) + +lemma bipred_le_true[simp]: "R \ \\" + by clarsimp + + section \Examples\ experiment diff --git a/lib/Monads/Less_Monad_Syntax.thy b/lib/Monads/Less_Monad_Syntax.thy index 28464acfd4..17c4c093d6 100644 --- a/lib/Monads/Less_Monad_Syntax.thy +++ b/lib/Monads/Less_Monad_Syntax.thy @@ -12,7 +12,7 @@ begin no_syntax "_thenM" :: "['a, 'b] \ 'c" (infixl ">>" 54) -(* remove input version of >>= from Monad_Syntax, avoid clash with NonDetMonad *) +(* remove input version of >>= from Monad_Syntax, avoid clash with Nondet_Monad *) no_notation Monad_Syntax.bind (infixl ">>=" 54) diff --git a/lib/Monads/Monad_Lib.thy b/lib/Monads/Monad_Lib.thy index 7ea3876f40..d1dc62120a 100644 --- a/lib/Monads/Monad_Lib.thy +++ b/lib/Monads/Monad_Lib.thy @@ -5,7 +5,7 @@ *) (* This theory collects the minimum constant definitions and lemmas for the monad definition - theories (NonDetMonad, TraceMonad etc). Only things that are necessary for these and needed + theories (Nondet_Monad, Trace_Monad etc). Only things that are necessary for these and needed by more than one of them should go in here. *) theory Monad_Lib @@ -69,4 +69,8 @@ lemma sum_all_ex[simp]: "(\a. x \ Inr a) = (\a. x = Inl a)" by (metis Inr_not_Inl sum.exhaust)+ +lemma context_disjE: + "\P \ Q; P \ R; \\P; Q\ \ R\ \ R" + by auto + end \ No newline at end of file diff --git a/lib/Monads/README.md b/lib/Monads/README.md index 819aafe341..28d00f00e1 100644 --- a/lib/Monads/README.md +++ b/lib/Monads/README.md @@ -33,7 +33,7 @@ The directory [`wp/`](./wp/) contains proof methods to reason about these monads in weakest-precondition style. 
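For a quick, illustrative sketch of the renamed entry points (added here; the theory name `Example` is hypothetical), a client theory would now import the `Nondet_*` theories under their new names:

    theory Example
      imports Monads.Nondet_Monad Monads.Nondet_VCG
    begin
    end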
[l4v]: https://github.com/seL4/l4v/ -[nondet]: ./NonDetMonad.thy -[option]: ./OptionMonad.thy -[trace]: ./TraceMonad.thy +[nondet]: ./nondet/Nondet_Monad.thy +[option]: ./reader_option/Reader_Option_Monad.thy +[trace]: ./trace/Trace_Monad.thy [AutoCorres]: ../../tools/autocorres/ \ No newline at end of file diff --git a/lib/Monads/ROOT b/lib/Monads/ROOT index d9c28f70b5..5fe32ad49e 100644 --- a/lib/Monads/ROOT +++ b/lib/Monads/ROOT @@ -16,35 +16,39 @@ session Monads (lib) = HOL + directories wp + nondet + reader_option + trace theories - WhileLoopRules - TraceMonad - OptionMonadND - OptionMonadWP - Strengthen_Demo - TraceMonadLemmas + WPBang + WPFix + Eisbach_WP + WPI + WPC + WP_Pre + WP Datatype_Schematic - WhileLoopRulesCompleteness + Nondet_Monad + Nondet_Lemmas + Nondet_VCG + Nondet_More_VCG + Nondet_In_Monad + Nondet_Sat + Nondet_Det + Nondet_No_Fail + Nondet_No_Throw + Nondet_Empty_Fail + Nondet_Monad_Equations + Nondet_While_Loop_Rules + Nondet_While_Loop_Rules_Completeness + Reader_Option_Monad + Reader_Option_ND + Reader_Option_VCG + Trace_Monad + Trace_Lemmas + Trace_VCG + Trace_Det Strengthen - Strengthen_Setup - OptionMonad - TraceMonadVCG - In_Monad - NonDetMonadVCG - NonDetMonad_Sat - More_NonDetMonadVCG - NonDetMonad - NonDetMonadLemmas - Det - No_Fail - No_Throw - Empty_Fail - Monad_Equations - "wp/WPBang" - "wp/WPFix" - "wp/Eisbach_WP" - "wp/WPI" - "wp/WPC" - "wp/WP_Pre" - "wp/WP" + Nondet_Strengthen_Setup + Strengthen_Demo diff --git a/lib/Monads/Strengthen_Setup.thy b/lib/Monads/Strengthen_Setup.thy deleted file mode 100644 index d06feba55f..0000000000 --- a/lib/Monads/Strengthen_Setup.thy +++ /dev/null @@ -1,83 +0,0 @@ -(* - * Copyright 2023, Proofcraft Pty Ltd - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) - -theory Strengthen_Setup - imports - Strengthen - No_Fail - NonDetMonadVCG -begin - -section \Strengthen setup.\ - -context strengthen_implementation begin - -lemma strengthen_hoare [strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ st F (\) (\P\ f \Q\) (\P\ f \R\)" - by (cases F, auto elim: hoare_strengthen_post) - -lemma strengthen_validE_R_cong[strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ st F (\) (\P\ f \Q\, -) (\P\ f \R\, -)" - by (cases F, auto intro: hoare_post_imp_R) - -lemma strengthen_validE_cong[strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ (\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f \Q\, \S\) (\P\ f \R\, \T\)" - by (cases F, auto elim: hoare_post_impErr) - -lemma strengthen_validE_E_cong[strg]: - "(\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f -, \S\) (\P\ f -, \T\)" - by (cases F, auto elim: hoare_post_impErr simp: validE_E_def) - -lemma wpfix_strengthen_hoare: - "(\s. st (\ F) (\) (P s) (P' s)) - \ (\r s. st F (\) (Q r s) (Q' r s)) - \ st F (\) (\P\ f \Q\) (\P'\ f \Q'\)" - by (cases F, auto elim: hoare_chain) - -lemma wpfix_strengthen_validE_R_cong: - "(\s. st (\ F) (\) (P s) (P' s)) - \ (\r s. st F (\) (Q r s) (Q' r s)) - \ st F (\) (\P\ f \Q\, -) (\P'\ f \Q'\, -)" - by (cases F, auto elim: hoare_chainE simp: validE_R_def) - -lemma wpfix_strengthen_validE_cong: - "(\s. st (\ F) (\) (P s) (P' s)) - \ (\r s. st F (\) (Q r s) (R r s)) - \ (\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f \Q\, \S\) (\P'\ f \R\, \T\)" - by (cases F, auto elim: hoare_chainE) - -lemma wpfix_strengthen_validE_E_cong: - "(\s. st (\ F) (\) (P s) (P' s)) - \ (\r s. 
st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f -, \S\) (\P'\ f -, \T\)" - by (cases F, auto elim: hoare_chainE simp: validE_E_def) - -lemma wpfix_no_fail_cong: - "(\s. st (\ F) (\) (P s) (P' s)) - \ st F (\) (no_fail P f) (no_fail P' f)" - by (cases F, auto elim: no_fail_pre) - -lemmas nondet_wpfix_strgs = - wpfix_strengthen_validE_R_cong - wpfix_strengthen_validE_E_cong - wpfix_strengthen_validE_cong - wpfix_strengthen_hoare - wpfix_no_fail_cong - -end - -lemmas nondet_wpfix_strgs[wp_fix_strgs] - = strengthen_implementation.nondet_wpfix_strgs - - -end \ No newline at end of file diff --git a/lib/Monads/TraceMonad.thy b/lib/Monads/TraceMonad.thy deleted file mode 100644 index 891d17ebec..0000000000 --- a/lib/Monads/TraceMonad.thy +++ /dev/null @@ -1,1154 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) -theory TraceMonad -imports - Monad_Lib - Strengthen -begin - -text \ -The ``Interference Trace Monad''. This nondeterministic monad -records the state at every interference point, permitting -nondeterministic interference by the environment at interference -points. - -The trace set initially includes all possible environment behaviours. -Trace steps are tagged as environment or self actions, and can then -be constrained to a smaller set where the environment acts according -to a rely constraint (i.e. rely-guarantee reasoning), or to set the -environment actions to be the self actions of another program (parallel -composition). -\ - -section "The Trace Monad" - -text \Trace monad identifier. Me corresponds to the current thread running and Env to the environment.\ -datatype tmid = Me | Env - -text \Results associated with traces. Traces may correspond to incomplete, failed, or completed executions.\ -datatype ('s, 'a) tmres = Failed | Incomplete | Result "('a \ 's)" - -abbreviation - map_tmres_rv :: "('a \ 'b) \ ('s, 'a) tmres \ ('s, 'b) tmres" -where - "map_tmres_rv f \ map_tmres id f" - -section "The Monad" - -text \ tmonad returns a set of non-deterministic computations, including - a trace as a list of "thread identifier" \ state, and an optional - pair result, state when the computation did not fail. \ -type_synonym ('s, 'a) tmonad = "'s \ ((tmid \ 's) list \ ('s, 'a) tmres) set" - -text \Returns monad results, ignoring failures and traces.\ -definition - mres :: "((tmid \ 's) list \ ('s, 'a) tmres) set \ ('a \ 's) set" -where - "mres r = Result -` (snd ` r)" - -text \ - The definition of fundamental monad functions @{text return} and - @{text bind}. The monad function @{text "return x"} does not change - the state, does not fail, and returns @{text "x"}. -\ -definition - return :: "'a \ ('s,'a) tmonad" -where - "return a \ \s. ({([], Result (a, s))})" - -text \ - The monad function @{text "bind f g"}, also written @{text "f >>= g"}, - is the execution of @{term f} followed by the execution of @{text g}. - The function @{text g} takes the result value \emph{and} the result - state of @{text f} as parameter. The definition says that the result of - the combined operation is the union of the set of sets that is created - by @{text g} applied to the result sets of @{text f}. The combined - operation may have failed, if @{text f} may have failed or @{text g} may - have failed on any of the results of @{text f}. -\ - -abbreviation (input) - fst_upd :: "('a \ 'c) \ 'a \ 'b \ 'c \ 'b" -where - "fst_upd f \ \(a,b). (f a, b)" - -abbreviation (input) - snd_upd :: "('b \ 'c) \ 'a \ 'b \ 'a \ 'c" -where - "snd_upd f \ \(a,b). 
(a, f b)" - -definition - bind :: "('s, 'a) tmonad \ ('a \ ('s, 'b) tmonad) \ - ('s, 'b) tmonad" (infixl ">>=" 60) -where - "bind f g \ \s. \(xs, r) \ (f s). case r of Failed \ {(xs, Failed)} - | Incomplete \ {(xs, Incomplete)} - | Result (rv, s) \ fst_upd (\ys. ys @ xs) ` g rv s" - -text \Sometimes it is convenient to write @{text bind} in reverse order.\ -abbreviation(input) - bind_rev :: "('c \ ('a, 'b) tmonad) \ ('a, 'c) tmonad \ - ('a, 'b) tmonad" (infixl "=<<" 60) -where - "g =<< f \ f >>= g" - -text \ - The basic accessor functions of the state monad. @{text get} returns - the current state as result, does not fail, and does not change the state. - @{text "put s"} returns nothing (@{typ unit}), changes the current state - to @{text s} and does not fail. -\ -definition - get :: "('s,'s) tmonad" -where - "get \ \s. {([], Result (s, s))}" - -definition - put :: "'s \ ('s, unit) tmonad" -where - "put s \ \st. {([], Result ((), s))}" - -definition - put_trace_elem :: "(tmid \ 's) \ ('s, unit) tmonad" -where - "put_trace_elem x = (\s. {([], Incomplete), ([x], Result ((), s))})" - -primrec - put_trace :: "(tmid \ 's) list \ ('s, unit) tmonad" -where - "put_trace [] = return ()" - | "put_trace (x # xs) = (put_trace xs >>= (\_. put_trace_elem x))" - -subsection "Nondeterminism" - -text \ - Basic nondeterministic functions. @{text "select A"} chooses an element - of the set @{text A}, does not change the state, and does not fail - (even if the set is empty). @{text "f \ g"} executes @{text f} or - executes @{text g}. It retuns the union of results of @{text f} and - @{text g}, and may have failed if either may have failed. -\ -definition - select :: "'a set \ ('s, 'a) tmonad" -where - (* Should we have Failed when A = {} ? *) - "select A \ \s. (Pair [] ` Result ` (A \ {s}))" - -definition - alternative :: "('s,'a) tmonad \ ('s,'a) tmonad \ - ('s,'a) tmonad" - (infixl "\" 20) -where - "f \ g \ \s. (f s \ g s)" - - -text \ The @{text selet_f} function was left out here until we figure - out what variant we actually need. -\ - -subsection "Failure" - -text \ The monad function that always fails. Returns an empty set of -results and sets the failure flag. \ -definition - fail :: "('s, 'a) tmonad" -where - "fail \ \s. {([], Failed)}" - -text \ Assertions: fail if the property @{text P} is not true \ -definition - assert :: "bool \ ('a, unit) tmonad" -where - "assert P \ if P then return () else fail" - -text \ Fail if the value is @{const None}, - return result @{text v} for @{term "Some v"} \ -definition - assert_opt :: "'a option \ ('b, 'a) tmonad" -where - "assert_opt v \ case v of None \ fail | Some v \ return v" - -text \ An assertion that also can introspect the current state. \ - -definition - state_assert :: "('s \ bool) \ ('s, unit) tmonad" -where - "state_assert P \ get >>= (\s. assert (P s))" - -subsection "Generic functions on top of the state monad" - -text \ Apply a function to the current state and return the result -without changing the state. \ -definition - gets :: "('s \ 'a) \ ('s, 'a) tmonad" -where - "gets f \ get >>= (\s. return (f s))" - -text \ Modify the current state using the function passed in. \ -definition - modify :: "('s \ 's) \ ('s, unit) tmonad" -where - "modify f \ get >>= (\s. put (f s))" - -lemma simpler_gets_def: "gets f = (\s. {([], Result ((f s), s))})" - by (simp add: fun_eq_iff gets_def return_def bind_def get_def split_def) - -lemma simpler_modify_def: - "modify f = (\s. 
{([], Result ((),(f s)))})" - by (simp add: fun_eq_iff modify_def bind_def get_def put_def split_def) - -text \ Execute the given monad when the condition is true, - return @{text "()"} otherwise. \ -definition - "when" :: "bool \ ('s, unit) tmonad \ - ('s, unit) tmonad" -where - "when P m \ if P then m else return ()" - -text \ Execute the given monad unless the condition is true, - return @{text "()"} otherwise. \ -definition - unless :: "bool \ ('s, unit) tmonad \ - ('s, unit) tmonad" -where - "unless P m \ when (\P) m" - -text \ - Perform a test on the current state, performing the left monad if - the result is true or the right monad if the result is false. -\ -definition - condition :: "('s \ bool) \ ('s, 'r) tmonad \ ('s, 'r) tmonad \ ('s, 'r) tmonad" -where - "condition P L R \ \s. if (P s) then (L s) else (R s)" - -notation (output) - condition ("(condition (_)// (_)// (_))" [1000,1000,1000] 1000) - -text \ -Apply an option valued function to the current state, fail -if it returns @{const None}, return @{text v} if it returns -@{term "Some v"}. -\ -definition - gets_the :: "('s \ 'a option) \ ('s, 'a) tmonad" -where - "gets_the f \ gets f >>= assert_opt" - - -subsection \ The Monad Laws \ - -text \An alternative definition of bind, sometimes more convenient.\ -lemma bind_def2: - "bind f g \ (\s. ((\xs. (xs, Failed)) ` {xs. (xs, Failed) \ f s}) - \ ((\xs. (xs, Incomplete)) ` {xs. (xs, Incomplete) \ f s}) - \ (\(xs, rv, s) \ {(xs, rv, s'). (xs, Result (rv, s')) \ f s}. fst_upd (\ys. ys @ xs) ` g rv s))" - apply (clarsimp simp add: bind_def fun_eq_iff - Un_Union_image split_def - intro!: eq_reflection) - apply (auto split: tmres.splits elim!:rev_bexI[where A="f x" for x]) - apply (fastforce intro: image_eqI[rotated]) - done - -lemma elem_bindE: - "(tr, res) \ bind f (\x. g x) s - \ ((res = Incomplete | res = Failed) \ (tr, map_tmres undefined undefined res) \ f s \ P) - \ (\tr' tr'' x s'. (tr', Result (x, s')) \ f s \ (tr'', res) \ g x s' - \ tr = tr'' @ tr' \ P) - \ P" - by (auto simp: bind_def2) - -text \ Each monad satisfies at least the following three laws. \ - -text \ @{term return} is absorbed at the left of a @{term bind}, - applying the return value directly: \ - -declare map_option.identity[simp] - -lemma return_bind [simp]: "(return x >>= f) = f x" - by (auto simp add: return_def bind_def split_def split:if_splits) - -text \ @{term return} is absorbed on the right of a @{term bind} \ -lemma bind_return [simp]: "(m >>= return) = m" - by (auto simp add: fun_eq_iff bind_def return_def - split: tmres.splits) - -text \ @{term bind} is associative \ -lemma bind_assoc: - fixes m :: "('a,'b) tmonad" - fixes f :: "'b \ ('a,'c) tmonad" - fixes g :: "'c \ ('a,'d) tmonad" - shows "(m >>= f) >>= g = m >>= (\x. f x >>= g)" - apply (unfold bind_def Let_def split_def) - apply (rule ext) - apply clarsimp - apply (rule SUP_cong[OF refl], clarsimp) - apply (split tmres.split; intro conjI impI; clarsimp) - apply (simp add: image_Union) - apply (rule SUP_cong[OF refl], clarsimp) - apply (split tmres.split; intro conjI impI; clarsimp) - apply (simp add: image_image) - done - -section \ Adding Exceptions \ - -text \ - The type @{typ "('s,'a) tmonad"} gives us nondeterminism and - failure. We now extend this monad with exceptional return values - that abort normal execution, but can be handled explicitly. - We use the sum type to indicate exceptions. - - In @{typ "('s, 'e + 'a) tmonad"}, @{typ "'s"} is the state, - @{typ 'e} is an exception, and @{typ 'a} is a normal return value. 
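[Editor's note, illustration only, not part of the deleted theory: the sum-type encoding of exceptions can be made concrete with a rough Haskell analogue. Traces and the failure flag are omitted, lists stand in for the result sets, and all names below are invented for the sketch.]

    -- Inner monad: a state transformer returning all possible (value, state) outcomes.
    newtype M s a = M { runM :: s -> [(a, s)] }

    returnM :: a -> M s a
    returnM a = M (\s -> [(a, s)])

    bindM :: M s a -> (a -> M s b) -> M s b
    bindM f g = M (\s -> concat [ runM (g a) s' | (a, s') <- runM f s ])

    -- Exception layer: Left e is an exceptional result, Right a a normal one.
    returnOk :: a -> M s (Either e a)
    returnOk = returnM . Right

    throwError :: e -> M s (Either e a)
    throwError = returnM . Left

    -- bindE only runs the continuation on normal results; exceptions propagate.
    bindE :: M s (Either e a) -> (a -> M s (Either e b)) -> M s (Either e b)
    bindE f g = f `bindM` either throwError g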
- - This new type itself forms a monad again. Since type classes in - Isabelle are not powerful enough to express the class of monads, - we provide new names for the @{term return} and @{term bind} functions - in this monad. We call them @{text returnOk} (for normal return values) - and @{text bindE} (for composition). We also define @{text throwError} - to return an exceptional value. -\ -definition - returnOk :: "'a \ ('s, 'e + 'a) tmonad" -where - "returnOk \ return o Inr" - -definition - throwError :: "'e \ ('s, 'e + 'a) tmonad" -where - "throwError \ return o Inl" - -text \ - Lifting a function over the exception type: if the input is an - exception, return that exception; otherwise continue execution. -\ -definition - lift :: "('a \ ('s, 'e + 'b) tmonad) \ - 'e +'a \ ('s, 'e + 'b) tmonad" -where - "lift f v \ case v of Inl e \ throwError e - | Inr v' \ f v'" - -text \ - The definition of @{term bind} in the exception monad (new - name @{text bindE}): the same as normal @{term bind}, but - the right-hand side is skipped if the left-hand side - produced an exception. -\ -definition - bindE :: "('s, 'e + 'a) tmonad \ - ('a \ ('s, 'e + 'b) tmonad) \ - ('s, 'e + 'b) tmonad" (infixl ">>=E" 60) -where - "bindE f g \ bind f (lift g)" - - -text \ - Lifting a normal nondeterministic monad into the - exception monad is achieved by always returning its - result as normal result and never throwing an exception. -\ -definition - liftE :: "('s,'a) tmonad \ ('s, 'e+'a) tmonad" -where - "liftE f \ f >>= (\r. return (Inr r))" - - -text \ - Since the underlying type and @{text return} function changed, - we need new definitions for when and unless: -\ -definition - whenE :: "bool \ ('s, 'e + unit) tmonad \ - ('s, 'e + unit) tmonad" -where - "whenE P f \ if P then f else returnOk ()" - -definition - unlessE :: "bool \ ('s, 'e + unit) tmonad \ - ('s, 'e + unit) tmonad" -where - "unlessE P f \ if P then returnOk () else f" - - -text \ - Throwing an exception when the parameter is @{term None}, otherwise - returning @{term "v"} for @{term "Some v"}. -\ -definition - throw_opt :: "'e \ 'a option \ ('s, 'e + 'a) tmonad" -where - "throw_opt ex x \ - case x of None \ throwError ex | Some v \ returnOk v" - - -text \ - Failure in the exception monad is redefined in the same way - as @{const whenE} and @{const unlessE}, with @{term returnOk} - instead of @{term return}. -\ -definition - assertE :: "bool \ ('a, 'e + unit) tmonad" -where - "assertE P \ if P then returnOk () else fail" - -subsection "Monad Laws for the Exception Monad" - -text \ More direct definition of @{const liftE}: \ -lemma liftE_def2: - "liftE f = (\s. 
snd_upd (map_tmres_rv Inr) ` (f s))" - apply (clarsimp simp: fun_eq_iff liftE_def return_def split_def bind_def image_def) - apply (rule set_eqI) - apply (rule iffI) - apply clarsimp - apply (erule rev_bexI[where A="f s" for s]) - apply (clarsimp split: tmres.splits) - apply clarsimp - apply (rule exI) - apply (rule conjI) - apply (erule rev_bexI[where A="f s" for s]) - apply (rule refl) - apply (clarsimp split: tmres.splits) - done - -text \ Left @{const returnOk} absorption over @{term bindE}: \ -lemma returnOk_bindE [simp]: "(returnOk x >>=E f) = f x" - apply (unfold bindE_def returnOk_def) - apply (clarsimp simp: lift_def) - done - -lemma lift_return [simp]: - "lift (return \ Inr) = return" - by (simp add: fun_eq_iff lift_def throwError_def split: sum.splits) - -text \ Right @{const returnOk} absorption over @{term bindE}: \ -lemma bindE_returnOk [simp]: "(m >>=E returnOk) = m" - by (simp add: bindE_def returnOk_def) - -text \ Associativity of @{const bindE}: \ -lemma bindE_assoc: - "(m >>=E f) >>=E g = m >>=E (\x. f x >>=E g)" - apply (simp add: bindE_def bind_assoc) - apply (rule arg_cong [where f="\x. m >>= x"]) - apply (rule ext) - apply (case_tac x, simp_all add: lift_def throwError_def) - done - -text \ @{const returnOk} could also be defined via @{const liftE}: \ -lemma returnOk_liftE: - "returnOk x = liftE (return x)" - by (simp add: liftE_def returnOk_def) - -text \ Execution after throwing an exception is skipped: \ -lemma throwError_bindE [simp]: - "(throwError E >>=E f) = throwError E" - by (simp add: fun_eq_iff bindE_def bind_def throwError_def lift_def return_def split_def) - - -section "Syntax" - -text \ This section defines traditional Haskell-like do-syntax - for the state monad in Isabelle. \ - -subsection "Syntax for the Nondeterministic State Monad" - -text \ We use @{text K_bind} to syntactically indicate the - case where the return argument of the left side of a @{term bind} - is ignored \ -definition - K_bind_def [iff]: "K_bind \ \x y. x" - -nonterminal - dobinds and dobind and nobind - -syntax - "_dobind" :: "[pttrn, 'a] => dobind" ("(_ <-/ _)" 10) - "" :: "dobind => dobinds" ("_") - "_nobind" :: "'a => dobind" ("_") - "_dobinds" :: "[dobind, dobinds] => dobinds" ("(_);//(_)") - - "_do" :: "[dobinds, 'a] => 'a" ("(do ((_);//(_))//od)" 100) -syntax (xsymbols) - "_dobind" :: "[pttrn, 'a] => dobind" ("(_ \/ _)" 10) - -translations - "_do (_dobinds b bs) e" == "_do b (_do bs e)" - "_do (_nobind b) e" == "b >>= (CONST K_bind e)" - "do x <- a; e od" == "a >>= (\x. e)" - -text \ Syntax examples: \ -lemma "do x \ return 1; - return (2::nat); - return x - od = - return 1 >>= - (\x. return (2::nat) >>= - K_bind (return x))" - by (rule refl) - -lemma "do x \ return 1; - return 2; - return x - od = return 1" - by simp - -subsection "Interference command" - -text \Interference commands must be inserted between actions that can be interfered with by commands -running in other threads.
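[Editor's note, illustration only, not from the original theory: the idea can be sketched in Haskell. Every recorded step is tagged with who made it; an interference point commits the current state as a Me step and then lets the environment append arbitrarily many Env steps, with execution resuming in whatever state the environment left behind. The names and the list-of-environment-states parameter are invented for the sketch, and the trace ordering is simplified.]

    data Who = Me | Env deriving (Eq, Show)
    type Trace s = [(Who, s)]

    -- One possible outcome of an interference point: commit state s as our own
    -- step, then let the environment take the given steps; the computation
    -- resumes in the last environment state (or in s if there were none).
    interferenceOutcome :: s -> [s] -> (Trace s, s)
    interferenceOutcome s envSteps =
      ((Me, s) : map (\s' -> (Env, s')) envSteps, last (s : envSteps))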
\ - -definition - last_st_tr :: "(tmid * 's) list \ 's \ 's" -where - "last_st_tr tr s0 = (hd (map snd tr @ [s0]))" - -definition - env_steps :: "('s,unit) tmonad" -where - "env_steps \ - do - s \ get; - \ \Add unfiltered environment events to the trace\ - xs \ select UNIV; - tr \ return (map (Pair Env) xs); - put_trace tr; - \ \Pick the last event of the trace as the final state\ - put (last_st_tr tr s) - od" - -definition - commit_step :: "('s, unit) tmonad" -where - "commit_step \ - do - s \ get; - put_trace [(Me,s)] - od" - -definition - interference :: "('s,unit) tmonad" -where - "interference \ - do - commit_step; - env_steps - od" - -subsection "Syntax for the Exception Monad" - -text \ - Since the exception monad is a different type, we - need to syntactically distinguish it in the syntax. - We use @{text doE}/@{text odE} for this, but can re-use - most of the productions from @{text do}/@{text od} - above. -\ - -syntax - "_doE" :: "[dobinds, 'a] => 'a" ("(doE ((_);//(_))//odE)" 100) - -translations - "_doE (_dobinds b bs) e" == "_doE b (_doE bs e)" - "_doE (_nobind b) e" == "b >>=E (CONST K_bind e)" - "doE x <- a; e odE" == "a >>=E (\x. e)" - -text \ Syntax examples: \ -lemma "doE x \ returnOk 1; - returnOk (2::nat); - returnOk x - odE = - returnOk 1 >>=E - (\x. returnOk (2::nat) >>=E - K_bind (returnOk x))" - by (rule refl) - -lemma "doE x \ returnOk 1; - returnOk 2; - returnOk x - odE = returnOk 1" - by simp - - - -section "Library of additional Monadic Functions and Combinators" - - -text \ Lifting a normal function into the monad type: \ -definition - liftM :: "('a \ 'b) \ ('s,'a) tmonad \ ('s, 'b) tmonad" -where - "liftM f m \ do x \ m; return (f x) od" - -text \ The same for the exception monad: \ -definition - liftME :: "('a \ 'b) \ ('s,'e+'a) tmonad \ ('s,'e+'b) tmonad" -where - "liftME f m \ doE x \ m; returnOk (f x) odE" - -text \ Run a sequence of monads from left to right, ignoring return values. \ -definition - sequence_x :: "('s, 'a) tmonad list \ ('s, unit) tmonad" -where - "sequence_x xs \ foldr (\x y. x >>= (\_. y)) xs (return ())" - -text \ - Map a monadic function over a list by applying it to each element - of the list from left to right, ignoring return values. -\ -definition - mapM_x :: "('a \ ('s,'b) tmonad) \ 'a list \ ('s, unit) tmonad" -where - "mapM_x f xs \ sequence_x (map f xs)" - -text \ - Map a monadic function with two parameters over two lists, - going through both lists simultaneously, left to right, ignoring - return values. -\ -definition - zipWithM_x :: "('a \ 'b \ ('s,'c) tmonad) \ - 'a list \ 'b list \ ('s, unit) tmonad" -where - "zipWithM_x f xs ys \ sequence_x (zipWith f xs ys)" - - -text \ The same three functions as above, but returning a list of -return values instead of @{text unit} \ -definition - sequence :: "('s, 'a) tmonad list \ ('s, 'a list) tmonad" -where - "sequence xs \ let mcons = (\p q. p >>= (\x. q >>= (\y. return (x#y)))) - in foldr mcons xs (return [])" - -definition - mapM :: "('a \ ('s,'b) tmonad) \ 'a list \ ('s, 'b list) tmonad" -where - "mapM f xs \ sequence (map f xs)" - -definition - zipWithM :: "('a \ 'b \ ('s,'c) tmonad) \ - 'a list \ 'b list \ ('s, 'c list) tmonad" -where - "zipWithM f xs ys \ sequence (zipWith f xs ys)" - -definition - foldM :: "('b \ 'a \ ('s, 'a) tmonad) \ 'b list \ 'a \ ('s, 'a) tmonad" -where - "foldM m xs a \ foldr (\p q. q >>= m p) xs (return a) " - -definition - foldME ::"('b \ 'a \ ('s,('e + 'b)) tmonad) \ 'b \ 'a list \ ('s, ('e + 'b)) tmonad" -where "foldME m a xs \ foldr (\p q. 
q >>=E swp m p) xs (returnOk a)" - -text \ The sequence and map functions above for the exception monad, -with and without lists of return value \ -definition - sequenceE_x :: "('s, 'e+'a) tmonad list \ ('s, 'e+unit) tmonad" -where - "sequenceE_x xs \ foldr (\x y. doE _ <- x; y odE) xs (returnOk ())" - -definition - mapME_x :: "('a \ ('s,'e+'b) tmonad) \ 'a list \ - ('s,'e+unit) tmonad" -where - "mapME_x f xs \ sequenceE_x (map f xs)" - -definition - sequenceE :: "('s, 'e+'a) tmonad list \ ('s, 'e+'a list) tmonad" -where - "sequenceE xs \ let mcons = (\p q. p >>=E (\x. q >>=E (\y. returnOk (x#y)))) - in foldr mcons xs (returnOk [])" - -definition - mapME :: "('a \ ('s,'e+'b) tmonad) \ 'a list \ - ('s,'e+'b list) tmonad" -where - "mapME f xs \ sequenceE (map f xs)" - - -text \ Filtering a list using a monadic function as predicate: \ -primrec - filterM :: "('a \ ('s, bool) tmonad) \ 'a list \ ('s, 'a list) tmonad" -where - "filterM P [] = return []" -| "filterM P (x # xs) = do - b <- P x; - ys <- filterM P xs; - return (if b then (x # ys) else ys) - od" - -text \ @{text select_state} takes a relationship between - states, and outputs nondeterministically a state - related to the input state. \ - -definition - state_select :: "('s \ 's) set \ ('s, unit) tmonad" -where - "state_select r \ (do - s \ get; - S \ return {s'. (s, s') \ r}; - assert (S \ {}); - s' \ select S; - put s' - od)" -section "Catching and Handling Exceptions" - -text \ - Turning an exception monad into a normal state monad - by catching and handling any potential exceptions: -\ -definition - catch :: "('s, 'e + 'a) tmonad \ - ('e \ ('s, 'a) tmonad) \ - ('s, 'a) tmonad" (infix "" 10) -where - "f handler \ - do x \ f; - case x of - Inr b \ return b - | Inl e \ handler e - od" - -text \ - Handling exceptions, but staying in the exception monad. - The handler may throw a type of exceptions different from - the left side. -\ -definition - handleE' :: "('s, 'e1 + 'a) tmonad \ - ('e1 \ ('s, 'e2 + 'a) tmonad) \ - ('s, 'e2 + 'a) tmonad" (infix "" 10) -where - "f handler \ - do - v \ f; - case v of - Inl e \ handler e - | Inr v' \ return (Inr v') - od" - -text \ - A type restriction of the above that is used more commonly in - practice: the exception handle (potentially) throws exception - of the same type as the left-hand side. -\ -definition - handleE :: "('s, 'x + 'a) tmonad \ - ('x \ ('s, 'x + 'a) tmonad) \ - ('s, 'x + 'a) tmonad" (infix "" 10) -where - "handleE \ handleE'" - - -text \ - Handling exceptions, and additionally providing a continuation - if the left-hand side throws no exception: -\ -definition - handle_elseE :: "('s, 'e + 'a) tmonad \ - ('e \ ('s, 'ee + 'b) tmonad) \ - ('a \ ('s, 'ee + 'b) tmonad) \ - ('s, 'ee + 'b) tmonad" - ("_ _ _" 10) -where - "f handler continue \ - do v \ f; - case v of Inl e \ handler e - | Inr v' \ continue v' - od" - -subsection "Loops" - -text \ - Loops are handled using the following inductive predicate; - non-termination is represented using the failure flag of the - monad. 
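[Editor's note, illustration only: at the level of results, ignoring traces and the Incomplete/Failed markers, the loop simply unfolds the body until the condition fails. A rough Haskell analogue with invented names, which may help when reading the inductive definitions that follow:]

    -- All (result, state) pairs reachable by iterating the body b from (r, s)
    -- while the condition c holds; a non-terminating iteration simply
    -- contributes no outcome here.
    whileLoopH :: (r -> s -> Bool) -> (r -> s -> [(r, s)]) -> r -> s -> [(r, s)]
    whileLoopH c b r s
      | not (c r s) = [(r, s)]
      | otherwise   = concat [ whileLoopH c b r' s' | (r', s') <- b r s ]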
-\ -inductive_set - whileLoop_results :: "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ (('r \ 's) \ ((tmid \ 's) list \ ('s, 'r) tmres)) set" - for C B -where - "\ \ C r s \ \ ((r, s), ([], Result (r, s))) \ whileLoop_results C B" - | "\ C r s; (ts, Failed) \ B r s \ \ ((r, s), (ts, Failed)) \ whileLoop_results C B" - | "\ C r s; (ts, Incomplete) \ B r s \ \ ((r, s), (ts, Incomplete)) \ whileLoop_results C B" - | "\ C r s; (ts, Result (r', s')) \ B r s; ((r', s'), (ts',z)) \ whileLoop_results C B \ - \ ((r, s), (ts'@ts,z)) \ whileLoop_results C B" - -inductive_cases whileLoop_results_cases_result_end: "((x,y), ([],Result r)) \ whileLoop_results C B" -inductive_cases whileLoop_results_cases_fail: "((x,y), (ts, Failed)) \ whileLoop_results C B" -inductive_cases whileLoop_results_cases_incomplete: "((x,y), (ts, Incomplete)) \ whileLoop_results C B" - -inductive_simps whileLoop_results_simps_valid: "((x,y), ([], Result z)) \ whileLoop_results C B" - -inductive - whileLoop_terminates :: "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ 's \ bool" - for C B -where - "\ C r s \ whileLoop_terminates C B r s" - | "\ C r s; \(r', s') \ Result -` snd ` (B r s). whileLoop_terminates C B r' s' \ - \ whileLoop_terminates C B r s" - - -inductive_cases whileLoop_terminates_cases: "whileLoop_terminates C B r s" -inductive_simps whileLoop_terminates_simps: "whileLoop_terminates C B r s" - -definition - whileLoop :: "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ ('s, 'r) tmonad" -where - "whileLoop C B \ (\r s. {(ts, res). ((r,s), ts,res) \ whileLoop_results C B})" - -notation (output) - whileLoop ("(whileLoop (_)// (_))" [1000, 1000] 1000) - -definition - whileLoopT :: "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ ('s, 'r) tmonad" -where - "whileLoopT C B \ (\r s. {(ts, res). ((r,s), ts,res) \ whileLoop_results C B - \ whileLoop_terminates C B r s})" - -notation (output) - whileLoopT ("(whileLoopT (_)// (_))" [1000, 1000] 1000) - -definition - whileLoopE :: "('r \ 's \ bool) \ ('r \ ('s, 'e + 'r) tmonad) - \ 'r \ ('s, ('e + 'r)) tmonad" -where - "whileLoopE C body \ - \r. whileLoop (\r s. (case r of Inr v \ C v s | _ \ False)) (lift body) (Inr r)" - -notation (output) - whileLoopE ("(whileLoopE (_)// (_))" [1000, 1000] 1000) - -subsection "Await command" - -text \ @{term "Await c f"} blocks the execution until the @{term "c"} is true, - and atomically executes @{term "f"}. -\ - -definition - Await :: "('s \ bool) \ ('s,unit) tmonad" -where - "Await c \ - do - s \ get; - \ \Add unfiltered environment events, with the last one - satisfying the `c' state predicate\ - xs \ select {xs. c (last_st_tr (map (Pair Env) xs) s)}; - tr \ return (map (Pair Env) xs); - put_trace tr; - \ \Pick the last event of the trace\ - put (last_st_tr tr s) - od" - -section "Hoare Logic" - -subsection "Validity" - -text \ This section defines a Hoare logic for partial correctness for - the nondeterministic state monad as well as the exception monad. - The logic talks only about the behaviour part of the monad and ignores - the failure flag. - - The logic is defined semantically. Rules work directly on the - validity predicate. - - In the nondeterministic state monad, validity is a triple of precondition, - monad, and postcondition. The precondition is a function from state to - bool (a state predicate), the postcondition is a function from return value - to state to bool. 
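[Editor's aside, illustration only: at a single, concrete initial state the intended reading can be sketched as a Haskell-style check over the set of results, here a list; the names are invented and not part of the theory.]

    -- Holds when either the precondition fails at s, or every (result, final
    -- state) outcome of f at s satisfies the postcondition.
    validAt :: s -> (s -> Bool) -> (s -> [(a, s)]) -> (a -> s -> Bool) -> Bool
    validAt s pre f post = not (pre s) || all (\(rv, s') -> post rv s') (f s)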
A triple is valid if for all states that satisfy the - precondition, all result values and result states that are returned by - the monad satisfy the postcondition. Note that if the computation returns - the empty set, the triple is trivially valid. This means @{term "assert P"} - does not require us to prove that @{term P} holds, but rather allows us - to assume @{term P}! Proving non-failure is done via separate predicate and - calculus (see below). -\ - - -definition - valid :: "('s \ bool) \ ('s,'a) tmonad \ ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\") -where - "\P\ f \Q\ \ \s. P s \ (\(r,s') \ mres (f s). Q r s')" - -text \ - We often reason about invariant predicates. The following provides shorthand syntax - that avoids repeating potentially long predicates. -\ -abbreviation (input) - invariant :: "('s,'a) tmonad \ ('s \ bool) \ bool" ("_ \_\" [59,0] 60) -where - "invariant f P \ \P\ f \\_. P\" - -text \rg_pred type: Rely-Guaranty predicates (state before => state after => bool)\ -type_synonym 's rg_pred = "'s \ 's \ bool" - - -text \ - Validity for the exception monad is similar and build on the standard - validity above. Instead of one postcondition, we have two: one for - normal and one for exceptional results. -\ -definition - validE :: "('s \ bool) \ ('s, 'a + 'b) tmonad \ - ('b \ 's \ bool) \ - ('a \ 's \ bool) \ bool" -("\_\/ _ /(\_\,/ \_\)" ) -where - "\P\ f \Q\,\E\ \ \P\ f \ \v s. case v of Inr r \ Q r s | Inl e \ E e s \" -(* -text \ Validity for exception monad with interferences. Not as easy to phrase - as we need to \ -definition - validIE :: "('s, 'a + 'b) tmonad \ - 's rg_pred \ - 's rg_pred \ 's rg_pred \ - ('b \ 's rg_pred) \ - ('a \ 's rg_pred) \ bool" - ("_ //PRE _//RELY _//GUAR _//POST _//EXC _" [59,0,0,0,0,0] 60) -where - "validIE f P R G Q E \ f SAT [P,R,G,\v. case v of Inr r \ Q r | Inl e \ E e]" - -abbreviation (input) - validIEsat :: "('s, 'a + 'b) tmonad \ - 's rg_pred \ - 's rg_pred \ 's rg_pred \ - ('b \ 's rg_pred) \ - ('a \ 's rg_pred) \ bool" - ("_ //SAT [_, _, _, _, _]" [59,0,0,0,0,0] 60) - where - "validIEsat f P R G Q E \ validIE f P R G Q E" - *) -text \ - The following two instantiations are convenient to separate reasoning - for exceptional and normal case. -\ -definition - validE_R :: "('s \ bool) \ ('s, 'e + 'a) tmonad \ - ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\, -") -where - "\P\ f \Q\,- \ validE P f Q (\x y. True)" - -definition - validE_E :: "('s \ bool) \ ('s, 'e + 'a) tmonad \ - ('e \ 's \ bool) \ bool" - ("\_\/ _ /-, \_\") -where - "\P\ f -,\Q\ \ validE P f (\x y. True) Q" - - -text \ Abbreviations for trivial postconditions (taking three arguments): \ -abbreviation(input) - toptoptop :: "'a \ 'b \ 'b \ bool" ("\\\") -where - "\\\ \ \_ _ _. True" - -abbreviation(input) - botbotbot :: "'a \ 'b \ 'b \ bool" ("\\\") -where - "\\\ \ \_ _ _. False" - - -subsection "Determinism" - -text \ A monad of type @{text tmonad} is deterministic iff it -returns an empty trace, exactly one state and result and does not fail \ -definition - det :: "('a,'s) tmonad \ bool" -where - "det f \ \s. \r. f s = {([], Result r)}" - -text \ A deterministic @{text tmonad} can be turned - into a normal state monad: \ -definition - the_run_state :: "('s,'a) tmonad \ 's \ 'a \ 's" -where - "the_run_state M \ \s. THE s'. mres (M s) = {s'}" - - -subsection "Non-Failure" - -text \ - We can formulate non-failure separately from validity. -\ -definition - no_fail :: "('s \ bool) \ ('s,'a) tmonad \ bool" -where - "no_fail P m \ \s. 
P s \ Failed \ snd ` (m s)" - -text \ - It is often desired to prove non-failure and a Hoare triple - simultaneously, as the reasoning is often similar. The following - definitions allow such reasoning to take place. -\ - -definition - validNF ::"('s \ bool) \ ('s,'a) tmonad \ ('a \ 's \ bool) \ bool" - ("\_\/ _ /\_\!") -where - "validNF P f Q \ valid P f Q \ no_fail P f" - -definition - validE_NF :: "('s \ bool) \ ('s, 'a + 'b) tmonad \ - ('b \ 's \ bool) \ - ('a \ 's \ bool) \ bool" - ("\_\/ _ /(\_\,/ \_\!)") -where - "validE_NF P f Q E \ validE P f Q E \ no_fail P f" - -lemma validE_NF_alt_def: - "\ P \ B \ Q \,\ E \! = \ P \ B \ \v s. case v of Inl e \ E e s | Inr r \ Q r s \!" - by (clarsimp simp: validE_NF_def validE_def validNF_def) - -(* text \ - Usually, well-formed monads constructed from the primitives - above will have the following property: if they return an - empty set of results, they will have the failure flag set. -\ -definition - empty_fail :: "('s,'a) tmonad \ bool" -where - "empty_fail m \ \s. fst (m s) = {} \ snd (m s)" - -text \ - Useful in forcing otherwise unknown executions to have - the @{const empty_fail} property. -\ -definition - mk_ef :: "'a set \ bool \ 'a set \ bool" -where - "mk_ef S \ (fst S, fst S = {} \ snd S)" - *) -section "Basic exception reasoning" - -text \ - The following predicates @{text no_throw} and @{text no_return} allow - reasoning that functions in the exception monad either do - no throw an exception or never return normally. -\ - -definition "no_throw P A \ \ P \ A \ \_ _. True \,\ \_ _. False \" - -definition "no_return P A \ \ P \ A \\_ _. False\,\\_ _. True \" - -section "Trace monad Parallel" - -definition - parallel :: "('s,'a) tmonad \ ('s,'a) tmonad \ ('s,'a) tmonad" -where - "parallel f g = (\s. {(xs, rv). \f_steps. length f_steps = length xs - \ (map (\(f_step, (id, s)). (if f_step then id else Env, s)) (zip f_steps xs), rv) \ f s - \ (map (\(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps xs), rv) \ g s})" - -abbreviation(input) - "parallel_mrg \ ((\((idn, s), (idn', _)). (if idn = Env then idn' else idn, s)))" - -lemma parallel_def2: - "parallel f g = (\s. {(xs, rv). \ys zs. (ys, rv) \ f s \ (zs, rv) \ g s - \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ys zs - \ xs = map parallel_mrg (zip ys zs)})" - apply (simp add: parallel_def fun_eq_iff set_eq_iff) - apply safe - apply (rule exI, rule conjI, assumption)+ - apply (simp add: list_all2_conv_all_nth list_eq_iff_nth_eq split_def prod_eq_iff) - apply clarsimp - apply (rule_tac x="map (((\) Env) o fst) ys" in exI) - apply (simp add: zip_map1 o_def split_def) - apply (strengthen subst[where P="\xs. (xs, v) \ S" for v S, mk_strg I _ E]) - apply (clarsimp simp: list_all2_conv_all_nth list_eq_iff_nth_eq - split_def prod_eq_iff - split del: if_split cong: if_cong) - apply auto - done - -lemma parallel_def3: - "parallel f g = (\s. (\(ys, zs, rv). (map parallel_mrg (zip ys zs), rv)) - ` {(ys, zs, rv). (ys, rv) \ f s \ (zs, rv) \ g s - \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ys zs})" - by (simp add: parallel_def2, rule ext, auto simp: image_def) - -primrec - trace_steps :: "(tmid \ 's) list \ 's \ (tmid \ 's \ 's) set" -where - "trace_steps (elem # trace) s0 = {(fst elem, s0, snd elem)} \ trace_steps trace (snd elem)" -| "trace_steps [] s0 = {}" - -lemma trace_steps_nth: - "trace_steps xs s0 = (\i. (fst (xs ! i), (if i = 0 then s0 else snd (xs ! (i - 1))), snd (xs ! 
i))) ` {..< length xs}" -proof (induct xs arbitrary: s0) - case Nil - show ?case by simp -next - case (Cons a xs) - show ?case - apply (simp add: lessThan_Suc_eq_insert_0 Cons image_image nth_Cons') - apply (intro arg_cong2[where f=insert] refl image_cong) - apply simp - done -qed - -definition - rely_cond :: "'s rg_pred \ 's \ (tmid \ 's) list \ bool" -where - "rely_cond R s0s tr = (\(ident, s0, s) \ trace_steps (rev tr) s0s. ident = Env \ R s0 s)" - -definition - guar_cond :: "'s rg_pred \ 's \ (tmid \ 's) list \ bool" -where - "guar_cond G s0s tr = (\(ident, s0, s) \ trace_steps (rev tr) s0s. ident = Me \ G s0 s)" - -lemma rg_empty_conds[simp]: - "rely_cond R s0s []" - "guar_cond G s0s []" - by (simp_all add: rely_cond_def guar_cond_def) - -definition - rely :: "('s, 'a) tmonad \ 's rg_pred \ 's \ ('s, 'a) tmonad" -where - "rely f R s0s \ (\s. f s \ ({tr. rely_cond R s0s tr} \ UNIV))" - -definition - prefix_closed :: "('s, 'a) tmonad \ bool" -where - "prefix_closed f = (\s. \x xs. (x # xs) \ fst ` f s \ (xs, Incomplete) \ f s)" - -definition - validI :: "('s \ 's \ bool) \ 's rg_pred \ ('s,'a) tmonad - \ 's rg_pred \ ('a \ 's \ 's \ bool) \ bool" - ("(\_\,/ \_\)/ _ /(\_\,/ \_\)") -where - "\P\,\R\ f \G\,\Q\ \ prefix_closed f \ (\s0 s. P s0 s - \ (\tr res. (tr, res) \ (rely f R s0 s) \ guar_cond G s0 tr - \ (\rv s'. res = Result (rv, s') \ Q rv (last_st_tr tr s0) s')))" - -lemma in_rely: - "\ (tr, res) \ f s; rely_cond R s0s tr \ \ (tr, res) \ rely f R s0s s" - by (simp add: rely_def) - -lemmas validI_D = validI_def[THEN meta_eq_to_obj_eq, THEN iffD1, - THEN conjunct2, rule_format, OF _ _ in_rely] -lemmas validI_GD = validI_D[THEN conjunct1] -lemmas validI_rvD = validI_D[THEN conjunct2, rule_format, rotated -1, OF refl] -lemmas validI_prefix_closed = validI_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct1] -lemmas validI_prefix_closed_T = validI_prefix_closed[where P="\_ _. False" and R="\_ _. False" - and G="\_ _. True" and Q="\_ _ _. True"] - -lemmas prefix_closedD1 = prefix_closed_def[THEN iffD1, rule_format] - -lemma in_fst_snd_image_eq: - "x \ fst ` S = (\y. (x, y) \ S)" - "y \ snd ` S = (\x. 
(x, y) \ S)" - by (auto elim: image_eqI[rotated]) - -lemma in_fst_snd_image: - "(x, y) \ S \ x \ fst ` S" - "(x, y) \ S \ y \ snd ` S" - by (auto simp: in_fst_snd_image_eq) - -lemmas prefix_closedD = prefix_closedD1[OF _ in_fst_snd_image(1)] - -end diff --git a/lib/Monads/TraceMonadLemmas.thy b/lib/Monads/TraceMonadLemmas.thy deleted file mode 100644 index 45e4aedbeb..0000000000 --- a/lib/Monads/TraceMonadLemmas.thy +++ /dev/null @@ -1,379 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) -theory TraceMonadLemmas -imports TraceMonadVCG -begin - -lemma mapM_Cons: - "mapM f (x # xs) = do - y \ f x; - ys \ mapM f xs; - return (y # ys) - od" - and mapM_Nil: - "mapM f [] = return []" - by (simp_all add: mapM_def sequence_def) - -lemma mapM_x_Cons: - "mapM_x f (x # xs) = do - y \ f x; - mapM_x f xs - od" - and mapM_x_Nil: - "mapM_x f [] = return ()" - by (simp_all add: mapM_x_def sequence_x_def) - -lemma mapM_append: - "mapM f (xs @ ys) = (do - fxs \ mapM f xs; - fys \ mapM f ys; - return (fxs @ fys) - od)" - by (induct xs, simp_all add: mapM_Cons mapM_Nil bind_assoc) - -lemma mapM_x_append: - "mapM_x f (xs @ ys) = (do - x \ mapM_x f xs; - mapM_x f ys - od)" - by (induct xs, simp_all add: mapM_x_Cons mapM_x_Nil bind_assoc) - -lemma mapM_map: - "mapM f (map g xs) = mapM (f o g) xs" - by (induct xs; simp add: mapM_Nil mapM_Cons) - -lemma mapM_x_map: - "mapM_x f (map g xs) = mapM_x (f o g) xs" - by (induct xs; simp add: mapM_x_Nil mapM_x_Cons) - -primrec - repeat_n :: "nat \ ('s, unit) tmonad \ ('s, unit) tmonad" -where - "repeat_n 0 f = return ()" - | "repeat_n (Suc n) f = do f; repeat_n n f od" - -lemma repeat_n_mapM_x: - "repeat_n n f = mapM_x (\_. f) (replicate n ())" - by (induct n, simp_all add: mapM_x_Cons mapM_x_Nil) - -definition - repeat :: "('s, unit) tmonad \ ('s, unit) tmonad" -where - "repeat f = do n \ select UNIV; repeat_n n f od" - -definition - env_step :: "('s,unit) tmonad" -where - "env_step = - (do - s' \ select UNIV; - put_trace_elem (Env, s'); - put s' - od)" - -abbreviation - "env_n_steps n \ repeat_n n env_step" - -lemma elem_select_bind: - "(tr, res) \ (do x \ select S; f x od) s - = (\x \ S. (tr, res) \ f x s)" - by (simp add: bind_def select_def) - -lemma select_bind_UN: - "(do x \ select S; f x od) = (\s. \x \ S. f x s)" - by (rule ext, auto simp: elem_select_bind) - -lemma select_early: - "S \ {} - \ do x \ f; y \ select S; g x y od - = do y \ select S; x \ f; g x y od" - apply (simp add: bind_def select_def Sigma_def) - apply (rule ext) - apply (fastforce elim: rev_bexI image_eqI[rotated] split: tmres.split_asm) - done - -lemma repeat_n_choice: - "S \ {} - \ repeat_n n (do x \ select S; f x od) - = (do xs \ select {xs. set xs \ S \ length xs = n}; mapM_x f xs od)" - apply (induct n; simp?) - apply (simp add: select_def bind_def mapM_x_Nil cong: conj_cong) - apply (simp add: select_early bind_assoc) - apply (subst select_early) - apply simp - apply (auto intro: exI[where x="replicate n xs" for n xs])[1] - apply (simp(no_asm) add: fun_eq_iff set_eq_iff elem_select_bind) - apply (simp only: conj_comms[where Q="length xs = n" for xs n]) - apply (simp only: ex_simps[symmetric] conj_assoc length_Suc_conv, simp) - apply (auto simp: mapM_x_Cons) - done - -lemma repeat_choice: - "S \ {} - \ repeat (do x \ select S; f x od) - = (do xs \ select {xs. 
set xs \ S}; mapM_x f xs od)" - apply (simp add: repeat_def repeat_n_choice) - apply (simp(no_asm) add: fun_eq_iff set_eq_iff elem_select_bind) - done - -lemma put_trace_append: - "put_trace (xs @ ys) = do put_trace ys; put_trace xs od" - by (induct xs; simp add: bind_assoc) - -lemma put_trace_elem_put_comm: - "do y \ put_trace_elem x; put s od - = do y \ put s; put_trace_elem x od" - by (simp add: put_def put_trace_elem_def bind_def insert_commute) - -lemma put_trace_put_comm: - "do y \ put_trace xs; put s od - = do y \ put s; put_trace xs od" - apply (rule sym; induct xs; simp) - apply (simp add: bind_assoc put_trace_elem_put_comm) - apply (simp add: bind_assoc[symmetric]) - done - -lemma mapM_x_comm: - "(\x \ set xs. do y \ g; f x od = do y \ f x; g od) - \ do y \ g; mapM_x f xs od = do y \ mapM_x f xs; g od" - apply (induct xs; simp add: mapM_x_Nil mapM_x_Cons) - apply (simp add: bind_assoc[symmetric], simp add: bind_assoc) - done - -lemma mapM_x_split: - "(\x \ set xs. \y \ set xs. do z \ g y; f x od = do (z :: unit) \ f x; g y od) - \ mapM_x (\x. do z \ f x; g x od) xs = do y \ mapM_x f xs; mapM_x g xs od" - apply (induct xs; simp add: mapM_x_Nil mapM_x_Cons bind_assoc) - apply (subst bind_assoc[symmetric], subst mapM_x_comm[where f=f and g="g x" for x]) - apply simp - apply (simp add: bind_assoc) - done - -lemma mapM_x_put: - "mapM_x put xs = unless (xs = []) (put (last xs))" - apply (induct xs rule: rev_induct) - apply (simp add: mapM_x_Nil unless_def when_def) - apply (simp add: mapM_x_append mapM_x_Cons mapM_x_Nil) - apply (simp add: bind_def unless_def when_def put_def return_def) - done - -lemma put_trace_mapM_x: - "put_trace xs = mapM_x put_trace_elem (rev xs)" - by (induct xs; simp add: mapM_x_Nil mapM_x_append mapM_x_Cons) - -lemma rev_surj: - "surj rev" - by (rule_tac f=rev in surjI, simp) - -lemma select_image: - "select (f ` S) = do x \ select S; return (f x) od" - by (auto simp add: bind_def select_def return_def Sigma_def) - -lemma env_steps_repeat: - "env_steps = repeat env_step" - apply (simp add: env_step_def repeat_choice env_steps_def - select_early) - apply (simp add: put_trace_elem_put_comm) - apply (simp add: mapM_x_split put_trace_elem_put_comm put_trace_put_comm - mapM_x_put) - apply (simp add: put_trace_mapM_x rev_map mapM_x_map o_def) - apply (subst rev_surj[symmetric], simp add: select_image bind_assoc) - apply (rule arg_cong2[where f=bind, OF refl ext]) - apply (simp add: bind_def get_def put_def unless_def when_def return_def) - apply (simp add: last_st_tr_def hd_map hd_rev) - done - -lemma repeat_n_plus: - "repeat_n (n + m) f = do repeat_n n f; repeat_n m f od" - by (induct n; simp add: bind_assoc) - -lemma repeat_eq_twice[simp]: - "(do x \ repeat f; repeat f od) = repeat f" - apply (simp add: repeat_def select_early) - apply (simp add: bind_assoc repeat_n_plus[symmetric, simplified]) - apply (simp add: bind_def select_def Sigma_def) - apply (rule ext, fastforce intro: exI[where x=0]) - done - -lemmas bind_then_eq = arg_cong2[where f=bind, OF _ refl] -lemmas repeat_eq_twice_then[simp] - = repeat_eq_twice[THEN bind_then_eq, simplified bind_assoc] - -lemmas env_steps_eq_twice[simp] - = repeat_eq_twice[where f=env_step, folded env_steps_repeat] -lemmas env_steps_eq_twice_then[simp] - = env_steps_eq_twice[THEN bind_then_eq, simplified bind_assoc] - -lemmas mapM_collapse_append = mapM_append[symmetric, THEN bind_then_eq, - simplified bind_assoc, simplified] - -lemma prefix_closed_mapM[rule_format, wp_split]: - "(\x \ set xs. 
prefix_closed (f x)) \ prefix_closed (mapM f xs)" - apply (induct xs) - apply (simp add: mapM_def sequence_def) - apply (clarsimp simp: mapM_Cons) - apply (intro prefix_closed_bind allI; clarsimp) - done - -lemma modify_id: - "modify id = return ()" - by (simp add: modify_def get_def bind_def put_def return_def) - -lemma modify_modify: - "(do x \ modify f; modify (g x) od) = modify (g () o f)" - by (simp add: bind_def simpler_modify_def) - -lemmas modify_modify_bind = arg_cong2[where f=bind, - OF modify_modify refl, simplified bind_assoc] - -lemma select_single: - "select {x} = return x" - by (simp add: select_def return_def) - -lemma put_then_get[unfolded K_bind_def]: - "do put s; get od = do put s; return s od" - by (simp add: put_def bind_def get_def return_def) - -lemmas put_then_get_then - = put_then_get[THEN bind_then_eq, simplified bind_assoc return_bind] - -lemmas bind_promote_If - = if_distrib[where f="\f. bind f g" for g] - if_distrib[where f="\g. bind f g" for f] - -lemma bind_promote_If2: - "do x \ f; if P then g x else h x od - = (if P then bind f g else bind f h)" - by simp - -lemma exec_put_trace[unfolded K_bind_def]: - "(do put_trace xs; f od) s - = (\n \ {n. 0 < n \ n \ length xs}. {(drop n xs, Incomplete)}) - \ ((\(a, b). (a @ xs, b)) ` f s)" - apply (simp add: put_trace_eq_drop bind_def) - apply (safe; (clarsimp split: if_split_asm)?; - fastforce intro: bexI[where x=0] rev_bexI) - done - -lemma if_fun_lift: - "(if P then f else g) x = (if P then f x else g x)" - by simp - -lemma UN_If_distrib: - "(\x \ S. if P x then A x else B x) - = ((\x \ S \ {x. P x}. A x) \ (\x \ S \ {x. \ P x}. B x))" - by (fastforce split: if_split_asm) - -lemma Await_redef: - "Await P = do - s1 \ select {s. P s}; - env_steps; - s \ get; - select (if P s then {()} else {}) - od" - apply (simp add: Await_def env_steps_def bind_assoc) - apply (cases "{s. P s} = {}") - apply (simp add: select_def bind_def get_def) - apply (rule ext) - apply (simp add: exec_get select_bind_UN put_then_get_then) - apply (simp add: bind_promote_If2 if_fun_lift if_distrib[where f=select]) - apply (simp add: exec_put_trace cong: if_cong) - apply (simp add: put_def bind_def select_def cong: if_cong) - apply (strengthen equalityI) - apply clarsimp - apply (strengthen exI[where x="s # xs" for s xs]) - apply (strengthen exI[where x="Suc n" for n]) - apply simp - apply blast - done - -lemma bind_apply_cong': - "f s = f' s - \ (\rv s'. (rv, s') \ mres (f s) \ g rv s' = g' rv s') - \ bind f g s = bind f' g' s" - apply (simp add: bind_def) - apply (rule SUP_cong; simp?) - apply (clarsimp split: tmres.split) - apply (drule spec2, drule mp, erule in_mres) - apply simp - done - -lemmas bind_apply_cong = bind_apply_cong'[rule_format] - -lemma select_empty_bind[simp]: - "select {} >>= f = select {}" - by (simp add: select_def bind_def) - -lemma fail_bind[simp]: - "fail >>= f = fail" - by (simp add: bind_def fail_def) - -lemma eq_Me_neq_Env: - "(x = Me) = (x \ Env)" - by (cases x; simp) - -lemma validI_invariant_imp: - assumes v: "\P\,\R\ f \G\,\Q\" - and P: "\s0 s. P s0 s \ I s0" - and R: "\s0 s. I s0 \ R s0 s \ I s" - and G: "\s0 s. I s0 \ G s0 s \ I s" - shows "\P\,\R\ f \\s0 s. I s0 \ I s \ G s0 s\,\\rv s0 s. I s0 \ Q rv s0 s\" -proof - - { fix tr s0 i - assume r: "rely_cond R s0 tr" and g: "guar_cond G s0 tr" - and I: "I s0" - hence imp: "\(_, s, s') \ trace_steps (rev tr) s0. 
I s \ I s'" - apply (clarsimp simp: guar_cond_def rely_cond_def) - apply (drule(1) bspec)+ - apply (clarsimp simp: eq_Me_neq_Env) - apply (metis R G) - done - hence "i < length tr \ I (snd (rev tr ! i))" - using I - apply (induct i) - apply (clarsimp simp: neq_Nil_conv[where xs="rev tr" for tr, simplified]) - apply clarsimp - apply (drule bspec, fastforce simp add: trace_steps_nth) - apply simp - done - } - note I = this - show ?thesis - using v - apply (clarsimp simp: validI_def rely_def imp_conjL) - apply (drule spec2, drule(1) mp)+ - apply clarsimp - apply (frule P[rule_format]) - apply (clarsimp simp: guar_cond_def trace_steps_nth I last_st_tr_def - hd_append last_rev[symmetric] - last_conv_nth rev_map) - done -qed - -lemma validI_guar_post_conj_lift: - "\P\,\R\ f \G1\,\Q1\ - \ \P\,\R\ f \G2\,\Q2\ - \ \P\,\R\ f \\s0 s. G1 s0 s \ G2 s0 s\,\\rv s0 s. Q1 rv s0 s \ Q2 rv s0 s\" - apply (frule validI_prefix_closed) - apply (subst validI_def, clarsimp simp: rely_def) - apply (drule(3) validI_D)+ - apply (auto simp: guar_cond_def) - done - -lemmas modify_prefix_closed[simp] = - modify_wp[THEN valid_validI_wp[OF no_trace_all(3)], THEN validI_prefix_closed] -lemmas await_prefix_closed[simp] = Await_sync_twp[THEN validI_prefix_closed] - -lemma repeat_prefix_closed[intro!]: - "prefix_closed f \ prefix_closed (repeat f)" - apply (simp add: repeat_def) - apply (rule prefix_closed_bind; clarsimp) - apply (rename_tac n) - apply (induct_tac n; simp) - apply (auto intro: prefix_closed_bind) - done - -end diff --git a/lib/Monads/TraceMonadVCG.thy b/lib/Monads/TraceMonadVCG.thy deleted file mode 100644 index e88d79b9b5..0000000000 --- a/lib/Monads/TraceMonadVCG.thy +++ /dev/null @@ -1,2587 +0,0 @@ -(* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) - * - * SPDX-License-Identifier: BSD-2-Clause - *) -theory TraceMonadVCG -imports - TraceMonad - Fun_Pred_Syntax - WPSimp -begin - -lemma trace_steps_append: - "trace_steps (xs @ ys) s - = trace_steps xs s \ trace_steps ys (last_st_tr (rev xs) s)" - by (induct xs arbitrary: s, simp_all add: last_st_tr_def hd_append) - -lemma rely_cond_append: - "rely_cond R s (xs @ ys) = (rely_cond R s ys \ rely_cond R (last_st_tr ys s) xs)" - by (simp add: rely_cond_def trace_steps_append ball_Un conj_comms) - -lemma guar_cond_append: - "guar_cond G s (xs @ ys) = (guar_cond G s ys \ guar_cond G (last_st_tr ys s) xs)" - by (simp add: guar_cond_def trace_steps_append ball_Un conj_comms) - -lemma prefix_closed_bind: - "prefix_closed f \ (\x. prefix_closed (g x)) \ prefix_closed (f >>= g)" - apply (subst prefix_closed_def, clarsimp simp: bind_def) - apply (auto simp: Cons_eq_append_conv split: tmres.split_asm - dest!: prefix_closedD[rotated]; - fastforce elim: rev_bexI) - done - -lemmas prefix_closed_bind[rule_format, wp_split] - -lemma last_st_tr_append[simp]: - "last_st_tr (tr @ tr') s = last_st_tr tr (last_st_tr tr' s)" - by (simp add: last_st_tr_def hd_append) - -lemma last_st_tr_Nil[simp]: - "last_st_tr [] s = s" - by (simp add: last_st_tr_def) - -lemma last_st_tr_Cons[simp]: - "last_st_tr (x # xs) s = snd x" - by (simp add: last_st_tr_def) - -lemma bind_twp[wp_split]: - "\ \r. \Q' r\,\R\ g r \G\,\Q\; \P\,\R\ f \G\,\Q'\ \ - \ \P\,\R\ f >>= (\r. 
g r) \G\,\Q\" - apply (subst validI_def, rule conjI) - apply (blast intro: prefix_closed_bind validI_prefix_closed) - apply (clarsimp simp: bind_def rely_def) - apply (drule(2) validI_D) - apply (clarsimp simp: rely_cond_append split: tmres.split_asm) - apply (clarsimp split: tmres.split_asm) - apply (drule meta_spec, frule(2) validI_D) - apply (clarsimp simp: rely_cond_append split: if_split_asm) - apply (clarsimp simp: guar_cond_append) - done - -lemma trace_steps_rev_drop_nth: - "trace_steps (rev (drop n tr)) s - = (\i. (fst (rev tr ! i), (if i = 0 then s else snd (rev tr ! (i - 1))), - snd (rev tr ! i))) ` {..< length tr - n}" - apply (simp add: trace_steps_nth) - apply (intro image_cong refl) - apply (simp add: rev_nth) - done - -lemma prefix_closed_drop: - "(tr, res) \ f s \ prefix_closed f \ \res'. (drop n tr, res') \ f s" -proof (induct n arbitrary: res) - case 0 then show ?case by auto -next - case (Suc n) - have drop_1: "\tr res. (tr, res) \ f s \ \res'. (drop 1 tr, res') \ f s" - by (case_tac tr; fastforce dest: prefix_closedD[rotated] intro: Suc) - show ?case - using Suc.hyps[OF Suc.prems] - by (metis drop_1[simplified] drop_drop add_0 add_Suc) -qed - -lemma validI_GD_drop: - "\ \P\, \R\ f \G\, \Q\; P s0 s; (tr, res) \ f s; - rely_cond R s0 (drop n tr) \ - \ guar_cond G s0 (drop n tr)" - apply (drule prefix_closed_drop[where n=n], erule validI_prefix_closed) - apply (auto dest: validI_GD) - done - -lemma parallel_prefix_closed[wp_split]: - "prefix_closed f \ prefix_closed g - \ prefix_closed (parallel f g)" - apply (subst prefix_closed_def, clarsimp simp: parallel_def) - apply (case_tac f_steps; clarsimp) - apply (drule(1) prefix_closedD)+ - apply fastforce - done - -lemma rely_cond_drop: - "rely_cond R s0 xs \ rely_cond R s0 (drop n xs)" - using rely_cond_append[where xs="take n xs" and ys="drop n xs"] - by simp - -lemma rely_cond_is_drop: - "rely_cond R s0 xs - \ (ys = drop (length xs - length ys) xs) - \ rely_cond R s0 ys" - by (metis rely_cond_drop) - -lemma bounded_rev_nat_induct: - "(\n. N \ n \ P n) \ (\n. n < N \ P (Suc n) \ P n) - \ P n" - by (induct diff\"N - n" arbitrary: n; simp) - -lemma drop_n_induct: - "P [] \ (\n. n < length xs \ P (drop (Suc n) xs) \ P (drop n xs)) - \ P (drop n xs)" - by (induct len\"length (drop n xs)" arbitrary: n xs; simp) - -lemma guar_cond_Cons_eq: - "guar_cond R s0 (x # xs) - = (guar_cond R s0 xs \ (fst x \ Env \ R (last_st_tr xs s0) (snd x)))" - by (cases "fst x"; simp add: guar_cond_def trace_steps_append conj_comms) - -lemma guar_cond_Cons: - "guar_cond R s0 xs - \ fst x \ Env \ R (last_st_tr xs s0) (snd x) - \ guar_cond R s0 (x # xs)" - by (simp add: guar_cond_Cons_eq) - -lemma guar_cond_drop_Suc_eq: - "n < length xs - \ guar_cond R s0 (drop n xs) = (guar_cond R s0 (drop (Suc n) xs) - \ (fst (xs ! n) \ Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))))" - apply (rule trans[OF _ guar_cond_Cons_eq]) - apply (simp add: Cons_nth_drop_Suc) - done - -lemma guar_cond_drop_Suc: - "guar_cond R s0 (drop (Suc n) xs) - \ fst (xs ! n) \ Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! 
n)) - \ guar_cond R s0 (drop n xs)" - by (case_tac "n < length xs"; simp add: guar_cond_drop_Suc_eq) - -lemma rely_cond_Cons_eq: - "rely_cond R s0 (x # xs) - = (rely_cond R s0 xs \ (fst x = Env \ R (last_st_tr xs s0) (snd x)))" - by (simp add: rely_cond_def trace_steps_append conj_comms) - -lemma rely_cond_Cons: - "rely_cond R s0 xs - \ fst x = Env \ R (last_st_tr xs s0) (snd x) - \ rely_cond R s0 (x # xs)" - by (simp add: rely_cond_Cons_eq) - -lemma rely_cond_drop_Suc_eq: - "n < length xs - \ rely_cond R s0 (drop n xs) = (rely_cond R s0 (drop (Suc n) xs) - \ (fst (xs ! n) = Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))))" - apply (rule trans[OF _ rely_cond_Cons_eq]) - apply (simp add: Cons_nth_drop_Suc) - done - -lemma rely_cond_drop_Suc: - "rely_cond R s0 (drop (Suc n) xs) - \ fst (xs ! n) = Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n)) - \ rely_cond R s0 (drop n xs)" - by (cases "n < length xs"; simp add: rely_cond_drop_Suc_eq) - -lemma last_st_tr_map_zip_hd: - "length flags = length tr - \ tr \ [] \ snd (f (hd flags, hd tr)) = snd (hd tr) - \ last_st_tr (map f (zip flags tr)) = last_st_tr tr" - apply (cases tr; simp) - apply (cases flags; simp) - apply (simp add: fun_eq_iff) - done - -lemma last_st_tr_drop_map_zip_hd: - "length flags = length tr - \ n < length tr \ snd (f (flags ! n, tr ! n)) = snd (tr ! n) - \ last_st_tr (drop n (map f (zip flags tr))) = last_st_tr (drop n tr)" - apply (simp add: drop_map drop_zip) - apply (rule last_st_tr_map_zip_hd; clarsimp) - apply (simp add: hd_drop_conv_nth) - done - -lemma last_st_tr_map_zip: - "length flags = length tr - \ \fl tmid s. snd (f (fl, (tmid, s))) = s - \ last_st_tr (map f (zip flags tr)) = last_st_tr tr" - apply (erule last_st_tr_map_zip_hd) - apply (clarsimp simp: neq_Nil_conv) - done - -lemma parallel_rely_induct: - assumes preds: "(tr, v) \ parallel f g s" "Pf s0 s" "Pg s0 s" - assumes validI: "\Pf\,\Rf\ f' \Gf\,\Qf\" - "\Pg\,\Rg\ g' \Gg\,\Qg\" - "f s \ f' s" "g s \ g' s" - and compat: "R \ Rf" "R \ Rg" "Gf \ G" "Gg \ G" - "Gf \ Rg" "Gg \ Rf" - and rely: "rely_cond R s0 (drop n tr)" - shows "\tr_f tr_g. (tr_f, v) \ f s \ (tr_g, v) \ g s - \ rely_cond Rf s0 (drop n tr_f) \ rely_cond Rg s0 (drop n tr_g) - \ map snd tr_f = map snd tr \ map snd tr_g = map snd tr - \ (\i \ length tr. last_st_tr (drop i tr_g) s0 = last_st_tr (drop i tr_f) s0) - \ guar_cond G s0 (drop n tr)" - (is "\ys zs. _ \ _ \ ?concl ys zs") -proof - - obtain ys zs where tr: "tr = map parallel_mrg (zip ys zs)" - and all2: "list_all2 (\y z. 
(fst y = Env \ fst z = Env) \ snd y = snd z) ys zs" - and ys: "(ys, v) \ f s" and zs: "(zs, v) \ g s" - using preds - by (clarsimp simp: parallel_def2) - note len[simp] = list_all2_lengthD[OF all2] - - have ys': "(ys, v) \ f' s" and zs': "(zs, v) \ g' s" - using ys zs validI by auto - - note rely_f_step = validI_GD_drop[OF validI(1) preds(2) ys' rely_cond_drop_Suc] - note rely_g_step = validI_GD_drop[OF validI(2) preds(3) zs' rely_cond_drop_Suc] - - note snd[simp] = list_all2_nthD[OF all2, THEN conjunct2] - - have "?concl ys zs" - using rely tr all2 rely_f_step rely_g_step - apply (induct n rule: bounded_rev_nat_induct) - apply (subst drop_all, assumption) - apply clarsimp - apply (simp add: list_all2_conv_all_nth last_st_tr_def drop_map[symmetric] - hd_drop_conv_nth hd_append) - apply (fastforce simp: split_def intro!: nth_equalityI) - apply clarsimp - apply (erule_tac x=n in meta_allE)+ - apply (drule meta_mp, erule rely_cond_is_drop, simp) - apply (subst(asm) rely_cond_drop_Suc_eq[where xs="map f xs" for f xs], simp) - apply (clarsimp simp: last_st_tr_drop_map_zip_hd if_split[where P="\x. x = Env"] - split_def) - apply (intro conjI; (rule guar_cond_drop_Suc rely_cond_drop_Suc, assumption)) - apply (auto simp: guar_cond_drop_Suc_eq last_st_tr_drop_map_zip_hd - intro: compat[THEN predicate2D]) - done - - thus ?thesis - using ys zs - by auto -qed - -lemmas parallel_rely_induct0 = parallel_rely_induct[where n=0, simplified] - -lemma rg_validI: - assumes validI: "\Pf\,\Rf\ f \Gf\,\Qf\" - "\Pg\,\Rg\ g \Gg\,\Qg\" - and compat: "R \ Rf" "R \ Rg" "Gf \ G" "Gg \ G" - "Gf \ Rg" "Gg \ Rf" - shows "\Pf and Pg\,\R\ parallel f g \G\,\\rv. Qf rv and Qg rv\" - apply (clarsimp simp: validI_def rely_def pred_conj_def - parallel_prefix_closed validI[THEN validI_prefix_closed]) - apply (drule(3) parallel_rely_induct0[OF _ _ _ validI order_refl order_refl compat]) - apply clarsimp - apply (drule(2) validI[THEN validI_rvD])+ - apply (simp add: last_st_tr_def) - done - -lemma validI_weaken_pre[wp_pre]: - "\P'\,\R\ f \G\,\Q\ - \ (\s0 s. P s0 s \ P' s0 s) - \ \P\,\R\ f \G\,\Q\" - by (simp add: validI_def, blast) - -lemma rely_weaken: - "(\s0 s. R s0 s \ R' s0 s) - \ (tr, res) \ rely f R s s0 \ (tr, res) \ rely f R' s s0" - by (simp add: rely_def rely_cond_def, blast) - -lemma validI_weaken_rely[wp_pre]: - "\P\,\R'\ f \G\,\Q\ - \ (\s0 s. R s0 s \ R' s0 s) - \ \P\,\R\ f \G\,\Q\" - apply (simp add: validI_def) - by (metis rely_weaken) - -lemma validI_strengthen_post: - "\P\,\R\ f \G\,\Q'\ - \ (\v s0 s. Q' v s0 s \ Q v s0 s) - \ \P\,\R\ f \G\,\Q\" - by (simp add: validI_def) - -lemma validI_strengthen_guar: - "\P\, \R\ f \G'\, \Q\ - \ (\s0 s. G' s0 s \ G s0 s) - \ \P\, \R\ f \G\, \Q\" - by (force simp: validI_def guar_cond_def) - -lemma rely_prim[simp]: - "rely (\s. insert (v s) (f s)) R s0 = (\s. {x. x = v s \ rely_cond R s0 (fst x)} \ (rely f R s0 s))" - "rely (\s. {}) R s0 = (\_. {})" - by (auto simp: rely_def prod_eq_iff) - -lemma prefix_closed_put_trace_elem[iff]: - "prefix_closed (put_trace_elem x)" - by (clarsimp simp: prefix_closed_def put_trace_elem_def) - -lemma prefix_closed_return[iff]: - "prefix_closed (return x)" - by (simp add: prefix_closed_def return_def) - -lemma prefix_closed_put_trace[iff]: - "prefix_closed (put_trace tr)" - by (induct tr; clarsimp simp: prefix_closed_bind) - -lemma put_trace_eq_drop: - "put_trace xs s - = ((\n. (drop n xs, if n = 0 then Result ((), s) else Incomplete)) ` {.. 
length xs})" - apply (induct xs) - apply (clarsimp simp: return_def) - apply (clarsimp simp: put_trace_elem_def bind_def) - apply (simp add: atMost_Suc_eq_insert_0 image_image) - apply (rule equalityI; clarsimp) - apply (split if_split_asm; clarsimp) - apply (auto intro: image_eqI[where x=0])[1] - apply (rule rev_bexI, simp) - apply clarsimp - done - -lemma put_trace_res: - "(tr, res) \ put_trace xs s - \ \n. tr = drop n xs \ n \ length xs - \ res = (case n of 0 \ Result ((), s) | _ \ Incomplete)" - apply (clarsimp simp: put_trace_eq_drop) - apply (case_tac n; auto intro: exI[where x=0]) - done - -lemma put_trace_twp[wp]: - "\\s0 s. (\n. rely_cond R s0 (drop n xs) \ guar_cond G s0 (drop n xs)) - \ (rely_cond R s0 xs \ Q () (last_st_tr xs s0) s)\,\R\ put_trace xs \G\,\Q\" - apply (clarsimp simp: validI_def rely_def) - apply (drule put_trace_res) - apply (clarsimp; clarsimp split: nat.split_asm) - done - -lemmas put_trace_elem_twp = put_trace_twp[where xs="[x]" for x, simplified] - -lemma prefix_closed_select[iff]: - "prefix_closed (select S)" - by (simp add: prefix_closed_def select_def image_def) - -lemma select_wp[wp]: "\\s. \x \ S. Q x s\ select S \Q\" - by (simp add: select_def valid_def mres_def image_def) - -lemma rely_cond_rtranclp: - "rely_cond R s (map (Pair Env) xs) \ rtranclp R s (last_st_tr (map (Pair Env) xs) s)" - apply (induct xs arbitrary: s rule: rev_induct) - apply simp - apply (clarsimp simp add: rely_cond_def) - apply (erule converse_rtranclp_into_rtranclp) - apply simp - done - -lemma put_wp[wp]: - "\\_. Q () s\ put s \Q\" - by (simp add: put_def valid_def mres_def) - -lemma get_wp[wp]: - "\\s. Q s s\ get \Q\" - by (simp add: get_def valid_def mres_def) - -lemma bind_wp[wp_split]: - "\ \r. \Q' r\ g r \Q\; \P\f \Q'\ \ - \ \P\ f >>= (\r. g r) \Q\" - by (fastforce simp: valid_def bind_def2 mres_def intro: image_eqI[rotated]) - -lemma modify_wp[wp]: - "\\s. Q () (f s)\ modify f \Q\" - unfolding modify_def - by wp - -definition - no_trace :: "('s,'a) tmonad \ bool" -where - "no_trace f = (\tr res s. (tr, res) \ f s \ tr = [] \ res \ Incomplete)" - -lemmas no_traceD = no_trace_def[THEN iffD1, rule_format] - -lemma no_trace_emp: - "no_trace f \ (tr, r) \ f s \ tr = []" - by (simp add: no_traceD) - -lemma no_trace_Incomplete_mem: - "no_trace f \ (tr, Incomplete) \ f s" - by (auto dest: no_traceD) - -lemma no_trace_Incomplete_eq: - "no_trace f \ (tr, res) \ f s \ res \ Incomplete" - by (auto dest: no_traceD) - -lemma no_trace_prefix_closed: - "no_trace f \ prefix_closed f" - by (auto simp add: prefix_closed_def dest: no_trace_emp) - -(* Attempt to define triple_judgement to use valid_validI_wp as wp_comb rule. - It doesn't work. It seems that wp_comb rules cannot take more than 1 assumption *) -lemma validI_is_triple: - "\P\,\R\ f \G\,\Q\ = - triple_judgement (\(s0, s). prefix_closed f \ P s0 s) f - (\(s0,s) f. prefix_closed f \ (\tr res. (tr, res) \ rely f R s0 s - \ guar_cond G s0 tr - \ (\rv s'. res = Result (rv, s') \ Q rv (last_st_tr tr s0) s')))" - apply (simp add: triple_judgement_def validI_def ) - apply (cases "prefix_closed f"; simp) - done - -lemma valid_is_triple: - "valid P f Q = - triple_judgement P f - (\s f. (\(r,s') \ (mres (f s)). Q r s'))" - by (simp add: triple_judgement_def valid_def mres_def) - -lemma no_trace_is_triple: - "no_trace f = triple_judgement \ f (\() f. 
id no_trace f)" - by (simp add: triple_judgement_def split: unit.split) - -lemmas [wp_trip] = valid_is_triple validI_is_triple no_trace_is_triple - -lemma valid_validI_wp[wp_comb]: - "no_trace f \ (\s0. \P s0\ f \\v. Q v s0 \) - \ \P\,\R\ f \G\,\Q\" - by (fastforce simp: rely_def validI_def valid_def mres_def no_trace_prefix_closed dest: no_trace_emp - elim: image_eqI[rotated]) - -(* Since valid_validI_wp in wp_comb doesn't work, we add the rules directly in the wp set *) -lemma no_trace_prim: - "no_trace get" - "no_trace (put s)" - "no_trace (modify f)" - "no_trace (return v)" - "no_trace fail" - by (simp_all add: no_trace_def get_def put_def modify_def bind_def - return_def select_def fail_def) - -lemma no_trace_select: - "no_trace (select S)" - by (clarsimp simp add: no_trace_def select_def) - -lemma no_trace_bind: - "no_trace f \ \rv. no_trace (g rv) - \ no_trace (do rv \ f; g rv od)" - apply (subst no_trace_def) - apply (clarsimp simp add: bind_def split: tmres.split_asm; - auto dest: no_traceD[rotated]) - done - -lemma no_trace_extra: - "no_trace (gets f)" - "no_trace (assert P)" - "no_trace (assert_opt Q)" - by (simp_all add: gets_def assert_def assert_opt_def no_trace_bind no_trace_prim - split: option.split) - -lemmas no_trace_all[wp, iff] = no_trace_prim no_trace_select no_trace_extra - -lemma env_steps_twp[wp]: - "\\s0 s. (\s'. R\<^sup>*\<^sup>* s0 s' \ Q () s' s') \ Q () s0 s\,\R\ env_steps \G\,\Q\" - apply (simp add: interference_def env_steps_def) - apply wp - apply (clarsimp simp: guar_cond_def trace_steps_rev_drop_nth rev_nth) - apply (drule rely_cond_rtranclp) - apply (clarsimp simp add: last_st_tr_def hd_append) - done - -lemma interference_twp[wp]: - "\\s0 s. (\s'. R\<^sup>*\<^sup>* s s' \ Q () s' s') \ G s0 s\,\R\ interference \G\,\Q\" - apply (simp add: interference_def commit_step_def del: put_trace.simps) - apply wp - apply clarsimp - apply (simp add: drop_Cons nat.split rely_cond_def guar_cond_def) - done - -(* what Await does if we haven't committed our step is a little - strange. this assumes we have, which means s0 = s. we should - revisit this if we find a use for Await when this isn't the - case *) -lemma Await_sync_twp: - "\\s0 s. s = s0 \ (\x. R\<^sup>*\<^sup>* s0 x \ c x \ Q () x x)\,\R\ Await c \G\,\Q\" - apply (simp add: Await_def split_def) - apply wp - apply clarsimp - apply (clarsimp simp: guar_cond_def trace_steps_rev_drop_nth rev_nth) - apply (drule rely_cond_rtranclp) - apply (simp add: o_def) - done - -(* Wrap up the standard usage pattern of wp/wpc/simp into its own command: *) -method wpsimp uses wp simp split split_del cong = - ((determ \wp add: wp|wpc|clarsimp simp: simp split: split split del: split_del cong: cong\)+)[1] - -section "Satisfiability" - -text \ - The dual to validity: an existential instead of a universal - quantifier for the post condition. In refinement, it is - often sufficient to know that there is one state that - satisfies a condition. -\ -definition - exs_valid :: "('a \ bool) \ ('a, 'b) tmonad \ - ('b \ 'a \ bool) \ bool" - ("\_\ _ \\_\") -where - "exs_valid P f Q \ (\s. P s \ (\(rv, s') \ mres (f s). Q rv s'))" - - -text \The above for the exception monad\ -definition - ex_exs_validE :: "('a \ bool) \ ('a, 'e + 'b) tmonad \ - ('b \ 'a \ bool) \ ('e \ 'a \ bool) \ bool" - ("\_\ _ \\_\, \_\") -where - "ex_exs_validE P f Q E \ - exs_valid P f (\rv. 
case rv of Inl e \ E e | Inr v \ Q v)" - - -section "Lemmas" - -subsection \Determinism\ - -lemma det_set_iff: - "det f \ (r \ mres (f s)) = (mres (f s) = {r})" - apply (simp add: det_def mres_def) - apply (fastforce elim: allE[where x=s]) - done - -lemma return_det [iff]: - "det (return x)" - by (simp add: det_def return_def) - -lemma put_det [iff]: - "det (put s)" - by (simp add: det_def put_def) - -lemma get_det [iff]: - "det get" - by (simp add: det_def get_def) - -lemma det_gets [iff]: - "det (gets f)" - by (auto simp add: gets_def det_def get_def return_def bind_def) - -lemma det_UN: - "det f \ (\x \ mres (f s). g x) = (g (THE x. x \ mres (f s)))" - unfolding det_def mres_def - apply simp - apply (drule spec [of _ s]) - apply (clarsimp simp: vimage_def) - done - -lemma bind_detI [simp, intro!]: - "\ det f; \x. det (g x) \ \ det (f >>= g)" - apply (simp add: bind_def det_def split_def) - apply clarsimp - apply (erule_tac x=s in allE) - apply clarsimp - done - -lemma det_modify[iff]: - "det (modify f)" - by (simp add: modify_def) - -lemma the_run_stateI: - "mres (M s) = {s'} \ the_run_state M s = s'" - by (simp add: the_run_state_def) - -lemma the_run_state_det: - "\ s' \ mres (M s); det M \ \ the_run_state M s = s'" - by (simp only: the_run_stateI det_set_iff[where f=M and s=s]) - -subsection "Lifting and Alternative Basic Definitions" - -lemma liftE_liftM: "liftE = liftM Inr" - apply (rule ext) - apply (simp add: liftE_def liftM_def) - done - -lemma liftME_liftM: "liftME f = liftM (case_sum Inl (Inr \ f))" - apply (rule ext) - apply (simp add: liftME_def liftM_def bindE_def returnOk_def lift_def) - apply (rule_tac f="bind x" in arg_cong) - apply (rule ext) - apply (case_tac xa) - apply (simp_all add: lift_def throwError_def) - done - -lemma liftE_bindE: - "(liftE a) >>=E b = a >>= b" - apply (simp add: liftE_def bindE_def lift_def bind_assoc) - done - -lemma liftM_id[simp]: "liftM id = id" - apply (rule ext) - apply (simp add: liftM_def) - done - -lemma liftM_bind: - "(liftM t f >>= g) = (f >>= (\x. g (t x)))" - by (simp add: liftM_def bind_assoc) - -lemma gets_bind_ign: "gets f >>= (\x. m) = m" - apply (rule ext) - apply (simp add: bind_def simpler_gets_def) - done - -lemma get_bind_apply: "(get >>= f) x = f x x" - by (simp add: get_def bind_def) - -lemma exec_gets: - "(gets f >>= m) s = m (f s) s" - by (simp add: simpler_gets_def bind_def) - -lemma exec_get: - "(get >>= m) s = m s s" - by (simp add: get_def bind_def) - -lemma bind_eqI: - "\ f = f'; \x. g x = g' x \ \ f >>= g = f' >>= g'" - apply (rule ext) - apply (simp add: bind_def) - done - -subsection "Simplification Rules for Lifted And/Or" - -lemma bipred_disj_op_eq[simp]: - "reflp R \ ((=) or R) = R" - apply (rule ext, rule ext) - apply (auto simp: reflp_def) - done - -lemma bipred_le_true[simp]: "R \ \\" - by clarsimp - -subsection "Hoare Logic Rules" - -lemma validE_def2: - "validE P f Q R \ \s. P s \ (\(r,s') \ mres (f s). case r of Inr b \ Q b s' - | Inl a \ R a s')" - by (unfold valid_def validE_def) - -lemma seq': - "\ \A\ f \B\; - \x. P x \ \C\ g x \D\; - \x s. B x s \ P x \ C s \ \ - \A\ do x \ f; g x od \D\" - apply (erule bind_wp[rotated]) - apply (clarsimp simp: valid_def) - apply (fastforce elim: rev_bexI image_eqI[rotated]) - done - -lemma seq: - assumes f_valid: "\A\ f \B\" - assumes g_valid: "\x. P x \ \C\ g x \D\" - assumes bind: "\x s. B x s \ P x \ C s" - shows "\A\ do x \ f; g x od \D\" -apply (insert f_valid g_valid bind) -apply (blast intro: seq') -done - -lemma seq_ext': - "\ \A\ f \B\; - \x. 
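The Satisfiability section above defines exs_valid as the existential dual of valid: rather than demanding that every result of a computation satisfy the postcondition, it only asks that at least one result does, which is often all a refinement argument needs. As an informal illustration (not the library's definitions), here is a minimal Haskell sketch that models a nondeterministic computation as a list of (result, state) pairs and checks both judgements over a finite set of test states:

-- Toy model, hypothetical names: a nondeterministic computation as a list of outcomes.
type Nondet s a = s -> [(a, s)]

select' :: [a] -> Nondet s a          -- pick any element of the given list
select' xs s = [(x, s) | x <- xs]

-- valid P f Q: every outcome from a P-state satisfies Q.
valid :: (s -> Bool) -> Nondet s a -> (a -> s -> Bool) -> [s] -> Bool
valid p f q ss = and [q r s' | s <- ss, p s, (r, s') <- f s]

-- exs_valid P f Q: from every P-state, at least one outcome satisfies Q.
exsValid :: (s -> Bool) -> Nondet s a -> (a -> s -> Bool) -> [s] -> Bool
exsValid p f q ss = and [or [q r s' | (r, s') <- f s] | s <- ss, p s]

main :: IO ()
main = do
  let f = select' [1, 2, 3 :: Int]
  print (valid    (const True) f (\r _ -> r > 0) [()])   -- True: every result is positive
  print (valid    (const True) f (\r _ -> r > 2) [()])   -- False: results 1 and 2 violate Q
  print (exsValid (const True) f (\r _ -> r > 2) [()])   -- True: result 3 satisfies Q

In this toy model, select [1,2,3] is valid only for postconditions that hold of all three results, but it is exs_valid for a postcondition such as (> 2) that some result meets.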
\B x\ g x \C\ \ \ - \A\ do x \ f; g x od \C\" - by (metis bind_wp) - -lemmas seq_ext = bind_wp[rotated] - -lemma seqE': - "\ \A\ f \B\,\E\ ; - \x. \B x\ g x \C\,\E\ \ \ - \A\ doE x \ f; g x odE \C\,\E\" - apply (simp add: bindE_def validE_def) - apply (erule seq_ext') - apply (auto simp: lift_def valid_def throwError_def return_def mres_def - split: sum.splits) - done - -lemma seqE: - assumes f_valid: "\A\ f \B\,\E\" - assumes g_valid: "\x. \B x\ g x \C\,\E\" - shows "\A\ doE x \ f; g x odE \C\,\E\" - apply(insert f_valid g_valid) - apply(blast intro: seqE') - done - -lemma hoare_TrueI: "\P\ f \\_. \\" - by (simp add: valid_def) - -lemma hoareE_TrueI: "\P\ f \\_. \\, \\r. \\" - by (simp add: validE_def valid_def) - -lemma hoare_True_E_R [simp]: - "\P\ f \\r s. True\, -" - by (auto simp add: validE_R_def validE_def valid_def split: sum.splits) - -lemma hoare_post_conj [intro!]: - "\ \ P \ a \ Q \; \ P \ a \ R \ \ \ \ P \ a \ Q and R \" - by (fastforce simp: valid_def split_def pred_conj_def) - -lemma hoare_pre_disj [intro!]: - "\ \ P \ a \ R \; \ Q \ a \ R \ \ \ \ P or Q \ a \ R \" - by (simp add:valid_def pred_disj_def) - -lemma hoare_conj: - "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \P and P'\ f \Q and Q'\" - unfolding valid_def - by (auto) - -lemma hoare_post_taut: "\ P \ a \ \\ \" - by (simp add:valid_def) - -lemma wp_post_taut: "\\r. True\ f \\r s. True\" - by (rule hoare_post_taut) - -lemma wp_post_tautE: "\\r. True\ f \\r s. True\,\\f s. True\" -proof - - have P: "\r. (case r of Inl a \ True | _ \ True) = True" - by (case_tac r, simp_all) - show ?thesis - by (simp add: validE_def P wp_post_taut) -qed - -lemma hoare_pre_cont [simp]: "\ \ \ a \ P \" - by (simp add:valid_def) - - -subsection \Strongest Postcondition Rules\ - -lemma get_sp: - "\P\ get \\a s. s = a \ P s\" - by(simp add:get_def valid_def mres_def) - -lemma put_sp: - "\\\ put a \\_ s. s = a\" - by(simp add:put_def valid_def mres_def) - -lemma return_sp: - "\P\ return a \\b s. b = a \ P s\" - by(simp add:return_def valid_def mres_def) - -lemma assert_sp: - "\ P \ assert Q \ \r s. P s \ Q \" - by (simp add: assert_def fail_def return_def valid_def mres_def) - -lemma hoare_gets_sp: - "\P\ gets f \\rv s. rv = f s \ P s\" - by (simp add: valid_def simpler_gets_def mres_def) - -lemma hoare_return_drop_var [iff]: "\ Q \ return x \ \r. Q \" - by (simp add:valid_def return_def mres_def) - -lemma hoare_gets [intro!]: "\ \s. P s \ Q (f s) s \ \ \ P \ gets f \ Q \" - by (simp add:valid_def gets_def get_def bind_def return_def mres_def) - -lemma hoare_modifyE_var [intro!]: - "\ \s. P s \ Q (f s) \ \ \ P \ modify f \ \r s. Q s \" - by(simp add: valid_def modify_def put_def get_def bind_def mres_def) - -lemma hoare_if [intro!]: - "\ P \ \ Q \ a \ R \; \ P \ \ Q \ b \ R \ \ \ - \ Q \ if P then a else b \ R \" - by (simp add:valid_def) - -lemma hoare_pre_subst: "\ A = B; \A\ a \C\ \ \ \B\ a \C\" - by(clarsimp simp:valid_def split_def) - -lemma hoare_post_subst: "\ B = C; \A\ a \B\ \ \ \A\ a \C\" - by(clarsimp simp:valid_def split_def) - -lemma hoare_pre_tautI: "\ \A and P\ a \B\; \A and not P\ a \B\ \ \ \A\ a \B\" - by(fastforce simp:valid_def split_def pred_conj_def pred_neg_def) - -lemma hoare_pre_imp: "\ \s. P s \ Q s; \Q\ a \R\ \ \ \P\ a \R\" - by (fastforce simp add:valid_def) - -lemma hoare_post_imp: "\ \r s. Q r s \ R r s; \P\ a \Q\ \ \ \P\ a \R\" - by(fastforce simp:valid_def split_def) - -lemma hoare_post_impErr': "\ \P\ a \Q\,\E\; - \r s. Q r s \ R r s; - \e s. 
E e s \ F e s \ \ - \P\ a \R\,\F\" - apply (simp add: validE_def) - apply (rule_tac Q="\r s. case r of Inl a \ E a s | Inr b \ Q b s" in hoare_post_imp) - apply (case_tac r) - apply simp_all - done - -lemma hoare_post_impErr: "\ \P\ a \Q\,\E\; - \r s. Q r s \ R r s; - \e s. E e s \ F e s \ \ - \P\ a \R\,\F\" - apply (blast intro: hoare_post_impErr') - done - -lemma hoare_validE_cases: - "\ \ P \ f \ Q \, \ \_ _. True \; \ P \ f \ \_ _. True \, \ R \ \ - \ \ P \ f \ Q \, \ R \" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r. R\,\\r. R\" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc2: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r. R\,\\r s. True\" - by (simp add: validE_def valid_def split: sum.splits) blast - -lemma hoare_post_imp_dc2E: - "\\P\ a \\r. Q\; \s. Q s \ R s\ \ \P\ a \\r s. True\, \\r. R\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_imp_dc2E_actual: - "\\P\ a \\r. R\\ \ \P\ a \\r s. True\, \\r. R\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_imp_dc2_actual: - "\\P\ a \\r. R\\ \ \P\ a \\r. R\, \\r s. True\" - by (simp add: validE_def valid_def split: sum.splits) fast - -lemma hoare_post_impE: "\ \r s. Q r s \ R r s; \P\ a \Q\ \ \ \P\ a \R\" - by (fastforce simp:valid_def split_def) - -lemma hoare_conjD1: - "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. Q rv\" - unfolding valid_def by auto - -lemma hoare_conjD2: - "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. R rv\" - unfolding valid_def by auto - -lemma hoare_post_disjI1: - "\P\ f \\rv. Q rv\ \ \P\ f \\rv. Q rv or R rv\" - unfolding valid_def by auto - -lemma hoare_post_disjI2: - "\P\ f \\rv. R rv\ \ \P\ f \\rv. Q rv or R rv\" - unfolding valid_def by auto - -lemma hoare_weaken_pre: - "\\Q\ a \R\; \s. P s \ Q s\ \ \P\ a \R\" - apply (rule hoare_pre_imp) - prefer 2 - apply assumption - apply blast - done - -lemma hoare_strengthen_post: - "\\P\ a \Q\; \r s. Q r s \ R r s\ \ \P\ a \R\" - apply (rule hoare_post_imp) - prefer 2 - apply assumption - apply blast - done - -lemma use_valid: "\(r, s') \ mres (f s); \P\ f \Q\; P s \ \ Q r s'" - apply (simp add: valid_def) - apply blast - done - -lemma use_validE_norm: "\ (Inr r', s') \ mres (B s); \ P \ B \ Q \,\ E \; P s \ \ Q r' s'" - apply (clarsimp simp: validE_def valid_def) - apply force - done - -lemma use_validE_except: "\ (Inl r', s') \ mres (B s); \ P \ B \ Q \,\ E \; P s \ \ E r' s'" - apply (clarsimp simp: validE_def valid_def) - apply force - done - -lemma in_inv_by_hoareD: - "\ \P. \P\ f \\_. P\; (x,s') \ mres (f s) \ \ s' = s" - apply (drule_tac x="(=) s" in meta_spec) - apply (auto simp add: valid_def mres_def) - done - -subsection "Satisfiability" - -lemma exs_hoare_post_imp: "\\r s. Q r s \ R r s; \P\ a \\Q\\ \ \P\ a \\R\" - apply (simp add: exs_valid_def) - apply safe - apply (erule_tac x=s in allE, simp) - apply blast - done - -lemma use_exs_valid: "\\P\ f \\Q\; P s \ \ \(r, s') \ mres (f s). Q r s'" - by (simp add: exs_valid_def) - -definition "exs_postcondition P f \ (\a b. \(rv, s)\ f a b. P rv s)" - -lemma exs_valid_is_triple: - "exs_valid P f Q = triple_judgement P f (exs_postcondition Q (\s f. mres (f s)))" - by (simp add: triple_judgement_def exs_postcondition_def exs_valid_def) - -lemmas [wp_trip] = exs_valid_is_triple - -lemma exs_valid_weaken_pre [wp_comb]: - "\ \ P' \ f \\ Q \; \s. 
P s \ P' s \ \ \ P \ f \\ Q \" - apply atomize - apply (clarsimp simp: exs_valid_def) - done - -lemma exs_valid_chain: - "\ \ P \ f \\ Q \; \s. R s \ P s; \r s. Q r s \ S r s \ \ \ R \ f \\ S \" - by (fastforce simp only: exs_valid_def Bex_def ) - -lemma exs_valid_assume_pre: - "\ \s. P s \ \ P \ f \\ Q \ \ \ \ P \ f \\ Q \" - apply (fastforce simp: exs_valid_def) - done - -lemma exs_valid_bind [wp_split]: - "\ \x. \B x\ g x \\C\; \A\ f \\B\ \ \ \ A \ f >>= (\x. g x) \\ C \" - apply atomize - apply (clarsimp simp: exs_valid_def bind_def2 mres_def) - apply (drule spec, drule(1) mp, clarsimp) - apply (drule spec2, drule(1) mp, clarsimp) - apply (simp add: image_def bex_Un) - apply (strengthen subst[where P="\x. x \ f s" for s, mk_strg I _ E], simp) - apply (fastforce elim: rev_bexI) - done - -lemma exs_valid_return [wp]: - "\ Q v \ return v \\ Q \" - by (clarsimp simp: exs_valid_def return_def mres_def) - -lemma exs_valid_select [wp]: - "\ \s. \r \ S. Q r s \ select S \\ Q \" - apply (clarsimp simp: exs_valid_def select_def mres_def) - apply (auto simp add: image_def) - done - -lemma exs_valid_get [wp]: - "\ \s. Q s s \ get \\ Q \" - by (clarsimp simp: exs_valid_def get_def mres_def) - -lemma exs_valid_gets [wp]: - "\ \s. Q (f s) s \ gets f \\ Q \" - by (clarsimp simp: gets_def) wp - -lemma exs_valid_put [wp]: - "\ Q v \ put v \\ Q \" - by (clarsimp simp: put_def exs_valid_def mres_def) - -lemma exs_valid_state_assert [wp]: - "\ \s. Q () s \ G s \ state_assert G \\ Q \" - by (clarsimp simp: state_assert_def exs_valid_def get_def - assert_def bind_def2 return_def mres_def) - -lemmas exs_valid_guard = exs_valid_state_assert - -lemma exs_valid_fail [wp]: - "\ \_. False \ fail \\ Q \" - by (clarsimp simp: fail_def exs_valid_def) - -lemma exs_valid_condition [wp]: - "\ \ P \ L \\ Q \; \ P' \ R \\ Q \ \ \ - \ \s. (C s \ P s) \ (\ C s \ P' s) \ condition C L R \\ Q \" - by (clarsimp simp: condition_def exs_valid_def split: sum.splits) - - -subsection MISC - -lemma hoare_return_simp: - "\P\ return x \Q\ = (\s. P s \ Q x s)" - by (simp add: valid_def return_def mres_def) - -lemma hoare_gen_asm: - "(P \ \P'\ f \Q\) \ \P' and K P\ f \Q\" - by (fastforce simp add: valid_def) - -lemma when_wp [wp]: - "\ P \ \Q\ f \R\ \ \ \if P then Q else R ()\ when P f \R\" - by (clarsimp simp: when_def valid_def return_def mres_def) - -lemma hoare_conjI: - "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s\" - unfolding valid_def by blast - -lemma hoare_disjI1: - "\ \P\ f \Q\ \ \ \P\ f \\r s. Q r s \ R r s \" - unfolding valid_def by blast - -lemma hoare_disjI2: - "\ \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s \" - unfolding valid_def by blast - -lemma hoare_assume_pre: - "(\s. P s \ \P\ f \Q\) \ \P\ f \Q\" - by (auto simp: valid_def) - -lemma hoare_returnOk_sp: - "\P\ returnOk x \\r s. r = x \ P s\, \Q\" - by (simp add: valid_def validE_def returnOk_def return_def mres_def) - -lemma hoare_assume_preE: - "(\s. P s \ \P\ f \Q\,\R\) \ \P\ f \Q\,\R\" - by (auto simp: valid_def validE_def) - -lemma hoare_allI: - "(\x. \P\f\Q x\) \ \P\f\\r s. \x. Q x r s\" - by (simp add: valid_def) blast - -lemma validE_allI: - "(\x. \P\ f \\r s. Q x r s\,\E\) \ \P\ f \\r s. \x. Q x r s\,\E\" - by (fastforce simp: valid_def validE_def split: sum.splits) - -lemma hoare_exI: - "\P\ f \Q x\ \ \P\ f \\r s. \x. Q x r s\" - by (simp add: valid_def) blast - -lemma hoare_impI: - "(R \ \P\f\Q\) \ \P\f\\r s. R \ Q r s\" - by (simp add: valid_def) blast - -lemma validE_impI: - " \\E. \P\ f \\_ _. True\,\E\; (P' \ \P\ f \Q\,\E\)\ \ - \P\ f \\r s. 
P' \ Q r s\, \E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - -lemma hoare_case_option_wp: - "\ \P\ f None \Q\; - \x. \P' x\ f (Some x) \Q' x\ \ - \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\" - by (cases v) auto - -subsection "Reasoning directly about states" - -lemma in_throwError: - "((v, s') \ mres (throwError e s)) = (v = Inl e \ s' = s)" - by (simp add: throwError_def return_def mres_def) - -lemma in_returnOk: - "((v', s') \ mres (returnOk v s)) = (v' = Inr v \ s' = s)" - by (simp add: returnOk_def return_def mres_def) - -lemma in_bind: - "((r,s') \ mres ((do x \ f; g x od) s)) = - (\s'' x. (x, s'') \ mres (f s) \ (r, s') \ mres (g x s''))" - apply (simp add: bind_def split_def mres_def) - apply (auto split: tmres.splits; force elim: rev_bexI image_eqI[rotated]) - done - -lemma in_bindE_R: - "((Inr r,s') \ mres ((doE x \ f; g x odE) s)) = - (\s'' x. (Inr x, s'') \ mres (f s) \ (Inr r, s') \ mres (g x s''))" - apply (simp add: bindE_def in_bind) - apply (simp add: lift_def split_def) - apply (clarsimp simp: throwError_def return_def lift_def mres_def split: sum.splits) - apply force - done - -lemma in_bindE_L: - "((Inl r, s') \ mres ((doE x \ f; g x odE) s)) \ - (\s'' x. (Inr x, s'') \ mres (f s) \ (Inl r, s') \ mres (g x s'')) \ ((Inl r, s') \ mres (f s))" - apply (simp add: bindE_def in_bind lift_def) - apply safe - apply (simp add: return_def throwError_def lift_def split_def mres_def split: sum.splits if_split_asm) - apply force+ - done - -lemma in_return: - "(r, s') \ mres (return v s) = (r = v \ s' = s)" - by (simp add: return_def mres_def) - -lemma in_liftE: - "((v, s') \ mres (liftE f s)) = (\v'. v = Inr v' \ (v', s') \ mres (f s))" - by (auto simp add: liftE_def in_bind in_return) - -lemma in_whenE: "((v, s') \ mres (whenE P f s)) = ((P \ (v, s') \ mres (f s)) \ - (\P \ v = Inr () \ s' = s))" - by (simp add: whenE_def in_returnOk) - -lemma inl_whenE: - "((Inl x, s') \ mres (whenE P f s)) = (P \ (Inl x, s') \ mres (f s))" - by (auto simp add: in_whenE) - -lemma in_fail: - "r \ mres (fail s) = False" - by (simp add: fail_def mres_def) - -lemma in_assert: - "(r, s') \ mres (assert P s) = (P \ s' = s)" - by (auto simp add: assert_def return_def fail_def mres_def) - -lemma in_assertE: - "(r, s') \ mres (assertE P s) = (P \ r = Inr () \ s' = s)" - by (auto simp add: assertE_def returnOk_def return_def fail_def mres_def) - -lemma in_assert_opt: - "(r, s') \ mres (assert_opt v s) = (v = Some r \ s' = s)" - by (auto simp: assert_opt_def in_fail in_return split: option.splits) - -lemma in_get: - "(r, s') \ mres (get s) = (r = s \ s' = s)" - by (simp add: get_def mres_def) - -lemma in_gets: - "(r, s') \ mres (gets f s) = (r = f s \ s' = s)" - by (simp add: simpler_gets_def mres_def) - -lemma in_put: - "(r, s') \ mres (put x s) = (s' = x \ r = ())" - by (simp add: put_def mres_def) - -lemma in_when: - "(v, s') \ mres (when P f s) = ((P \ (v, s') \ mres (f s)) \ (\P \ v = () \ s' = s))" - by (simp add: when_def in_return) - -lemma in_modify: - "(v, s') \ mres (modify f s) = (s'=f s \ v = ())" - by (auto simp add: modify_def bind_def get_def put_def mres_def) - -lemma gets_the_in_monad: - "((v, s') \ mres (gets_the f s)) = (s' = s \ f s = Some v)" - by (auto simp: gets_the_def in_bind in_gets in_assert_opt split: option.split) - -lemma in_alternative: - "(r,s') \ mres ((f \ g) s) = ((r,s') \ mres (f s) \ (r,s') \ mres (g s))" - by (auto simp add: alternative_def mres_def) - -lemmas in_monad = inl_whenE in_whenE in_liftE in_bind in_bindE_L - 
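The in_* membership lemmas collected into in_monad above rewrite "(r, s') is a possible outcome of m at s" into plain equations on r and s', which is what makes reasoning directly about states workable. A small Haskell sketch of the same idea, with hypothetical names and the failure flag omitted, since only the result set matters for these equations:

-- Toy model: only the result set of each computation is represented.
type Results s a = s -> [(a, s)]

getsR :: (s -> a) -> Results s a
getsR f s = [(f s, s)]

putR :: s -> Results s ()
putR s' _ = [((), s')]

bindR :: Results s a -> (a -> Results s b) -> Results s b
bindR f g s = [(r', s'') | (r, s') <- f s, (r', s'') <- g r s']

member :: (Eq a, Eq s) => (a, s) -> Results s a -> s -> Bool
member outcome m s = outcome `elem` m s

main :: IO ()
main = do
  -- in_gets: (r, s') is an outcome of gets f at s  iff  r = f s and s' = s
  print (member (6, 3) (getsR (* 2)) 3)                        -- True
  -- in_put: (r, s') is an outcome of put x at s  iff  s' = x and r = ()
  print (member ((), 7) (putR 7) (0 :: Int))                   -- True
  -- in_bind: membership decomposes through some intermediate outcome of f
  print (member ((), 5) (getsR (+ 1) `bindR` putR) (4 :: Int)) -- True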
in_bindE_R in_returnOk in_throwError in_fail - in_assertE in_assert in_return in_assert_opt - in_get in_gets in_put in_when (* unlessE_whenE *) - (* unless_when *) in_modify gets_the_in_monad - in_alternative - -subsection "Non-Failure" - -lemma no_failD: - "\ no_fail P m; P s \ \ Failed \ snd ` m s" - by (simp add: no_fail_def) - -lemma no_fail_modify [wp,simp]: - "no_fail \ (modify f)" - by (simp add: no_fail_def modify_def get_def put_def bind_def) - -lemma no_fail_gets_simp[simp]: - "no_fail P (gets f)" - unfolding no_fail_def gets_def get_def return_def bind_def - by simp - -lemma no_fail_gets: - "no_fail \ (gets f)" - by simp - -lemma snd_pair_image: - "snd ` Pair x ` S = S" - by (simp add: image_def) - -lemma no_fail_select [simp]: - "no_fail \ (select S)" - by (simp add: no_fail_def select_def image_def) - -lemma no_fail_pre: - "\ no_fail P f; \s. Q s \ P s\ \ no_fail Q f" - by (simp add: no_fail_def) - -lemma no_fail_alt [wp]: - "\ no_fail P f; no_fail Q g \ \ no_fail (P and Q) (f \ g)" - by (auto simp add: no_fail_def alternative_def) - -lemma no_fail_return [simp, wp]: - "no_fail \ (return x)" - by (simp add: return_def no_fail_def) - -lemma no_fail_get [simp, wp]: - "no_fail \ get" - by (simp add: get_def no_fail_def) - -lemma no_fail_put [simp, wp]: - "no_fail \ (put s)" - by (simp add: put_def no_fail_def) - -lemma no_fail_when [wp]: - "(P \ no_fail Q f) \ no_fail (if P then Q else \) (when P f)" - by (simp add: when_def) - -lemma no_fail_unless [wp]: - "(\P \ no_fail Q f) \ no_fail (if P then \ else Q) (unless P f)" - by (simp add: unless_def when_def) - -lemma no_fail_fail [simp, wp]: - "no_fail \ fail" - by (simp add: fail_def no_fail_def) - -lemmas [wp] = no_fail_gets - -lemma no_fail_assert [simp, wp]: - "no_fail (\_. P) (assert P)" - by (simp add: assert_def) - -lemma no_fail_assert_opt [simp, wp]: - "no_fail (\_. P \ None) (assert_opt P)" - by (simp add: assert_opt_def split: option.splits) - -lemma no_fail_case_option [wp]: - assumes f: "no_fail P f" - assumes g: "\x. no_fail (Q x) (g x)" - shows "no_fail (if x = None then P else Q (the x)) (case_option f g x)" - by (clarsimp simp add: f g) - -lemma no_fail_if [wp]: - "\ P \ no_fail Q f; \P \ no_fail R g \ \ - no_fail (if P then Q else R) (if P then f else g)" - by simp - -lemma no_fail_apply [wp]: - "no_fail P (f (g x)) \ no_fail P (f $ g x)" - by simp - -lemma no_fail_undefined [simp, wp]: - "no_fail \ undefined" - by (simp add: no_fail_def) - -lemma no_fail_returnOK [simp, wp]: - "no_fail \ (returnOk x)" - by (simp add: returnOk_def) - -(* text {* Empty results implies non-failure *} - -lemma empty_fail_modify [simp]: - "empty_fail (modify f)" - by (simp add: empty_fail_def simpler_modify_def) - -lemma empty_fail_gets [simp]: - "empty_fail (gets f)" - by (simp add: empty_fail_def simpler_gets_def) - -lemma empty_failD: - "\ empty_fail m; fst (m s) = {} \ \ snd (m s)" - by (simp add: empty_fail_def) - -lemma empty_fail_select_f [simp]: - assumes ef: "fst S = {} \ snd S" - shows "empty_fail (select_f S)" - by (fastforce simp add: empty_fail_def select_f_def intro: ef) - -lemma empty_fail_bind [simp]: - "\ empty_fail a; \x. empty_fail (b x) \ \ empty_fail (a >>= b)" - apply (simp add: bind_def empty_fail_def split_def) - apply clarsimp - apply (case_tac "fst (a s) = {}") - apply blast - apply (clarsimp simp: ex_in_conv [symmetric]) - done - -lemma empty_fail_return [simp]: - "empty_fail (return x)" - by (simp add: empty_fail_def return_def) - -lemma empty_fail_mapM [simp]: - assumes m: "\x. 
empty_fail (m x)" - shows "empty_fail (mapM m xs)" -proof (induct xs) - case Nil - thus ?case by (simp add: mapM_def sequence_def) -next - case Cons - have P: "\m x xs. mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" - by (simp add: mapM_def sequence_def Let_def) - from Cons - show ?case by (simp add: P m) -qed - -lemma empty_fail [simp]: - "empty_fail fail" - by (simp add: fail_def empty_fail_def) - -lemma empty_fail_assert_opt [simp]: - "empty_fail (assert_opt x)" - by (simp add: assert_opt_def split: option.splits) - -lemma empty_fail_mk_ef: - "empty_fail (mk_ef o m)" - by (simp add: empty_fail_def mk_ef_def) - *) -subsection "Failure" - -lemma fail_wp: "\\x. True\ fail \Q\" - by (simp add: valid_def fail_def mres_def vimage_def) - -lemma failE_wp: "\\x. True\ fail \Q\,\E\" - by (simp add: validE_def fail_wp) - -lemma fail_update [iff]: - "fail (f s) = fail s" - by (simp add: fail_def) - - -text \We can prove postconditions using hoare triples\ - -lemma post_by_hoare: "\ \P\ f \Q\; P s; (r, s') \ mres (f s) \ \ Q r s'" - apply (simp add: valid_def) - apply blast - done - -text \Weakest Precondition Rules\ - -lemma hoare_vcg_prop: - "\\s. P\ f \\rv s. P\" - by (simp add: valid_def) - -lemma return_wp: - "\P x\ return x \P\" - by(simp add:valid_def return_def mres_def) - -(* lemma get_wp: - "\\s. P s s\ get \P\" - by(auto simp add:valid_def split_def get_def mres_def) - *) -lemma gets_wp: - "\\s. P (f s) s\ gets f \P\" - by(simp add:valid_def split_def gets_def return_def get_def bind_def mres_def) - -(* lemma modify_wp: - "\\s. P () (f s)\ modify f \P\" - by(simp add:valid_def split_def modify_def get_def put_def bind_def ) - *) -(* lemma put_wp: - "\\s. P () x\ put x \P\" - by(simp add:valid_def put_def) - *) -lemma returnOk_wp: - "\P x\ returnOk x \P\,\E\" - by(simp add:validE_def2 returnOk_def return_def mres_def) - -lemma throwError_wp: - "\E e\ throwError e \P\,\E\" - by(simp add:validE_def2 throwError_def return_def mres_def) - -lemma returnOKE_R_wp : "\P x\ returnOk x \P\, -" - by (simp add: validE_R_def validE_def valid_def returnOk_def return_def mres_def) - -lemma catch_wp: - "\ \x. \E x\ handler x \Q\; \P\ f \Q\,\E\ \ \ - \P\ catch f handler \Q\" - apply (unfold catch_def validE_def) - apply (erule seq_ext) - apply (simp add: return_wp split: sum.splits) - done - -lemma handleE'_wp: - "\ \x. \F x\ handler x \Q\,\E\; \P\ f \Q\,\F\ \ \ - \P\ f handler \Q\,\E\" - apply (unfold handleE'_def validE_def) - apply (erule seq_ext) - apply (clarsimp split: sum.splits) - apply (simp add: valid_def return_def mres_def) - done - -lemma handleE_wp: - assumes x: "\x. \F x\ handler x \Q\,\E\" - assumes y: "\P\ f \Q\,\F\" - shows "\P\ f handler \Q\,\E\" - by (simp add: handleE_def handleE'_wp [OF x y]) - -lemma hoare_vcg_split_if: - "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ - \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" - by simp - -lemma hoare_vcg_split_ifE: - "\ P \ \Q\ f \S\,\E\; \P \ \R\ g \S\,\E\ \ \ - \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\,\E\" - by simp - -lemma in_image_constant: - "(x \ (\_. 
v) ` S) = (x = v \ S \ {})" - by (simp add: image_constant_conv) - -lemma hoare_liftM_subst: "\P\ liftM f m \Q\ = \P\ m \Q \ f\" - apply (simp add: liftM_def bind_def2 return_def split_def mres_def) - apply (simp add: valid_def Ball_def mres_def image_Un) - apply (simp add: image_image in_image_constant) - apply (rule_tac f=All in arg_cong) - apply (rule ext) - apply force - done - -lemma liftE_validE[simp]: "\P\ liftE f \Q\,\E\ = \P\ f \Q\" - apply (simp add: liftE_liftM validE_def hoare_liftM_subst o_def) - done - -lemma liftE_wp: - "\P\ f \Q\ \ \P\ liftE f \Q\,\E\" - by simp - -lemma liftM_wp: "\P\ m \Q \ f\ \ \P\ liftM f m \Q\" - by (simp add: hoare_liftM_subst) - -lemma hoare_liftME_subst: "\P\ liftME f m \Q\,\E\ = \P\ m \Q \ f\,\E\" - apply (simp add: validE_def liftME_liftM hoare_liftM_subst o_def) - apply (rule_tac f="valid P m" in arg_cong) - apply (rule ext)+ - apply (case_tac x, simp_all) - done - -lemma liftME_wp: "\P\ m \Q \ f\,\E\ \ \P\ liftME f m \Q\,\E\" - by (simp add: hoare_liftME_subst) - -(* FIXME: Move *) -lemma o_const_simp[simp]: "(\x. C) \ f = (\x. C)" - by (simp add: o_def) - -lemma hoare_vcg_split_case_option: - "\ \x. x = None \ \P x\ f x \R x\; - \x y. x = Some y \ \Q x y\ g x y \R x\ \ \ - \\s. (x = None \ P x s) \ - (\y. x = Some y \ Q x y s)\ - case x of None \ f x - | Some y \ g x y - \R x\" - apply(simp add:valid_def split_def) - apply(case_tac x, simp_all) -done - -lemma hoare_vcg_split_case_optionE: - assumes none_case: "\x. x = None \ \P x\ f x \R x\,\E x\" - assumes some_case: "\x y. x = Some y \ \Q x y\ g x y \R x\,\E x\" - shows "\\s. (x = None \ P x s) \ - (\y. x = Some y \ Q x y s)\ - case x of None \ f x - | Some y \ g x y - \R x\,\E x\" - apply(case_tac x, simp_all) - apply(rule none_case, simp) - apply(rule some_case, simp) -done - -lemma hoare_vcg_split_case_sum: - "\ \x a. x = Inl a \ \P x a\ f x a \R x\; - \x b. x = Inr b \ \Q x b\ g x b \R x\ \ \ - \\s. (\a. x = Inl a \ P x a s) \ - (\b. x = Inr b \ Q x b s) \ - case x of Inl a \ f x a - | Inr b \ g x b - \R x\" - apply(simp add:valid_def split_def) - apply(case_tac x, simp_all) -done - -lemma hoare_vcg_split_case_sumE: - assumes left_case: "\x a. x = Inl a \ \P x a\ f x a \R x\" - assumes right_case: "\x b. x = Inr b \ \Q x b\ g x b \R x\" - shows "\\s. (\a. x = Inl a \ P x a s) \ - (\b. x = Inr b \ Q x b s) \ - case x of Inl a \ f x a - | Inr b \ g x b - \R x\" - apply(case_tac x, simp_all) - apply(rule left_case, simp) - apply(rule right_case, simp) -done - -lemma hoare_vcg_precond_imp: - "\ \Q\ f \R\; \s. P s \ Q s \ \ \P\ f \R\" - by (fastforce simp add:valid_def) - -lemma hoare_vcg_precond_impE: - "\ \Q\ f \R\,\E\; \s. P s \ Q s \ \ \P\ f \R\,\E\" - by (fastforce simp add:validE_def2) - -lemma hoare_seq_ext: - assumes g_valid: "\x. \B x\ g x \C\" - assumes f_valid: "\A\ f \B\" - shows "\A\ do x \ f; g x od \C\" - apply(insert f_valid g_valid) - apply(blast intro: seq_ext') -done - -lemma hoare_vcg_seqE: - assumes g_valid: "\x. \B x\ g x \C\,\E\" - assumes f_valid: "\A\ f \B\,\E\" - shows "\A\ doE x \ f; g x odE \C\,\E\" - apply(insert f_valid g_valid) - apply(blast intro: seqE') -done - -lemma hoare_seq_ext_nobind: - "\ \B\ g \C\; - \A\ f \\r s. B s\ \ \ - \A\ do f; g od \C\" - apply (erule seq_ext) - apply (clarsimp simp: valid_def) - done - -lemma hoare_seq_ext_nobindE: - "\ \B\ g \C\,\E\; - \A\ f \\r s. B s\,\E\ \ \ - \A\ doE f; g odE \C\,\E\" - apply (erule seqE) - apply (clarsimp simp:validE_def) - done - -lemma hoare_chain: - "\ \P\ f \Q\; - \s. R s \ P s; - \r s. 
Q r s \ S r s \ \ - \R\ f \S\" - by(fastforce simp add:valid_def split_def) - -lemma validE_weaken: - "\ \P'\ A \Q'\,\E'\; \s. P s \ P' s; \r s. Q' r s \ Q r s; \r s. E' r s \ E r s \ \ \P\ A \Q\,\E\" - by (fastforce simp: validE_def2 split: sum.splits) - -lemmas hoare_chainE = validE_weaken - -lemma hoare_vcg_handle_elseE: - "\ \P\ f \Q\,\E\; - \e. \E e\ g e \R\,\F\; - \x. \Q x\ h x \R\,\F\ \ \ - \P\ f g h \R\,\F\" - apply (simp add: handle_elseE_def validE_def) - apply (rule seq_ext) - apply assumption - apply (simp split: sum.split) - done - -lemma in_mres: - "(tr, Result (rv, s)) \ S \ (rv, s) \ mres S" - by (fastforce simp: mres_def intro: image_eqI[rotated]) - -lemma alternative_wp: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f' \Q\" - shows "\P and P'\ f \ f' \Q\" - unfolding valid_def alternative_def mres_def - using post_by_hoare[OF x _ in_mres] post_by_hoare[OF y _ in_mres] - by fastforce - -lemma alternativeE_wp: - assumes x: "\P\ f \Q\,\E\" and y: "\P'\ f' \Q\,\E\" - shows "\P and P'\ f \ f' \Q\,\E\" - apply (unfold validE_def) - apply (wp add: x y alternative_wp | simp | fold validE_def)+ - done - -lemma alternativeE_R_wp: - "\ \P\ f \Q\,-; \P'\ f' \Q\,- \ \ \P and P'\ f \ f' \Q\,-" - apply (simp add: validE_R_def) - apply (rule alternativeE_wp) - apply assumption+ - done - -lemma alternative_R_wp: - "\ \P\ f -,\Q\; \P'\ g -,\Q\ \ \ \P and P'\ f \ g -, \Q\" - apply (simp add: validE_E_def) - apply (rule alternativeE_wp) - apply assumption+ - done - -lemma state_select_wp [wp]: "\ \s. \t. (s, t) \ f \ P () t \ state_select f \ P \" - apply (clarsimp simp: state_select_def assert_def) - apply (rule hoare_weaken_pre) - apply (wp select_wp hoare_vcg_split_if return_wp fail_wp) - apply simp - done - -lemma condition_wp [wp]: - "\ \ Q \ A \ P \; \ R \ B \ P \ \ \ \ \s. if C s then Q s else R s \ condition C A B \ P \" - apply (clarsimp simp: condition_def) - apply (clarsimp simp: valid_def pred_conj_def pred_neg_def split_def) - done - -lemma conditionE_wp [wp]: - "\ \ P \ A \ Q \,\ R \; \ P' \ B \ Q \,\ R \ \ \ \ \s. if C s then P s else P' s \ condition C A B \Q\,\R\" - apply (clarsimp simp: condition_def) - apply (clarsimp simp: validE_def valid_def) - done - -lemma state_assert_wp [wp]: "\ \s. f s \ P () s \ state_assert f \ P \" - apply (clarsimp simp: state_assert_def get_def - assert_def bind_def valid_def return_def fail_def mres_def) - done - -text \The weakest precondition handler which works on conjunction\ - -lemma hoare_vcg_conj_lift: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f \Q'\" - shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - apply (subst pred_conj_def[symmetric], subst pred_conj_def[symmetric], rule hoare_post_conj) - apply (rule hoare_pre_imp [OF _ x], simp) - apply (rule hoare_pre_imp [OF _ y], simp) - done - -lemma hoare_vcg_conj_liftE1: - "\ \P\ f \Q\,-; \P'\ f \Q'\,\E\ \ \ - \P and P'\ f \\r s. Q r s \ Q' r s\,\E\" - unfolding valid_def validE_R_def validE_def - apply (clarsimp simp: split_def split: sum.splits) - apply (erule allE, erule (1) impE) - apply (erule allE, erule (1) impE) - apply (drule (1) bspec) - apply (drule (1) bspec) - apply clarsimp - done - -lemma hoare_vcg_disj_lift: - assumes x: "\P\ f \Q\" - assumes y: "\P'\ f \Q'\" - shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - apply (simp add: valid_def) - apply safe - apply (erule(1) post_by_hoare [OF x]) - apply (erule notE) - apply (erule(1) post_by_hoare [OF y]) - done - -lemma hoare_vcg_const_Ball_lift: - "\ \x. x \ S \ \P x\ f \Q x\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. 
Q x rv s\" - by (fastforce simp: valid_def) - -lemma hoare_vcg_const_Ball_lift_R: - "\ \x. x \ S \ \P x\ f \Q x\,- \ \ - \\s. \x \ S. P x s\ f \\rv s. \x \ S. Q x rv s\,-" - apply (simp add: validE_R_def validE_def) - apply (rule hoare_strengthen_post) - apply (erule hoare_vcg_const_Ball_lift) - apply (simp split: sum.splits) - done - -lemma hoare_vcg_all_lift: - "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - by (fastforce simp: valid_def) - -lemma hoare_vcg_all_lift_R: - "(\x. \P x\ f \Q x\, -) \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\, -" - by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified]) - -lemma hoare_vcg_const_imp_lift: - "\ P \ \Q\ m \R\ \ \ - \\s. P \ Q s\ m \\rv s. P \ R rv s\" - by (cases P, simp_all add: hoare_vcg_prop) - -lemma hoare_vcg_const_imp_lift_R: - "(P \ \Q\ m \R\,-) \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" - by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits) - -lemma hoare_weak_lift_imp: - "\P'\ f \Q\ \ \\s. P \ P' s\ f \\rv s. P \ Q rv s\" - by (auto simp add: valid_def split_def) - -lemma hoare_vcg_ex_lift: - "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - by (clarsimp simp: valid_def, blast) - -lemma hoare_vcg_ex_lift_R1: - "(\x. \P x\ f \Q\, -) \ \\s. \x. P x s\ f \Q\, -" - by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits) - -(* for instantiations *) -lemma hoare_triv: "\P\f\Q\ \ \P\f\Q\" . -lemma hoare_trivE: "\P\ f \Q\,\E\ \ \P\ f \Q\,\E\" . -lemma hoare_trivE_R: "\P\ f \Q\,- \ \P\ f \Q\,-" . -lemma hoare_trivR_R: "\P\ f -,\E\ \ \P\ f -,\E\" . - -lemma hoare_weaken_preE_E: - "\ \P'\ f -,\Q\; \s. P s \ P' s \ \ \P\ f -,\Q\" - by (fastforce simp add: validE_E_def validE_def valid_def) - -lemma hoare_vcg_E_conj: - "\ \P\ f -,\E\; \P'\ f \Q'\,\E'\ \ - \ \\s. P s \ P' s\ f \Q'\, \\rv s. E rv s \ E' rv s\" - apply (unfold validE_def validE_E_def) - apply (rule hoare_post_imp [OF _ hoare_vcg_conj_lift], simp_all) - apply (case_tac r, simp_all) - done - -lemma hoare_vcg_E_elim: - "\ \P\ f -,\E\; \P'\ f \Q\,- \ - \ \\s. P s \ P' s\ f \Q\,\E\" - by (rule hoare_post_impErr [OF hoare_vcg_E_conj], - (simp add: validE_R_def)+) - -lemma hoare_vcg_R_conj: - "\ \P\ f \Q\,-; \P'\ f \Q'\,- \ - \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" - apply (unfold validE_R_def validE_def) - apply (rule hoare_post_imp [OF _ hoare_vcg_conj_lift], simp_all) - apply (case_tac r, simp_all) - done - -lemma valid_validE: - "\P\ f \\rv. Q\ \ \P\ f \\rv. Q\,\\rv. Q\" - apply (simp add: validE_def) - done - -lemma valid_validE2: - "\ \P\ f \\_. Q'\; \s. Q' s \ Q s; \s. Q' s \ E s \ \ \P\ f \\_. Q\,\\_. E\" - unfolding valid_def validE_def - by (clarsimp split: sum.splits) blast - -lemma validE_valid: "\P\ f \\rv. Q\,\\rv. Q\ \ \P\ f \\rv. Q\" - apply (unfold validE_def) - apply (rule hoare_post_imp) - defer - apply assumption - apply (case_tac r, simp_all) - done - -lemma valid_validE_R: - "\P\ f \\rv. Q\ \ \P\ f \\rv. Q\,-" - by (simp add: validE_R_def hoare_post_impErr [OF valid_validE]) - -lemma valid_validE_E: - "\P\ f \\rv. Q\ \ \P\ f -,\\rv. Q\" - by (simp add: validE_E_def hoare_post_impErr [OF valid_validE]) - -lemma validE_validE_R: "\P\ f \Q\,\\\\ \ \P\ f \Q\,-" - by (simp add: validE_R_def) - -lemma validE_R_validE: "\P\ f \Q\,- \ \P\ f \Q\,\\\\" - by (simp add: validE_R_def) - -lemma hoare_post_imp_R: "\ \P\ f \Q'\,-; \r s. 
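In conventional Hoare-triple notation, the two basic lifting rules above, hoare_vcg_conj_lift and hoare_vcg_disj_lift, read informally as:

\[
\frac{\{P\}\ f\ \{Q\} \qquad \{P'\}\ f\ \{Q'\}}
     {\{\lambda s.\ P\,s \land P'\,s\}\ f\ \{\lambda r\,s.\ Q\,r\,s \land Q'\,r\,s\}}
\qquad
\frac{\{P\}\ f\ \{Q\} \qquad \{P'\}\ f\ \{Q'\}}
     {\{\lambda s.\ P\,s \lor P'\,s\}\ f\ \{\lambda r\,s.\ Q\,r\,s \lor Q'\,r\,s\}}
\]

hoare_vcg_all_lift, hoare_vcg_ex_lift and the const_imp variants follow the same pattern, carrying the quantifier or guard through both the precondition and the postcondition.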
Q' r s \ Q r s \ \ \P\ f \Q\,-" - apply (unfold validE_R_def) - apply (rule hoare_post_impErr, simp+) - done - -lemma hoare_post_comb_imp_conj: - "\ \P'\ f \Q\; \P\ f \Q'\; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\" - apply (rule hoare_pre_imp) - defer - apply (rule hoare_vcg_conj_lift) - apply assumption+ - apply simp - done - -lemma hoare_vcg_precond_impE_R: "\ \P'\ f \Q\,-; \s. P s \ P' s \ \ \P\ f \Q\,-" - by (unfold validE_R_def, rule hoare_vcg_precond_impE, simp+) - -(* lemma valid_is_triple: - "valid P f Q = triple_judgement P f (postcondition Q (\s f. fst (f s)))" - by (simp add: triple_judgement_def valid_def postcondition_def) - *) - -lemma validE_is_triple: - "validE P f Q E = triple_judgement P f - (postconditions (\s f. (\(r,s') \ {(rv, s'). (Inr rv, s') \ (mres (f s))}. Q r s')) - (\s f. (\(r,s') \ {(rv, s'). (Inl rv, s') \ (mres (f s))}. E r s')))" - apply (simp add: validE_def triple_judgement_def valid_def postcondition_def - postconditions_def split_def split: sum.split) - apply (fastforce elim: image_eqI[rotated]) - done - -lemma validE_R_is_triple: - "validE_R P f Q = triple_judgement P f - (\s f. (\(r,s') \ {(rv, s'). (Inr rv, s') \ mres (f s)}. Q r s'))" - by (simp add: validE_R_def validE_is_triple postconditions_def postcondition_def) - -lemma validE_E_is_triple: - "validE_E P f E = triple_judgement P f - (\s f. (\(r,s') \ {(rv, s'). (Inl rv, s') \ mres (f s)}. E r s'))" - by (simp add: validE_E_def validE_is_triple postconditions_def postcondition_def) - -lemmas hoare_wp_combs = - hoare_post_comb_imp_conj hoare_vcg_precond_imp hoare_vcg_conj_lift - -lemmas hoare_wp_combsE = - hoare_vcg_precond_impE - hoare_vcg_precond_impE_R - validE_validE_R - hoare_vcg_R_conj - hoare_vcg_E_elim - hoare_vcg_E_conj - -lemmas hoare_wp_state_combsE = - hoare_vcg_precond_impE[OF valid_validE] - hoare_vcg_precond_impE_R[OF valid_validE_R] - valid_validE_R - hoare_vcg_R_conj[OF valid_validE_R] - hoare_vcg_E_elim[OF valid_validE_E] - hoare_vcg_E_conj[OF valid_validE_E] - -lemmas hoare_wp_splits [wp_split] = - hoare_seq_ext hoare_vcg_seqE handleE'_wp handleE_wp - validE_validE_R [OF hoare_vcg_seqE [OF validE_R_validE]] - validE_validE_R [OF handleE'_wp [OF validE_R_validE]] - validE_validE_R [OF handleE_wp [OF validE_R_validE]] - catch_wp hoare_vcg_split_if hoare_vcg_split_ifE - validE_validE_R [OF hoare_vcg_split_ifE [OF validE_R_validE validE_R_validE]] - liftM_wp liftME_wp - validE_validE_R [OF liftME_wp [OF validE_R_validE]] - validE_valid - -lemmas [wp_comb] = hoare_wp_state_combsE hoare_wp_combsE hoare_wp_combs - -lemmas [wp] = hoare_vcg_prop - wp_post_taut - return_wp - put_wp - get_wp - gets_wp - modify_wp - returnOk_wp - throwError_wp - fail_wp - failE_wp - liftE_wp - -lemmas [wp_trip] = valid_is_triple validE_is_triple validE_E_is_triple validE_R_is_triple - - -text \Simplifications on conjunction\ - -lemma hoare_post_eq: "\ Q = Q'; \P\ f \Q'\ \ \ \P\ f \Q\" - by simp -lemma hoare_post_eqE1: "\ Q = Q'; \P\ f \Q'\,\E\ \ \ \P\ f \Q\,\E\" - by simp -lemma hoare_post_eqE2: "\ E = E'; \P\ f \Q\,\E'\ \ \ \P\ f \Q\,\E\" - by simp -lemma hoare_post_eqE_R: "\ Q = Q'; \P\ f \Q'\,- \ \ \P\ f \Q\,-" - by simp - -lemma pred_conj_apply_elim: "(\r. Q r and Q' r) = (\r s. Q r s \ Q' r s)" - by (simp add: pred_conj_def) -lemma pred_conj_conj_elim: "(\r s. (Q r and Q' r) s \ Q'' r s) = (\r s. Q r s \ Q' r s \ Q'' r s)" - by simp -lemma conj_assoc_apply: "(\r s. (Q r s \ Q' r s) \ Q'' r s) = (\r s. Q r s \ Q' r s \ Q'' r s)" - by simp -lemma all_elim: "(\rv s. \x. 
P rv s) = P" - by simp -lemma all_conj_elim: "(\rv s. (\x. P rv s) \ Q rv s) = (\rv s. P rv s \ Q rv s)" - by simp - -lemmas vcg_rhs_simps = pred_conj_apply_elim pred_conj_conj_elim - conj_assoc_apply all_elim all_conj_elim - -lemma if_apply_reduct: "\P\ If P' (f x) (g x) \Q\ \ \P\ If P' f g x \Q\" - by (cases P', simp_all) -lemma if_apply_reductE: "\P\ If P' (f x) (g x) \Q\,\E\ \ \P\ If P' f g x \Q\,\E\" - by (cases P', simp_all) -lemma if_apply_reductE_R: "\P\ If P' (f x) (g x) \Q\,- \ \P\ If P' f g x \Q\,-" - by (cases P', simp_all) - -lemmas hoare_wp_simps [wp_split] = - vcg_rhs_simps [THEN hoare_post_eq] vcg_rhs_simps [THEN hoare_post_eqE1] - vcg_rhs_simps [THEN hoare_post_eqE2] vcg_rhs_simps [THEN hoare_post_eqE_R] - if_apply_reduct if_apply_reductE if_apply_reductE_R TrueI - -schematic_goal if_apply_test: "\?Q\ (if A then returnOk else K fail) x \P\,\E\" - by wpsimp - -lemma hoare_elim_pred_conj: - "\P\ f \\r s. Q r s \ Q' r s\ \ \P\ f \\r. Q r and Q' r\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE1: - "\P\ f \\r s. Q r s \ Q' r s\,\E\ \ \P\ f \\r. Q r and Q' r\,\E\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE2: - "\P\ f \Q\, \\x s. E x s \ E' x s\ \ \P\ f \Q\,\\x. E x and E' x\" - by (unfold pred_conj_def) - -lemma hoare_elim_pred_conjE_R: - "\P\ f \\r s. Q r s \ Q' r s\,- \ \P\ f \\r. Q r and Q' r\,-" - by (unfold pred_conj_def) - -lemmas hoare_wp_pred_conj_elims = - hoare_elim_pred_conj hoare_elim_pred_conjE1 - hoare_elim_pred_conjE2 hoare_elim_pred_conjE_R - -lemmas hoare_weaken_preE = hoare_vcg_precond_impE - -lemmas hoare_pre [wp_pre] = - hoare_weaken_pre - hoare_weaken_preE - hoare_vcg_precond_impE_R - hoare_weaken_preE_E - -declare no_fail_pre [wp_pre] - -bundle no_pre = hoare_pre [wp_pre del] no_fail_pre [wp_pre del] - -text \Miscellaneous lemmas on hoare triples\ - -lemma hoare_vcg_mp: - assumes a: "\P\ f \Q\" - assumes b: "\P\ f \\r s. Q r s \ Q' r s\" - shows "\P\ f \Q'\" - using assms - by (auto simp: valid_def split_def) - -(* note about this precond stuff: rules get a chance to bind directly - before any of their combined forms. As a result, these precondition - implication rules are only used when needed. *) - -lemma hoare_add_post: - assumes r: "\P'\ f \Q'\" - assumes impP: "\s. P s \ P' s" - assumes impQ: "\P\ f \\rv s. Q' rv s \ Q rv s\" - shows "\P\ f \Q\" - apply (rule hoare_chain) - apply (rule hoare_vcg_conj_lift) - apply (rule r) - apply (rule impQ) - apply simp - apply (erule impP) - apply simp - done - -lemma whenE_wp: - "(P \ \Q\ f \R\, \E\) \ \if P then Q else R ()\ whenE P f \R\, \E\" - unfolding whenE_def by clarsimp wp - -lemma hoare_gen_asmE: - "(P \ \P'\ f \Q\,-) \ \P' and K P\ f \Q\, -" - by (simp add: validE_R_def validE_def valid_def) blast - -lemma hoare_list_case: - assumes P1: "\P1\ f f1 \Q\" - assumes P2: "\y ys. xs = y#ys \ \P2 y ys\ f (f2 y ys) \Q\" - shows "\case xs of [] \ P1 | y#ys \ P2 y ys\ - f (case xs of [] \ f1 | y#ys \ f2 y ys) - \Q\" - apply (cases xs; simp) - apply (rule P1) - apply (rule P2) - apply simp - done - -lemma unless_wp: - "(\P \ \Q\ f \R\) \ \if P then R () else Q\ unless P f \R\" - unfolding unless_def by wp auto - -lemma hoare_use_eq: - assumes x: "\P. \\s. P (f s)\ m \\rv s. P (f s)\" - assumes y: "\f. \\s. P f s\ m \\rv s. Q f s\" - shows "\\s. P (f s) s\ m \\rv s. Q (f s :: 'c :: type) s \" - apply (rule_tac Q="\rv s. \f'. f' = f s \ Q f' s" in hoare_post_imp) - apply simp - apply (wpsimp wp: hoare_vcg_ex_lift x y) - done - -lemma hoare_return_sp: - "\P\ return x \\r. 
P and K (r = x)\" - by (simp add: valid_def return_def mres_def) - -lemma hoare_fail_any [simp]: - "\P\ fail \Q\" by wp - -lemma hoare_failE [simp]: "\P\ fail \Q\,\E\" by wp - -lemma hoare_FalseE [simp]: - "\\s. False\ f \Q\,\E\" - by (simp add: valid_def validE_def) - -lemma hoare_K_bind [wp]: - "\P\ f \Q\ \ \P\ K_bind f x \Q\" - by simp - -text \Setting up the precondition case splitter.\ - -lemma wpc_helper_valid: - "\Q\ g \S\ \ wpc_helper (P, P') (Q, Q') \P\ g \S\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validE: - "\Q\ f \R\,\E\ \ wpc_helper (P, P') (Q, Q') \P\ f \R\,\E\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validE_R: - "\Q\ f \R\,- \ wpc_helper (P, P') (Q, Q') \P\ f \R\,-" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_validR_R: - "\Q\ f -,\E\ \ wpc_helper (P, P') (Q, Q') \P\ f -,\E\" - by (clarsimp simp: wpc_helper_def elim!: hoare_pre) - -lemma wpc_helper_no_fail_final: - "no_fail Q f \ wpc_helper (P, P') (Q, Q') (no_fail P f)" - by (clarsimp simp: wpc_helper_def elim!: no_fail_pre) - -lemma wpc_helper_validNF: - "\Q\ g \S\! \ wpc_helper (P, P') (Q, Q') \P\ g \S\!" - apply (clarsimp simp: wpc_helper_def) - by (metis hoare_wp_combs(2) no_fail_pre validNF_def) - -lemma wpc_helper_validI: - "(\Q\,\R\ g \G\,\S\) \ wpc_helper (P, P') (case_prod Q, Q') (\curry P\,\R\ g \G\,\S\)" - by (clarsimp simp: wpc_helper_def elim!: validI_weaken_pre) - -wpc_setup "\m. \P\ m \Q\" wpc_helper_valid -wpc_setup "\m. \P\ m \Q\,\E\" wpc_helper_validE -wpc_setup "\m. \P\ m \Q\,-" wpc_helper_validE_R -wpc_setup "\m. \P\ m -,\E\" wpc_helper_validR_R -wpc_setup "\m. no_fail P m" wpc_helper_no_fail_final -wpc_setup "\m. \P\ m \Q\!" wpc_helper_validNF -wpc_setup "\m. \P\,\R\ m \G\,\S\" wpc_helper_validI - -lemma in_liftM: - "((r, s') \ mres (liftM t f s)) = (\r'. (r', s') \ mres (f s) \ r = t r')" - by (simp add: liftM_def in_return in_bind) - -(* FIXME: eliminate *) -lemmas handy_liftM_lemma = in_liftM - -lemma hoare_fun_app_wp[wp]: - "\P\ f' x \Q'\ \ \P\ f' $ x \Q'\" - "\P\ f x \Q\,\E\ \ \P\ f $ x \Q\,\E\" - "\P\ f x \Q\,- \ \P\ f $ x \Q\,-" - "\P\ f x -,\E\ \ \P\ f $ x -,\E\" - by simp+ - -lemma hoare_validE_pred_conj: - "\ \P\f\Q\,\E\; \P\f\R\,\E\ \ \ \P\f\Q and R\,\E\" - unfolding valid_def validE_def by (simp add: split_def split: sum.splits) - -lemma hoare_validE_conj: - "\ \P\f\Q\,\E\; \P\f\R\,\E\ \ \ \P\ f \\r s. Q r s \ R r s\,\E\" - unfolding valid_def validE_def by (simp add: split_def split: sum.splits) - -lemma hoare_valid_validE: - "\P\f\\r. Q\ \ \P\f\\r. Q\,\\r. Q\" - unfolding valid_def validE_def by (simp add: split_def split: sum.splits) - -lemma liftE_validE_E [wp]: - "\\\ liftE f -, \Q\" - by (clarsimp simp: validE_E_def valid_def) - -lemma validE_validE_E [wp_comb]: - "\P\ f \\\\, \E\ \ \P\ f -, \E\" - by (simp add: validE_E_def) - -lemma validE_E_validE: - "\P\ f -, \E\ \ \P\ f \\\\, \E\" - by (simp add: validE_E_def) - -(* - * if_validE_E: - * - * \?P1 \ \?Q1\ ?f1 -, \?E\; \ ?P1 \ \?R1\ ?g1 -, \?E\\ \ \\s. (?P1 \ ?Q1 s) \ (\ ?P1 \ ?R1 s)\ if ?P1 then ?f1 else ?g1 -, \?E\ - *) -lemmas if_validE_E [wp_split] = - validE_validE_E [OF hoare_vcg_split_ifE [OF validE_E_validE validE_E_validE]] - -lemma returnOk_E [wp]: - "\\\ returnOk r -, \Q\" - by (simp add: validE_E_def) wp - -lemma hoare_drop_imp: - "\P\ f \Q\ \ \P\ f \\r s. R r s \ Q r s\" - by (auto simp: valid_def) - -lemma hoare_drop_impE: - "\\P\ f \\r. Q\, \E\\ \ \P\ f \\r s. 
R r s \ Q s\, \E\" - by (simp add: validE_weaken) - -lemma hoare_drop_impE_R: - "\P\ f \Q\,- \ \P\ f \\r s. R r s \ Q r s\, -" - by (auto simp: validE_R_def validE_def valid_def split_def split: sum.splits) - -lemma hoare_drop_impE_E: - "\P\ f -,\Q\ \ \P\ f -,\\r s. R r s \ Q r s\" - by (auto simp: validE_E_def validE_def valid_def split_def split: sum.splits) - -lemmas hoare_drop_imps = hoare_drop_imp hoare_drop_impE_R hoare_drop_impE_E -lemma mres_union: - "mres (a \ b) = mres a \ mres b" - by (simp add: mres_def image_Un) - -lemma mres_Failed_empty: - "mres ((\xs. (xs, Failed)) ` X ) = {}" - "mres ((\xs. (xs, Incomplete)) ` X ) = {}" - by (auto simp add: mres_def image_def) - -lemma det_set_option_eq: - "(\a\m. set_option (snd a)) = {(r, s')} \ - (ts, Some (rr, ss)) \ m \ rr = r \ ss = s'" - by (metis UN_I option.set_intros prod.inject singleton_iff snd_conv) - -lemma det_set_option_eq': - "(\a\m. set_option (snd a)) = {(r, s')} \ - Some (r, s') \ snd ` m" - using image_iff by fastforce - -lemma bind_det_exec: - "mres (a s) = {(r,s')} \ mres ((a >>= b) s) = mres (b r s')" - by (simp add: in_bind set_eq_iff) - -lemma in_bind_det_exec: - "mres (a s) = {(r,s')} \ (s'' \ mres ((a >>= b) s)) = (s'' \ mres (b r s'))" - by (cases s'', simp add: in_bind) - -lemma exec_put: - "(put s' >>= m) s = m () s'" - by (auto simp add: bind_def put_def mres_def split_def) - -lemma bind_execI: - "\ (r'',s'') \ mres (f s); \x \ mres (g r'' s''). P x \ \ - \x \ mres ((f >>= g) s). P x" - by (fastforce simp add: Bex_def in_bind) - -lemma True_E_E [wp]: "\\\ f -,\\\\" - by (auto simp: validE_E_def validE_def valid_def split: sum.splits) - -(* - * \\x. \?B1 x\ ?g1 x -, \?E\; \?P\ ?f1 \?B1\, \?E\\ \ \?P\ ?f1 >>=E ?g1 -, \?E\ - *) -lemmas [wp_split] = - validE_validE_E [OF hoare_vcg_seqE [OF validE_E_validE]] - -lemma case_option_wp: - assumes x: "\x. \P x\ m x \Q\" - assumes y: "\P'\ m' \Q\" - shows "\\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ - case_option m' m x \Q\" - apply (cases x; simp) - apply (rule y) - apply (rule x) - done - -lemma case_option_wpE: - assumes x: "\x. \P x\ m x \Q\,\E\" - assumes y: "\P'\ m' \Q\,\E\" - shows "\\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ - case_option m' m x \Q\,\E\" - apply (cases x; simp) - apply (rule y) - apply (rule x) - done - -lemma in_bindE: - "(rv, s') \ mres ((f >>=E (\rv'. g rv')) s) = - ((\ex. rv = Inl ex \ (Inl ex, s') \ mres (f s)) \ - (\rv' s''. (rv, s') \ mres (g rv' s'') \ (Inr rv', s'') \ mres (f s)))" - apply (clarsimp simp: bindE_def in_bind lift_def in_throwError) - apply (safe del: disjCI; strengthen subst[where P="\x. x \ mres (f s)", mk_strg I _ E]; - auto simp: in_throwError split: sum.splits) - done - -(* - * \?P\ ?m1 -, \?E\ \ \?P\ liftME ?f1 ?m1 -, \?E\ - *) -lemmas [wp_split] = validE_validE_E [OF liftME_wp, simplified, OF validE_E_validE] - -lemma assert_A_True[simp]: "assert True = return ()" - by (simp add: assert_def) - -lemma assert_wp [wp]: "\\s. P \ Q () s\ assert P \Q\" - by (cases P, (simp add: assert_def | wp)+) - -lemma list_cases_wp: - assumes a: "\P_A\ a \Q\" - assumes b: "\x xs. ts = x#xs \ \P_B x xs\ b x xs \Q\" - shows "\case_list P_A P_B ts\ case ts of [] \ a | x # xs \ b x xs \Q\" - by (cases ts, auto simp: a b) - -(* FIXME: make wp *) -lemma whenE_throwError_wp: - "\\s. \Q \ P s\ whenE Q (throwError e) \\rv. P\, -" - unfolding whenE_def by wp blast - -lemma select_throwError_wp: - "\\s. \x\S. 
Q x s\ select S >>= throwError -, \Q\" - by (clarsimp simp add: bind_def throwError_def return_def select_def validE_E_def - validE_def valid_def mres_def) - - -section "validNF Rules" - -subsection "Basic validNF theorems" - -lemma validNF [intro?]: - "\ \ P \ f \ Q \; no_fail P f \ \ \ P \ f \ Q \!" - by (clarsimp simp: validNF_def) - -lemma validNF_valid: "\ \ P \ f \ Q \! \ \ \ P \ f \ Q \" - by (clarsimp simp: validNF_def) - -lemma validNF_no_fail: "\ \ P \ f \ Q \! \ \ no_fail P f" - by (clarsimp simp: validNF_def) - -lemma snd_validNF: - "\ \ P \ f \ Q \!; P s \ \ Failed \ snd ` (f s)" - by (clarsimp simp: validNF_def no_fail_def) - -lemma use_validNF: - "\ (r', s') \ mres (f s); \ P \ f \ Q \!; P s \ \ Q r' s'" - by (fastforce simp: validNF_def valid_def) - -subsection "validNF weakest pre-condition rules" - -lemma validNF_return [wp]: - "\ P x \ return x \ P \!" - by (wp validNF)+ - -lemma validNF_get [wp]: - "\ \s. P s s \ get \ P \!" - by (wp validNF)+ - -lemma validNF_put [wp]: - "\ \s. P () x \ put x \ P \!" - by (wp validNF)+ - -lemma validNF_K_bind [wp]: - "\ P \ x \ Q \! \ \ P \ K_bind x f \ Q \!" - by simp - -lemma validNF_fail [wp]: - "\ \s. False \ fail \ Q \!" - by (clarsimp simp: validNF_def fail_def no_fail_def) - -lemma validNF_prop [wp_unsafe]: - "\ no_fail (\s. P) f \ \ \ \s. P \ f \ \rv s. P \!" - by (wp validNF)+ - -lemma validNF_post_conj [intro!]: - "\ \ P \ a \ Q \!; \ P \ a \ R \! \ \ \ P \ a \ Q and R \!" - by (clarsimp simp: validNF_def) - -lemma no_fail_or: - "\no_fail P a; no_fail Q a\ \ no_fail (P or Q) a" - by (clarsimp simp: no_fail_def) - -lemma validNF_pre_disj [intro!]: - "\ \ P \ a \ R \!; \ Q \ a \ R \! \ \ \ P or Q \ a \ R \!" - by (rule validNF) (auto dest: validNF_valid validNF_no_fail intro: no_fail_or) - -(* - * Set up combination rules for WP, which also requires - * a "wp_trip" rule for validNF. - *) - -definition "validNF_property Q s b \ Failed \ snd ` (b s) \ (\(r', s') \ mres (b s). Q r' s')" - -lemma validNF_is_triple [wp_trip]: - "validNF P f Q = triple_judgement P f (validNF_property Q)" - apply (clarsimp simp: validNF_def triple_judgement_def validNF_property_def) - apply (auto simp: no_fail_def valid_def) - done - -lemma validNF_weaken_pre [wp_comb]: - "\\Q\ a \R\!; \s. P s \ Q s\ \ \P\ a \R\!" - by (metis hoare_pre_imp no_fail_pre validNF_def) - -lemma validNF_post_comb_imp_conj: - "\ \P'\ f \Q\!; \P\ f \Q'\!; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\!" - by (fastforce simp: validNF_def valid_def) - -lemma validNF_post_comb_conj_L: - "\ \P'\ f \Q\!; \P\ f \Q'\ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_post_comb_conj_R: - "\ \P'\ f \Q\; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_post_comb_conj: - "\ \P'\ f \Q\!; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def valid_def no_fail_def) - apply force - done - -lemma validNF_split_if [wp_split]: - "\P \ \Q\ f \S\!; \ P \ \R\ g \S\!\ \ \\s. (P \ Q s) \ (\ P \ R s)\ if P then f else g \S\!" - by simp - -lemma validNF_vcg_conj_lift: - "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ - \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" 
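The validNF rules beginning above bundle partial correctness (valid) with non-failure (no_fail) into one total-correctness judgement, as the introduction rule validNF states. A hypothetical Haskell checker over a finite set of test states, with the failure flag made explicit, illustrates the combination; the names here are illustrative only, not the library's:

-- Toy model with an explicit failure flag, mirroring (result set, failed).
type NondetF s a = s -> ([(a, s)], Bool)

assertF :: Bool -> NondetF s ()
assertF b s = if b then ([((), s)], False) else ([], True)   -- fails when b is False

valid :: (s -> Bool) -> NondetF s a -> (a -> s -> Bool) -> [s] -> Bool
valid p f q ss = and [q r s' | s <- ss, p s, (r, s') <- fst (f s)]

noFail :: (s -> Bool) -> NondetF s a -> [s] -> Bool
noFail p f ss = and [not (snd (f s)) | s <- ss, p s]

-- validNF: the Hoare triple and non-failure, under the same precondition.
validNF :: (s -> Bool) -> NondetF s a -> (a -> s -> Bool) -> [s] -> Bool
validNF p f q ss = valid p f q ss && noFail p f ss

main :: IO ()
main = do
  let states = [True, False]       -- the "state" here is just the asserted condition
  print (validNF (\s -> s)    (\s -> assertF s s) (\_ _ -> True) states)  -- True
  print (validNF (const True) (\s -> assertF s s) (\_ _ -> True) states)  -- False: fails on False

The second check fails because, in this model, assert is only non-failing on states where the asserted condition holds, which is why a validNF rule for assert needs the asserted condition in its precondition.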
- apply (subst pred_conj_def[symmetric], subst pred_conj_def[symmetric], rule validNF_post_conj) - apply (erule validNF_weaken_pre, fastforce) - apply (erule validNF_weaken_pre, fastforce) - done - -lemma validNF_vcg_disj_lift: - "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ - \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" - apply (clarsimp simp: validNF_def) - apply safe - apply (auto intro!: hoare_vcg_disj_lift)[1] - apply (clarsimp simp: no_fail_def) - done - -lemma validNF_vcg_all_lift [wp]: - "\ \x. \P x\ f \Q x\! \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\!" - apply atomize - apply (rule validNF) - apply (clarsimp simp: validNF_def) - apply (rule hoare_vcg_all_lift) - apply force - apply (clarsimp simp: no_fail_def validNF_def) - done - -lemma no_fail_bind[wp_split]: - "\ no_fail P f; \x. no_fail (R x) (g x); \Q\ f \R\ \ - \ no_fail (P and Q) (do x \ f; g x od)" - apply (simp add: no_fail_def bind_def2 image_Un image_image - in_image_constant) - apply (intro allI conjI impI) - apply (fastforce simp: image_def) - apply clarsimp - apply (drule(1) post_by_hoare, erule in_mres) - apply (fastforce simp: image_def) - done - -lemma validNF_bind [wp_split]: - "\ \x. \B x\ g x \C\!; \A\ f \B\! \ \ - \A\ do x \ f; g x od \C\!" - apply (rule validNF) - apply (metis validNF_valid hoare_seq_ext) - apply (frule no_fail_bind[OF validNF_no_fail, where g=g]) - apply (rule validNF_no_fail, assumption) - apply (erule validNF_valid) - apply (simp add: no_fail_def) - done - -lemmas validNF_seq_ext = validNF_bind - -subsection "validNF compound rules" -lemma validNF_state_assert [wp]: - "\ \s. P () s \ G s \ state_assert G \ P \!" - apply (rule validNF) - apply wpsimp - apply (clarsimp simp: no_fail_def state_assert_def - bind_def2 assert_def return_def get_def) - done - -lemma validNF_modify [wp]: - "\ \s. P () (f s) \ modify f \ P \!" - apply (clarsimp simp: modify_def) - apply wp - done - -lemma validNF_gets [wp]: - "\\s. P (f s) s\ gets f \P\!" - apply (clarsimp simp: gets_def) - apply wp - done - -lemma validNF_condition [wp]: - "\ \ Q \ A \P\!; \ R \ B \P\!\ \ \\s. if C s then Q s else R s\ condition C A B \P\!" - apply rule - apply (drule validNF_valid)+ - apply (erule (1) condition_wp) - apply (drule validNF_no_fail)+ - apply (clarsimp simp: no_fail_def condition_def) - done - -lemma validNF_alt_def: - "validNF P m Q = (\s. P s \ ((\(r', s') \ mres (m s). Q r' s') \ Failed \ snd ` (m s)))" - by (auto simp: validNF_def valid_def no_fail_def mres_def image_def) - -lemma validNF_assert [wp]: - "\ (\s. P) and (R ()) \ assert P \ R \!" - apply (rule validNF) - apply (clarsimp simp: valid_def in_return) - apply (clarsimp simp: no_fail_def return_def) - done - -lemma validNF_false_pre: - "\ \_. False \ P \ Q \!" - by (clarsimp simp: validNF_def no_fail_def) - -lemma validNF_chain: - "\\P'\ a \R'\!; \s. P s \ P' s; \r s. R' r s \ R r s\ \ \P\ a \R\!" - by (fastforce simp: validNF_def valid_def no_fail_def Ball_def) - -lemma validNF_case_prod [wp]: - "\ \x y. validNF (P x y) (B x y) Q \ \ validNF (case_prod P v) (case_prod (\x y. B x y) v) Q" - by (metis prod.exhaust split_conv) - -lemma validE_NF_case_prod [wp]: - "\ \a b. \P a b\ f a b \Q\, \E\! \ \ - \case x of (a, b) \ P a b\ case x of (a, b) \ f a b \Q\, \E\!" - apply (clarsimp simp: validE_NF_alt_def) - apply (erule validNF_case_prod) - done - -lemma no_fail_is_validNF_True: "no_fail P s = (\ P \ s \ \_ _. 
True \!)" - by (clarsimp simp: no_fail_def validNF_def valid_def) - -subsection "validNF reasoning in the exception monad" - -lemma validE_NF [intro?]: - "\ \ P \ f \ Q \,\ E \; no_fail P f \ \ \ P \ f \ Q \,\ E \!" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_valid: - "\ \ P \ f \ Q \,\ E \! \ \ \ P \ f \ Q \,\ E \" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_no_fail: - "\ \ P \ f \ Q \,\ E \! \ \ no_fail P f" - apply (clarsimp simp: validE_NF_def) - done - -lemma validE_NF_weaken_pre [wp_comb]: - "\\Q\ a \R\,\E\!; \s. P s \ Q s\ \ \P\ a \R\,\E\!" - apply (clarsimp simp: validE_NF_alt_def) - apply (erule validNF_weaken_pre) - apply simp - done - -lemma validE_NF_post_comb_conj_L: - "\ \P\ f \Q\, \ E \!; \P'\ f \Q'\, \ \_ _. True \ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_post_comb_conj_R: - "\ \P\ f \Q\, \ \_ _. True \; \P'\ f \Q'\, \ E \! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_post_comb_conj: - "\ \P\ f \Q\, \ E \!; \P'\ f \Q'\, \ E \! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\, \ E \!" - apply (clarsimp simp: validE_NF_alt_def validE_def validNF_def - valid_def no_fail_def split: sum.splits) - apply force - done - -lemma validE_NF_chain: - "\\P'\ a \R'\,\E'\!; - \s. P s \ P' s; - \r' s'. R' r' s' \ R r' s'; - \r'' s''. E' r'' s'' \ E r'' s''\ \ - \\s. P s \ a \\r' s'. R r' s'\,\\r'' s''. E r'' s''\!" - by (fastforce simp: validE_NF_def validE_def2 no_fail_def Ball_def split: sum.splits) - -lemma validE_NF_bind_wp [wp]: - "\\x. \B x\ g x \C\, \E\!; \A\ f \B\, \E\!\ \ \A\ f >>=E (\x. g x) \C\, \E\!" - apply (unfold validE_NF_alt_def bindE_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp simp: lift_def throwError_def split: sum.splits) - apply wpsimp - done - -lemma validNF_catch [wp]: - "\\x. \E x\ handler x \Q\!; \P\ f \Q\, \E\!\ \ \P\ f (\x. handler x) \Q\!" - apply (unfold validE_NF_alt_def catch_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp simp: lift_def throwError_def split: sum.splits) - apply wp - done - -lemma validNF_throwError [wp]: - "\E e\ throwError e \P\, \E\!" - by (unfold validE_NF_alt_def throwError_def o_def) wpsimp - -lemma validNF_returnOk [wp]: - "\P e\ returnOk e \P\, \E\!" - by (clarsimp simp: validE_NF_alt_def returnOk_def) wpsimp - -lemma validNF_whenE [wp]: - "(P \ \Q\ f \R\, \E\!) \ \if P then Q else R ()\ whenE P f \R\, \E\!" - unfolding whenE_def by clarsimp wp - -lemma validNF_nobindE [wp]: - "\ \B\ g \C\,\E\!; - \A\ f \\r s. B s\,\E\! \ \ - \A\ doE f; g odE \C\,\E\!" - by clarsimp wp - -(* - * Setup triple rules for validE_NF so that we can use the - * "wp_comb" attribute. - *) - -definition "validE_NF_property Q E s b \ Failed \ snd ` (b s) - \ (\(r', s') \ mres (b s). case r' of Inl x \ E x s' | Inr x \ Q x s')" - -lemma validE_NF_is_triple [wp_trip]: - "validE_NF P f Q E = triple_judgement P f (validE_NF_property Q E)" - apply (clarsimp simp: validE_NF_def validE_def2 no_fail_def triple_judgement_def - validE_NF_property_def split: sum.splits) - apply blast - done - -lemmas [wp_comb] = validE_NF_weaken_pre - -lemma validNF_cong: - "\ \s. P s = P' s; \s. P s \ m s = m' s; - \r' s' s. 
\ P s; (r', s') \ mres (m s) \ \ Q r' s' = Q' r' s' \ \ - (\ P \ m \ Q \!) = (\ P' \ m' \ Q' \!)" - by (fastforce simp: validNF_alt_def) - -lemma validE_NF_liftE [wp]: - "\P\ f \Q\! \ \P\ liftE f \Q\,\E\!" - by (wpsimp simp: validE_NF_alt_def liftE_def) - -lemma validE_NF_handleE' [wp]: - "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ - \P\ f (\x. handler x) \Q\,\E\!" - apply (unfold validE_NF_alt_def handleE'_def) - apply (rule validNF_bind [rotated]) - apply assumption - apply (clarsimp split: sum.splits) - apply wpsimp - done - -lemma validE_NF_handleE [wp]: - "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ - \P\ f handler \Q\,\E\!" - apply (unfold handleE_def) - apply (metis validE_NF_handleE') - done - -lemma validE_NF_condition [wp]: - "\ \ Q \ A \P\,\ E \!; \ R \ B \P\,\ E \!\ - \ \\s. if C s then Q s else R s\ condition C A B \P\,\ E \!" - apply rule - apply (drule validE_NF_valid)+ - apply wp - apply (drule validE_NF_no_fail)+ - apply (clarsimp simp: no_fail_def condition_def) - done - -lemma validI_name_pre: - "prefix_closed f \ - (\s0 s. P s0 s \ \\s0' s'. s0' = s0 \ s' = s\,\R\ f \G\,\Q\) - \ \P\,\R\ f \G\,\Q\" - unfolding validI_def - by metis - -lemma validI_well_behaved': - "prefix_closed f - \ \P\,\R'\ f \G'\,\Q\ - \ R \ R' - \ G' \ G - \ \P\,\R\ f \G\,\Q\" - apply (subst validI_def, clarsimp) - apply (clarsimp simp add: rely_def) - apply (drule (2) validI_D) - apply (fastforce simp: rely_cond_def guar_cond_def)+ - done - -lemmas validI_well_behaved = validI_well_behaved'[unfolded le_fun_def, simplified] - -text \Strengthen setup.\ - -context strengthen_implementation begin - -lemma strengthen_hoare [strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ st F (\) (\P\ f \Q\) (\P\ f \R\)" - by (cases F, auto elim: hoare_strengthen_post) - -lemma strengthen_validE_R_cong[strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ st F (\) (\P\ f \Q\, -) (\P\ f \R\, -)" - by (cases F, auto intro: hoare_post_imp_R) - -lemma strengthen_validE_cong[strg]: - "(\r s. st F (\) (Q r s) (R r s)) - \ (\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f \Q\, \S\) (\P\ f \R\, \T\)" - by (cases F, auto elim: hoare_post_impErr) - -lemma strengthen_validE_E_cong[strg]: - "(\r s. st F (\) (S r s) (T r s)) - \ st F (\) (\P\ f -, \S\) (\P\ f -, \T\)" - by (cases F, auto elim: hoare_post_impErr simp: validE_E_def) - -lemma strengthen_validI[strg]: - "(\r s0 s. st F (\) (Q r s0 s) (Q' r s0 s)) - \ st F (\) (\P\,\G\ f \R\,\Q\) (\P\,\G\ f \R\,\Q'\)" - by (cases F, auto elim: validI_strengthen_post) - -end - -end diff --git a/lib/Monads/Det.thy b/lib/Monads/nondet/Nondet_Det.thy similarity index 91% rename from lib/Monads/Det.thy rename to lib/Monads/nondet/Nondet_Det.thy index df42523171..03e4118099 100644 --- a/lib/Monads/Det.thy +++ b/lib/Monads/nondet/Nondet_Det.thy @@ -5,9 +5,9 @@ * SPDX-License-Identifier: BSD-2-Clause *) -theory Det +theory Nondet_Det imports - NonDetMonad + Nondet_Monad begin subsection "Determinism" @@ -51,13 +51,13 @@ lemma det_UN: lemma bind_detI[simp, intro!]: "\ det f; \x. 
det (g x) \ \ det (f >>= g)" unfolding bind_def det_def + apply (erule all_reg[rotated]) apply clarsimp - apply (erule_tac x=s in allE) - apply clarsimp - apply (erule_tac x="a" in allE) - apply (erule_tac x="b" in allE) - apply clarsimp - done + by (metis fst_conv snd_conv) + +lemma det_modify[iff]: + "det (modify f)" + by (simp add: modify_def) lemma the_run_stateI: "fst (M s) = {s'} \ the_run_state M s = s'" diff --git a/lib/Monads/Empty_Fail.thy b/lib/Monads/nondet/Nondet_Empty_Fail.thy similarity index 98% rename from lib/Monads/Empty_Fail.thy rename to lib/Monads/nondet/Nondet_Empty_Fail.thy index 7b75a3680d..889f170c80 100644 --- a/lib/Monads/Empty_Fail.thy +++ b/lib/Monads/nondet/Nondet_Empty_Fail.thy @@ -5,16 +5,16 @@ * SPDX-License-Identifier: BSD-2-Clause *) -theory Empty_Fail +theory Nondet_Empty_Fail imports - NonDetMonad + Nondet_Monad WPSimp begin section \Monads that are wellformed w.r.t. failure\ text \ - Usually, well-formed monads constructed from the primitives in NonDetMonad will have the following + Usually, well-formed monads constructed from the primitives in Nondet_Monad will have the following property: if they return an empty set of results, they will have the failure flag set.\ definition empty_fail :: "('s,'a) nondet_monad \ bool" where "empty_fail m \ \s. fst (m s) = {} \ snd (m s)" @@ -27,7 +27,7 @@ definition mk_ef :: "'a set \ bool \ 'a set \ bool" wh subsection \WPC setup\ lemma wpc_helper_empty_fail_final: - "empty_fail f \ wpc_helper (P, P') (Q, Q') (empty_fail f)" + "empty_fail f \ wpc_helper (P, P', P'') (Q, Q', Q'') (empty_fail f)" by (clarsimp simp: wpc_helper_def) wpc_setup "\m. empty_fail m" wpc_helper_empty_fail_final @@ -63,7 +63,7 @@ subsection \Wellformed monads\ (* Collect generic empty_fail lemmas here: - - naming convention is emtpy_fail_NAME. + - naming convention is empty_fail_NAME. - add lemmas with assumptions to [empty_fail_cond] set - add lemmas without assumption to [empty_fail_term] set *) diff --git a/lib/Monads/In_Monad.thy b/lib/Monads/nondet/Nondet_In_Monad.thy similarity index 86% rename from lib/Monads/In_Monad.thy rename to lib/Monads/nondet/Nondet_In_Monad.thy index 4c8b923d12..720b1ac96d 100644 --- a/lib/Monads/In_Monad.thy +++ b/lib/Monads/nondet/Nondet_In_Monad.thy @@ -5,8 +5,8 @@ * SPDX-License-Identifier: BSD-2-Clause *) -theory In_Monad - imports NonDetMonadLemmas +theory Nondet_In_Monad + imports Nondet_Lemmas begin section \Reasoning directly about states\ @@ -38,9 +38,13 @@ lemma in_bindE_L: by (simp add: bindE_def bind_def) (force simp: return_def throwError_def lift_def split_def split: sum.splits if_split_asm) +lemma in_return: + "(r, s') \ fst (return v s) = (r = v \ s' = s)" + by (simp add: return_def) + lemma in_liftE: "((v, s') \ fst (liftE f s)) = (\v'. 
v = Inr v' \ (v', s') \ fst (f s))" - by (force simp add: liftE_def bind_def return_def split_def) + by (force simp: liftE_def in_bind in_return) lemma in_whenE: "((v, s') \ fst (whenE P f s)) = ((P \ (v, s') \ fst (f s)) \ (\P \ v = Inr () \ s' = s))" @@ -58,10 +62,6 @@ lemma in_fail: "r \ fst (fail s) = False" by (simp add: fail_def) -lemma in_return: - "(r, s') \ fst (return v s) = (r = v \ s' = s)" - by (simp add: return_def) - lemma in_assert: "(r, s') \ fst (assert P s) = (P \ s' = s)" by (simp add: assert_def return_def fail_def) @@ -90,6 +90,18 @@ lemma in_when: "(v, s') \ fst (when P f s) = ((P \ (v, s') \ fst (f s)) \ (\P \ v = () \ s' = s))" by (simp add: when_def in_return) +lemma in_unless: + "(v, s') \ fst (unless P f s) = ((\ P \ (v, s') \ fst (f s)) \ (P \ v = () \ s' = s))" + by (simp add: unless_def in_when) + +lemma in_unlessE: + "(v, s') \ fst (unlessE P f s) = ((\ P \ (v, s') \ fst (f s)) \ (P \ v = Inr () \ s' = s))" + by (simp add: unlessE_def in_returnOk) + +lemma inl_unlessE: + "((Inl x, s') \ fst (unlessE P f s)) = (\ P \ (Inl x, s') \ fst (f s))" + by (auto simp add: in_unlessE) + lemma in_modify: "(v, s') \ fst (modify f s) = (s'=f s \ v = ())" by (simp add: modify_def bind_def get_def put_def) @@ -106,20 +118,17 @@ lemma in_liftM: "((r, s') \ fst (liftM t f s)) = (\r'. (r', s') \ fst (f s) \ r = t r')" by (simp add: liftM_def return_def bind_def Bex_def) -lemmas handy_liftM_lemma = in_liftM (* FIXME lib: eliminate *) - lemma in_bindE: "(rv, s') \ fst ((f >>=E (\rv'. g rv')) s) = ((\ex. rv = Inl ex \ (Inl ex, s') \ fst (f s)) \ (\rv' s''. (rv, s') \ fst (g rv' s'') \ (Inr rv', s'') \ fst (f s)))" by (force simp: bindE_def bind_def lift_def throwError_def return_def split: sum.splits) -(* FIXME lib: remove unlessE_whenE + unless_when here and replace with in_unless lemmas *) lemmas in_monad = inl_whenE in_whenE in_liftE in_bind in_bindE_L in_bindE_R in_returnOk in_throwError in_fail in_assertE in_assert in_return in_assert_opt - in_get in_gets in_put in_when unlessE_whenE - unless_when in_modify gets_the_in_monad + in_get in_gets in_put in_when inl_unlessE in_unlessE + in_unless in_modify gets_the_in_monad in_alternative in_liftM lemma bind_det_exec: diff --git a/lib/Monads/NonDetMonadLemmas.thy b/lib/Monads/nondet/Nondet_Lemmas.thy similarity index 96% rename from lib/Monads/NonDetMonadLemmas.thy rename to lib/Monads/nondet/Nondet_Lemmas.thy index 530dd79f07..1ab44cdf17 100644 --- a/lib/Monads/NonDetMonadLemmas.thy +++ b/lib/Monads/nondet/Nondet_Lemmas.thy @@ -5,8 +5,8 @@ * SPDX-License-Identifier: BSD-2-Clause *) -theory NonDetMonadLemmas -imports NonDetMonad +theory Nondet_Lemmas + imports Nondet_Monad begin section \General Lemmas Regarding the Nondeterministic State Monad\ @@ -15,12 +15,12 @@ subsection \Congruence Rules for the Function Package\ lemma bind_cong[fundef_cong]: "\ f = f'; \v s s'. (v, s') \ fst (f' s) \ g v s' = g' v s' \ \ f >>= g = f' >>= g'" - by (auto simp: bind_def Let_def split_def intro: rev_image_eqI) + by (auto simp: bind_def split_def) lemma bind_apply_cong [fundef_cong]: "\ f s = f' s'; \rv st. (rv, st) \ fst (f' s') \ g rv st = g' rv st \ \ (f >>= g) s = (f' >>= g') s'" - by (auto simp: bind_def split_def intro: SUP_cong [OF refl] intro: rev_image_eqI) + by (auto simp: bind_def split_def) lemma bindE_cong[fundef_cong]: "\ M = M' ; \v s s'. 
(Inr v, s') \ fst (M' s) \ N v s' = N' v s' \ \ bindE M N = bindE M' N'" @@ -192,8 +192,8 @@ lemma liftE_liftM: lemma liftME_liftM: "liftME f = liftM (case_sum Inl (Inr \ f))" unfolding liftME_def liftM_def bindE_def returnOk_def lift_def - apply (rule ext, rename_tac x) - apply (rule_tac f="bind x" in arg_cong) + apply (rule ext) + apply (rule arg_cong[where f="bind m" for m]) apply (fastforce simp: throwError_def split: sum.splits) done @@ -277,7 +277,8 @@ lemma monad_state_eqI [intro]: subsection \General @{const whileLoop} reasoning\ definition whileLoop_terminatesE :: - "('a \ 's \ bool) \ ('a \ ('s, 'e + 'a) nondet_monad) \ 'a \ 's \ bool" where + "('a \ 's \ bool) \ ('a \ ('s, 'e + 'a) nondet_monad) \ 'a \ 's \ bool" + where "whileLoop_terminatesE C B \ \r. whileLoop_terminates (\r s. case r of Inr v \ C v s | _ \ False) (lift B) (Inr r)" @@ -340,10 +341,10 @@ lemma whileLoop_unroll': lemma whileLoopE_unroll: "whileLoopE C B r = condition (C r) (B r >>=E whileLoopE C B) (returnOk r)" unfolding whileLoopE_def - apply (rule ext, rename_tac x) + apply (rule ext) apply (subst whileLoop_unroll) apply (clarsimp simp: bindE_def returnOk_def lift_def split: condition_splits) - apply (rule_tac f="\a. (B r >>= a) x" in arg_cong) + apply (rule arg_cong[where f="\a. (B r >>= a) x" for x]) apply (rule ext)+ apply (clarsimp simp: lift_def split: sum.splits) apply (subst whileLoop_unroll) diff --git a/lib/Monads/NonDetMonad.thy b/lib/Monads/nondet/Nondet_Monad.thy similarity index 87% rename from lib/Monads/NonDetMonad.thy rename to lib/Monads/nondet/Nondet_Monad.thy index cfa3747679..e14cf17fd2 100644 --- a/lib/Monads/NonDetMonad.thy +++ b/lib/Monads/nondet/Nondet_Monad.thy @@ -11,7 +11,7 @@ chapter "Nondeterministic State Monad with Failure" -theory NonDetMonad +theory Nondet_Monad imports Fun_Pred_Syntax Monad_Lib @@ -71,16 +71,15 @@ text \ operation may have failed, if @{text f} may have failed or @{text g} may have failed on any of the results of @{text f}.\ definition bind :: - "('s, 'a) nondet_monad \ ('a \ ('s, 'b) nondet_monad) \ ('s, 'b) nondet_monad" - (infixl ">>=" 60) where + "('s, 'a) nondet_monad \ ('a \ ('s, 'b) nondet_monad) \ ('s, 'b) nondet_monad" (infixl ">>=" 60) + where "bind f g \ \s. (\(fst ` case_prod g ` fst (f s)), True \ snd ` case_prod g ` fst (f s) \ snd (f s))" -text \ - Sometimes it is convenient to write @{text bind} in reverse order.\ +text \Sometimes it is convenient to write @{text bind} in reverse order.\ abbreviation (input) bind_rev :: - "('c \ ('a, 'b) nondet_monad) \ ('a, 'c) nondet_monad \ ('a, 'b) nondet_monad" - (infixl "=<<" 60) where + "('c \ ('a, 'b) nondet_monad) \ ('a, 'c) nondet_monad \ ('a, 'b) nondet_monad" (infixl "=<<" 60) + where "g =<< f \ f >>= g" text \ @@ -107,36 +106,40 @@ definition select :: "'a set \ ('s,'a) nondet_monad" where "select A \ \s. (A \ {s}, False)" definition alternative :: - "('s, 'a) nondet_monad \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad" (infixl "\" 20) where + "('s, 'a) nondet_monad \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad" (infixl "\" 20) + where "f \ g \ \s. (fst (f s) \ fst (g s), snd (f s) \ snd (g s))" -text \A variant of @{text select} that takes a pair. The first component - is a set as in normal @{text select}, the second component indicates - whether the execution failed. This is useful to lift monads between - different state spaces.\ +text \ + A variant of @{text select} that takes a pair. 
The first component is a set + as in normal @{text select}, the second component indicates whether the + execution failed. This is useful to lift monads between different state + spaces.\ definition select_f :: "'a set \ bool \ ('s,'a) nondet_monad" where "select_f S \ \s. (fst S \ {s}, snd S)" -text \@{text select_state} takes a relationship between - states, and outputs nondeterministically a state - related to the input state.\ +text \ + @{text state_select} takes a relationship between states, and outputs + nondeterministically a state related to the input state. Fails if no such + state exists.\ definition state_select :: "('s \ 's) set \ ('s, unit) nondet_monad" where "state_select r \ \s. ((\x. ((), x)) ` {s'. (s, s') \ r}, \ (\s'. (s, s') \ r))" + subsection "Failure" text \ The monad function that always fails. Returns an empty set of results and sets the failure flag.\ definition fail :: "('s, 'a) nondet_monad" where - "fail \ \s. ({}, True)" + "fail \ \s. ({}, True)" text \Assertions: fail if the property @{text P} is not true\ definition assert :: "bool \ ('a, unit) nondet_monad" where - "assert P \ if P then return () else fail" + "assert P \ if P then return () else fail" text \Fail if the value is @{const None}, return result @{text v} for @{term "Some v"}\ definition assert_opt :: "'a option \ ('b, 'a) nondet_monad" where - "assert_opt v \ case v of None \ fail | Some v \ return v" + "assert_opt v \ case v of None \ fail | Some v \ return v" text \An assertion that also can introspect the current state.\ definition state_assert :: "('s \ bool) \ ('s, unit) nondet_monad" where @@ -146,11 +149,11 @@ subsection "Generic functions on top of the state monad" text \Apply a function to the current state and return the result without changing the state.\ definition gets :: "('s \ 'a) \ ('s, 'a) nondet_monad" where - "gets f \ get >>= (\s. return (f s))" + "gets f \ get >>= (\s. return (f s))" text \Modify the current state using the function passed in.\ definition modify :: "('s \ 's) \ ('s, unit) nondet_monad" where - "modify f \ get >>= (\s. put (f s))" + "modify f \ get >>= (\s. put (f s))" lemma simpler_gets_def: "gets f = (\s. ({(f s, s)}, False))" @@ -172,7 +175,8 @@ text \ Perform a test on the current state, performing the left monad if the result is true or the right monad if the result is false. \ definition condition :: - "('s \ bool) \ ('s, 'r) nondet_monad \ ('s, 'r) nondet_monad \ ('s, 'r) nondet_monad" where + "('s \ bool) \ ('s, 'r) nondet_monad \ ('s, 'r) nondet_monad \ ('s, 'r) nondet_monad" + where "condition P L R \ \s. if (P s) then (L s) else (R s)" notation (output) @@ -184,18 +188,16 @@ text \ definition gets_the :: "('s \ 'a option) \ ('s, 'a) nondet_monad" where "gets_the f \ gets f >>= assert_opt" - text \ Get a map (such as a heap) from the current state and apply an argument to the map. Fail if the map returns @{const None}, otherwise return the value.\ -definition - gets_map :: "('s \ 'a \ 'b option) \ 'a \ ('s, 'b) nondet_monad" where +definition gets_map :: "('s \ 'a \ 'b option) \ 'a \ ('s, 'b) nondet_monad" where "gets_map f p \ gets f >>= (\m. assert_opt (m p))" subsection \The Monad Laws\ -text \A more expanded definition of @{text bind}\ +text \An alternative definition of @{term bind}, sometimes more convenient.\ lemma bind_def': "(f >>= g) \ \s. ({(r'', s''). \(r', s') \ fst (f s). 
(r'', s'') \ fst (g r' s') }, @@ -211,7 +213,8 @@ lemma return_bind[simp]: by (simp add: return_def bind_def) text \@{term return} is absorbed on the right of a @{term bind}\ -lemma bind_return[simp]: "(m >>= return) = m" +lemma bind_return[simp]: + "(m >>= return) = m" by (simp add: bind_def return_def split_def) text \@{term bind} is associative\ @@ -263,7 +266,6 @@ definition bindE :: (infixl ">>=E" 60) where "f >>=E g \ f >>= lift g" - text \ Lifting a normal nondeterministic monad into the exception monad is achieved by always returning its @@ -271,7 +273,6 @@ text \ definition liftE :: "('s,'a) nondet_monad \ ('s, 'e+'a) nondet_monad" where "liftE f \ f >>= (\r. return (Inr r))" - text \ Since the underlying type and @{text return} function changed, we need new definitions for when and unless:\ @@ -281,13 +282,11 @@ definition whenE :: "bool \ ('s, 'e + unit) nondet_monad \ ('s, 'e + unit) nondet_monad \ ('s, 'e + unit) nondet_monad" where "unlessE P f \ if P then returnOk () else f" - text \ Throwing an exception when the parameter is @{term None}, otherwise returning @{term "v"} for @{term "Some v"}.\ definition throw_opt :: "'e \ 'a option \ ('s, 'e + 'a) nondet_monad" where - "throw_opt ex x \ case x of None \ throwError ex | Some v \ returnOk v" - + "throw_opt ex x \ case x of None \ throwError ex | Some v \ returnOk v" text \ Failure in the exception monad is redefined in the same way @@ -296,6 +295,7 @@ text \ definition assertE :: "bool \ ('a, 'e + unit) nondet_monad" where "assertE P \ if P then returnOk () else fail" + subsection "Monad Laws for the Exception Monad" text \More direct definition of @{const liftE}:\ @@ -414,9 +414,7 @@ lemma "doE x \ returnOk 1; by simp - -section "Library of Monadic Functions and Combinators" - +section "Library of additional Monadic Functions and Combinators" text \Lifting a normal function into the monad type:\ definition liftM :: "('a \ 'b) \ ('s,'a) nondet_monad \ ('s, 'b) nondet_monad" where @@ -426,12 +424,11 @@ text \The same for the exception monad:\ definition liftME :: "('a \ 'b) \ ('s,'e+'a) nondet_monad \ ('s,'e+'b) nondet_monad" where "liftME f m \ doE x \ m; returnOk (f x) odE" -text \ Execute @{term f} for @{term "Some x"}, otherwise do nothing. \ +text \Execute @{term f} for @{term "Some x"}, otherwise do nothing.\ definition maybeM :: "('a \ ('s, unit) nondet_monad) \ 'a option \ ('s, unit) nondet_monad" where "maybeM f y \ case y of Some x \ f x | None \ return ()" -text \ - Run a sequence of monads from left to right, ignoring return values.\ +text \Run a sequence of monads from left to right, ignoring return values.\ definition sequence_x :: "('s, 'a) nondet_monad list \ ('s, unit) nondet_monad" where "sequence_x xs \ foldr (\x y. x >>= (\_. 
y)) xs (return ())" @@ -446,10 +443,10 @@ text \ going through both lists simultaneously, left to right, ignoring return values.\ definition zipWithM_x :: - "('a \ 'b \ ('s,'c) nondet_monad) \ 'a list \ 'b list \ ('s, unit) nondet_monad" where + "('a \ 'b \ ('s,'c) nondet_monad) \ 'a list \ 'b list \ ('s, unit) nondet_monad" + where "zipWithM_x f xs ys \ sequence_x (zipWith f xs ys)" - text \ The same three functions as above, but returning a list of return values instead of @{text unit}\ @@ -461,15 +458,18 @@ definition mapM :: "('a \ ('s,'b) nondet_monad) \ 'a lis "mapM f xs \ sequence (map f xs)" definition zipWithM :: - "('a \ 'b \ ('s,'c) nondet_monad) \ 'a list \ 'b list \ ('s, 'c list) nondet_monad" where + "('a \ 'b \ ('s,'c) nondet_monad) \ 'a list \ 'b list \ ('s, 'c list) nondet_monad" + where "zipWithM f xs ys \ sequence (zipWith f xs ys)" -definition foldM :: "('b \ 'a \ ('s, 'a) nondet_monad) \ 'b list \ 'a \ ('s, 'a) nondet_monad" +definition foldM :: + "('b \ 'a \ ('s, 'a) nondet_monad) \ 'b list \ 'a \ ('s, 'a) nondet_monad" where "foldM m xs a \ foldr (\p q. q >>= m p) xs (return a) " definition foldME :: - "('b \ 'a \ ('s,('e + 'b)) nondet_monad) \ 'b \ 'a list \ ('s, ('e + 'b)) nondet_monad" where + "('b \ 'a \ ('s,('e + 'b)) nondet_monad) \ 'b \ 'a list \ ('s, ('e + 'b)) nondet_monad" + where "foldME m a xs \ foldr (\p q. q >>=E swp m p) xs (returnOk a)" text \ @@ -485,11 +485,11 @@ definition sequenceE :: "('s, 'e+'a) nondet_monad list \ ('s, 'e+'a "sequenceE xs \ let mcons = (\p q. p >>=E (\x. q >>=E (\y. returnOk (x#y)))) in foldr mcons xs (returnOk [])" -definition mapME :: "('a \ ('s,'e+'b) nondet_monad) \ 'a list \ ('s,'e+'b list) nondet_monad" +definition mapME :: + "('a \ ('s,'e+'b) nondet_monad) \ 'a list \ ('s,'e+'b list) nondet_monad" where "mapME f xs \ sequenceE (map f xs)" - text \Filtering a list using a monadic function as predicate:\ primrec filterM :: "('a \ ('s, bool) nondet_monad) \ 'a list \ ('s, 'a list) nondet_monad" where "filterM P [] = return []" @@ -499,6 +499,21 @@ primrec filterM :: "('a \ ('s, bool) nondet_monad) \ 'a return (if b then (x # ys) else ys) od" +text \An alternative definition of @{term state_select}\ +lemma state_select_def2: + "state_select r \ (do + s \ get; + S \ return {s'. 
(s, s') \ r}; + assert (S \ {}); + s' \ select S; + put s' + od)" + apply (clarsimp simp add: state_select_def get_def return_def assert_def fail_def select_def + put_def bind_def fun_eq_iff + intro!: eq_reflection) + apply fastforce + done + section "Catching and Handling Exceptions" @@ -520,8 +535,7 @@ text \ The handler may throw a type of exceptions different from the left side.\ definition handleE' :: - "('s, 'e1 + 'a) nondet_monad \ ('e1 \ ('s, 'e2 + 'a) nondet_monad) \ - ('s, 'e2 + 'a) nondet_monad" + "('s, 'e1 + 'a) nondet_monad \ ('e1 \ ('s, 'e2 + 'a) nondet_monad) \ ('s, 'e2 + 'a) nondet_monad" (infix "" 10) where "f handler \ do @@ -540,15 +554,13 @@ definition handleE :: (infix "" 10) where "handleE \ handleE'" - text \ Handling exceptions, and additionally providing a continuation if the left-hand side throws no exception:\ -definition - handle_elseE :: +definition handle_elseE :: "('s, 'e + 'a) nondet_monad \ ('e \ ('s, 'ee + 'b) nondet_monad) \ - ('a \ ('s, 'ee + 'b) nondet_monad) \ ('s, 'ee + 'b) nondet_monad" - ("_ _ _" 10) where + ('a \ ('s, 'ee + 'b) nondet_monad) \ ('s, 'ee + 'b) nondet_monad" ("_ _ _" 10) + where "f handler continue \ do v \ f; case v of Inl e \ handler e @@ -577,7 +589,8 @@ inductive_simps whileLoop_results_simps_valid: "(Some x, Some y) \ whileLoop inductive_simps whileLoop_results_simps_start_fail[simp]: "(None, x) \ whileLoop_results C B" inductive whileLoop_terminates :: - "('r \ 's \ bool) \ ('r \ ('s, 'r) nondet_monad) \ 'r \ 's \ bool" for C B where + "('r \ 's \ bool) \ ('r \ ('s, 'r) nondet_monad) \ 'r \ 's \ bool" + for C B where "\ C r s \ whileLoop_terminates C B r s" | "\ C r s; \(r', s') \ fst (B r s). whileLoop_terminates C B r' s' \ \ whileLoop_terminates C B r s" @@ -586,7 +599,8 @@ inductive_cases whileLoop_terminates_cases: "whileLoop_terminates C B r s" inductive_simps whileLoop_terminates_simps: "whileLoop_terminates C B r s" definition whileLoop :: - "('a \ 'b \ bool) \ ('a \ ('b, 'a) nondet_monad) \ 'a \ ('b, 'a) nondet_monad" where + "('a \ 'b \ bool) \ ('a \ ('b, 'a) nondet_monad) \ 'a \ ('b, 'a) nondet_monad" + where "whileLoop C B \ \r s. ({(r',s'). (Some (r, s), Some (r', s')) \ whileLoop_results C B}, (Some (r, s), None) \ whileLoop_results C B \ \whileLoop_terminates C B r s)" @@ -609,17 +623,18 @@ section "Combinators that have conditions with side effects" definition notM :: "('s, bool) nondet_monad \ ('s, bool) nondet_monad" where "notM m = do c \ m; return (\ c) od" -definition - whileM :: "('s, bool) nondet_monad \ ('s, 'a) nondet_monad \ ('s, unit) nondet_monad" where +definition whileM :: + "('s, bool) nondet_monad \ ('s, 'a) nondet_monad \ ('s, unit) nondet_monad" + where "whileM C B \ do c \ C; whileLoop (\c s. c) (\_. 
do B; C od) c; return () od" -definition - ifM :: "('s, bool) nondet_monad \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad \ - ('s, 'a) nondet_monad" where +definition ifM :: + "('s, bool) nondet_monad \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad \ ('s, 'a) nondet_monad" + where "ifM test t f = do c \ test; if c then t else f @@ -627,22 +642,26 @@ definition definition ifME :: "('a, 'b + bool) nondet_monad \ ('a, 'b + 'c) nondet_monad \ ('a, 'b + 'c) nondet_monad - \ ('a, 'b + 'c) nondet_monad" where + \ ('a, 'b + 'c) nondet_monad" + where "ifME test t f = doE c \ test; if c then t else f odE" -definition - whenM :: "('s, bool) nondet_monad \ ('s, unit) nondet_monad \ ('s, unit) nondet_monad" where +definition whenM :: + "('s, bool) nondet_monad \ ('s, unit) nondet_monad \ ('s, unit) nondet_monad" + where "whenM t m = ifM t m (return ())" -definition - orM :: "('s, bool) nondet_monad \ ('s, bool) nondet_monad \ ('s, bool) nondet_monad" where +definition orM :: + "('s, bool) nondet_monad \ ('s, bool) nondet_monad \ ('s, bool) nondet_monad" + where "orM a b = ifM a (return True) b" -definition - andM :: "('s, bool) nondet_monad \ ('s, bool) nondet_monad \ ('s, bool) nondet_monad" where +definition andM :: + "('s, bool) nondet_monad \ ('s, bool) nondet_monad \ ('s, bool) nondet_monad" + where "andM a b = ifM a b (return False)" end diff --git a/lib/Monads/MonadEq.thy b/lib/Monads/nondet/Nondet_MonadEq.thy similarity index 97% rename from lib/Monads/MonadEq.thy rename to lib/Monads/nondet/Nondet_MonadEq.thy index e121823ff3..b73a1df9e2 100644 --- a/lib/Monads/MonadEq.thy +++ b/lib/Monads/nondet/Nondet_MonadEq.thy @@ -19,10 +19,10 @@ * * are added to the "monad_eq" set. *) -theory MonadEq +theory Nondet_MonadEq imports - In_Monad - NonDetMonadVCG + Nondet_In_Monad + Nondet_VCG begin (* Setup "monad_eq" attributes. *) diff --git a/lib/Monads/MonadEq_Lemmas.thy b/lib/Monads/nondet/Nondet_MonadEq_Lemmas.thy similarity index 96% rename from lib/Monads/MonadEq_Lemmas.thy rename to lib/Monads/nondet/Nondet_MonadEq_Lemmas.thy index 05871cbb18..51f8052402 100644 --- a/lib/Monads/MonadEq_Lemmas.thy +++ b/lib/Monads/nondet/Nondet_MonadEq_Lemmas.thy @@ -11,10 +11,10 @@ If you are planning to use the monad_eq method, this is the theory you should import. - See MonadEq.thy for definition and description of the method. *) + See Nondet_MonadEq.thy for definition and description of the method. *) -theory MonadEq_Lemmas - imports MonadEq +theory Nondet_MonadEq_Lemmas + imports Nondet_MonadEq begin lemma snd_return[monad_eq]: @@ -151,6 +151,10 @@ lemma snd_handleE[monad_eq]: unfolding handleE_def by (rule snd_handleE') +lemma snd_liftM[monad_eq, simp]: + "snd (liftM t f s) = snd (f s)" + by (auto simp: liftM_def bind_def return_def) + declare in_liftE[monad_eq] lemma snd_liftE[monad_eq]: diff --git a/lib/Monads/Monad_Equations.thy b/lib/Monads/nondet/Nondet_Monad_Equations.thy similarity index 84% rename from lib/Monads/Monad_Equations.thy rename to lib/Monads/nondet/Nondet_Monad_Equations.thy index a2ce88969d..7e45253b89 100644 --- a/lib/Monads/Monad_Equations.thy +++ b/lib/Monads/nondet/Nondet_Monad_Equations.thy @@ -8,11 +8,11 @@ (* Equations between monads. Conclusions of the form "f = g" where f and g are monads. Should not be Hoare triples (those go into a different theory). 
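   A typical example is gets_return_gets_eq below, which fuses a gets followed by a pure return
   of a function of its result into a single gets.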
*) -theory Monad_Equations +theory Nondet_Monad_Equations imports - Empty_Fail - No_Fail - MonadEq_Lemmas + Nondet_Empty_Fail + Nondet_No_Fail + Nondet_MonadEq_Lemmas begin lemmas assertE_assert = assertE_liftE @@ -31,11 +31,9 @@ lemma exec_modify: lemma bind_return_eq: "(a >>= return) = (b >>= return) \ a = b" - apply (clarsimp simp:bind_def) - apply (rule ext) - apply (drule_tac x= x in fun_cong) - apply (auto simp:return_def split_def) - done + by clarsimp + +lemmas bind_then_eq = arg_cong2[where f=bind, OF _ refl] lemma bindE_bind_linearise: "((f >>=E g) >>= h) = @@ -51,7 +49,7 @@ lemma throwError_bind: lemma bind_bindE_assoc: "((f >>= g) >>=E h) - = f >>= (\rv. g rv >>=E h)" + = f >>= (\rv. g rv >>=E h)" by (simp add: bindE_def bind_assoc) lemma returnOk_bind: @@ -118,7 +116,7 @@ lemma select_f_asserts: lemma liftE_bindE_handle: "((liftE f >>=E (\x. g x)) h) - = f >>= (\x. g x h)" + = f >>= (\x. g x h)" by (simp add: liftE_bindE handleE_def handleE'_def bind_assoc) @@ -140,21 +138,21 @@ lemma liftE_bindE_assoc: lemma unlessE_throw_catch_If: "catch (unlessE P (throwError e) >>=E f) g - = (if P then catch (f ()) g else g e)" + = (if P then catch (f ()) g else g e)" by (simp add: unlessE_def catch_throwError split: if_split) lemma whenE_bindE_throwError_to_if: "whenE P (throwError e) >>=E (\_. b) = (if P then (throwError e) else b)" unfolding whenE_def bindE_def - by (auto simp: NonDetMonad.lift_def throwError_def returnOk_def) + by (auto simp: lift_def throwError_def returnOk_def) lemma alternative_liftE_returnOk: "(liftE m \ returnOk v) = liftE (m \ return v)" by (simp add: liftE_def alternative_def returnOk_def bind_def return_def) lemma alternative_left_readonly_bind: - "\ \(=) s\ f \\rv. (=) s\; fst (f s) \ {} \ \ - alternative (f >>= (\x. g x)) h s + "\ \(=) s\ f \\rv. (=) s\; fst (f s) \ {} \ + \ alternative (f >>= (\x. g x)) h s = (f >>= (\x. alternative (g x) h)) s" apply (subgoal_tac "\x \ fst (f s). snd x = s") apply (clarsimp simp: alternative_def bind_def split_def) @@ -179,35 +177,22 @@ lemma gets_the_returns: by (simp_all add: returnOk_def throwError_def gets_the_return) -lemma all_rv_choice_fn_eq_pred: - "\ \rv. P rv \ \fn. f rv = g fn \ \ \fn. \rv. P rv \ f rv = g (fn rv)" - apply (rule_tac x="\rv. SOME h. f rv = g h" in exI) - apply (clarsimp split: if_split) - by (meson someI_ex) - -lemma all_rv_choice_fn_eq: - "\ \rv. \fn. f rv = g fn \ - \ \fn. f = (\rv. g (fn rv))" - using all_rv_choice_fn_eq_pred[where f=f and g=g and P=\] - by (simp add: fun_eq_iff) - lemma gets_the_eq_bind: - "\ \fn. f = gets_the (fn o fn'); \rv. \fn. g rv = gets_the (fn o fn') \ - \ \fn. (f >>= g) = gets_the (fn o fn')" - apply (clarsimp dest!: all_rv_choice_fn_eq) - apply (rule_tac x="\s. case (fn s) of None \ None | Some v \ fna v s" in exI) + "\ f = gets_the (fn_f o fn'); \rv. g rv = gets_the (fn_g rv o fn') \ + \ \fn. (f >>= g) = gets_the (fn o fn')" + apply clarsimp + apply (rule exI[where x="\s. case (fn_f s) of None \ None | Some v \ fn_g v s"]) apply (simp add: gets_the_def bind_assoc exec_gets assert_opt_def fun_eq_iff split: option.split) done lemma gets_the_eq_bindE: - "\ \fn. f = gets_the (fn o fn'); \rv. \fn. g rv = gets_the (fn o fn') \ - \ \fn. (f >>=E g) = gets_the (fn o fn')" - apply (simp add: bindE_def) - apply (erule gets_the_eq_bind) + "\ f = gets_the (fn_f o fn'); \rv. g rv = gets_the (fn_g rv o fn') \ + \ \fn. (f >>=E g) = gets_the (fn o fn')" + unfolding bindE_def + apply (erule gets_the_eq_bind[where fn_g="\rv s. 
case rv of Inl e \ Some (Inl e) | Inr v \ fn_g v s"]) apply (simp add: lift_def gets_the_returns split: sum.split) - apply fastforce done lemma gets_the_fail: @@ -229,9 +214,9 @@ lemma ex_const_function: lemma gets_the_condsE: "(\fn. whenE P f = gets_the (fn o fn')) - = (P \ (\fn. f = gets_the (fn o fn')))" + = (P \ (\fn. f = gets_the (fn o fn')))" "(\fn. unlessE P g = gets_the (fn o fn')) - = (\ P \ (\fn. g = gets_the (fn o fn')))" + = (\ P \ (\fn. g = gets_the (fn o fn')))" by (simp add: whenE_def unlessE_def gets_the_returns ex_const_function split: if_split)+ @@ -245,7 +230,7 @@ lemma liftME_return: lemma fold_bindE_into_list_case: "(doE v \ f; case_list (g v) (h v) x odE) - = (case_list (doE v \ f; g v odE) (\x xs. doE v \ f; h v x xs odE) x)" + = (case_list (doE v \ f; g v odE) (\x xs. doE v \ f; h v x xs odE) x)" by (simp split: list.split) lemma whenE_liftE: @@ -278,7 +263,7 @@ lemma maybe_fail_bind_fail: lemma select_singleton[simp]: "select {x} = return x" - by (fastforce simp add: fun_eq_iff select_def return_def) + by (simp add: select_def return_def) lemma return_modify: "return () = modify id" @@ -296,10 +281,9 @@ lemma modify_id_return: "modify id = return ()" by (simp add: simpler_modify_def return_def) - lemma liftE_bind_return_bindE_returnOk: "liftE (v >>= (\rv. return (f rv))) - = (liftE v >>=E (\rv. returnOk (f rv)))" + = (liftE v >>=E (\rv. returnOk (f rv)))" by (simp add: liftE_bindE, simp add: liftE_def returnOk_def) lemma bind_eqI: @@ -307,12 +291,12 @@ lemma bind_eqI: lemma unlessE_throwError_returnOk: "(if P then returnOk v else throwError x) - = (unlessE P (throwError x) >>=E (\_. returnOk v))" + = (unlessE P (throwError x) >>=E (\_. returnOk v))" by (cases P, simp_all add: unlessE_def) lemma gets_the_bind_eq: "\ f s = Some x; g x s = h s \ - \ (gets_the f >>= g) s = h s" + \ (gets_the f >>= g) s = h s" by (simp add: gets_the_def bind_assoc exec_gets assert_opt_def) lemma zipWithM_x_modify: @@ -358,7 +342,7 @@ qed lemma assert2: "(do v1 \ assert P; v2 \ assert Q; c od) - = (do v \ assert (P \ Q); c od)" + = (do v \ assert (P \ Q); c od)" by (simp add: assert_def split: if_split) lemma assert_opt_def2: @@ -367,23 +351,31 @@ lemma assert_opt_def2: lemma gets_assert: "(do v1 \ assert v; v2 \ gets f; c v1 v2 od) - = (do v2 \ gets f; v1 \ assert v; c v1 v2 od)" + = (do v2 \ gets f; v1 \ assert v; c v1 v2 od)" by (simp add: simpler_gets_def return_def assert_def fail_def bind_def split: if_split) lemma modify_assert: "(do v2 \ modify f; v1 \ assert v; c v1 od) - = (do v1 \ assert v; v2 \ modify f; c v1 od)" + = (do v1 \ assert v; v2 \ modify f; c v1 od)" by (simp add: simpler_modify_def return_def assert_def fail_def bind_def split: if_split) lemma gets_fold_into_modify: "do x \ gets f; modify (g x) od = modify (\s. g (f s) s)" "do x \ gets f; _ \ modify (g x); h od - = do modify (\s. g (f s) s); h od" + = do modify (\s. g (f s) s); h od" by (simp_all add: fun_eq_iff modify_def bind_assoc exec_gets exec_get exec_put) +lemma gets_return_gets_eq: + "gets f >>= (\g. return (h g)) = gets (\s. h (f s))" + by (simp add: simpler_gets_def bind_def return_def) + +lemma gets_prod_comp: + "gets (case x of (a, b) \ f a b) = (case x of (a, b) \ gets (f a b))" + by (auto simp: split_def) + lemma bind_assoc2: "(do x \ a; _ \ b; c x od) = (do x \ (do x' \ a; _ \ b; return x' od); c x od)" by (simp add: bind_assoc) @@ -431,7 +423,7 @@ lemma liftE_fail[simp]: "liftE fail = fail" lemma catch_bind_distrib: "do _ <- m h; f od = (doE m; liftE f odE (\x. 
do h x; f od))" - by (force simp: catch_def bindE_def bind_assoc liftE_def NonDetMonad.lift_def bind_def + by (force simp: catch_def bindE_def bind_assoc liftE_def lift_def bind_def split_def return_def throwError_def split: sum.splits) @@ -451,7 +443,7 @@ lemma catch_is_if: od" apply (simp add: bindE_def catch_def bind_assoc cong: if_cong) apply (rule bind_cong, rule refl) - apply (clarsimp simp: NonDetMonad.lift_def throwError_def split: sum.splits) + apply (clarsimp simp: lift_def throwError_def split: sum.splits) done lemma liftE_K_bind: "liftE ((K_bind (\s. A s)) x) = K_bind (liftE (\s. A s)) x" @@ -464,8 +456,8 @@ lemma monad_eq_split: shows "(g >>= f) s = (g >>= f') s" proof - have pre: "\rv s'. \(rv, s') \ fst (g s)\ \ f rv s' = f' rv s'" - using assms unfolding valid_def - by (erule_tac x=s in allE) auto + using assms unfolding valid_def apply - + by (erule allE[where x=s]) auto show ?thesis by (simp add: bind_def image_def case_prod_unfold pre) qed @@ -536,16 +528,15 @@ lemma bind_inv_inv_comm: empty_fail f; empty_fail g \ \ do x \ f; y \ g; n x y od = do y \ g; x \ f; n x y od" apply (rule ext) - apply (rename_tac s) - apply (rule_tac s="(do (x, y) \ do x \ f; y \ (\_. g s) ; (\_. return (x, y) s) od; - n x y od) s" in trans) + apply (rule trans[where s="(do (x, y) \ do x \ f; y \ (\_. g s) ; (\_. return (x, y) s) od; + n x y od) s" for s]) apply (simp add: bind_assoc) apply (intro bind_apply_cong, simp_all)[1] apply (metis in_inv_by_hoareD) apply (simp add: return_def bind_def) apply (metis in_inv_by_hoareD) - apply (rule_tac s="(do (x, y) \ do y \ g; x \ (\_. f s) ; (\_. return (x, y) s) od; - n x y od) s" in trans[rotated]) + apply (rule trans[where s="(do (x, y) \ do y \ g; x \ (\_. f s) ; (\_. return (x, y) s) od; + n x y od) s" for s, rotated]) apply (simp add: bind_assoc) apply (intro bind_apply_cong, simp_all)[1] apply (metis in_inv_by_hoareD) @@ -577,4 +568,22 @@ lemma if_to_top_of_bindE: "(bindE (If P x y) z) = If P (bindE x z) (bindE y z)" by (simp split: if_split) +lemma modify_modify: + "(do x \ modify f; modify (g x) od) = modify (g () o f)" + by (simp add: bind_def simpler_modify_def) + +lemmas modify_modify_bind = + arg_cong2[where f=bind, OF modify_modify refl, simplified bind_assoc] + +lemma put_then_get[unfolded K_bind_def]: + "do put s; get od = do put s; return s od" + by (simp add: put_def bind_def get_def return_def) + +lemmas put_then_get_then = + put_then_get[THEN bind_then_eq, simplified bind_assoc return_bind] + +lemma select_empty_bind[simp]: + "select {} >>= f = select {}" + by (simp add: select_def bind_def) + end \ No newline at end of file diff --git a/lib/Monads/nondet/Nondet_More_VCG.thy b/lib/Monads/nondet/Nondet_More_VCG.thy new file mode 100644 index 0000000000..853d5bc159 --- /dev/null +++ b/lib/Monads/nondet/Nondet_More_VCG.thy @@ -0,0 +1,745 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Partial correctness Hoare logic lemmas over the nondet monad. Hoare triples, lifting lemmas, etc. + If it doesn't contain a Hoare triple it likely doesn't belong in here. *) + +theory Nondet_More_VCG + imports + Nondet_VCG + Nondet_In_Monad +begin + +lemma hoare_take_disjunct: + "\P\ f \\rv s. P' rv s \ (False \ P'' rv s)\ + \ \P\ f \P''\" + by (erule hoare_strengthen_post, simp) + +lemma hoare_post_add: + "\P\ S \\r s. R r s \ Q r s\ \ \P\ S \Q\" + by (erule hoare_strengthen_post, simp) + +lemma hoare_post_addE: + "\P\ f \\_ s. 
R s \ Q s\, \T\ \ \P\ f \\_ s. Q s\, \T\" + by (erule hoare_post_impErr'; simp) + +lemma hoare_pre_add: + "(\s. P s \ R s) \ (\P\ f \Q\ \ \P and R\ f \Q\)" + apply (subst iff_conv_conj_imp) + by(intro conjI impI; rule hoare_weaken_pre, assumption, clarsimp) + +lemma hoare_pre_addE: + "(\s. P s \ R s) \ (\P\ f \Q\, \S\ \ \P and R\ f \Q\, \S\)" + apply (subst iff_conv_conj_imp) + by(intro conjI impI; rule hoare_weaken_preE, assumption, clarsimp) + +lemma hoare_name_pre_state: + "\ \s. P s \ \(=) s\ f \Q\ \ \ \P\ f \Q\" + by (clarsimp simp: valid_def) + +lemma hoare_name_pre_stateE: + "\\s. P s \ \(=) s\ f \Q\, \E\\ \ \P\ f \Q\, \E\" + by (clarsimp simp: validE_def2) + +lemma hoare_vcg_if_lift_strong: + "\ \P'\ f \P\; \\s. \ P' s\ f \\rv s. \ P rv s\; \Q'\ f \Q\; \R'\ f \R\ \ \ + \\s. if P' s then Q' s else R' s\ f \\rv s. if P rv s then Q rv s else R rv s\" + + "\ \P'\ f \P\; \\s. \ P' s\ f \\rv s. \ P rv s\; \Q'\ f \ Q\; \R'\ f \R\ \ \ + \\s. if P' s then Q' s else R' s\ f \\rv s. (if P rv s then Q rv else R rv) s\" + by (wpsimp wp: hoare_vcg_imp_lift' | assumption | fastforce)+ + +lemma hoare_vcg_imp_lift_pre_add: + "\ \P and Q\ f \\rv s. R rv s\; f \\s. \ Q s\ \ \ \P\ f \\rv s. Q s \ R rv s\" + apply (rule hoare_weaken_pre) + apply (rule hoare_vcg_imp_lift') + apply fastforce + apply fastforce + apply (clarsimp simp: pred_conj_def valid_def) + done + +lemma hoare_pre_tautI: + "\ \A and P\ a \B\; \A and not P\ a \B\ \ \ \A\ a \B\" + by (fastforce simp: valid_def split_def pred_conj_def pred_neg_def) + +lemma hoare_lift_Pf_pre_conj: + assumes P: "\x. \\s. Q x s\ m \P x\" + assumes f: "\P. \\s. P (g s) \ R s\ m \\_ s. P (f s)\" + shows "\\s. Q (g s) s \ R s\ m \\rv s. P (f s) rv s\" + apply (clarsimp simp: valid_def) + apply (rule use_valid [OF _ P], simp) + apply (rule use_valid [OF _ f], simp, simp) + done + +lemmas hoare_lift_Pf4 = hoare_lift_Pf_pre_conj[where R=\, simplified] +lemmas hoare_lift_Pf3 = hoare_lift_Pf4[where f=f and g=f for f] +lemmas hoare_lift_Pf2 = hoare_lift_Pf3[where P="\f _. P f" for P] +lemmas hoare_lift_Pf = hoare_lift_Pf2[where Q=P and P=P for P] + +lemmas hoare_lift_Pf3_pre_conj = hoare_lift_Pf_pre_conj[where f=f and g=f for f] +lemmas hoare_lift_Pf2_pre_conj = hoare_lift_Pf3_pre_conj[where P="\f _. P f" for P] +lemmas hoare_lift_Pf_pre_conj' = hoare_lift_Pf2_pre_conj[where Q=P and P=P for P] + +lemma hoare_if_r_and: + "\P\ f \\r. if R r then Q r else Q' r\ + = \P\ f \\r s. (R r \ Q r s) \ (\R r \ Q' r s)\" + by (fastforce simp: valid_def) + +lemma hoare_convert_imp: + "\ \\s. \ P s\ f \\rv s. \ Q s\; \R\ f \S\ \ + \ \\s. P s \ R s\ f \\rv s. Q s \ S rv s\" + apply (simp only: imp_conv_disj) + apply (erule(1) hoare_vcg_disj_lift) + done + +lemma hoare_vcg_ex_lift_R: + "\ \v. \P v\ f \Q v\,- \ \ \\s. \v. P v s\ f \\rv s. \v. Q v rv s\,-" + apply (simp add: validE_R_def validE_def) + apply (rule hoare_strengthen_post, erule hoare_vcg_ex_lift) + apply (auto split: sum.split) + done + +lemma hoare_case_option_wpR: + "\\P\ f None \Q\,-; \x. \P' x\ f (Some x) \Q' x\,-\ + \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\,-" + by (cases v) auto + +lemma hoare_vcg_conj_liftE_R: + "\ \P\ f \P'\,-; \Q\ f \Q'\,- \ \ \P and Q\ f \\rv s. P' rv s \ Q' rv s\, -" + apply (simp add: validE_R_def validE_def valid_def split: sum.splits) + apply blast + done + +lemma K_valid[wp]: + "\K P\ f \\_. K P\" + by (simp add: valid_def) + +lemma hoare_exI_tuple: + "\P\ f \\(rv,rv') s. Q x rv rv' s\ \ \P\ f \\(rv,rv') s. \x. 
Q x rv rv' s\" + by (fastforce simp: valid_def) + +lemma hoare_ex_all: + "(\x. \P x\ f \Q\) = \\s. \x. P x s\ f \Q\" + apply (rule iffI) + apply (fastforce simp: valid_def)+ + done + +lemma hoare_imp_eq_substR: + "\P\ f \Q\,- \ \P\ f \\rv s. rv = x \ Q x s\,-" + by (fastforce simp add: valid_def validE_R_def validE_def split: sum.splits) + +lemma hoare_split_bind_case_sum: + assumes x: "\rv. \R rv\ g rv \Q\" + "\rv. \S rv\ h rv \Q\" + assumes y: "\P\ f \S\,\R\" + shows "\P\ f >>= case_sum g h \Q\" + apply (rule hoare_seq_ext[OF _ y[unfolded validE_def]]) + apply (wpsimp wp: x split: sum.splits) + done + +lemma hoare_split_bind_case_sumE: + assumes x: "\rv. \R rv\ g rv \Q\,\E\" + "\rv. \S rv\ h rv \Q\,\E\" + assumes y: "\P\ f \S\,\R\" + shows "\P\ f >>= case_sum g h \Q\,\E\" + apply (unfold validE_def) + apply (rule hoare_seq_ext[OF _ y[unfolded validE_def]]) + apply (wpsimp wp: x[unfolded validE_def] split: sum.splits) + done + +lemma assertE_sp: + "\P\ assertE Q \\rv s. Q \ P s\,\E\" + by (clarsimp simp: assertE_def) wp + +lemma throwErrorE_E [wp]: + "\Q e\ throwError e -, \Q\" + by (simp add: validE_E_def) wp + +lemma gets_inv [simp]: + "\ P \ gets f \ \r. P \" + by (simp add: gets_def, wp) + +lemma select_inv: + "\ P \ select S \ \r. P \" + by wpsimp + +lemmas return_inv = hoare_return_drop_var + +lemma assert_inv: "\P\ assert Q \\r. P\" + unfolding assert_def + by (cases Q) simp+ + +lemma assert_opt_inv: "\P\ assert_opt Q \\r. P\" + unfolding assert_opt_def + by (cases Q) simp+ + +lemma case_options_weak_wp: + "\ \P\ f \Q\; \x. \P'\ g x \Q\ \ \ \P and P'\ case opt of None \ f | Some x \ g x \Q\" + apply (cases opt) + apply (clarsimp elim!: hoare_weaken_pre) + apply (rule hoare_weaken_pre [where Q=P']) + apply simp+ + done + +lemma case_option_wp_None_return: + assumes [wp]: "\x. \P' x\ f x \\_. Q\" + shows "\\x s. (Q and P x) s \ P' x s \ + \ \Q and (\s. opt \ None \ P (the opt) s)\ + (case opt of None \ return () | Some x \ f x) + \\_. Q\" + by (cases opt; wpsimp) + +lemma case_option_wp_None_returnOk: + assumes [wp]: "\x. \P' x\ f x \\_. Q\,\E\" + shows "\\x s. (Q and P x) s \ P' x s \ + \ \Q and (\s. opt \ None \ P (the opt) s)\ + (case opt of None \ returnOk () | Some x \ f x) + \\_. Q\,\E\" + by (cases opt; wpsimp) + +lemma list_cases_weak_wp: + assumes "\P_A\ a \Q\" + assumes "\x xs. \P_B\ b x xs \Q\" + shows + "\P_A and P_B\ + case ts of + [] \ a + | x#xs \ b x xs + \Q\" + apply (cases ts) + apply (simp, rule hoare_weaken_pre, rule assms, simp)+ + done + +lemmas hoare_FalseE_R = hoare_FalseE[where E="\\", folded validE_R_def] + +lemma hoare_vcg_if_lift2: + "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\ \ + \R\ f \\rv s. if P rv s then X rv s else Y rv s\" + + "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\ \ + \R\ f \\rv. if P' rv then X rv else Y rv\" + by (auto simp: valid_def split_def) + +lemma hoare_vcg_if_lift_ER: (* Required because of lack of rv in lifting rules *) + "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\, - \ + \R\ f \\rv s. if P rv s then X rv s else Y rv s\, -" + + "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\, - \ + \R\ f \\rv. if P' rv then X rv else Y rv\, -" + by (auto simp: valid_def validE_R_def validE_def split_def) + +lemma hoare_list_all_lift: + "(\r. r \ set xs \ \Q r\ f \\rv. Q r\) + \ \\s. list_all (\r. Q r s) xs\ f \\rv s. list_all (\r. 
Q r s) xs\" + apply (induct xs; simp) + apply wpsimp + apply (rule hoare_vcg_conj_lift; simp) + done + +lemma undefined_valid: "\\\ undefined \Q\" + by (rule hoare_pre_cont) + +lemma assertE_wp: + "\\s. F \ Q () s\ assertE F \Q\,\E\" + apply (rule hoare_pre) + apply (unfold assertE_def) + apply wp + apply simp + done + +lemma doesn't_grow_proof: + assumes y: "\s. finite (S s)" + assumes x: "\x. \\s. x \ S s \ P s\ f \\rv s. x \ S s\" + shows "\\s. card (S s) < n \ P s\ f \\rv s. card (S s) < n\" + apply (clarsimp simp: valid_def) + apply (erule le_less_trans[rotated]) + apply (rule card_mono[OF y]) + apply clarsimp + apply (rule ccontr) + apply (drule (2) use_valid[OF _ x, OF _ conjI]) + apply simp + done + +lemma hoare_vcg_propE_R: + "\\s. P\ f \\rv s. P\, -" + by (simp add: validE_R_def validE_def valid_def split_def split: sum.split) + +lemma set_preserved_proof: + assumes y: "\x. \\s. Q s \ x \ S s\ f \\rv s. x \ S s\" + assumes x: "\x. \\s. Q s \ x \ S s\ f \\rv s. x \ S s\" + shows "\\s. Q s \ P (S s)\ f \\rv s. P (S s)\" + apply (clarsimp simp: valid_def) + by (metis (mono_tags, lifting) equalityI post_by_hoare subsetI x y) + +lemma set_shrink_proof: + assumes x: "\x. \\s. x \ S s\ f \\rv s. x \ S s\" + shows + "\\s. \S'. S' \ S s \ P S'\ + f + \\rv s. P (S s)\" + apply (clarsimp simp: valid_def) + apply (drule spec, erule mp) + apply (clarsimp simp: subset_iff) + apply (rule ccontr) + apply (drule(1) use_valid [OF _ x]) + apply simp + done + +lemma shrinks_proof: + assumes y: "\s. finite (S s)" + assumes x: "\x. \\s. x \ S s \ P s\ f \\rv s. x \ S s\" + assumes z: "\P\ f \\rv s. x \ S s\" + assumes w: "\s. P s \ x \ S s" + shows "\\s. card (S s) \ n \ P s\ f \\rv s. card (S s) < n\" + apply (clarsimp simp: valid_def) + apply (erule less_le_trans[rotated]) + apply (rule psubset_card_mono[OF y]) + apply (rule psubsetI) + apply clarsimp + apply (rule ccontr) + apply (drule (2) use_valid[OF _ x, OF _ conjI]) + apply simp + by (metis use_valid w z) + +lemma use_validE_R: + "\ (Inr r, s') \ fst (f s); \P\ f \Q\,-; P s \ \ Q r s'" + unfolding validE_R_def validE_def + by (frule(2) use_valid, simp) + +lemma valid_preservation_ex: + assumes x: "\x P. \\s. P (f s x :: 'b)\ m \\rv s. P (f s x)\" + shows "\\s. P (f s :: 'a \ 'b)\ m \\rv s. P (f s)\" + apply (clarsimp simp: valid_def) + apply (erule subst[rotated, where P=P]) + apply (rule ext) + apply (erule use_valid [OF _ x]) + apply simp + done + +lemma whenE_inv: + assumes a: "\P\ f \\_. P\" + shows "\P\ whenE Q f \\_. P\" + by (wpsimp wp: a) + +lemma whenE_throwError_wp: + "\\s. \ P \ Q s\ whenE P (throwError e) \\_. Q\, \\\\" + by wpsimp + +lemma ifM_throwError_returnOk: + "\Q\ test \\c s. \ c \ P s\ \ \Q\ ifM test (throwError e) (returnOk ()) \\_. P\, -" + unfolding ifM_def + apply (fold liftE_bindE) + apply wpsimp + apply assumption + apply simp + done + +lemma ifME_liftE: + "ifME (liftE test) a b = ifM test a b" + by (simp add: ifME_def ifM_def liftE_bindE) + +lemma gets_the_inv: "\P\ gets_the V \\rv. P\" by wpsimp + +lemma select_f_inv: + "\P\ select_f S \\_. P\" + by (simp add: select_f_def valid_def) + +lemmas state_unchanged = in_inv_by_hoareD [THEN sym] + +lemma validI: + assumes rl: "\s r s'. \ P s; (r, s') \ fst (S s) \ \ Q r s'" + shows "\P\ S \Q\" + unfolding valid_def using rl by safe + +lemma opt_return_pres_lift: + assumes x: "\v. \P\ f v \\rv. P\" + shows "\P\ case x of None \ return () | Some v \ f v \\rv. P\" + by (wpsimp wp: x) + +lemma valid_return_unit: + "\P\ f >>= (\_. return ()) \\r. Q\ \ \P\ f \\r. 
Q\" + by (auto simp: valid_def in_bind in_return Ball_def) + +lemma hoare_weak_lift_imp_conj: + "\ \Q\ m \Q'\; \R\ m \R'\ \ + \ \\s. (P \ Q s) \ R s\ m \\rv s. (P \ Q' rv s) \ R' rv s\" + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_weak_lift_imp) + apply assumption+ + done + +lemma hoare_eq_P: + assumes "\P. \P\ f \\_. P\" + shows "\(=) s\ f \\_. (=) s\" + by (rule assms) + +lemma hoare_validE_R_conj: + "\\P\ f \Q\, -; \P\ f \R\, -\ \ \P\ f \Q and R\, -" + by (simp add: valid_def validE_def validE_R_def Let_def split_def split: sum.splits) + +lemmas throwError_validE_R = throwError_wp [where E="\\", folded validE_R_def] + +lemma valid_case_option_post_wp: + "\\x. \P x\ f \\rv. Q x\\ \ + \\s. case ep of Some x \ P x s | _ \ True\ + f + \\rv s. case ep of Some x \ Q x s | _ \ True\" + by (cases ep, simp_all add: hoare_vcg_prop) + +lemma P_bool_lift: + assumes t: "\Q\ f \\r. Q\" + assumes f: "\\s. \Q s\ f \\r s. \Q s\" + shows "\\s. P (Q s)\ f \\r s. P (Q s)\" + apply (clarsimp simp: valid_def) + apply (rule back_subst[where P=P], assumption) + apply (rule iffI) + apply (erule (1) use_valid [OF _ t]) + apply (rule classical) + apply (drule (1) use_valid [OF _ f]) + apply simp + done + +lemmas fail_inv = hoare_fail_any[where Q="\_. P" and P=P for P] + +lemma gets_sp: "\P\ gets f \\rv. P and (\s. f s = rv)\" + by (wp, simp) + +lemma hoare_Ball_helper: + assumes x: "\x. \P x\ f \Q x\" + assumes y: "\P. \\s. P (S s)\ f \\rv s. P (S s)\" + shows "\\s. \x \ S s. P x s\ f \\rv s. \x \ S s. Q x rv s\" + apply (clarsimp simp: valid_def) + apply (drule bspec, erule back_subst[where P="\A. x\A" for x]) + apply (erule post_by_hoare[OF y, rotated]) + apply (rule refl) + apply (erule (1) post_by_hoare[OF x]) + done + +lemma handy_prop_divs: + assumes x: "\P. \\s. P (Q s) \ S s\ f \\rv s. P (Q' rv s)\" + "\P. \\s. P (R s) \ S s\ f \\rv s. P (R' rv s)\" + shows "\\s. P (Q s \ R s) \ S s\ f \\rv s. P (Q' rv s \ R' rv s)\" + "\\s. P (Q s \ R s) \ S s\ f \\rv s. P (Q' rv s \ R' rv s)\" + apply (clarsimp simp: valid_def + elim!: subst[rotated, where P=P]) + apply (rule use_valid [OF _ x(1)], assumption) + apply (rule use_valid [OF _ x(2)], assumption) + apply simp + apply (clarsimp simp: valid_def + elim!: subst[rotated, where P=P]) + apply (rule use_valid [OF _ x(1)], assumption) + apply (rule use_valid [OF _ x(2)], assumption) + apply simp + done + +lemma hoare_as_subst: + "\ \P. \\s. P (fn s)\ f \\rv s. P (fn s)\; + \v :: 'a. \P v\ f \Q v\ \ \ + \\s. P (fn s) s\ f \\rv s. Q (fn s) rv s\" + by (rule hoare_lift_Pf3) + +lemmas hoare_vcg_ball_lift = hoare_vcg_const_Ball_lift + +lemma hoare_set_preserved: + assumes x: "\x. \fn' x\ m \\rv. fn x\" + shows "\\s. set xs \ {x. fn' x s}\ m \\rv s. set xs \ {x. fn x s}\" + apply (induct xs) + apply simp + apply wp + apply simp + apply (rule hoare_vcg_conj_lift) + apply (rule x) + apply assumption + done + +lemma hoare_ex_pre: (* safe, unlike hoare_vcg_ex_lift *) + "(\x. \P x\ f \Q\) \ \\s. \x. P x s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_ex_pre_conj: + "\\x. \\s. P x s \ P' s\ f \Q\\ + \ \\s. (\x. P x s) \ P' s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_conj_lift_inv: + "\\P\ f \Q\; \\s. P' s \ I s\ f \\rv. I\; + \s. P s \ P' s\ + \ \\s. P s \ I s\ f \\rv s. Q rv s \ I s\" + by (fastforce simp: valid_def) + +lemma hoare_in_monad_post: + assumes x: "\P. \P\ f \\x. P\" + shows "\\\ f \\rv s. (rv, s) \ fst (f s)\" + apply (clarsimp simp: valid_def) + apply (rule back_subst[where P="\s. 
x\fst (f s)" for x], assumption) + apply (simp add: state_unchanged[OF x]) + done + +lemma list_case_throw_validE_R: + "\ \y ys. xs = y # ys \ \P\ f y ys \Q\,- \ \ + \P\ case xs of [] \ throwError e | x # xs \ f x xs \Q\,-" + apply (cases xs, simp_all) + apply wp + done + +lemma validE_R_sp: + assumes x: "\P\ f \Q\,-" + assumes y: "\x. \Q x\ g x \R\,-" + shows "\P\ f >>=E (\x. g x) \R\,-" + by (rule hoare_pre, wp x y, simp) + +lemma valid_set_take_helper: + "\P\ f \\rv s. \x \ set (xs rv s). Q x rv s\ + \ \P\ f \\rv s. \x \ set (take (n rv s) (xs rv s)). Q x rv s\" + apply (erule hoare_strengthen_post) + apply (clarsimp dest!: in_set_takeD) + done + +lemma whenE_throwError_sp: + "\P\ whenE Q (throwError e) \\rv s. \ Q \ P s\, -" + apply (simp add: whenE_def validE_R_def) + apply (intro conjI impI; wp) + done + +lemma weaker_hoare_ifE: + assumes x: "\P \ a \Q\,\E\" + assumes y: "\P'\ b \Q\,\E\" + shows "\P and P'\ if test then a else b \Q\,\E\" + apply (rule hoare_vcg_precond_impE) + apply (wp x y) + apply simp + done + +lemma wp_split_const_if: + assumes x: "\P\ f \Q\" + assumes y: "\P'\ f \Q'\" + shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. (G \ Q rv s) \ (\ G \ Q' rv s)\" + by (cases G; simp add: x y) + +lemma wp_split_const_if_R: + assumes x: "\P\ f \Q\,-" + assumes y: "\P'\ f \Q'\,-" + shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. (G \ Q rv s) \ (\ G \ Q' rv s)\,-" + by (cases G; simp add: x y) + +lemma hoare_disj_division: + "\ P \ Q; P \ \R\ f \S\; Q \ \T\ f \S\ \ + \ \\s. (P \ R s) \ (Q \ T s)\ f \S\" + apply safe + apply (rule hoare_pre_imp) + prefer 2 + apply simp + apply simp + apply (rule hoare_pre_imp) + prefer 2 + apply simp + apply simp + done + +lemma hoare_grab_asm: + "\ G \ \P\ f \Q\ \ \ \\s. G \ P s\ f \Q\" + by (cases G, simp+) + +lemma hoare_grab_asm2: + "\P' \ \\s. P s \ R s\ f \Q\\ + \ \\s. P s \ P' \ R s\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_grab_exs: + assumes x: "\x. P x \ \P'\ f \Q\" + shows "\\s. \x. P x \ P' s\ f \Q\" + apply (clarsimp simp: valid_def) + apply (erule(2) use_valid [OF _ x]) + done + +lemma hoare_prop_E: "\\rv. P\ f -,\\rv s. P\" + unfolding validE_E_def + by (rule hoare_pre, wp, simp) + +lemma hoare_vcg_conj_lift_R: + "\ \P\ f \Q\,-; \R\ f \S\,- \ + \ \\s. P s \ R s\ f \\rv s. Q rv s \ S rv s\,-" + apply (simp add: validE_R_def validE_def) + apply (drule(1) hoare_vcg_conj_lift) + apply (erule hoare_strengthen_post) + apply (clarsimp split: sum.splits) + done + +lemma hoare_walk_assmsE: + assumes x: "\P\ f \\rv. P\" and y: "\s. P s \ Q s" and z: "\P\ g \\rv. Q\" + shows "\P\ doE x \ f; g odE \\rv. Q\" + apply (wp z) + apply (simp add: validE_def) + apply (rule hoare_strengthen_post [OF x]) + apply (auto simp: y split: sum.splits) + done + +lemma univ_wp: + "\\s. \(rv, s') \ fst (f s). Q rv s'\ f \Q\" + by (simp add: valid_def) + +lemma univ_get_wp: + assumes x: "\P. \P\ f \\rv. P\" + shows "\\s. \(rv, s') \ fst (f s). s = s' \ Q rv s'\ f \Q\" + apply (rule hoare_pre_imp[OF _ univ_wp]) + apply clarsimp + apply (drule bspec, assumption, simp) + apply (drule mp) + apply (simp add: state_unchanged[OF x]) + apply simp + done + +lemma other_hoare_in_monad_post: + assumes x: "\P. \P\ fn \\rv. P\" + shows "\\s. \(v, s) \ fst (fn s). F v = v\ fn \\v s'. (F v, s') \ fst (fn s')\" + proof - + have P: "\v s. 
(F v = v) \ (v, s) \ fst (fn s) \ (F v, s) \ fst (fn s)" + by simp + show ?thesis + apply (rule hoare_post_imp [OF P], assumption) + apply (rule hoare_pre_imp) + defer + apply (rule hoare_vcg_conj_lift) + apply (rule univ_get_wp [OF x]) + apply (rule hoare_in_monad_post [OF x]) + apply clarsimp + apply (drule bspec, assumption, simp) + done + qed + +lemma weak_if_wp: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ + \P and P'\ f \\r. if C r then Q r else Q' r\" + by (auto simp add: valid_def split_def) + +lemma weak_if_wp': + "\P\ f \\r. Q r and Q' r\ \ + \P\ f \\r. if C r then Q r else Q' r\" + by (auto simp add: valid_def split_def) + +lemma bindE_split_recursive_asm: + assumes x: "\x s'. \ (Inr x, s') \ fst (f s) \ \ \\s. B x s \ s = s'\ g x \C\, \E\" + shows "\A\ f \B\, \E\ \ \\st. A st \ st = s\ f >>=E g \C\, \E\" + apply (clarsimp simp: validE_def valid_def bindE_def in_bind lift_def) + apply (erule allE, erule(1) impE) + apply (drule(1) bspec, simp) + apply (clarsimp simp: in_throwError split: sum.splits) + apply (drule x) + apply (clarsimp simp: validE_def valid_def) + apply (drule(1) bspec, simp split: sum.splits) + done + +lemma validE_R_abstract_rv: + "\P\ f \\rv s. \rv'. Q rv' s\,- \ \P\ f \Q\,-" + by (erule hoare_post_imp_R, simp) + +lemma validE_cases_valid: + "\P\ f \\rv s. Q (Inr rv) s\,\\rv s. Q (Inl rv) s\ + \ \P\ f \Q\" + apply (simp add: validE_def) + apply (erule hoare_strengthen_post) + apply (simp split: sum.split_asm) + done + +lemma liftM_pre: + assumes rl: "\\s. \ P s \ a \ \_ _. False \" + shows "\\s. \ P s \ liftM f a \ \_ _. False \" + unfolding liftM_def + apply (rule seq) + apply (rule rl) + apply wp + apply simp + done + +lemma hoare_gen_asm': + "(P \ \P'\ f \Q\) \ \P' and (\_. P)\ f \Q\" + apply (auto intro: hoare_assume_pre) + done + +lemma hoare_gen_asm_conj: + "(P \ \P'\ f \Q\) \ \\s. P' s \ P\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_add_K: + "\P\ f \Q\ \ \\s. P s \ I\ f \\rv s. Q rv s \ I\" + by (fastforce simp: valid_def) + +lemma valid_rv_lift: + "\P'\ f \\rv s. rv \ Q rv s\ \ \\s. P \ P' s\ f \\rv s. rv \ P \ Q rv s\" + by (fastforce simp: valid_def) + +lemma valid_imp_ex: + "\P\ f \\rv s. \x. rv \ Q rv s x\ \ \P\ f \\rv s. rv \ (\x. Q rv s x)\" + by (fastforce simp: valid_def) + +lemma valid_rv_split: + "\\P\ f \\rv s. rv \ Q s\; \P\ f \\rv s. \rv \ Q' s\\ + \ \P\ f \\rv s. if rv then Q s else Q' s\" + by (fastforce simp: valid_def) + +lemma hoare_rv_split: + "\\P\ f \\rv s. rv \ (Q rv s)\; \P\ f \\rv s. (\rv) \ (Q rv s)\\ + \ \P\ f \Q\" + apply (clarsimp simp: valid_def split_def) + by (metis (full_types) fst_eqD snd_conv) + +lemma combine_validE: + "\ \ P \ x \ Q \,\ E \; \ P' \ x \ Q' \,\ E' \ \ + \ \ P and P' \ x \ \r. (Q r) and (Q' r) \,\\r. (E r) and (E' r) \" + apply (clarsimp simp: validE_def valid_def split: sum.splits) + apply (erule allE, erule (1) impE)+ + apply (drule (1) bspec)+ + apply clarsimp + done + +lemma valid_case_prod: + "\ \x y. valid (P x y) (f x y) Q \ \ valid (case_prod P v) (case_prod (\x y. f x y) v) Q" + by (simp add: split_def) + +lemma validE_case_prod: + "\ \x y. validE (P x y) (f x y) Q E \ \ validE (case_prod P v) (case_prod (\x y. f x y) v) Q E" + by (simp add: split_def) + +lemma valid_pre_satisfies_post: + "\ \s r' s'. P s \ Q r' s' \ \ \ P \ m \ Q \" + by (clarsimp simp: valid_def) + +lemma validE_pre_satisfies_post: + "\ \s r' s'. P s \ Q r' s'; \s r' s'. 
P s \ R r' s' \ \ \ P \ m \ Q \,\ R \" + by (clarsimp simp: validE_def2 split: sum.splits) + +lemma hoare_validE_R_conjI: + "\ \P\ f \Q\, - ; \P\ f \Q'\, - \ \ \P\ f \\rv s. Q rv s \ Q' rv s\, -" + by (clarsimp simp: Ball_def validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_validE_E_conjI: + "\ \P\ f -, \Q\ ; \P\ f -, \Q'\ \ \ \P\ f -, \\rv s. Q rv s \ Q' rv s\" + by (clarsimp simp: Ball_def validE_E_def validE_def valid_def split: sum.splits) + +lemma validE_R_post_conjD1: + "\P\ f \\r s. Q r s \ R r s\,- \ \P\ f \Q\,-" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma validE_R_post_conjD2: + "\P\ f \\r s. Q r s \ R r s\,- \ \P\ f \R\,-" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma throw_opt_wp[wp]: + "\if v = None then E ex else Q (the v)\ throw_opt ex v \Q\,\E\" + unfolding throw_opt_def by wpsimp auto + +lemma hoare_name_pre_state2: + "(\s. \P and ((=) s)\ f \Q\) \ \P\ f \Q\" + by (auto simp: valid_def intro: hoare_name_pre_state) + +lemma returnOk_E': "\P\ returnOk r -,\E\" + by wpsimp + +lemma throwError_R': "\P\ throwError e \Q\,-" + by wpsimp + +end \ No newline at end of file diff --git a/lib/Monads/No_Fail.thy b/lib/Monads/nondet/Nondet_No_Fail.thy similarity index 93% rename from lib/Monads/No_Fail.thy rename to lib/Monads/nondet/Nondet_No_Fail.thy index 9736a08747..8bf7c5fd0b 100644 --- a/lib/Monads/No_Fail.thy +++ b/lib/Monads/nondet/Nondet_No_Fail.thy @@ -7,10 +7,10 @@ (* Lemmas about the no_fail predicate. *) -theory No_Fail +theory Nondet_No_Fail imports - In_Monad - NonDetMonadVCG + Nondet_In_Monad + Nondet_VCG WPSimp begin @@ -32,7 +32,7 @@ lemma no_fail_pre[wp_pre]: by (simp add: no_fail_def) lemma wpc_helper_no_fail_final: - "no_fail Q f \ wpc_helper (P, P') (Q, Q') (no_fail P f)" + "no_fail Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_fail P f)" by (clarsimp simp: wpc_helper_def elim!: no_fail_pre) wpc_setup "\m. no_fail P m" wpc_helper_no_fail_final @@ -160,7 +160,7 @@ lemma no_fail_spec: lemma no_fail_assertE[wp]: "no_fail (\_. P) (assertE P)" - by (simp add: assertE_def split: if_split) + by (simp add: assertE_def) lemma no_fail_spec_pre: "\ no_fail (((=) s) and P') f; \s. P s \ P' s \ \ no_fail (((=) s) and P) f" @@ -168,7 +168,7 @@ lemma no_fail_spec_pre: lemma no_fail_whenE[wp]: "\ G \ no_fail P f \ \ no_fail (\s. G \ P s) (whenE G f)" - by (simp add: whenE_def split: if_split) + by (simp add: whenE_def) lemma no_fail_unlessE[wp]: "\ \ G \ no_fail P f \ \ no_fail (\s. \ G \ P s) (unlessE G f)" @@ -225,4 +225,12 @@ lemma no_fail_condition: unfolding condition_def no_fail_def by clarsimp +lemma no_fail_ex_lift: + "(\x. no_fail (P x) f) \ no_fail (\s. \x. P x s) f" + by (clarsimp simp: no_fail_def) + +lemma no_fail_grab_asm: + "(G \ no_fail P f) \ no_fail (\s. G \ P s) f" + by (cases G, simp+) + end diff --git a/lib/Monads/No_Throw.thy b/lib/Monads/nondet/Nondet_No_Throw.thy similarity index 96% rename from lib/Monads/No_Throw.thy rename to lib/Monads/nondet/Nondet_No_Throw.thy index d2af764ec8..03de80df87 100644 --- a/lib/Monads/No_Throw.thy +++ b/lib/Monads/nondet/Nondet_No_Throw.thy @@ -8,10 +8,10 @@ (* Lemmas about no_throw. Usually should have a conclusion "no_throw P m". Includes some monad equations that have no_throw as a main assumption. 
*) -theory No_Throw +theory Nondet_No_Throw imports - WhileLoopRules - MonadEq_Lemmas + Nondet_While_Loop_Rules + Nondet_MonadEq_Lemmas begin section "Basic exception reasoning" @@ -32,6 +32,8 @@ lemma no_throw_def': by (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits) +subsection \no_throw rules\ + lemma no_throw_returnOk[simp]: "no_throw P (returnOk a)" unfolding no_throw_def @@ -73,7 +75,7 @@ lemma no_throw_fail[simp]: lemma bindE_fail_propagates: "\ no_throw \ A; empty_fail A \ \ A >>=E (\_. fail) = fail" by (fastforce simp: no_throw_def validE_def valid_def bind_def empty_fail_def - bindE_def split_def fail_def NonDetMonad.lift_def throwError_def + bindE_def split_def fail_def Nondet_Monad.lift_def throwError_def split: sum.splits) lemma whileLoopE_nothrow: diff --git a/lib/Monads/NonDetMonad_Sat.thy b/lib/Monads/nondet/Nondet_Sat.thy similarity index 96% rename from lib/Monads/NonDetMonad_Sat.thy rename to lib/Monads/nondet/Nondet_Sat.thy index e0b940f539..f49ff82b8e 100644 --- a/lib/Monads/NonDetMonad_Sat.thy +++ b/lib/Monads/nondet/Nondet_Sat.thy @@ -5,9 +5,9 @@ * SPDX-License-Identifier: BSD-2-Clause *) -theory NonDetMonad_Sat +theory Nondet_Sat imports - NonDetMonad + Nondet_Monad WPSimp begin @@ -17,7 +17,8 @@ text \ The dual to validity: an existential instead of a universal quantifier for the post condition. In refinement, it is often sufficient to know that there is one state that satisfies a condition.\ definition exs_valid :: - "('a \ bool) \ ('a, 'b) nondet_monad \ ('b \ 'a \ bool) \ bool" ("\_\ _ \\_\") where + "('a \ bool) \ ('a, 'b) nondet_monad \ ('b \ 'a \ bool) \ bool" + ("\_\ _ \\_\") where "\P\ f \\Q\ \ \s. P s \ (\(rv, s') \ fst (f s). Q rv s')" text \The above for the exception monad\ @@ -139,7 +140,7 @@ lemma gets_exs_valid: lemma exs_valid_assert_opt[wp]: "\\s. \x. G = Some x \ Q x s\ assert_opt G \\Q\" - by (clarsimp simp: assert_opt_def exs_valid_def get_def assert_def bind_def' return_def) + by (clarsimp simp: assert_opt_def exs_valid_def return_def) lemma gets_the_exs_valid[wp]: "\\s. \x. h s = Some x \ Q x s\ gets_the h \\Q\" diff --git a/lib/Monads/nondet/Nondet_Strengthen_Setup.thy b/lib/Monads/nondet/Nondet_Strengthen_Setup.thy new file mode 100644 index 0000000000..56556e2438 --- /dev/null +++ b/lib/Monads/nondet/Nondet_Strengthen_Setup.thy @@ -0,0 +1,77 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Nondet_Strengthen_Setup + imports + Strengthen + Nondet_No_Fail + Nondet_VCG +begin + +section \Strengthen setup.\ + +context strengthen_implementation begin + +lemma strengthen_hoare [strg]: + "\\r s. st F (\) (Q r s) (R r s)\ + \ st F (\) (\P\ f \Q\) (\P\ f \R\)" + by (cases F, auto elim: hoare_strengthen_post) + +lemma strengthen_validE_R_cong[strg]: + "\\r s. st F (\) (Q r s) (R r s)\ + \ st F (\) (\P\ f \Q\, -) (\P\ f \R\, -)" + by (cases F, auto intro: hoare_post_imp_R) + +lemma strengthen_validE_cong[strg]: + "\\r s. st F (\) (Q r s) (R r s); \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f \Q\, \S\) (\P\ f \R\, \T\)" + by (cases F, auto elim: hoare_post_impErr) + +lemma strengthen_validE_E_cong[strg]: + "\\r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f -, \S\) (\P\ f -, \T\)" + by (cases F, auto elim: hoare_post_impErr simp: validE_E_def) + +lemma wpfix_strengthen_hoare: + "\\s. st (\ F) (\) (P s) (P' s); \r s. 
st F (\) (Q r s) (Q' r s)\ + \ st F (\) (\P\ f \Q\) (\P'\ f \Q'\)" + by (cases F, auto elim: hoare_chain) + +lemma wpfix_strengthen_validE_R_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (Q' r s)\ + \ st F (\) (\P\ f \Q\, -) (\P'\ f \Q'\, -)" + by (cases F, auto elim: hoare_chainE simp: validE_R_def) + +lemma wpfix_strengthen_validE_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (R r s); + \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f \Q\, \S\) (\P'\ f \R\, \T\)" + by (cases F, auto elim: hoare_chainE) + +lemma wpfix_strengthen_validE_E_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f -, \S\) (\P'\ f -, \T\)" + by (cases F, auto elim: hoare_chainE simp: validE_E_def) + +lemma wpfix_no_fail_cong: + "\\s. st (\ F) (\) (P s) (P' s)\ + \ st F (\) (no_fail P f) (no_fail P' f)" + by (cases F, auto elim: no_fail_pre) + +lemmas nondet_wpfix_strgs = + wpfix_strengthen_validE_R_cong + wpfix_strengthen_validE_E_cong + wpfix_strengthen_validE_cong + wpfix_strengthen_hoare + wpfix_no_fail_cong + +end + +lemmas nondet_wpfix_strgs[wp_fix_strgs] + = strengthen_implementation.nondet_wpfix_strgs + +end \ No newline at end of file diff --git a/lib/Monads/NonDetMonad_Total.thy b/lib/Monads/nondet/Nondet_Total.thy similarity index 95% rename from lib/Monads/NonDetMonad_Total.thy rename to lib/Monads/nondet/Nondet_Total.thy index 7ad256f5a5..5eac5abd4b 100644 --- a/lib/Monads/NonDetMonad_Total.thy +++ b/lib/Monads/nondet/Nondet_Total.thy @@ -5,13 +5,13 @@ * SPDX-License-Identifier: BSD-2-Clause *) -(* Total correctness Hoare logic for the NonDetMonad (= valid + no_fail) *) +(* Total correctness Hoare logic for the Nondet_Monad (= valid + no_fail) *) -theory NonDetMonad_Total - imports No_Fail +theory Nondet_Total + imports Nondet_No_Fail begin -section \Total correctness for NonDetMonad and NonDetMonad with exceptions\ +section \Total correctness for Nondet_Monad and Nondet_Monad with exceptions\ subsection Definitions @@ -20,7 +20,8 @@ text \ is often similar. The following definitions allow such reasoning to take place.\ definition validNF :: - "('s \ bool) \ ('s,'a) nondet_monad \ ('a \ 's \ bool) \ bool" ("\_\/ _ /\_\!") where + "('s \ bool) \ ('s,'a) nondet_monad \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\!") where "\P\ f \Q\! \ \P\ f \Q\ \ no_fail P f" lemma validNF_alt_def: @@ -40,7 +41,7 @@ lemma validE_NF_alt_def: subsection \@{method wpc} setup\ lemma wpc_helper_validNF: - "\Q\ g \S\! \ wpc_helper (P, P') (Q, Q') \P\ g \S\!" + "\Q\ g \S\! \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ g \S\!" unfolding wpc_helper_def by clarsimp (metis hoare_vcg_precond_imp no_fail_pre validNF_def) @@ -52,13 +53,15 @@ subsection \Basic @{const validNF} theorems\ lemma validNF_make_schematic_post: "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \!) \ \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ f \ Q' \!" - by (auto simp add: valid_def validNF_def no_fail_def split: prod.splits) + by (auto simp: valid_def validNF_def no_fail_def + split: prod.splits) lemma validE_NF_make_schematic_post: "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \!) \ \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \!" - by (auto simp add: validE_NF_def validE_def valid_def no_fail_def split: prod.splits sum.splits) + by (auto simp: validE_NF_def validE_def valid_def no_fail_def + split: prod.splits sum.splits) lemma validNF_conjD1: "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q \!" 
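(* Illustration only, not a lemma of this library: validNF bundles a partial-correctness
   triple with a no_fail obligation, so small total-correctness facts can be discharged by
   unfolding both halves. The sketch below assumes the definitions validNF_def, valid_def,
   no_fail_def and return_def as stated in these theories; the lemma name is invented for
   the example.

     lemma validNF_return_sketch:
       "validNF (P x) (return x) P"
       by (clarsimp simp: validNF_def valid_def no_fail_def return_def)

   For anything larger one would instead rely on the wp/wpsimp setup, which handles the
   valid and no_fail conjuncts together. *)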
@@ -84,7 +87,7 @@ lemma validNF_no_fail: "\ \ P \ f \ Q \! \ \ no_fail P f" by (erule validNFE) -lemma snd_validNF: +lemma validNF_not_failed: "\ \ P \ f \ Q \!; P s \ \ \ snd (f s)" by (clarsimp simp: validNF_def no_fail_def) @@ -214,7 +217,7 @@ lemma validNF_chain: by (fastforce simp: validNF_def valid_def no_fail_def Ball_def) lemma validNF_case_prod[wp]: - "(\x y. \P x y\ B x y \Q\!) \ \case v of (x, y) \ P x y\ case v of (x, y) \ B x y \Q\!" + "\\x y. \P x y\ B x y \Q\!\ \ \case v of (x, y) \ P x y\ case v of (x, y) \ B x y \Q\!" by (metis prod.exhaust split_conv) lemma validE_NF_case_prod[wp]: @@ -302,7 +305,8 @@ lemma validNF_nobindE[wp]: text \ Set up triple rules for @{term validE_NF} so that we can use @{method wp} combinator rules.\ definition validE_NF_property :: - "('a \ 's \ bool) \ ('c \ 's \ bool) \ 's \ ('s, 'c+'a) nondet_monad \ bool" where + "('a \ 's \ bool) \ ('c \ 's \ bool) \ 's \ ('s, 'c+'a) nondet_monad \ bool" + where "validE_NF_property Q E s b \ \ snd (b s) \ (\(r', s') \ fst (b s). case r' of Inl x \ E x s' | Inr x \ Q x s')" @@ -344,6 +348,6 @@ lemma validE_NF_condition[wp]: lemma hoare_assume_preNF: "(\s. P s \ \P\ f \Q\!) \ \P\ f \Q\!" - by (metis validNF_alt_def) + by (simp add: validNF_alt_def) end \ No newline at end of file diff --git a/lib/Monads/NonDetMonadVCG.thy b/lib/Monads/nondet/Nondet_VCG.thy similarity index 89% rename from lib/Monads/NonDetMonadVCG.thy rename to lib/Monads/nondet/Nondet_VCG.thy index 8fbeca771e..4c926f36b0 100644 --- a/lib/Monads/NonDetMonadVCG.thy +++ b/lib/Monads/nondet/Nondet_VCG.thy @@ -5,17 +5,18 @@ * SPDX-License-Identifier: BSD-2-Clause *) -theory NonDetMonadVCG -imports - NonDetMonadLemmas - WPSimp +theory Nondet_VCG + imports + Nondet_Lemmas + WPSimp begin section \Hoare Logic\ subsection \Validity\ -text \This section defines a Hoare logic for partial correctness for +text \ + This section defines a Hoare logic for partial correctness for the nondeterministic state monad as well as the exception monad. The logic talks only about the behaviour part of the monad and ignores the failure flag. @@ -31,18 +32,19 @@ text \This section defines a Hoare logic for partial correctness for the monad satisfy the postcondition. Note that if the computation returns the empty set, the triple is trivially valid. This means @{term "assert P"} does not require us to prove that @{term P} holds, but rather allows us - to assume @{term P}! Proving non-failure is done via separate predicate and - calculus (see below). -\ + to assume @{term P}! Proving non-failure is done via a separate predicate and + calculus (see Nondet_No_Fail).\ definition valid :: - "('s \ bool) \ ('s,'a) nondet_monad \ ('a \ 's \ bool) \ bool" ("\_\/ _ /\_\") where + "('s \ bool) \ ('s,'a) nondet_monad \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\") where "\P\ f \Q\ \ \s. P s \ (\(r,s') \ fst (f s). Q r s')" text \ We often reason about invariant predicates. The following provides shorthand syntax - that avoids repeating potentially long predicates. \ + that avoids repeating potentially long predicates.\ abbreviation (input) invariant :: - "('s,'a) nondet_monad \ ('s \ bool) \ bool" ("_ \_\" [59,0] 60) where + "('s,'a) nondet_monad \ ('s \ bool) \ bool" + ("_ \_\" [59,0] 60) where "invariant f P \ \P\ f \\_. P\" text \ @@ -60,7 +62,7 @@ lemma validE_def2: text \ The following two instantiations are convenient to separate reasoning for exceptional and - normal case. 
\ + normal case.\ (* Narrator: they are in fact not convenient, and are now considered a mistake that should have been an abbreviation instead. *) definition validE_R :: (* FIXME lib: this should be an abbreviation *) @@ -73,7 +75,6 @@ definition validE_E :: (* FIXME lib: this should be an abbreviation *) where "\P\ f -,\E\ \ \P\ f \\_. \\,\E\" - (* These lemmas are useful to apply to rules to convert valid rules into a format suitable for wp. *) lemma valid_make_schematic_post: "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \) \ @@ -96,7 +97,7 @@ lemma hoare_pre_imp: lemmas hoare_weaken_pre = hoare_pre_imp[rotated] lemma hoare_vcg_precond_impE: (* FIXME lib: eliminate in favour of hoare_weaken_preE *) - "\ \Q\ f \R\,\E\; \s. P s \ Q s \ \ \P\ f \R\,\E\" + "\ \Q\ f \R\,\E\; \s. P s \ Q s \ \ \P\ f \R\,\E\" by (fastforce simp add:validE_def2) lemmas hoare_weaken_preE = hoare_vcg_precond_impE @@ -120,19 +121,19 @@ lemmas hoare_pre [wp_pre] = subsection \Setting up the precondition case splitter.\ lemma wpc_helper_valid: - "\Q\ g \S\ \ wpc_helper (P, P') (Q, Q') \P\ g \S\" + "\Q\ g \S\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ g \S\" by (clarsimp simp: wpc_helper_def elim!: hoare_pre) lemma wpc_helper_validE: - "\Q\ f \R\,\E\ \ wpc_helper (P, P') (Q, Q') \P\ f \R\,\E\" + "\Q\ f \R\,\E\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f \R\,\E\" by (clarsimp simp: wpc_helper_def elim!: hoare_pre) lemma wpc_helper_validE_R: - "\Q\ f \R\,- \ wpc_helper (P, P') (Q, Q') \P\ f \R\,-" + "\Q\ f \R\,- \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f \R\,-" by (clarsimp simp: wpc_helper_def elim!: hoare_pre) lemma wpc_helper_validR_R: - "\Q\ f -,\E\ \ wpc_helper (P, P') (Q, Q') \P\ f -,\E\" + "\Q\ f -,\E\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f -,\E\" by (clarsimp simp: wpc_helper_def elim!: hoare_pre) @@ -144,13 +145,17 @@ wpc_setup "\m. \P\ m -,\E\" wpc_helper_v subsection \Hoare Logic Rules\ +lemma bind_wp[wp_split]: + "\ \r. \Q' r\ g r \Q\; \P\f \Q'\ \ \ \P\ f >>= (\rv. g rv) \Q\" + by (fastforce simp: valid_def bind_def' intro: image_eqI[rotated]) + lemma seq: "\ \A\ f \B\; \x. P x \ \C\ g x \D\; \x s. B x s \ P x \ C s \ \ \A\ do x \ f; g x od \D\" by (fastforce simp: valid_def bind_def) lemma seq_ext: "\ \A\ f \B\; \x. \B x\ g x \C\ \ \ \A\ do x \ f; g x od \C\" - by (fastforce simp: valid_def bind_def) + by (rule bind_wp) lemma seqE: "\ \A\ f \B\,\E\; \x. \B x\ g x \C\,\E\ \ \ \A\ doE x \ f; g x odE \C\,\E\" @@ -420,11 +425,11 @@ lemma validE_R_validE: by (simp add: validE_R_def) lemma validE_validE_E: - "\P\ f \\\\,\E\ \ \P\ f -,\E\" + "\P\ f \\\\, \E\ \ \P\ f -, \E\" by (simp add: validE_E_def) lemma validE_E_validE: - "\P\ f -,\E\ \ \P\ f \\\\,\E\" + "\P\ f -, \E\ \ \P\ f \\\\, \E\" by (simp add: validE_E_def) @@ -445,6 +450,310 @@ lemma liftE_validE[simp]: by (simp add: liftE_liftM validE_def hoare_liftM_subst o_def) +subsection \Operator lifting/splitting\ + +lemma hoare_vcg_if_split: + "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" + by simp + +lemma hoare_vcg_if_splitE: + "\ P \ \Q\ f \S\,\E\; \P \ \R\ g \S\,\E\ \ \ + \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\,\E\" + by simp + +lemma hoare_vcg_split_case_option: + "\ \x. x = None \ \P x\ f x \R x\; \x y. x = Some y \ \Q x y\ g x y \R x\ \ \ + \\s. (x = None \ P x s) \ (\y. x = Some y \ Q x y s)\ + case x of None \ f x | Some y \ g x y + \R x\" + by (cases x; simp) + +lemma hoare_vcg_split_case_optionE: + "\ \x. x = None \ \P x\ f x \R x\,\E x\; \x y. 
x = Some y \ \Q x y\ g x y \R x\,\E x\ \ \ + \\s. (x = None \ P x s) \ (\y. x = Some y \ Q x y s)\ + case x of None \ f x | Some y \ g x y + \R x\, \E x\" + by (cases x; simp) + +lemma hoare_vcg_split_case_sum: + "\ \x a. x = Inl a \ \P x a\ f x a \R x\; \x b. x = Inr b \ \Q x b\ g x b \R x\ \ \ + \\s. (\a. x = Inl a \ P x a s) \ (\b. x = Inr b \ Q x b s) \ + case x of Inl a \ f x a | Inr b \ g x b + \R x\" + by (cases x; simp) + +lemmas hoare_vcg_precond_imp = hoare_weaken_pre (* FIXME lib: eliminate *) + +lemmas hoare_seq_ext = seq_ext[rotated] +lemmas hoare_vcg_seqE = seqE[rotated] + +lemma hoare_seq_ext_nobind: + "\ \B\ g \C\; \A\ f \\_. B\ \ \ \A\ do f; g od \C\" + by (erule seq_ext) (clarsimp simp: valid_def) + +lemma hoare_seq_ext_nobindE: + "\ \B\ g \C\, \E\; \A\ f \\_. B\, \E\ \ \ \A\ doE f; g odE \C\, \E\" + by (erule seqE) (clarsimp simp: validE_def) + +lemmas hoare_seq_ext_skip' = hoare_seq_ext[where B=C and C=C for C] + +lemma hoare_chain: + "\ \P\ f \Q\; \s. R s \ P s; \rv s. Q rv s \ S rv s \ \ \R\ f \S\" + by (wp_pre, rule hoare_post_imp) + +lemma validE_weaken: (* FIXME lib: eliminate in favour of hoare_chainE *) + "\ \P'\ A \Q'\,\E'\; \s. P s \ P' s; \rv s. Q' rv s \ Q rv s; \rv s. E' rv s \ E rv s \ + \ \P\ A \Q\,\E\" + by wp_pre (rule hoare_post_impErr) + +lemmas hoare_chainE = validE_weaken + +lemma hoare_vcg_conj_lift: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" + unfolding valid_def + by fastforce + +\ \A variant which works nicely with subgoals that do not contain schematics\ +lemmas hoare_vcg_conj_lift_pre_fix = hoare_vcg_conj_lift[where P=R and P'=R for R, simplified] + +lemma hoare_vcg_conj_liftE1: + "\ \P\ f \Q\,-; \P'\ f \Q'\,\E\ \ \ \P and P'\ f \\rv s. Q rv s \ Q' rv s\,\E\" + unfolding valid_def validE_R_def validE_def + by (fastforce simp: split_def split: sum.splits) + +lemma hoare_vcg_conj_liftE_weaker: + assumes "\P\ f \Q\, \E\" + assumes "\P'\ f \Q'\, \E\" + shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\" + apply (rule hoare_pre) + apply (fastforce intro: assms hoare_vcg_conj_liftE1 validE_validE_R hoare_post_impErr) + apply simp + done + +lemma hoare_vcg_disj_lift: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" + unfolding valid_def + by fastforce + +lemma hoare_vcg_const_Ball_lift: + "\ \x. x \ S \ \P x\ f \Q x\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\" + by (fastforce simp: valid_def) + +lemma hoare_vcg_const_Ball_lift_R: + "\ \x. x \ S \ \P x\ f \Q x\,- \ \ \\s. \x \ S. P x s\ f \\rv s. \x \ S. Q x rv s\,-" + unfolding validE_R_def validE_def + by (rule hoare_strengthen_post) + (fastforce intro!: hoare_vcg_const_Ball_lift split: sum.splits)+ + +lemma hoare_vcg_all_lift: + "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" + by (fastforce simp: valid_def) + +lemma hoare_vcg_all_lift_R: + "(\x. \P x\ f \Q x\, -) \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\, -" + by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified]) + +lemma hoare_vcg_imp_lift: + "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" + by (simp only: imp_conv_disj) (rule hoare_vcg_disj_lift) + +lemma hoare_vcg_imp_lift': + "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. \ P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" + by (wpsimp wp: hoare_vcg_imp_lift) + +lemma hoare_vcg_imp_liftE: + "\ \P'\ f \\rv s. \ P rv s\, \A\; \Q'\ f \Q\, \A\ \ + \ \\s. \ P' s \ Q' s\ f \\rv s. 
P rv s \ Q rv s\, \A\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_imp_lift_R: + "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" + by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_lift_R': + "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. \P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" + by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_conj_lift[wp_comb]: + "\ \P\ f \\rv s. Q rv s \ Q' rv s\; \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ \ \ + \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" + by (auto simp: valid_def) + +lemmas hoare_vcg_imp_conj_lift'[wp_unsafe] = hoare_vcg_imp_conj_lift[where Q'''="\\", simplified] + +lemma hoare_absorb_imp: + "\ P \ f \\rv s. Q rv s \ R rv s\ \ \ P \ f \\rv s. Q rv s \ R rv s\" + by (erule hoare_post_imp[rotated], blast) + +lemma hoare_weaken_imp: + "\ \rv s. Q rv s \ Q' rv s ; \P\ f \\rv s. Q' rv s \ R rv s\ \ + \ \P\ f \\rv s. Q rv s \ R rv s\" + by (clarsimp simp: valid_def split_def) + +lemma hoare_vcg_const_imp_lift: + "\ P \ \Q\ m \R\ \ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\" + by (cases P, simp_all add: hoare_vcg_prop) + +lemma hoare_vcg_const_imp_lift_E: + "(P \ \Q\ f -, \R\) \ \\s. P \ Q s\ f -, \\rv s. P \ R rv s\" + by (fastforce simp: validE_E_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_vcg_const_imp_lift_R: + "(P \ \Q\ m \R\,-) \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" + by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_weak_lift_imp: + "\P'\ f \Q\ \ \\s. P \ P' s\ f \\rv s. P \ Q rv s\" + by (auto simp add: valid_def split_def) + +lemma hoare_weak_lift_impE: + "\Q\ m \R\,\E\ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,\\rv s. P \ E rv s\" + by (cases P; simp add: validE_def hoare_vcg_prop) + +lemma hoare_weak_lift_imp_R: + "\Q\ m \R\,- \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" + by (cases P, simp_all) + +lemmas hoare_vcg_weaken_imp = hoare_weaken_imp (* FIXME lib: eliminate *) + +lemma hoare_vcg_ex_lift: + "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" + by (clarsimp simp: valid_def, blast) + +lemma hoare_vcg_ex_lift_R1: + "(\x. \P x\ f \Q\, -) \ \\s. \x. P x s\ f \Q\, -" + by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits) + +lemma hoare_liftP_ext: + assumes "\P x. m \\s. P (f s x)\" + shows "m \\s. P (f s)\" + unfolding valid_def + apply clarsimp + apply (erule subst[rotated, where P=P]) + apply (rule ext) + apply (drule use_valid, rule assms, rule refl) + apply simp + done + +(* for instantiations *) +lemma hoare_triv: "\P\f\Q\ \ \P\f\Q\" . +lemma hoare_trivE: "\P\ f \Q\,\E\ \ \P\ f \Q\,\E\" . +lemma hoare_trivE_R: "\P\ f \Q\,- \ \P\ f \Q\,-" . +lemma hoare_trivR_R: "\P\ f -,\E\ \ \P\ f -,\E\" . + +lemma hoare_vcg_E_conj: + "\ \P\ f -,\E\; \P'\ f \Q'\,\E'\ \ \ \\s. P s \ P' s\ f \Q'\, \\rv s. E rv s \ E' rv s\" + unfolding validE_def validE_E_def + by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) + +lemma hoare_vcg_E_elim: + "\ \P\ f -,\E\; \P'\ f \Q\,- \ \ \\s. P s \ P' s\ f \Q\,\E\" + by (rule hoare_post_impErr[OF hoare_vcg_E_conj]) (simp add: validE_R_def)+ + +lemma hoare_vcg_R_conj: + "\ \P\ f \Q\,-; \P'\ f \Q'\,- \ \ \\s. P s \ P' s\ f \\rv s. 
Q rv s \ Q' rv s\,-" + unfolding validE_R_def validE_def + by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) + +lemma hoare_lift_Pf_E_R: + "\ \x. \P x\ m \\_. P x\, -; \P. \\s. P (f s)\ m \\_ s. P (f s)\, - \ \ + \\s. P (f s) s\ m \\_ s. P (f s) s\, -" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_lift_Pf_E_E: + "\ \x. \P x\ m -, \\_. P x\; \P. \\s. P (f s)\ m -, \\_ s. P (f s)\ \ \ + \\s. P (f s) s\ m -, \\_ s. P (f s) s\" + by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_const_Ball_lift_E_E: + "(\x. x \ S \ \P x\ f -,\Q x\) \ \\s. \x \ S. P x s\ f -,\\rv s. \x \ S. Q x rv s\" + unfolding validE_E_def validE_def valid_def + by (fastforce split: sum.splits) + +lemma hoare_vcg_all_liftE_E: + "(\x. \P x\ f -, \Q x\) \ \\s. \x. P x s\ f -,\\rv s. \x. Q x rv s\" + by (rule hoare_vcg_const_Ball_lift_E_E[where S=UNIV, simplified]) + +lemma hoare_vcg_imp_liftE_E: + "\\P'\ f -, \\rv s. \ P rv s\; \Q'\ f -, \Q\\ \ + \\s. \ P' s \ Q' s\ f -, \\rv s. P rv s \ Q rv s\" + by (auto simp add: valid_def validE_E_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_ex_liftE: + "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_ex_liftE_E: + "\ \x. \P x\ f -,\E x\ \ \ \\s. \x. P x s\ f -,\\rv s. \x. E x rv s\" + by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_R: + "\ \P\ f \Q'\,-; \rv s. Q' rv s \ Q rv s \ \ \P\ f \Q\,-" + unfolding validE_R_def + by (erule hoare_post_impErr) + +lemma hoare_post_imp_E: + "\ \P\ f -,\Q'\; \rv s. Q' rv s \ Q rv s \ \ \P\ f -,\Q\" + unfolding validE_E_def + by (rule hoare_post_impErr) + +lemma hoare_post_comb_imp_conj: + "\ \P'\ f \Q\; \P\ f \Q'\; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\" + by (wpsimp wp: hoare_vcg_conj_lift) + +lemma hoare_vcg_if_lift: + "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ + \R\ f \\rv s. if P then X rv s else Y rv s\" + + "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ + \R\ f \\rv. if P then X rv else Y rv\" + by (auto simp: valid_def split_def) + +lemma hoare_vcg_disj_lift_R: + assumes x: "\P\ f \Q\,-" + assumes y: "\P'\ f \Q'\,-" + shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" + using assms + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_all_liftE: + "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_const_Ball_liftE: + "\ \x. x \ S \ \P x\ f \Q x\,\E\; \\s. True\ f \\r s. True\, \E\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. 
Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_split_lift[wp]: + "\P\ f x y \Q\ \ \P\ case (x, y) of (a, b) \ f a b \Q\" + by simp + +named_theorems hoare_vcg_op_lift +lemmas [hoare_vcg_op_lift] = + hoare_vcg_const_imp_lift + hoare_vcg_const_imp_lift_E + hoare_vcg_const_imp_lift_R + (* leaving out hoare_vcg_conj_lift*, because that is built into wp *) + hoare_vcg_disj_lift + hoare_vcg_disj_lift_R + hoare_vcg_ex_lift + hoare_vcg_ex_liftE + hoare_vcg_ex_liftE_E + hoare_vcg_all_lift + hoare_vcg_all_liftE + hoare_vcg_all_liftE_E + hoare_vcg_all_lift_R + hoare_vcg_const_Ball_lift + hoare_vcg_const_Ball_lift_R + hoare_vcg_const_Ball_lift_E_E + hoare_vcg_split_lift + hoare_vcg_if_lift + hoare_vcg_imp_lift' + hoare_vcg_imp_liftE + hoare_vcg_imp_lift_R + hoare_vcg_imp_liftE_E + + subsection \Weakest Precondition Rules\ lemma fail_wp: @@ -457,19 +766,20 @@ lemma return_wp: lemma get_wp: "\\s. P s s\ get \P\" - by(simp add: valid_def split_def get_def) + by (simp add: valid_def get_def) lemma gets_wp: "\\s. P (f s) s\ gets f \P\" by(simp add: valid_def split_def gets_def return_def get_def bind_def) -lemma modify_wp: - "\\s. P () (f s)\ modify f \P\" - by(simp add: valid_def split_def modify_def get_def put_def bind_def) - lemma put_wp: - "\\s. P () x\ put x \P\" - by(simp add: valid_def put_def) + "\\_. Q () s\ put s \Q\" + by (simp add: put_def valid_def) + +lemma modify_wp: + "\\s. Q () (f s)\ modify f \Q\" + unfolding modify_def + by (wp put_wp get_wp) lemma failE_wp: "\\\ fail \Q\, \E\" @@ -489,7 +799,7 @@ lemma returnOKE_R_wp: lemma liftE_wp: "\P\ f \Q\ \ \P\ liftE f \Q\,\E\" - by (clarsimp simp:valid_def validE_def2 liftE_def split_def Let_def bind_def return_def) + by simp lemma catch_wp: "\ \x. \E x\ handler x \Q\; \P\ f \Q\,\E\ \ \ \P\ catch f handler \Q\" @@ -540,8 +850,6 @@ lemma alternative_wp: using post_by_hoare[OF x] post_by_hoare[OF y] by fastforce -lemmas alternative_valid = alternative_wp[where P=P and P'=P for P, simplified] - lemma alternativeE_wp: assumes "\P\ f \Q\,\E\" assumes "\P'\ f' \Q\,\E\" @@ -554,7 +862,7 @@ lemma alternativeE_R_wp: unfolding validE_R_def by (rule alternativeE_wp) -lemma alternative_R_wp: +lemma alternativeE_E_wp: "\ \P\ f -,\Q\; \P'\ g -,\Q\ \ \ \P and P'\ f \ g -, \Q\" unfolding validE_E_def by (rule alternativeE_wp) @@ -569,10 +877,11 @@ lemma select_f_wp: lemma state_select_wp: "\\s. \t. (s, t) \ f \ P () t\ state_select f \P\" - by (clarsimp simp: state_select_def valid_def) + unfolding state_select_def2 + by (wpsimp wp: put_wp select_wp return_wp get_wp assert_wp) lemma condition_wp: - "\ \Q\ A \P\; \R\ B \P\ \ \ \\s. if C s then Q s else R s\ condition C A B \P\" + "\ \Q\ A \P\; \R\ B \P\ \ \ \\s. if C s then Q s else R s\ condition C A B \P\" by (clarsimp simp: condition_def valid_def) lemma conditionE_wp: @@ -604,18 +913,18 @@ lemma unlessE_wp: lemma maybeM_wp: "(\x. y = Some x \ \P x\ m x \Q\) \ \\s. (\x. y = Some x \ P x s) \ (y = None \ Q () s)\ maybeM m y \Q\" - unfolding maybeM_def by (cases y; simp add: bind_def return_def valid_def) + unfolding maybeM_def by (wpsimp wp: return_wp) auto lemma notM_wp: "\P\ m \\c. Q (\ c)\ \ \P\ notM m \Q\" - unfolding notM_def by (fastforce simp: bind_def return_def valid_def) + unfolding notM_def by (wpsimp wp: return_wp) lemma ifM_wp: assumes [wp]: "\Q\ f \S\" "\R\ g \S\" assumes [wp]: "\A\ P \\c s. c \ Q s\" "\B\ P \\c s. 
\c \ R s\" shows "\A and B\ ifM P f g \S\" - unfolding ifM_def using assms - by (fastforce simp: bind_def valid_def split: if_splits) + unfolding ifM_def + by (wpsimp wp: hoare_vcg_if_split hoare_vcg_conj_lift) lemma andM_wp: assumes [wp]: "\Q'\ B \Q\" @@ -673,7 +982,7 @@ lemmas liftME_E_E_wp[wp_split] = validE_validE_E [OF liftME_wp, simplified, OF v lemma assert_opt_wp: "\\s. x \ None \ Q (the x) s\ assert_opt x \Q\" unfolding assert_opt_def - by (case_tac x; wpsimp wp: fail_wp return_wp) + by (cases x; wpsimp wp: fail_wp return_wp) lemma gets_the_wp: "\\s. (f s \ None) \ Q (the (f s)) s\ gets_the f \Q\" @@ -705,225 +1014,6 @@ lemma select_throwError_wp: by (simp add: bind_def throwError_def return_def select_def validE_E_def validE_def valid_def) -subsection \Operator lifting/splitting\ - -lemma hoare_vcg_if_split: - "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" - by simp - -lemma hoare_vcg_if_splitE: - "\ P \ \Q\ f \S\,\E\; \P \ \R\ g \S\,\E\ \ \ - \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\,\E\" - by simp - -lemma hoare_vcg_split_case_option: - "\ \x. x = None \ \P x\ f x \R x\; \x y. x = Some y \ \Q x y\ g x y \R x\ \ \ - \\s. (x = None \ P x s) \(\y. x = Some y \ Q x y s)\ - case x of None \ f x | Some y \ g x y - \R x\" - by (cases x; simp) - -lemma hoare_vcg_split_case_optionE: - "\ \x. x = None \ \P x\ f x \R x\,\E x\; \x y. x = Some y \ \Q x y\ g x y \R x\,\E x\ \ \ - \\s. (x = None \ P x s) \ (\y. x = Some y \ Q x y s)\ - case x of None \ f x | Some y \ g x y - \R x\, \E x\" - by (cases x; simp) - -lemma hoare_vcg_split_case_sum: - "\ \x a. x = Inl a \ \P x a\ f x a \R x\; \x b. x = Inr b \ \Q x b\ g x b \R x\ \ \ - \\s. (\a. x = Inl a \ P x a s) \ (\b. x = Inr b \ Q x b s) \ - case x of Inl a \ f x a | Inr b \ g x b - \R x\" - by (cases x; simp) - -lemmas hoare_vcg_precond_imp = hoare_weaken_pre (* FIXME lib: eliminate *) - -lemmas hoare_seq_ext = seq_ext[rotated] -lemmas hoare_vcg_seqE = seqE[rotated] - -lemma hoare_seq_ext_nobind: - "\ \B\ g \C\; \A\ f \\_. B\ \ \ \A\ do f; g od \C\" - by (fastforce simp: valid_def bind_def Let_def split_def) - -lemma hoare_seq_ext_nobindE: - "\ \B\ g \C\, \E\; \A\ f \\_. B\, \E\ \ \ \A\ doE f; g odE \C\, \E\" - by (fastforce simp: validE_def valid_def bindE_def bind_def throwError_def return_def lift_def - split: sum.splits) - -lemmas hoare_seq_ext_skip' = hoare_seq_ext[where B=C and C=C for C] - -lemma hoare_chain: - "\ \P\ f \Q\; \s. R s \ P s; \rv s. Q rv s \ S rv s \ \ \R\ f \S\" - by (wp_pre, rule hoare_post_imp) - -lemma validE_weaken: (* FIXME lib: eliminate in favour of hoare_chainE *) - "\ \P'\ A \Q'\,\E'\; \s. P s \ P' s; \rv s. Q' rv s \ Q rv s; \rv s. E' rv s \ E rv s \ \ - \P\ A \Q\,\E\" - by wp_pre (rule hoare_post_impErr) - -lemmas hoare_chainE = validE_weaken - -lemma hoare_vcg_conj_lift: - "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - unfolding valid_def - by fastforce - -\ \A variant which works nicely with subgoals that do not contain schematics\ -lemmas hoare_vcg_conj_lift_pre_fix = hoare_vcg_conj_lift[where P=R and P'=R for R, simplified] - -lemma hoare_vcg_conj_liftE1: - "\ \P\ f \Q\,-; \P'\ f \Q'\,\E\ \ \ \P and P'\ f \\rv s. Q rv s \ Q' rv s\,\E\" - unfolding valid_def validE_R_def validE_def - by (fastforce simp: split_def split: sum.splits) - -lemma hoare_vcg_disj_lift: - "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" - unfolding valid_def - by fastforce - -lemma hoare_vcg_const_Ball_lift: - "\ \x. 
x \ S \ \P x\ f \Q x\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\" - by (fastforce simp: valid_def) - -lemma hoare_vcg_const_Ball_lift_R: - "\ \x. x \ S \ \P x\ f \Q x\,- \ \ \\s. \x \ S. P x s\ f \\rv s. \x \ S. Q x rv s\,-" - unfolding validE_R_def validE_def - by (rule hoare_strengthen_post) - (fastforce intro!: hoare_vcg_const_Ball_lift split: sum.splits)+ - -lemma hoare_vcg_all_lift: - "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - by (fastforce simp: valid_def) - -lemma hoare_vcg_all_lift_R: - "(\x. \P x\ f \Q x\, -) \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\, -" - by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified]) - -lemma hoare_vcg_imp_lift: - "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" - by (simp only: imp_conv_disj) (rule hoare_vcg_disj_lift) - -lemma hoare_vcg_imp_lift': - "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. \ P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" - by (wpsimp wp: hoare_vcg_imp_lift) - -lemma hoare_vcg_imp_conj_lift[wp_comb]: - "\ \P\ f \\rv s. Q rv s \ Q' rv s\; \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ \ \ - \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" - by (auto simp: valid_def) - -lemmas hoare_vcg_imp_conj_lift'[wp_unsafe] = hoare_vcg_imp_conj_lift[where Q'''="\\", simplified] - -lemma hoare_absorb_imp: - "\ P \ f \\rv s. Q rv s \ R rv s\ \ \ P \ f \\rv s. Q rv s \ R rv s\" - by (erule hoare_post_imp[rotated], blast) - -lemma hoare_weaken_imp: - "\ \rv s. Q rv s \ Q' rv s ; \P\ f \\rv s. Q' rv s \ R rv s\ \ - \ \P\ f \\rv s. Q rv s \ R rv s\" - by (clarsimp simp: valid_def split_def) - -lemma hoare_vcg_const_imp_lift: - "\ P \ \Q\ m \R\ \ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\" - by (cases P, simp_all add: hoare_vcg_prop) - -lemma hoare_vcg_const_imp_lift_R: - "(P \ \Q\ m \R\,-) \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" - by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits) - -lemma hoare_weak_lift_imp: - "\P'\ f \Q\ \ \\s. P \ P' s\ f \\rv s. P \ Q rv s\" - by (auto simp add: valid_def split_def) - -lemmas hoare_vcg_weaken_imp = hoare_weaken_imp (* FIXME lib: eliminate *) - -lemma hoare_vcg_ex_lift: - "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" - by (clarsimp simp: valid_def, blast) - -lemma hoare_vcg_ex_lift_R1: - "(\x. \P x\ f \Q\, -) \ \\s. \x. P x s\ f \Q\, -" - by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits) - -lemma hoare_liftP_ext: - assumes "\P x. m \\s. P (f s x)\" - shows "m \\s. P (f s)\" - unfolding valid_def - apply clarsimp - apply (erule subst[rotated, where P=P]) - apply (rule ext) - apply (drule use_valid, rule assms, rule refl) - apply simp - done - -(* for instantiations *) -lemma hoare_triv: "\P\f\Q\ \ \P\f\Q\" . -lemma hoare_trivE: "\P\ f \Q\,\E\ \ \P\ f \Q\,\E\" . -lemma hoare_trivE_R: "\P\ f \Q\,- \ \P\ f \Q\,-" . -lemma hoare_trivR_R: "\P\ f -,\E\ \ \P\ f -,\E\" . - -lemma hoare_vcg_E_conj: - "\ \P\ f -,\E\; \P'\ f \Q'\,\E'\ \ \ \\s. P s \ P' s\ f \Q'\, \\rv s. E rv s \ E' rv s\" - unfolding validE_def validE_E_def - by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) - -lemma hoare_vcg_E_elim: - "\ \P\ f -,\E\; \P'\ f \Q\,- \ \ \\s. P s \ P' s\ f \Q\,\E\" - by (rule hoare_post_impErr[OF hoare_vcg_E_conj]) (simp add: validE_R_def)+ - -lemma hoare_vcg_R_conj: - "\ \P\ f \Q\,-; \P'\ f \Q'\,- \ \ \\s. P s \ P' s\ f \\rv s. 
Q rv s \ Q' rv s\,-" - unfolding validE_R_def validE_def - by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) - -lemma hoare_lift_Pf_E_R: - "\ \x. \P x\ m \\_. P x\, -; \P. \\s. P (f s)\ m \\_ s. P (f s)\, - \ \ - \\s. P (f s) s\ m \\_ s. P (f s) s\, -" - by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) - -lemma hoare_lift_Pf_E_E: - "\ \x. \P x\ m -, \\_. P x\; \P. \\s. P (f s)\ m -, \\_ s. P (f s)\ \ \ - \\s. P (f s) s\ m -, \\_ s. P (f s) s\" - by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) - -lemma hoare_vcg_const_Ball_lift_E_E: - "(\x. x \ S \ \P x\ f -,\Q x\) \ \\s. \x \ S. P x s\ f -,\\rv s. \x \ S. Q x rv s\" - unfolding validE_E_def validE_def valid_def - by (fastforce split: sum.splits) - -lemma hoare_vcg_all_liftE_E: - "(\x. \P x\ f -, \Q x\) \ \\s. \x. P x s\ f -,\\rv s. \x. Q x rv s\" - by (rule hoare_vcg_const_Ball_lift_E_E[where S=UNIV, simplified]) - -lemma hoare_vcg_imp_liftE_E: - "\\P'\ f -, \\rv s. \ P rv s\; \Q'\ f -, \Q\\ \ - \\s. \ P' s \ Q' s\ f -, \\rv s. P rv s \ Q rv s\" - by (auto simp add: valid_def validE_E_def validE_def split_def split: sum.splits) - -lemma hoare_vcg_ex_liftE: - "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - -lemma hoare_vcg_ex_liftE_E: - "\ \x. \P x\ f -,\E x\ \ \ \\s. \x. P x s\ f -,\\rv s. \x. E x rv s\" - by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) - -lemma hoare_post_imp_R: - "\ \P\ f \Q'\,-; \rv s. Q' rv s \ Q rv s \ \ \P\ f \Q\,-" - unfolding validE_R_def - by (erule hoare_post_impErr) - -lemma hoare_post_imp_E: - "\ \P\ f -,\Q'\; \rv s. Q' rv s \ Q rv s \ \ \P\ f -,\Q\" - unfolding validE_E_def - by (rule hoare_post_impErr) - -lemma hoare_post_comb_imp_conj: - "\ \P'\ f \Q\; \P\ f \Q'\; \s. P s \ P' s \ \ \P\ f \\rv s. 
Q rv s \ Q' rv s\" - by (wpsimp wp: hoare_vcg_conj_lift) - - subsection \Setting up the @{method wp} method\ lemma valid_is_triple: @@ -1009,6 +1099,11 @@ lemmas [wp] = hoare_vcg_prop gets_the_wp gets_map_wp' liftE_wp + alternative_wp + alternativeE_R_wp + alternativeE_E_wp + alternativeE_wp + select_wp select_f_wp state_select_wp condition_wp @@ -1325,5 +1420,4 @@ lemmas hoare_forward_inv_step_validE_R[forward_inv_step_rules] = method forward_inv_step uses wp simp = rule forward_inv_step_rules, solves \wpsimp wp: wp simp: simp\ - end diff --git a/lib/Monads/WhileLoopRules.thy b/lib/Monads/nondet/Nondet_While_Loop_Rules.thy similarity index 85% rename from lib/Monads/WhileLoopRules.thy rename to lib/Monads/nondet/Nondet_While_Loop_Rules.thy index a7c2c7f7e5..e41a9d6460 100644 --- a/lib/Monads/WhileLoopRules.thy +++ b/lib/Monads/nondet/Nondet_While_Loop_Rules.thy @@ -4,11 +4,11 @@ * SPDX-License-Identifier: BSD-2-Clause *) -theory WhileLoopRules +theory Nondet_While_Loop_Rules imports - Empty_Fail - NonDetMonad_Total - NonDetMonad_Sat + Nondet_Empty_Fail + Nondet_Total + Nondet_Sat begin section "Well-ordered measures" @@ -47,12 +47,14 @@ text \ \ definition whileLoop_inv :: "('a \ 'b \ bool) \ ('a \ ('b, 'a) nondet_monad) \ 'a \ ('a \ 'b \ bool) \ - (('a \ 'b) \ 'a \ 'b) set \ ('b, 'a) nondet_monad" where + (('a \ 'b) \ 'a \ 'b) set \ ('b, 'a) nondet_monad" + where "whileLoop_inv C B x I R \ whileLoop C B x" definition whileLoopE_inv :: "('a \ 'b \ bool) \ ('a \ ('b, 'c + 'a) nondet_monad) \ 'a \ ('a \ 'b \ bool) \ - (('a \ 'b) \ 'a \ 'b) set \ ('b, 'c + 'a) nondet_monad" where + (('a \ 'b) \ 'a \ 'b) set \ ('b, 'c + 'a) nondet_monad" + where "whileLoopE_inv C B x I R \ whileLoopE C B x" lemma whileLoop_add_inv: @@ -284,22 +286,35 @@ lemma fst_whileLoop_cond_false: using loop_result by (rule in_whileLoop_induct, auto) +lemma whileLoop_terminates_results: + assumes non_term: "\r. \ \s. I r s \ C r s \ \ snd (B r s) \ B r \\ \r' s'. C r' s' \ I r' s' \" + shows + "\whileLoop_terminates C B r s; (Some (r, s), None) \ whileLoop_results C B; I r s; C r s\ + \ False" +proof (induct rule: whileLoop_terminates.induct) + case (1 r s) + then show ?case + apply clarsimp + done +next + case (2 r s) + then show ?case + apply (cut_tac non_term[where r=r]) + apply (clarsimp simp: exs_valid_def) + apply (subst (asm) (2) whileLoop_results.simps) + apply clarsimp + apply (insert whileLoop_results.simps) + apply fast + done +qed + lemma snd_whileLoop: assumes init_I: "I r s" - and cond_I: "C r s" - and non_term: "\r. \ \s. I r s \ C r s \ \ snd (B r s) \ B r \\ \r' s'. C r' s' \ I r' s' \" + and cond_I: "C r s" + and non_term: "\r. \ \s. I r s \ C r s \ \ snd (B r s) \ B r \\ \r' s'. 
C r' s' \ I r' s' \" shows "snd (whileLoop C B r s)" apply (clarsimp simp: whileLoop_def) - apply (rotate_tac) - apply (insert init_I cond_I) - apply (induct rule: whileLoop_terminates.induct) - apply clarsimp - apply (cut_tac r=r in non_term) - apply (clarsimp simp: exs_valid_def) - apply (subst (asm) (2) whileLoop_results.simps) - apply clarsimp - apply (insert whileLoop_results.simps) - apply fast + apply (erule (1) whileLoop_terminates_results[OF non_term _ _ init_I cond_I]) done lemma whileLoop_terminates_inv: @@ -332,7 +347,7 @@ proof - apply (induct arbitrary: r s rule: whileLoop_results.inducts) apply simp apply simp - apply (insert snd_validNF [OF inv_holds])[1] + apply (insert validNF_not_failed[OF inv_holds])[1] apply blast apply (drule use_validNF [OF _ inv_holds]) apply simp @@ -427,11 +442,11 @@ lemma whileLoopE_wp: by (rule validE_whileLoopE) lemma exs_valid_whileLoop: - assumes init_T: "\s. P s \ T r s" + assumes init_T: "\s. P s \ T r s" and iter_I: "\r s0. \\s. T r s \ C r s \ s = s0\ B r \\\r' s'. T r' s' \ ((r', s'),(r, s0)) \ R\" and wf_R: "wf R" and final_I: "\r s. \ T r s; \ C r s \ \ Q r s" - shows "\ P \ whileLoop C B r \\ Q \" + shows "\ P \ whileLoop C B r \\ Q \" proof (clarsimp simp: exs_valid_def Bex_def) fix s assume "P s" @@ -440,17 +455,21 @@ proof (clarsimp simp: exs_valid_def Bex_def) fix x have "T (fst x) (snd x) \ \r' s'. (r', s') \ fst (whileLoop C B (fst x) (snd x)) \ T r' s'" using wf_R - apply induction - apply atomize - apply (case_tac "C (fst x) (snd x)") - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def' split: prod.splits) - apply (cut_tac ?s0.0=b and r=a in iter_I) - apply (clarsimp simp: exs_valid_def) - apply blast - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def' return_def) - done + proof induct + case (less x) + then show ?case + apply atomize + apply (cases "C (fst x) (snd x)") + apply (subst whileLoop_unroll) + apply (clarsimp simp: condition_def bind_def') + apply (cut_tac iter_I[where ?s0.0="snd x" and r="fst x"]) + apply (clarsimp simp: exs_valid_def) + apply blast + apply (subst whileLoop_unroll) + apply (cases x) + apply (clarsimp simp: condition_def bind_def' return_def) + done + qed } thus "\r' s'. (r', s') \ fst (whileLoop C B r s) \ Q r' s'" @@ -475,8 +494,7 @@ proof - apply fact apply (rule cond_true, fact) apply (clarsimp simp: exs_valid_def) - apply (case_tac "fst (B r s) = {}") - apply (metis empty_failD [OF body_empty_fail]) + apply (drule empty_failD3[OF body_empty_fail]) apply (subst (asm) whileLoop_unroll) apply (fastforce simp: condition_def bind_def split_def cond_true) done @@ -496,33 +514,59 @@ lemma empty_fail_whileM[empty_fail_cond, intro!, wp]: unfolding whileM_def by (wpsimp wp: empty_fail_whileLoop empty_fail_bind) -lemma whileLoop_results_bisim: +lemma whileLoop_results_bisim_helper: assumes base: "(a, b) \ whileLoop_results C B" - and vars1: "Q = (case a of Some (r, s) \ Some (rt r, st s) | _ \ None)" - and vars2: "R = (case b of Some (r, s) \ Some (rt r, st s) | _ \ None)" - and inv_init: "case a of Some (r, s) \ I r s | _ \ True" - and inv_step: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ \ I r' s'" - and cond_match: "\r s. I r s \ C r s = C' (rt r) (st s)" - and fail_step: "\r s. \C r s; snd (B r s); I r s\ + and inv_init: "case a of Some (r, s) \ I r s | _ \ True" + and inv_step: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ \ I r' s'" + and cond_match: "\r s. I r s \ C r s = C' (rt r) (st s)" + and fail_step: "\r s. 
\C r s; snd (B r s); I r s\ \ (Some (rt r, st s), None) \ whileLoop_results C' B'" - and refine: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ + and refine: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ \ (rt r', st s') \ fst (B' (rt r) (st s))" - shows "(Q, R) \ whileLoop_results C' B'" - apply (subst vars1) - apply (subst vars2) - apply (insert base inv_init) - apply (induct rule: whileLoop_results.induct) + defines [simp]: "Q x \ (case x of Some (r, s) \ Some (rt r, st s) | _ \ None)" + and [simp]: "R y\ (case y of Some (r, s) \ Some (rt r, st s) | _ \ None)" + shows "(Q a, R b) \ whileLoop_results C' B'" + using base inv_init +proof (induct rule: whileLoop_results.induct) + case (1 r s) + then show ?case apply clarsimp apply (subst (asm) cond_match) apply (clarsimp simp: option.splits) apply (clarsimp simp: option.splits) - apply (clarsimp simp: option.splits) - apply (metis fail_step) - apply (case_tac z) - apply (clarsimp simp: option.splits) - apply (metis cond_match inv_step refine whileLoop_results.intros(3)) - apply (clarsimp simp: option.splits) - apply (metis cond_match inv_step refine whileLoop_results.intros(3)) + done +next + case (2 r s) + then show ?case + apply (clarsimp simp: option.splits) + apply (metis fail_step) + done +next + case (3 r s r' s' z) + then show ?case + apply (cases z) + apply (clarsimp simp: option.splits) + apply (metis cond_match inv_step refine whileLoop_results.intros(3)) + apply (clarsimp simp: option.splits) + apply (metis cond_match inv_step refine whileLoop_results.intros(3)) + done +qed + +lemma whileLoop_results_bisim: + assumes base: "(a, b) \ whileLoop_results C B" + and vars1: "Q = (case a of Some (r, s) \ Some (rt r, st s) | _ \ None)" + and vars2: "R = (case b of Some (r, s) \ Some (rt r, st s) | _ \ None)" + and inv_init: "case a of Some (r, s) \ I r s | _ \ True" + and inv_step: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ \ I r' s'" + and cond_match: "\r s. I r s \ C r s = C' (rt r) (st s)" + and fail_step: "\r s. \C r s; snd (B r s); I r s\ + \ (Some (rt r, st s), None) \ whileLoop_results C' B'" + and refine: "\r s r' s'. \ I r s; C r s; (r', s') \ fst (B r s) \ + \ (rt r', st s') \ fst (B' (rt r) (st s))" + shows "(Q, R) \ whileLoop_results C' B'" + apply (subst vars1, subst vars2) + apply (rule whileLoop_results_bisim_helper) + apply (rule assms; assumption?)+ done lemma whileLoop_terminates_liftE: @@ -562,6 +606,10 @@ lemma snd_X_return[simp]: "snd ((A >>= (\a. return (X a))) s) = snd (A s)" by (clarsimp simp: return_def bind_def split_def) +lemma isr_Inr_projr: + "\ isl a \ (a = Inr b) = (b = projr a)" + by auto + lemma whileLoopE_liftE: "whileLoopE C (\r. liftE (B r)) r = liftE (whileLoop C B r)" apply (rule ext) @@ -569,30 +617,33 @@ lemma whileLoopE_liftE: apply (rule prod_eqI) apply (rule set_eqI, rule iffI) apply clarsimp - apply (clarsimp simp: in_bind whileLoop_def liftE_def) - apply (rule_tac x="b" in exI) - apply (rule_tac x="projr a" in exI) + apply (clarsimp simp: in_liftE whileLoop_def) + \ \The schematic existential is instantiated by 'subst isr_Inr_proj' ... 'rule refl' in two lines\ + apply (rule exI) apply (rule conjI) - apply (erule whileLoop_results_bisim[where rt=projr - and st="\x. x" - and I="\r s. case r of Inr x \ True | _ \ False"], - auto intro: whileLoop_results.intros simp: bind_def return_def lift_def split: sum.splits)[1] - apply (drule whileLoop_results_induct_lemma2[where P="\(r, s). 
case r of Inr x \ True | _ \ False"]) + apply (subst isr_Inr_projr) + prefer 2 + apply (rule refl) + apply (drule whileLoop_results_induct_lemma2[where P="\(r, s). \ isl r"]) + apply (rule refl) apply (rule refl) - apply (rule refl) - apply clarsimp - apply (clarsimp simp: return_def bind_def lift_def split: sum.splits) - apply (clarsimp simp: return_def bind_def lift_def split: sum.splits) - apply (clarsimp simp: in_bind whileLoop_def liftE_def) + apply clarsimp + apply (clarsimp simp: return_def bind_def lift_def liftE_def split: sum.splits) + apply clarsimp + apply (erule whileLoop_results_bisim[where rt=projr + and st="\x. x" + and I="\r s. \ isl r"], + auto intro: whileLoop_results.intros simp: bind_def return_def lift_def liftE_def split: sum.splits)[1] + apply (clarsimp simp: in_liftE whileLoop_def) apply (erule whileLoop_results_bisim[where rt=Inr and st="\x. x" and I="\r s. True"], - auto intro: whileLoop_results.intros intro!: bexI simp: bind_def return_def lift_def - split: sum.splits)[1] + auto intro: whileLoop_results.intros intro!: bexI + simp: bind_def return_def lift_def liftE_def split: sum.splits)[1] apply (rule iffI) apply (clarsimp simp: whileLoop_def liftE_def del: notI) apply (erule disjE) apply (erule whileLoop_results_bisim[where rt=projr and st="\x. x" - and I="\r s. case r of Inr x \ True | _ \ False"], + and I="\r s. \ isl r"], auto intro: whileLoop_results.intros simp: bind_def return_def lift_def split: sum.splits)[1] apply (subst (asm) whileLoop_terminates_liftE [symmetric]) apply (fastforce simp: whileLoop_def liftE_def whileLoop_terminatesE_def) diff --git a/lib/Monads/WhileLoopRulesCompleteness.thy b/lib/Monads/nondet/Nondet_While_Loop_Rules_Completeness.thy similarity index 85% rename from lib/Monads/WhileLoopRulesCompleteness.thy rename to lib/Monads/nondet/Nondet_While_Loop_Rules_Completeness.thy index 7d65847964..3085502cb8 100644 --- a/lib/Monads/WhileLoopRulesCompleteness.thy +++ b/lib/Monads/nondet/Nondet_While_Loop_Rules_Completeness.thy @@ -6,12 +6,12 @@ (* * This is a purely theoretical theory containing proofs - * that the whileLoop rules in "WhileLoopRules" are complete. + * that the whileLoop rules in "Nondet_While_Loop_Rules" are complete. * * You probably don't care about this. *) -theory WhileLoopRulesCompleteness -imports WhileLoopRules +theory Nondet_While_Loop_Rules_Completeness + imports Nondet_While_Loop_Rules begin lemma whileLoop_rule_strong_complete: @@ -34,12 +34,14 @@ lemma valid_whileLoop_complete: = \ P r \ whileLoop C B r \ Q \" apply (rule iffI) apply clarsimp + apply (rename_tac I) apply (rule_tac I=I in valid_whileLoop, auto)[1] apply (rule exI [where x="\r s. \ \s'. s' = s \ whileLoop C B r \ Q \"]) apply (intro conjI) apply (clarsimp simp: valid_def) apply (subst (2) valid_def) apply clarsimp + apply (rename_tac a b) apply (subst (asm) (2) whileLoop_unroll) apply (case_tac "C a b") apply (clarsimp simp: valid_def bind_def' Bex_def condition_def split: if_split_asm) @@ -66,7 +68,7 @@ proof (rule iffI) by auto thus ?RHS - by (rule_tac validNF_whileLoop [where I=I and R=R], auto) + by - (rule validNF_whileLoop[where I=I and R=R], auto) next assume loop: "?RHS" @@ -225,6 +227,10 @@ where | "valid_path C B [x] = (\ C (fst x) (snd x))" | "valid_path C B (x#y#xs) = ((C (fst x) (snd x) \ y \ fst (B (fst x) (snd x)) \ valid_path C B (y#xs)))" +lemma valid_path_not_empty: + "valid_path C B xs \ xs \ []" + by clarsimp + definition "shortest_path_length C B x Q \ (LEAST n. \l. 
valid_path C B l \ hd l = x \ Q (fst (last l)) (snd (last l)) \ length l = n)" @@ -234,8 +240,7 @@ lemma shortest_path_length_same [simp]: apply (rule Least_equality) apply (rule exI [where x="[a]"]) apply clarsimp - apply (case_tac "y = 0") - apply clarsimp + apply (rule Suc_leI) apply clarsimp done @@ -243,9 +248,8 @@ lemma valid_path_simp: "valid_path C B l = (((\r s. l = [(r, s)] \ \ C r s) \ (\r s r' s' xs. l = (r, s)#(r', s')#xs \ C r s \ (r', s') \ fst (B r s) \ valid_path C B ((r', s')#xs))))" - apply (case_tac l) - apply clarsimp - apply (case_tac list) + apply (cases l rule: remdups_adj.cases) + apply clarsimp apply clarsimp apply clarsimp done @@ -260,15 +264,23 @@ proof - assume y: "Q r' s'" have ?thesis using x y - apply (induct rule: in_whileLoop_induct) - apply (rule_tac x="[(r,s)]" in exI) - apply clarsimp - apply clarsimp - apply (case_tac l) - apply clarsimp - apply (rule_tac x="(r, s)#l" in exI) - apply clarsimp - done + proof (induct rule: in_whileLoop_induct) + case (1 r s) + then show ?case + apply - + apply (rule exI[where x="[(r,s)]"]) + apply clarsimp + done + next + case (2 r s r' s' r'' s'') + then show ?case + apply clarsimp + apply (frule valid_path_not_empty) + apply (rename_tac l) + apply (rule_tac x="(r, s)#l" in exI) + apply (clarsimp simp: neq_Nil_conv) + done + qed } thus ?thesis @@ -297,27 +309,33 @@ lemma shortest_path_is_shortest: done lemma valid_path_implies_exs_valid_whileLoop: - "valid_path C B l \ \ \s. s = snd (hd l) \ whileLoop C B (fst (hd l)) \\ \r s. (r, s) = last l \" - apply (induct l) - apply clarsimp - apply clarsimp - apply rule - apply clarsimp - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def' exs_valid_def return_def) - apply clarsimp - apply (subst whileLoop_unroll) - apply (clarsimp simp: condition_def bind_def' exs_valid_def return_def) - apply rule - apply (clarsimp split: prod.splits) - apply (case_tac l) + "valid_path C B l \ \ \s. s = snd (hd l) \ whileLoop C B (fst (hd l)) \\ \r s. (r, s) = last l \" +proof (induct l) + case Nil + then show ?case + by clarsimp +next + case (Cons a l) + then show ?case apply clarsimp - apply (clarsimp split del: if_split) - apply (erule bexI [rotated]) - apply clarsimp - apply clarsimp - apply (case_tac l, auto) - done + apply rule + apply clarsimp + apply (subst whileLoop_unroll) + apply (clarsimp simp: condition_def bind_def' exs_valid_def return_def) + apply clarsimp + apply (subst whileLoop_unroll) + apply (clarsimp simp: condition_def bind_def' exs_valid_def return_def) + apply rule + apply (clarsimp split: prod.splits) + apply (cases l) + apply clarsimp + apply (clarsimp split del: if_split) + apply (erule bexI[rotated]) + apply clarsimp + apply clarsimp + apply (cases l; clarsimp) + done +qed lemma shortest_path_gets_shorter: "\ \ \s'. s' = s \ whileLoop C B r \\ Q \; @@ -327,21 +345,22 @@ lemma shortest_path_gets_shorter: \ \ \s. 
s = s' \ whileLoop C B r' \\ Q \" apply (drule shortest_path_exists) apply clarsimp - apply (case_tac l) - apply clarsimp - apply (case_tac list) + apply (rename_tac l) + apply (case_tac l rule: remdups_adj.cases) + apply clarsimp apply clarsimp - apply (rule_tac x="aa" in bexI) - apply clarify - apply (simp only: valid_path.simps, clarify) - apply (frule shortest_path_is_shortest [where Q=Q]) - apply simp + apply (rule bexI[rotated]) apply clarsimp - apply (drule valid_path_implies_exs_valid_whileLoop) - apply (clarsimp simp: exs_valid_def) - apply (erule bexI [rotated]) - apply (clarsimp split: if_split_asm) + apply assumption + apply clarify + apply (simp only: valid_path.simps, clarify) + apply (frule shortest_path_is_shortest [where Q=Q]) + apply simp apply clarsimp + apply (drule valid_path_implies_exs_valid_whileLoop) + apply (clarsimp simp: exs_valid_def) + apply (erule bexI [rotated]) + apply (clarsimp split: if_split_asm) done lemma exs_valid_whileLoop_complete: diff --git a/lib/Monads/OptionMonad.thy b/lib/Monads/reader_option/Reader_Option_Monad.thy similarity index 99% rename from lib/Monads/OptionMonad.thy rename to lib/Monads/reader_option/Reader_Option_Monad.thy index 9a2a5ed911..83a7c23e96 100644 --- a/lib/Monads/OptionMonad.thy +++ b/lib/Monads/reader_option/Reader_Option_Monad.thy @@ -11,7 +11,7 @@ * Option monad while loop formalisation. *) -theory OptionMonad (* FIXME: this is really a Reader_Option_Monad *) +theory Reader_Option_Monad imports Monad_Lib Fun_Pred_Syntax @@ -241,7 +241,7 @@ abbreviation ogets :: "('s \ 'a) \ ('s, 'a) lookup" wher text \ Integration with exception monad. - Corresponding bindE would be analogous to lifting in NonDetMonad.\ + Corresponding bindE would be analogous to lifting in Nondet_Monad.\ definition "oreturnOk x = K (Some (Inr x))" diff --git a/lib/Monads/OptionMonadND.thy b/lib/Monads/reader_option/Reader_Option_ND.thy similarity index 92% rename from lib/Monads/OptionMonadND.thy rename to lib/Monads/reader_option/Reader_Option_ND.thy index b6259d0b72..fe55c35805 100644 --- a/lib/Monads/OptionMonadND.thy +++ b/lib/Monads/reader_option/Reader_Option_ND.thy @@ -4,16 +4,16 @@ * SPDX-License-Identifier: BSD-2-Clause *) -(* Option monad syntax plus the connection between the option monad and the nondet monad *) +(* Reader option monad syntax plus the connection between the reader option monad and the nondet monad *) -theory OptionMonadND +theory Reader_Option_ND imports - NonDetMonadLemmas - OptionMonad + Nondet_Lemmas + Reader_Option_Monad begin (* FIXME: remove this syntax, standardise on do {..} instead *) -(* Syntax defined here so we can reuse NonDetMonad definitions *) +(* Syntax defined here so we can reuse Nondet_Monad definitions *) syntax "_doO" :: "[dobinds, 'a] => 'a" ("(DO (_);// (_)//OD)" 100) @@ -118,6 +118,15 @@ lemma gets_the_Some: "gets_the (\_. Some x) = return x" by (simp add: gets_the_def assert_opt_Some) +lemma gets_the_oapply2_comp: + "gets_the (oapply2 y x \ f) = gets_map (swp f y) x" + by (clarsimp simp: gets_map_def gets_the_def o_def gets_def) + +lemma gets_obind_bind_eq: + "(gets (f |>> (\x. g x))) = + (gets f >>= (\x. 
case x of None \ return None | Some y \ gets (g y)))" + by (auto simp: simpler_gets_def bind_def obind_def return_def split: option.splits) + lemma fst_assert_opt: "fst (assert_opt opt s) = (if opt = None then {} else {(the opt,s)})" by (clarsimp simp: assert_opt_def fail_def return_def split: option.split) diff --git a/lib/Monads/OptionMonadWP.thy b/lib/Monads/reader_option/Reader_Option_VCG.thy similarity index 99% rename from lib/Monads/OptionMonadWP.thy rename to lib/Monads/reader_option/Reader_Option_VCG.thy index 2d60bc52d2..837b0c8c25 100644 --- a/lib/Monads/OptionMonadWP.thy +++ b/lib/Monads/reader_option/Reader_Option_VCG.thy @@ -5,16 +5,16 @@ *) (* -Hoare reasoning and WP (weakest-precondition) generator rules for the option monad. +Hoare reasoning and WP (weakest-precondition) generator rules for the reader option monad. This list is almost certainly incomplete; add rules here as they are needed. *) -theory OptionMonadWP +theory Reader_Option_VCG imports - OptionMonadND + Reader_Option_ND WP - No_Fail + Nondet_No_Fail begin (* Hoare triples. diff --git a/lib/Monads/trace/Trace_Det.thy b/lib/Monads/trace/Trace_Det.thy new file mode 100644 index 0000000000..4b17f18dbf --- /dev/null +++ b/lib/Monads/trace/Trace_Det.thy @@ -0,0 +1,75 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Det + imports + Trace_Monad +begin + +subsection "Determinism" + +text \ + A monad of type @{text tmonad} is deterministic iff it + returns an empty trace, exactly one state and result and does not fail\ +definition det :: "('a,'s) tmonad \ bool" where + "det f \ \s. \r. f s = {([], Result r)}" + +text \A deterministic @{text tmonad} can be turned into a normal state monad:\ +definition the_run_state :: "('s,'a) tmonad \ 's \ 'a \ 's" where + "the_run_state M \ \s. THE s'. mres (M s) = {s'}" + + +lemma det_set_iff: + "det f \ (r \ mres (f s)) = (mres (f s) = {r})" + unfolding det_def mres_def + by (fastforce elim: allE[where x=s]) + +lemma return_det[iff]: + "det (return x)" + by (simp add: det_def return_def) + +lemma put_det[iff]: + "det (put s)" + by (simp add: det_def put_def) + +lemma get_det[iff]: + "det get" + by (simp add: det_def get_def) + +lemma det_gets[iff]: + "det (gets f)" + by (auto simp add: gets_def det_def get_def return_def bind_def) + +lemma det_UN: + "det f \ (\x \ mres (f s). g x) = (g (THE x. x \ mres (f s)))" + unfolding det_def mres_def + apply simp + apply (drule spec [of _ s]) + apply (clarsimp simp: vimage_def) + done + +lemma bind_detI[simp, intro!]: + "\ det f; \x. 
det (g x) \ \ det (f >>= g)" + unfolding bind_def det_def + apply clarsimp + apply (erule_tac x=s in allE) + apply clarsimp + done + +lemma det_modify[iff]: + "det (modify f)" + by (simp add: modify_def) + +lemma the_run_stateI: + "mres (M s) = {s'} \ the_run_state M s = s'" + by (simp add: the_run_state_def) + +lemma the_run_state_det: + "\ s' \ mres (M s); det M \ \ the_run_state M s = s'" + by (simp add: the_run_stateI det_set_iff) + +end diff --git a/lib/Monads/trace/Trace_Empty_Fail.thy b/lib/Monads/trace/Trace_Empty_Fail.thy new file mode 100644 index 0000000000..472ec988af --- /dev/null +++ b/lib/Monads/trace/Trace_Empty_Fail.thy @@ -0,0 +1,368 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Empty_Fail + imports + Trace_Monad + WPSimp +begin + +section \Monads that are wellformed w.r.t. failure\ + +text \ + Usually, well-formed monads constructed from the primitives in Trace_Monad will have the following + property: if they return an empty set of completed results, there exists a trace corresponding to + a failed result.\ +definition empty_fail :: "('s,'a) tmonad \ bool" where + "empty_fail m \ \s. mres (m s) = {} \ Failed \ snd ` (m s)" + +text \Useful in forcing otherwise unknown executions to have the @{const empty_fail} property.\ +definition mk_ef :: + "((tmid \ 's) list \ ('s, 'a) tmres) set \ ((tmid \ 's) list \ ('s, 'a) tmres) set" where + "mk_ef S \ if mres S = {} then S \ {([], Failed)} else S" + + +subsection \WPC setup\ + +lemma wpc_helper_empty_fail_final: + "empty_fail f \ wpc_helper (P, P', P'') (Q, Q', Q'') (empty_fail f)" + by (clarsimp simp: wpc_helper_def) + +wpc_setup "\m. empty_fail m" wpc_helper_empty_fail_final + + +subsection \@{const empty_fail} intro/dest rules\ + +lemma empty_failI: + "(\s. mres (m s) = {} \ Failed \ snd ` (m s)) \ empty_fail m" + by (simp add: empty_fail_def) + +lemma empty_failD: + "\ empty_fail m; mres (m s) = {} \ \ Failed \ snd ` (m s)" + by (simp add: empty_fail_def) + +lemma empty_fail_not_snd: + "\ Failed \ snd ` (m s); empty_fail m \ \ \v. v \ mres (m s)" + by (fastforce simp: empty_fail_def) + +lemmas empty_failD2 = empty_fail_not_snd[rotated] + +lemma empty_failD3: + "\ empty_fail f; Failed \ snd ` (f s) \ \ mres (f s) \ {}" + by (drule(1) empty_failD2, clarsimp) + +lemma empty_fail_bindD1: + "empty_fail (a >>= b) \ empty_fail a" + unfolding empty_fail_def bind_def + apply clarsimp + apply (drule_tac x=s in spec) + by (force simp: split_def mres_def vimage_def split: tmres.splits) + + +subsection \Wellformed monads\ + +(* + Collect generic empty_fail lemmas here: + - naming convention is empty_fail_NAME. 
+ - add lemmas with assumptions to [empty_fail_cond] set + - add lemmas without assumption to [empty_fail_term] set +*) + +named_theorems empty_fail_term +named_theorems empty_fail_cond + +lemma empty_fail_K_bind[empty_fail_cond]: + "empty_fail f \ empty_fail (K_bind f x)" + by simp + +lemma empty_fail_fun_app[empty_fail_cond]: + "empty_fail (f x) \ empty_fail (f $ x)" + by simp + +(* empty_fail as such does not need context, but empty_fail_select_f does, so we need to build + up context in other rules *) +lemma empty_fail_If[empty_fail_cond]: + "\ P \ empty_fail f; \P \ empty_fail g \ \ empty_fail (if P then f else g)" + by (simp split: if_split) + +lemma empty_fail_If_applied[empty_fail_cond]: + "\ P \ empty_fail (f x); \P \ empty_fail (g x) \ \ empty_fail ((if P then f else g) x)" + by simp + +lemma empty_fail_put[empty_fail_term]: + "empty_fail (put f)" + by (simp add: put_def empty_fail_def mres_def vimage_def) + +lemma empty_fail_modify[empty_fail_term]: + "empty_fail (modify f)" + by (simp add: empty_fail_def simpler_modify_def mres_def vimage_def) + +lemma empty_fail_gets[empty_fail_term]: + "empty_fail (gets f)" + by (simp add: empty_fail_def simpler_gets_def mres_def vimage_def) + +lemma empty_fail_select[empty_fail_cond]: + "S \ {} \ empty_fail (select S)" + by (simp add: empty_fail_def select_def mres_def image_def) + +lemma mres_bind_empty: + "mres ((f >>= g) s) = {} + \ mres (f s) = {} \ (\res\mres (f s). mres (g (fst res) (snd res)) = {})" + apply clarsimp + apply (clarsimp simp: mres_def split_def vimage_def bind_def) + apply (rename_tac rv s' trace) + apply (drule_tac x=rv in spec, drule_tac x=s' in spec) + apply (clarsimp simp: image_def) + apply fastforce + done + +lemma bind_FailedI1: + "Failed \ snd ` f s \ Failed \ snd ` (f >>= g) s" + by (force simp: bind_def vimage_def) + +lemma bind_FailedI2: + "\\res\mres (f s). Failed \ snd ` (g (fst res) (snd res)); mres (f s) \ {}\ + \ Failed \ snd ` (f >>= g) s" + by (force simp: bind_def mres_def image_def split_def) + +lemma empty_fail_bind[empty_fail_cond]: + "\ empty_fail a; \x. empty_fail (b x) \ \ empty_fail (a >>= b)" + unfolding empty_fail_def + apply clarsimp + apply (drule mres_bind_empty) + apply (erule context_disjE) + apply (fastforce intro: bind_FailedI1) + apply (fastforce intro!: bind_FailedI2) + done + +lemma empty_fail_return[empty_fail_term]: + "empty_fail (return x)" + by (simp add: empty_fail_def return_def mres_def vimage_def) + +lemma empty_fail_returnOk[empty_fail_term]: + "empty_fail (returnOk v)" + by (fastforce simp: returnOk_def empty_fail_term) + +lemma empty_fail_throwError[empty_fail_term]: + "empty_fail (throwError v)" + by (fastforce simp: throwError_def empty_fail_term) + +lemma empty_fail_lift[empty_fail_cond]: + "\ \x. empty_fail (f x) \ \ empty_fail (lift f x)" + unfolding lift_def + by (auto simp: empty_fail_term split: sum.split) + +lemma empty_fail_liftE[empty_fail_cond]: + "empty_fail f \ empty_fail (liftE f)" + by (simp add: liftE_def empty_fail_cond empty_fail_term) + +lemma empty_fail_bindE[empty_fail_cond]: + "\ empty_fail f; \rv. empty_fail (g rv) \ \ empty_fail (f >>=E g)" + by (simp add: bindE_def empty_fail_cond) + +lemma empty_fail_mapM[empty_fail_cond]: + assumes m: "\x. x \ set xs \ empty_fail (m x)" + shows "empty_fail (mapM m xs)" +using m +proof (induct xs) + case Nil + thus ?case by (simp add: mapM_def sequence_def empty_fail_term) +next + case Cons + have P: "\m x xs. 
mapM m (x # xs) = (do y \ m x; ys \ (mapM m xs); return (y # ys) od)" + by (simp add: mapM_def sequence_def Let_def) + from Cons + show ?case by (simp add: P m empty_fail_cond empty_fail_term) +qed + +lemma empty_fail_fail[empty_fail_term]: + "empty_fail fail" + by (simp add: fail_def empty_fail_def) + +lemma empty_fail_assert[empty_fail_term]: + "empty_fail (assert P)" + unfolding assert_def by (simp add: empty_fail_term) + +lemma empty_fail_assert_opt[empty_fail_term]: + "empty_fail (assert_opt x)" + by (simp add: assert_opt_def empty_fail_term split: option.splits) + +lemma empty_fail_mk_ef[empty_fail_term]: + "empty_fail (mk_ef o m)" + by (simp add: empty_fail_def mk_ef_def) + +lemma empty_fail_gets_the[empty_fail_term]: + "empty_fail (gets_the f)" + unfolding gets_the_def + by (simp add: empty_fail_cond empty_fail_term) + +lemma empty_fail_gets_map[empty_fail_term]: + "empty_fail (gets_map f p)" + unfolding gets_map_def + by (simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_whenEs[empty_fail_cond]: + "(P \ empty_fail f) \ empty_fail (whenE P f)" + "(\P \ empty_fail f) \ empty_fail (unlessE P f)" + by (auto simp add: whenE_def unlessE_def empty_fail_term) + +lemma empty_fail_assertE[empty_fail_term]: + "empty_fail (assertE P)" + by (simp add: assertE_def empty_fail_term) + +lemma empty_fail_get[empty_fail_term]: + "empty_fail get" + by (simp add: empty_fail_def get_def mres_def vimage_def) + +lemma empty_fail_catch[empty_fail_cond]: + "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catch f g)" + by (simp add: catch_def empty_fail_cond empty_fail_term split: sum.split) + +lemma empty_fail_guard[empty_fail_term]: + "empty_fail (state_assert G)" + by (clarsimp simp: state_assert_def empty_fail_cond empty_fail_term) + +lemma empty_fail_spec[empty_fail_term]: + "empty_fail (state_select F)" + by (clarsimp simp: state_select_def empty_fail_def default_elem_def mres_def image_def) + +lemma empty_fail_when[empty_fail_cond]: + "(P \ empty_fail x) \ empty_fail (when P x)" + unfolding when_def + by (simp add: empty_fail_term) + +lemma empty_fail_unless[empty_fail_cond]: + "(\P \ empty_fail f) \ empty_fail (unless P f)" + unfolding unless_def + by (simp add: empty_fail_cond) + +lemma empty_fail_liftM[empty_fail_cond]: + "empty_fail m \ empty_fail (liftM f m)" + unfolding liftM_def + by (fastforce simp: empty_fail_term empty_fail_cond) + +lemma empty_fail_liftME[empty_fail_cond]: + "empty_fail m \ empty_fail (liftME f m)" + unfolding liftME_def + by (simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_handleE[empty_fail_cond]: + "\ empty_fail L; \r. empty_fail (R r) \ \ empty_fail (L R)" + by (clarsimp simp: handleE_def handleE'_def empty_fail_term empty_fail_cond split: sum.splits) + +lemma empty_fail_handle'[empty_fail_cond]: + "\empty_fail f; \e. empty_fail (handler e)\ \ empty_fail (f handler)" + unfolding handleE'_def + by (fastforce simp: empty_fail_term empty_fail_cond split: sum.splits) + +lemma empty_fail_sequence[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequence ms)" + unfolding sequence_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequence_x[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequence_x ms)" + unfolding sequence_x_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequenceE[empty_fail_cond]: + "(\m. 
m \ set ms \ empty_fail m) \ empty_fail (sequenceE ms)" + unfolding sequenceE_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_sequenceE_x[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail m) \ empty_fail (sequenceE_x ms)" + unfolding sequenceE_x_def + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapM_x[empty_fail_cond]: + "(\m. m \ f ` set ms \ empty_fail m) \ empty_fail (mapM_x f ms)" + unfolding mapM_x_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapME[empty_fail_cond]: + "(\m. m \ f ` set xs \ empty_fail m) \ empty_fail (mapME f xs)" + unfolding mapME_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_mapME_x[empty_fail_cond]: + "(\m'. m' \ f ` set xs \ empty_fail m') \ empty_fail (mapME_x f xs)" + unfolding mapME_x_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_filterM[empty_fail_cond]: + "(\m. m \ set ms \ empty_fail (P m)) \ empty_fail (filterM P ms)" + by (induct ms; simp add: empty_fail_term empty_fail_cond) + +lemma empty_fail_zipWithM_x[empty_fail_cond]: + "(\x y. empty_fail (f x y)) \ empty_fail (zipWithM_x f xs ys)" + unfolding zipWithM_x_def zipWith_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_zipWithM[empty_fail_cond]: + "(\x y. empty_fail (f x y)) \ empty_fail (zipWithM f xs ys)" + unfolding zipWithM_def zipWith_def + by (fastforce intro: empty_fail_term empty_fail_cond) + +lemma empty_fail_maybeM[empty_fail_cond]: + "\x. empty_fail (f x) \ empty_fail (maybeM f t)" + unfolding maybeM_def + by (fastforce intro: empty_fail_term split: option.splits) + +lemma empty_fail_ifM[empty_fail_cond]: + "\ empty_fail P; empty_fail a; empty_fail b \ \ empty_fail (ifM P a b)" + by (simp add: ifM_def empty_fail_cond) + +lemma empty_fail_ifME[empty_fail_cond]: + "\ empty_fail P; empty_fail a; empty_fail b \ \ empty_fail (ifME P a b)" + by (simp add: ifME_def empty_fail_cond) + +lemma empty_fail_whenM[empty_fail_cond]: + "\ empty_fail P; empty_fail f \ \ empty_fail (whenM P f)" + by (simp add: whenM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_andM[empty_fail_cond]: + "\ empty_fail A; empty_fail B \ \ empty_fail (andM A B)" + by (simp add: andM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_orM[empty_fail_cond]: + "\ empty_fail A; empty_fail B \ \ empty_fail (orM A B)" + by (simp add: orM_def empty_fail_term empty_fail_cond) + +lemma empty_fail_notM[empty_fail_cond]: + "empty_fail A \ empty_fail (notM A)" + by (simp add: notM_def empty_fail_term empty_fail_cond) + +(* not everything [simp] by default, because side conditions can slow down simp a lot *) +lemmas empty_fail[wp, intro!] 
= empty_fail_term empty_fail_cond +lemmas [simp] = empty_fail_term + + +subsection \Equations and legacy names\ + +lemma empty_fail_select_eq[simp]: + "empty_fail (select V) = (V \ {})" + by (clarsimp simp: select_def empty_fail_def mres_def image_def) + +lemma empty_fail_liftM_eq[simp]: + "empty_fail (liftM f m) = empty_fail m" + unfolding liftM_def + by (fastforce dest: empty_fail_bindD1) + +lemma empty_fail_liftE_eq[simp]: + "empty_fail (liftE f) = empty_fail f" + by (auto simp: liftE_def empty_fail_bindD1) + +lemma liftME_empty_fail_eq[simp]: + "empty_fail (liftME f m) = empty_fail m" + unfolding liftME_def + by (fastforce dest: empty_fail_bindD1 simp: bindE_def) + +(* legacy name binding *) +lemmas empty_fail_error_bits = empty_fail_returnOk empty_fail_throwError empty_fail_liftE_eq + +end diff --git a/lib/Monads/trace/Trace_In_Monad.thy b/lib/Monads/trace/Trace_In_Monad.thy new file mode 100644 index 0000000000..0078c66c44 --- /dev/null +++ b/lib/Monads/trace/Trace_In_Monad.thy @@ -0,0 +1,153 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_In_Monad + imports Trace_Lemmas +begin + +section \Reasoning directly about states\ + +(* Lemmas about terms of the form "(v, s') \ mres (m s)" *) + +lemma in_throwError: + "((v, s') \ mres (throwError e s)) = (v = Inl e \ s' = s)" + by (simp add: throwError_def return_def mres_def) + +lemma in_returnOk: + "((v', s') \ mres (returnOk v s)) = (v' = Inr v \ s' = s)" + by (simp add: returnOk_def return_def mres_def) + +lemma in_bind: + "((r,s') \ mres ((do x \ f; g x od) s)) = + (\s'' x. (x, s'') \ mres (f s) \ (r, s') \ mres (g x s''))" + by (force simp: bind_def split_def mres_def split: tmres.splits) + +lemma in_bindE_R: + "((Inr r,s') \ mres ((doE x \ f; g x odE) s)) = + (\s'' x. (Inr x, s'') \ mres (f s) \ (Inr r, s') \ mres (g x s''))" + unfolding bindE_def lift_def split_def in_bind + by (force simp: throwError_def return_def mres_def split: sum.splits) + +lemma in_bindE_L: + "((Inl r, s') \ mres ((doE x \ f; g x odE) s)) \ + (\s'' x. (Inr x, s'') \ mres (f s) \ (Inl r, s') \ mres (g x s'')) \ ((Inl r, s') \ mres (f s))" + by (simp add: bindE_def in_bind) + (force simp: return_def throwError_def lift_def split_def mres_def split: sum.splits if_split_asm) + +lemma in_return: + "(r, s') \ mres (return v s) = (r = v \ s' = s)" + by (simp add: return_def mres_def) + +lemma in_liftE: + "((v, s') \ mres (liftE f s)) = (\v'. 
v = Inr v' \ (v', s') \ mres (f s))" + by (force simp: liftE_def in_bind in_return) + +lemma in_whenE: + "((v, s') \ mres (whenE P f s)) = ((P \ (v, s') \ mres (f s)) \ (\P \ v = Inr () \ s' = s))" + by (simp add: whenE_def in_returnOk) + +lemma inl_whenE: + "((Inl x, s') \ mres (whenE P f s)) = (P \ (Inl x, s') \ mres (f s))" + by (auto simp add: in_whenE) + +lemma inr_in_unlessE_throwError[termination_simp]: + "(Inr (), s') \ mres (unlessE P (throwError E) s) = (P \ s'=s)" + by (simp add: unlessE_def returnOk_def throwError_def in_return) + +lemma in_fail: + "r \ mres (fail s) = False" + by (simp add: fail_def mres_def) + +lemma in_assert: + "(r, s') \ mres (assert P s) = (P \ s' = s)" + by (simp add: assert_def return_def fail_def mres_def) + +lemma in_assertE: + "(r, s') \ mres (assertE P s) = (P \ r = Inr () \ s' = s)" + by (simp add: assertE_def returnOk_def return_def fail_def mres_def) + +lemma in_assert_opt: + "(r, s') \ mres (assert_opt v s) = (v = Some r \ s' = s)" + by (auto simp: assert_opt_def in_fail in_return split: option.splits) + +lemma in_get: + "(r, s') \ mres (get s) = (r = s \ s' = s)" + by (simp add: get_def mres_def) + +lemma in_gets: + "(r, s') \ mres (gets f s) = (r = f s \ s' = s)" + by (simp add: simpler_gets_def mres_def) + +lemma in_put: + "(r, s') \ mres (put x s) = (s' = x \ r = ())" + by (simp add: put_def mres_def) + +lemma in_when: + "(v, s') \ mres (when P f s) = ((P \ (v, s') \ mres (f s)) \ (\P \ v = () \ s' = s))" + by (simp add: when_def in_return) + +lemma in_unless: + "(v, s') \ mres (unless P f s) = ((\ P \ (v, s') \ mres (f s)) \ (P \ v = () \ s' = s))" + by (simp add: unless_def in_when) + +lemma in_unlessE: + "(v, s') \ mres (unlessE P f s) = ((\ P \ (v, s') \ mres (f s)) \ (P \ v = Inr () \ s' = s))" + by (simp add: unlessE_def in_returnOk) + +lemma inl_unlessE: + "((Inl x, s') \ mres (unlessE P f s)) = (\ P \ (Inl x, s') \ mres (f s))" + by (auto simp add: in_unlessE) + +lemma in_modify: + "(v, s') \ mres (modify f s) = (s'=f s \ v = ())" + by (auto simp add: modify_def bind_def get_def put_def mres_def) + +lemma gets_the_in_monad: + "((v, s') \ mres (gets_the f s)) = (s' = s \ f s = Some v)" + by (auto simp: gets_the_def in_bind in_gets in_assert_opt split: option.split) + +lemma in_alternative: + "(r,s') \ mres ((f \ g) s) = ((r,s') \ mres (f s) \ (r,s') \ mres (g s))" + by (auto simp add: alternative_def mres_def) + +lemma in_liftM: + "((r, s') \ mres (liftM t f s)) = (\r'. (r', s') \ mres (f s) \ r = t r')" + by (simp add: liftM_def in_return in_bind) + +lemma in_bindE: + "(rv, s') \ mres ((f >>=E (\rv'. g rv')) s) = + ((\ex. rv = Inl ex \ (Inl ex, s') \ mres (f s)) \ + (\rv' s''. (rv, s') \ mres (g rv' s'') \ (Inr rv', s'') \ mres (f s)))" + apply (clarsimp simp: bindE_def in_bind lift_def in_throwError) + apply (safe del: disjCI; strengthen subst[where P="\x. 
x \ mres (f s)", mk_strg I _ E]; + auto simp: in_throwError split: sum.splits) + done + +lemmas in_monad = inl_whenE in_whenE in_liftE in_bind in_bindE_L + in_bindE_R in_returnOk in_throwError in_fail + in_assertE in_assert in_return in_assert_opt + in_get in_gets in_put in_when inl_unlessE in_unlessE + in_unless in_modify gets_the_in_monad + in_alternative in_liftM + +lemma bind_det_exec: + "mres (a s) = {(r,s')} \ mres ((a >>= b) s) = mres (b r s')" + by (simp add: in_bind set_eq_iff) + +lemma in_bind_det_exec: + "mres (a s) = {(r,s')} \ (s'' \ mres ((a >>= b) s)) = (s'' \ mres (b r s'))" + by (cases s'', simp add: in_bind) + +lemma exec_put: + "(put s' >>= m) s = m () s'" + by (simp add: bind_def put_def mres_def split_def) + +lemma bind_execI: + "\ (r'',s'') \ mres (f s); \x \ mres (g r'' s''). P x \ \ \x \ mres ((f >>= g) s). P x" + by (force simp: Bex_def in_bind) + +end diff --git a/lib/Monads/trace/Trace_Lemmas.thy b/lib/Monads/trace/Trace_Lemmas.thy new file mode 100644 index 0000000000..557c2a855d --- /dev/null +++ b/lib/Monads/trace/Trace_Lemmas.thy @@ -0,0 +1,271 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Lemmas + imports Trace_Monad +begin + +section \General Lemmas Regarding the Interference Trace Monad\ + +subsection \Congruence Rules for the Function Package\ + +\ \FIXME: where should this go\ +lemma in_mres: + "(tr, Result (rv, s)) \ S \ (rv, s) \ mres S" + by (fastforce simp: mres_def intro: image_eqI[rotated]) + +lemma bind_apply_cong': + "\f s = f' s'; (\rv st. (rv, st) \ mres (f s) \ g rv st = g' rv st)\ + \ bind f g s = bind f' g' s'" + apply (simp add: bind_def) + apply (rule SUP_cong; simp?) + apply (clarsimp split: tmres.split) + apply (drule spec2, drule mp, erule in_mres) + apply simp + done + +lemmas bind_apply_cong = bind_apply_cong'[rule_format, fundef_cong] + +lemma bind_cong[fundef_cong]: + "\ f = f'; \v s s'. (v, s') \ mres (f' s) \ g v s' = g' v s' \ \ f >>= g = f' >>= g'" + by (auto intro!: bind_apply_cong) + +lemma bindE_cong[fundef_cong]: + "\ M = M' ; \v s s'. (Inr v, s') \ mres (M' s) \ N v s' = N' v s' \ \ bindE M N = bindE M' N'" + by (auto simp: bindE_def lift_def split: sum.splits intro!: bind_cong) + +lemma bindE_apply_cong[fundef_cong]: + "\ f s = f' s'; \rv st. 
(Inr rv, st) \ mres (f' s') \ g rv st = g' rv st \ + \ (f >>=E g) s = (f' >>=E g') s'" + by (auto simp: bindE_def lift_def split: sum.splits intro!: bind_apply_cong) + +lemma K_bind_apply_cong[fundef_cong]: + "\ f st = f' st' \ \ K_bind f arg st = K_bind f' arg' st'" + by simp + +lemma when_apply_cong[fundef_cong]: + "\ C = C'; s = s'; C' \ m s' = m' s' \ \ when C m s = when C' m' s'" + by (simp add: when_def) + +lemma unless_apply_cong[fundef_cong]: + "\ C = C'; s = s'; \ C' \ m s' = m' s' \ \ unless C m s = unless C' m' s'" + by (simp add: when_def unless_def) + +lemma whenE_apply_cong[fundef_cong]: + "\ C = C'; s = s'; C' \ m s' = m' s' \ \ whenE C m s = whenE C' m' s'" + by (simp add: whenE_def) + +lemma unlessE_apply_cong[fundef_cong]: + "\ C = C'; s = s'; \ C' \ m s' = m' s' \ \ unlessE C m s = unlessE C' m' s'" + by (simp add: unlessE_def) + + +subsection \Simplifying Monads\ + +lemma nested_bind[simp]: + "do x <- do y <- f; return (g y) od; h x od = do y <- f; h (g y) od" + by (fastforce simp: bind_def return_def split: tmres.splits) + +lemma bind_dummy_ret_val: + "do y \ a; b od = do a; b od" + by simp + +lemma fail_update[iff]: + "fail (f s) = fail s" + by (simp add: fail_def) + +lemma fail_bind[simp]: + "fail >>= f = fail" + by (simp add: bind_def fail_def) + +lemma fail_bindE[simp]: + "fail >>=E f = fail" + by (simp add: bindE_def bind_def fail_def) + +lemma assert_A_False[simp]: + "assert False = fail" + by (simp add: assert_def) + +lemma assert_A_True[simp]: + "assert True = return ()" + by (simp add: assert_def) + +lemma assert_False[simp]: + "assert False >>= f = fail" + by simp + +lemma assert_True[simp]: + "assert True >>= f = f ()" + by simp + +lemma assertE_False[simp]: + "assertE False >>=E f = fail" + by (simp add: assertE_def) + +lemma assertE_True[simp]: + "assertE True >>=E f = f ()" + by (simp add: assertE_def) + +lemma when_False_bind[simp]: + "when False g >>= f = f ()" + by (rule ext) (simp add: when_def bind_def return_def) + +lemma when_True_bind[simp]: + "when True g >>= f = g >>= f" + by (simp add: when_def bind_def return_def) + +lemma whenE_False_bind[simp]: + "whenE False g >>=E f = f ()" + by (simp add: whenE_def bindE_def returnOk_def lift_def) + +lemma whenE_True_bind[simp]: + "whenE True g >>=E f = g >>=E f" + by (simp add: whenE_def bindE_def returnOk_def lift_def) + +lemma when_True[simp]: + "when True X = X" + by (clarsimp simp: when_def) + +lemma when_False[simp]: + "when False X = return ()" + by (clarsimp simp: when_def) + +lemma unless_False[simp]: + "unless False X = X" + by (clarsimp simp: unless_def) + +lemma unlessE_False[simp]: + "unlessE False f = f" + unfolding unlessE_def by fastforce + +lemma unless_True[simp]: + "unless True X = return ()" + by (clarsimp simp: unless_def) + +lemma unlessE_True[simp]: + "unlessE True f = returnOk ()" + unfolding unlessE_def by fastforce + +lemma unlessE_whenE: + "unlessE P = whenE (\P)" + by (rule ext) (simp add: unlessE_def whenE_def) + +lemma unless_when: + "unless P = when (\P)" + by (rule ext) (simp add: unless_def when_def) + +lemma gets_to_return[simp]: + "gets (\s. 
v) = return v" + by (clarsimp simp: gets_def put_def get_def bind_def return_def) + +lemma assert_opt_Some: + "assert_opt (Some x) = return x" + by (simp add: assert_opt_def) + +lemma assertE_liftE: + "assertE P = liftE (assert P)" + by (simp add: assertE_def assert_def liftE_def returnOk_def) + +lemma liftE_handleE'[simp]: + "(liftE a b) = liftE a" + by (clarsimp simp: liftE_def handleE'_def) + +lemma liftE_handleE[simp]: + "(liftE a b) = liftE a" + unfolding handleE_def by simp + +lemma alternative_bind: + "((a \ b) >>= c) = ((a >>= c) \ (b >>= c))" + by (fastforce simp add: alternative_def bind_def split_def) + +lemma alternative_refl: + "(a \ a) = a" + by (simp add: alternative_def) + +lemma alternative_com: + "(f \ g) = (g \ f)" + by (auto simp: alternative_def) + +lemma liftE_alternative: + "liftE (a \ b) = (liftE a \ liftE b)" + by (simp add: liftE_def alternative_bind) + + +subsection \Lifting and Alternative Basic Definitions\ + +lemma liftE_liftM: + "liftE = liftM Inr" + by (auto simp: liftE_def liftM_def) + +lemma liftME_liftM: + "liftME f = liftM (case_sum Inl (Inr \ f))" + unfolding liftME_def liftM_def bindE_def returnOk_def lift_def + apply (rule ext, rename_tac x) + apply (rule_tac f="bind x" in arg_cong) + apply (fastforce simp: throwError_def split: sum.splits) + done + +lemma liftE_bindE: + "liftE a >>=E b = a >>= b" + by (simp add: liftE_def bindE_def lift_def bind_assoc) + +lemma liftM_id[simp]: + "liftM id = id" + by (auto simp: liftM_def) + +lemma liftM_bind: + "liftM t f >>= g = f >>= (\x. g (t x))" + by (simp add: liftM_def bind_assoc) + +lemma gets_bind_ign: + "gets f >>= (\x. m) = m" + by (simp add: bind_def simpler_gets_def) + +lemma exec_get: + "(get >>= f) x = f x x" + by (simp add: get_def bind_def) + +lemmas get_bind_apply = exec_get (* FIXME lib: eliminate *) + +lemma exec_gets: + "(gets f >>= m) s = m (f s) s" + by (simp add: simpler_gets_def bind_def) + +lemma bind_eqI: + "\ f = f'; \x. g x = g' x \ \ f >>= g = f' >>= g'" + by (auto simp: bind_def split_def) + +lemma condition_split: + "P (condition C a b s) \ (C s \ P (a s)) \ (\C s \ P (b s))" + by (clarsimp simp: condition_def) + +lemma condition_split_asm: + "P (condition C a b s) \ (\(C s \ \ P (a s) \ \C s \ \P (b s)))" + by (clarsimp simp: condition_def) + +lemmas condition_splits = condition_split condition_split_asm + +lemma condition_true_triv[simp]: + "condition (\_. True) A B = A" + by (fastforce split: condition_splits) + +lemma condition_false_triv[simp]: + "condition (\_. False) A B = B" + by (fastforce split: condition_splits) + +lemma condition_true: + "P s \ condition P A B s = A s" + by (clarsimp simp: condition_def) + +lemma condition_false: + "\ P s \ condition P A B s = B s" + by (clarsimp simp: condition_def) + +lemmas arg_cong_bind = arg_cong2[where f=bind] +lemmas arg_cong_bind1 = arg_cong_bind[OF refl ext] + +end diff --git a/lib/Monads/trace/Trace_Monad.thy b/lib/Monads/trace/Trace_Monad.thy new file mode 100644 index 0000000000..f0d093b77e --- /dev/null +++ b/lib/Monads/trace/Trace_Monad.thy @@ -0,0 +1,824 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +chapter "Interference Trace Monad" + +theory Trace_Monad + imports + Fun_Pred_Syntax + Monad_Lib + Strengthen +begin + +text \ + The ``Interference Trace Monad''. This nondeterministic monad + records the state at every interference point, permitting + nondeterministic interference by the environment at these points. 
+ + The trace set initially includes all possible environment behaviours. + Trace steps are tagged as environment or self actions, and can then + be constrained to a smaller set where the environment acts according + to a rely constraint (i.e. rely-guarantee reasoning), or to set the + environment actions to be the self actions of another program (parallel + composition).\ + +section "The Trace Monad" + +text \Trace monad identifier. Me corresponds to the current thread running and Env to the environment.\ +datatype tmid = Me | Env + +text \ + Results associated with traces. Traces may correspond to incomplete, failed, or completed executions.\ +datatype ('s, 'a) tmres = Failed | Incomplete | Result "('a \ 's)" + +abbreviation map_tmres_rv :: "('a \ 'b) \ ('s, 'a) tmres \ ('s, 'b) tmres" where + "map_tmres_rv f \ map_tmres id f" + +text \ + tmonad returns a set of non-deterministic computations, including + a trace as a list of "thread identifier" \ state, and an optional + pair of result and state when the computation did not fail.\ +type_synonym ('s, 'a) tmonad = "'s \ ((tmid \ 's) list \ ('s, 'a) tmres) set" + + +text \ + Print the type @{typ "('s,'a) tmonad"} instead of its unwieldy expansion. + Needs an AST translation in code, because it needs to check that the state variable + @{typ 's} occurs three times. This comparison is not guaranteed to always work as expected + (AST instances might have different decoration), but it does seem to work here.\ +print_ast_translation \ + let + fun tmonad_tr _ [t1, Ast.Appl [Ast.Constant @{type_syntax set}, + Ast.Appl [Ast.Constant @{type_syntax prod}, + Ast.Appl [Ast.Constant @{type_syntax list}, + Ast.Appl [Ast.Constant @{type_syntax prod}, + Ast.Constant @{type_syntax tmid}, t2]], + Ast.Appl [Ast.Constant @{type_syntax tmres}, t3, t4]]]] = + if t1 = t2 andalso t1 = t3 + then Ast.Appl [Ast.Constant @{type_syntax "tmonad"}, t1, t4] + else raise Match + in [(@{type_syntax "fun"}, tmonad_tr)] end\ + + +text \Returns monad results, ignoring failures and traces.\ +definition mres :: "((tmid \ 's) list \ ('s, 'a) tmres) set \ ('a \ 's) set" where + "mres r = Result -` (snd ` r)" + +text \ + The definition of fundamental monad functions @{text return} and + @{text bind}. The monad function @{text "return x"} does not change + the state, does not fail, and returns @{text "x"}.\ +definition return :: "'a \ ('s,'a) tmonad" where + "return a \ \s. ({([], Result (a, s))})" + +text \ + The monad function @{text "bind f g"}, also written @{text "f >>= g"}, + is the execution of @{term f} followed by the execution of @{text g}. + The function @{text g} takes the result value \emph{and} the result + state of @{text f} as parameter. The definition says that the result of + the combined operation is the union of the set of sets that is created + by @{text g} applied to the result sets of @{text f}. The combined + operation may have failed, if @{text f} may have failed or @{text g} may + have failed on any of the results of @{text f}.\ +abbreviation (input) fst_upd :: "('a \ 'c) \ 'a \ 'b \ 'c \ 'b" where + "fst_upd f \ \(a,b). (f a, b)" + +abbreviation (input) snd_upd :: "('b \ 'c) \ 'a \ 'b \ 'a \ 'c" where + "snd_upd f \ \(a,b). (a, f b)" + +definition bind :: + "('s, 'a) tmonad \ ('a \ ('s, 'b) tmonad) \ ('s, 'b) tmonad" (infixl ">>=" 60) + where + "bind f g \ \s. \(xs, r) \ (f s). case r of Failed \ {(xs, Failed)} + | Incomplete \ {(xs, Incomplete)} + | Result (rv, s) \ fst_upd (\ys. 
ys @ xs) ` g rv s" + +text \Sometimes it is convenient to write @{text bind} in reverse order.\ +abbreviation (input) bind_rev :: + "('c \ ('a, 'b) tmonad) \ ('a, 'c) tmonad \ ('a, 'b) tmonad" (infixl "=<<" 60) + where + "g =<< f \ f >>= g" + +text \ + The basic accessor functions of the state monad. @{text get} returns the + current state as result, does not change the state, and does not add to the + trace. @{text "put s"} returns nothing (@{typ unit}), changes the current + state to @{text s}, and does not add to the trace. @{text "put_trace xs"} + returns nothing (@{typ unit}), does not change the state, and adds @{text xs} + to the trace.\ +definition get :: "('s,'s) tmonad" where + "get \ \s. {([], Result (s, s))}" + +definition put :: "'s \ ('s, unit) tmonad" where + "put s \ \_. {([], Result ((), s))}" + +definition put_trace_elem :: "(tmid \ 's) \ ('s, unit) tmonad" where + "put_trace_elem x = (\s. {([], Incomplete), ([x], Result ((), s))})" + +primrec put_trace :: "(tmid \ 's) list \ ('s, unit) tmonad" where + "put_trace [] = return ()" + | "put_trace (x # xs) = (put_trace xs >>= (\_. put_trace_elem x))" + + +subsection "Nondeterminism" + +text \ + Basic nondeterministic functions. @{text "select A"} chooses an element + of the set @{text A} as the result, does not change the state, does not add + to the trace, and does not fail (even if the set is empty). @{text "f \ g"} + executes @{text f} or executes @{text g}. It returns the union of results and + traces of @{text f} and @{text g}.\ +definition select :: "'a set \ ('s, 'a) tmonad" where + "select A \ \s. (Pair [] ` Result ` (A \ {s}))" + +definition alternative :: + "('s,'a) tmonad \ ('s,'a) tmonad \ ('s,'a) tmonad" (infixl "\" 20) + where + "f \ g \ \s. (f s \ g s)" + +text \ + FIXME: The @{text select_f} function was left out here until we figure + out what variant we actually need.\ + +definition + "default_elem dflt A \ if A = {} then {dflt} else A" + +text \ + @{text state_select} takes a relationship between states, and outputs + nondeterministically a state related to the input state. Fails if no such + state exists.\ +definition state_select :: "('s \ 's) set \ ('s, unit) tmonad" where + "state_select r \ + \s. (Pair [] ` default_elem Failed (Result ` (\x. ((), x)) ` {s'. (s, s') \ r}))" + + +subsection "Failure" + +text \ + The monad function that always fails. Returns an empty trace with a Failed result.\ +definition fail :: "('s, 'a) tmonad" where + "fail \ \s. {([], Failed)}" + +text \Assertions: fail if the property @{text P} is not true\ +definition assert :: "bool \ ('a, unit) tmonad" where + "assert P \ if P then return () else fail" + +text \Fail if the value is @{const None}, return result @{text v} for @{term "Some v"}\ +definition assert_opt :: "'a option \ ('b, 'a) tmonad" where + "assert_opt v \ case v of None \ fail | Some v \ return v" + +text \An assertion that also can introspect the current state.\ +definition state_assert :: "('s \ bool) \ ('s, unit) tmonad" where + "state_assert P \ get >>= (\s. assert (P s))" + +subsection "Generic functions on top of the state monad" + +text \Apply a function to the current state and return the result without changing the state.\ +definition gets :: "('s \ 'a) \ ('s, 'a) tmonad" where + "gets f \ get >>= (\s. return (f s))" + +text \Modify the current state using the function passed in.\ +definition modify :: "('s \ 's) \ ('s, unit) tmonad" where + "modify f \ get >>= (\s. put (f s))" + +lemma simpler_gets_def: + "gets f = (\s. 
{([], Result ((f s), s))})" + by (simp add: gets_def return_def bind_def get_def) + +lemma simpler_modify_def: + "modify f = (\s. {([], Result ((),(f s)))})" + by (simp add: modify_def bind_def get_def put_def) + +text \Execute the given monad when the condition is true, return @{text "()"} otherwise.\ +definition "when" :: "bool \ ('s, unit) tmonad \ ('s, unit) tmonad" where + "when P m \ if P then m else return ()" + +text \Execute the given monad unless the condition is true, return @{text "()"} otherwise.\ +definition unless :: "bool \ ('s, unit) tmonad \ ('s, unit) tmonad" where + "unless P m \ when (\P) m" + +text \ + Perform a test on the current state, performing the left monad if + the result is true or the right monad if the result is false.\ +definition condition :: + "('s \ bool) \ ('s, 'r) tmonad \ ('s, 'r) tmonad \ ('s, 'r) tmonad" + where + "condition P L R \ \s. if (P s) then (L s) else (R s)" + +notation (output) + condition ("(condition (_)// (_)// (_))" [1000,1000,1000] 1000) + +text \ + Apply an option valued function to the current state, fail if it returns @{const None}, + return @{text v} if it returns @{term "Some v"}.\ +definition gets_the :: "('s \ 'a option) \ ('s, 'a) tmonad" where + "gets_the f \ gets f >>= assert_opt" + +text \ + Get a map (such as a heap) from the current state and apply an argument to the map. + Fail if the map returns @{const None}, otherwise return the value.\ +definition gets_map :: "('s \ 'a \ 'b option) \ 'a \ ('s, 'b) tmonad" where + "gets_map f p \ gets f >>= (\m. assert_opt (m p))" + + +subsection \The Monad Laws\ + +text \An alternative definition of @{term bind}, sometimes more convenient.\ +lemma bind_def': + "bind f g \ + \s. ((\xs. (xs, Failed)) ` {xs. (xs, Failed) \ f s}) + \ ((\xs. (xs, Incomplete)) ` {xs. (xs, Incomplete) \ f s}) + \ (\(xs, rv, s) \ {(xs, rv, s'). (xs, Result (rv, s')) \ f s}. fst_upd (\ys. ys @ xs) ` g rv s)" + apply (clarsimp simp add: bind_def fun_eq_iff + Un_Union_image split_def + intro!: eq_reflection) + apply (fastforce split: tmres.splits elim!: rev_bexI[where A="f x" for x] + intro: image_eqI[rotated]) + done + +lemma elem_bindE: + "\(tr, res) \ bind f g s; + \res = Incomplete \ res = Failed; (tr, map_tmres undefined undefined res) \ f s\ \ P; + \tr' tr'' x s'. \(tr', Result (x, s')) \ f s; (tr'', res) \ g x s'; tr = tr'' @ tr'\ \ P\ + \ P" + by (auto simp: bind_def') + +text \Each monad satisfies at least the following three laws.\ + +\ \FIXME: is this necessary? If it is, move it\ +declare map_option.identity[simp] + +text \@{term return} is absorbed at the left of a @{term bind}, applying the return value directly:\ +lemma return_bind[simp]: + "(return x >>= f) = f x" + by (simp add: return_def bind_def) + +text \@{term return} is absorbed on the right of a @{term bind}\ +lemma bind_return[simp]: + "(m >>= return) = m" + by (auto simp: fun_eq_iff bind_def return_def + split: tmres.splits) + +text \@{term bind} is associative\ +lemma bind_assoc: + fixes m :: "('a,'b) tmonad" + fixes f :: "'b \ ('a,'c) tmonad" + fixes g :: "'c \ ('a,'d) tmonad" + shows "(m >>= f) >>= g = m >>= (\x. 
f x >>= g)" + apply (unfold bind_def Let_def split_def) + apply (rule ext) + apply clarsimp + apply (rule SUP_cong[OF refl], clarsimp) + apply (split tmres.split; intro conjI impI; clarsimp) + apply (simp add: image_Union) + apply (rule SUP_cong[OF refl], clarsimp) + apply (split tmres.split; intro conjI impI; clarsimp) + apply (simp add: image_image) + done + + +section \Adding Exceptions\ + +text \ + The type @{typ "('s,'a) tmonad"} gives us nondeterminism and + failure. We now extend this monad with exceptional return values + that abort normal execution, but can be handled explicitly. + We use the sum type to indicate exceptions. + + In @{typ "('s, 'e + 'a) tmonad"}, @{typ "'s"} is the state, + @{typ 'e} is an exception, and @{typ 'a} is a normal return value. + + This new type itself forms a monad again. Since type classes in + Isabelle are not powerful enough to express the class of monads, + we provide new names for the @{term return} and @{term bind} functions + in this monad. We call them @{text returnOk} (for normal return values) + and @{text bindE} (for composition). We also define @{text throwError} + to return an exceptional value.\ +definition returnOk :: "'a \ ('s, 'e + 'a) tmonad" where + "returnOk \ return o Inr" + +definition throwError :: "'e \ ('s, 'e + 'a) tmonad" where + "throwError \ return o Inl" + +text \ + Lifting a function over the exception type: if the input is an + exception, return that exception; otherwise continue execution.\ +definition lift :: "('a \ ('s, 'e + 'b) tmonad) \ 'e +'a \ ('s, 'e + 'b) tmonad" where + "lift f v \ case v of Inl e \ throwError e | Inr v' \ f v'" + +text \ + The definition of @{term bind} in the exception monad (new + name @{text bindE}): the same as normal @{term bind}, but + the right-hand side is skipped if the left-hand side + produced an exception.\ +definition bindE :: + "('s, 'e + 'a) tmonad \ ('a \ ('s, 'e + 'b) tmonad) \ ('s, 'e + 'b) tmonad" (infixl ">>=E" 60) + where + "f >>=E g \ f >>= lift g" + +text \ + Lifting a normal nondeterministic monad into the + exception monad is achieved by always returning its + result as normal result and never throwing an exception.\ +definition liftE :: "('s,'a) tmonad \ ('s, 'e+'a) tmonad" where + "liftE f \ f >>= (\r. return (Inr r))" + +text \ + Since the underlying type and @{text return} function changed, + we need new definitions for when and unless:\ +definition whenE :: "bool \ ('s, 'e + unit) tmonad \ ('s, 'e + unit) tmonad" where + "whenE P f \ if P then f else returnOk ()" + +definition unlessE :: "bool \ ('s, 'e + unit) tmonad \ ('s, 'e + unit) tmonad" where + "unlessE P f \ if P then returnOk () else f" + +text \ + Throwing an exception when the parameter is @{term None}, otherwise + returning @{term "v"} for @{term "Some v"}.\ +definition throw_opt :: "'e \ 'a option \ ('s, 'e + 'a) tmonad" where + "throw_opt ex x \ case x of None \ throwError ex | Some v \ returnOk v" + +text \ + Failure in the exception monad is redefined in the same way + as @{const whenE} and @{const unlessE}, with @{term returnOk} + instead of @{term return}.\ +definition assertE :: "bool \ ('a, 'e + unit) tmonad" where + "assertE P \ if P then returnOk () else fail" + + +subsection "Monad Laws for the Exception Monad" + +text \More direct definition of @{const liftE}:\ +lemma liftE_def2: + "liftE f = (\s. 
snd_upd (map_tmres_rv Inr) ` (f s))" + apply (clarsimp simp: fun_eq_iff liftE_def return_def split_def bind_def image_def) + apply (rule set_eqI) + apply (rule iffI) + apply clarsimp + apply (erule rev_bexI[where A="f s" for s]) + apply (clarsimp split: tmres.splits) + apply clarsimp + apply (rule exI) + apply (rule conjI) + apply (erule rev_bexI[where A="f s" for s]) + apply (rule refl) + apply (clarsimp split: tmres.splits) + done + +text \Left @{const returnOk} absorbtion over @{term bindE}:\ +lemma returnOk_bindE[simp]: "(returnOk x >>=E f) = f x" + unfolding bindE_def returnOk_def + by (clarsimp simp: lift_def) + +lemma lift_return[simp]: + "lift (return \ Inr) = return" + by (auto simp: lift_def throwError_def split: sum.splits) + +text \Right @{const returnOk} absorbtion over @{term bindE}:\ +lemma bindE_returnOk[simp]: + "(m >>=E returnOk) = m" + by (simp add: bindE_def returnOk_def) + +text \Associativity of @{const bindE}:\ +lemma bindE_assoc: + "(m >>=E f) >>=E g = m >>=E (\x. f x >>=E g)" + unfolding bindE_def + by (fastforce simp: bind_assoc lift_def throwError_def + split: sum.splits + intro: arg_cong[where f="\x. m >>= x"]) + +text \@{const returnOk} could also be defined via @{const liftE}:\ +lemma returnOk_liftE: + "returnOk x = liftE (return x)" + by (simp add: liftE_def returnOk_def) + +text \Execution after throwing an exception is skipped:\ +lemma throwError_bindE[simp]: + "(throwError E >>=E f) = throwError E" + by (simp add: bindE_def bind_def throwError_def lift_def return_def) + + +section "Syntax" + +text \This section defines traditional Haskell-like do-syntax + for the state monad in Isabelle.\ + +subsection "Syntax for the Interference Trace Monad" + +text \ + We use @{text K_bind} to syntactically indicate the case where the return argument + of the left side of a @{term bind} is ignored\ +definition K_bind :: "'a \ 'b \ 'a" where + K_bind_def[iff]: "K_bind \ \x y. x" + +nonterminal + dobinds and dobind and nobind + +syntax + "_dobind" :: "[pttrn, 'a] => dobind" ("(_ <-/ _)" 10) + "" :: "dobind => dobinds" ("_") + "_nobind" :: "'a => dobind" ("_") + "_dobinds" :: "[dobind, dobinds] => dobinds" ("(_);//(_)") + + "_do" :: "[dobinds, 'a] => 'a" ("(do ((_);//(_))//od)" 100) +syntax (xsymbols) + "_dobind" :: "[pttrn, 'a] => dobind" ("(_ \/ _)" 10) + +translations + "_do (_dobinds b bs) e" == "_do b (_do bs e)" + "_do (_nobind b) e" == "b >>= (CONST K_bind e)" + "do x <- a; e od" == "a >>= (\x. e)" + +text \Syntax examples:\ +lemma "do x \ return 1; + return (2::nat); + return x + od = + return 1 >>= + (\x. return (2::nat) >>= + K_bind (return x))" + by (rule refl) + +lemma "do x \ return 1; + return 2; + return x + od = return 1" + by simp + +subsection "Syntax for the Exception Monad" + +text \ + Since the exception monad is a different type, we need to distinguish it in the syntax + if we want to avoid ambiguous terms. We use @{text doE}/@{text odE} for this, but can + re-use most of the productions from @{text do}/@{text od} above. \ +syntax + "_doE" :: "[dobinds, 'a] => 'a" ("(doE ((_);//(_))//odE)" 100) + +translations + "_doE (_dobinds b bs) e" == "_doE b (_doE bs e)" + "_doE (_nobind b) e" == "b >>=E (CONST K_bind e)" + "doE x <- a; e odE" == "a >>=E (\x. e)" + +text \Syntax examples:\ +lemma "doE x \ returnOk 1; + returnOk (2::nat); + returnOk x + odE = + returnOk 1 >>=E + (\x. 
returnOk (2::nat) >>=E + K_bind (returnOk x))" + by (rule refl) + +lemma "doE x \ returnOk 1; + returnOk 2; + returnOk x + odE = returnOk 1" + by simp + + +subsection "Interference command" + +text \ + Interference commands must be inserted in between actions that can be interfered with by + commands running in other threads.\ + +definition last_st_tr :: "(tmid * 's) list \ 's \ 's" where + "last_st_tr tr s0 \ hd (map snd tr @ [s0])" + +text \Nondeterministically add all possible environment events to the trace.\ +definition env_steps :: "('s,unit) tmonad" where + "env_steps \ + do + s \ get; + \ \Add unfiltered environment events to the trace\ + xs \ select UNIV; + tr \ return (map (Pair Env) xs); + put_trace tr; + \ \Pick the last event of the trace as the final state\ + put (last_st_tr tr s) + od" + +text \Add the current state to the trace, tagged as a self action.\ +definition commit_step :: "('s,unit) tmonad" where + "commit_step \ + do + s \ get; + put_trace [(Me,s)] + od" + +text \ + Record the action taken by the current thread since the last interference point and + then add unfiltered environment events.\ +definition interference :: "('s,unit) tmonad" where + "interference \ + do + commit_step; + env_steps + od" + + +section "Library of additional Monadic Functions and Combinators" + +text \Lifting a normal function into the monad type:\ +definition liftM :: "('a \ 'b) \ ('s,'a) tmonad \ ('s, 'b) tmonad" where + "liftM f m \ do x \ m; return (f x) od" + +text \The same for the exception monad:\ +definition liftME :: "('a \ 'b) \ ('s,'e+'a) tmonad \ ('s,'e+'b) tmonad" where + "liftME f m \ doE x \ m; returnOk (f x) odE" + +text \Execute @{term f} for @{term "Some x"}, otherwise do nothing.\ +definition maybeM :: "('a \ ('s, unit) tmonad) \ 'a option \ ('s, unit) tmonad" where + "maybeM f y \ case y of Some x \ f x | None \ return ()" + +text \Run a sequence of monads from left to right, ignoring return values.\ +definition sequence_x :: "('s, 'a) tmonad list \ ('s, unit) tmonad" where + "sequence_x xs \ foldr (\x y. x >>= (\_. y)) xs (return ())" + +text \ + Map a monadic function over a list by applying it to each element + of the list from left to right, ignoring return values.\ +definition mapM_x :: "('a \ ('s,'b) tmonad) \ 'a list \ ('s, unit) tmonad" where + "mapM_x f xs \ sequence_x (map f xs)" + +text \ + Map a monadic function with two parameters over two lists, + going through both lists simultaneously, left to right, ignoring + return values.\ +definition zipWithM_x :: + "('a \ 'b \ ('s,'c) tmonad) \ 'a list \ 'b list \ ('s, unit) tmonad" + where + "zipWithM_x f xs ys \ sequence_x (zipWith f xs ys)" + +text \ + The same three functions as above, but returning a list of + return values instead of @{text unit}\ +definition sequence :: "('s, 'a) tmonad list \ ('s, 'a list) tmonad" where + "sequence xs \ let mcons = (\p q. p >>= (\x. q >>= (\y. return (x#y)))) + in foldr mcons xs (return [])" + +definition mapM :: "('a \ ('s,'b) tmonad) \ 'a list \ ('s, 'b list) tmonad" where + "mapM f xs \ sequence (map f xs)" + +definition zipWithM :: + "('a \ 'b \ ('s,'c) tmonad) \ 'a list \ 'b list \ ('s, 'c list) tmonad" + where + "zipWithM f xs ys \ sequence (zipWith f xs ys)" + +definition foldM :: "('b \ 'a \ ('s, 'a) tmonad) \ 'b list \ 'a \ ('s, 'a) tmonad" where + "foldM m xs a \ foldr (\p q. q >>= m p) xs (return a) " + +definition foldME :: + "('b \ 'a \ ('s,('e + 'b)) tmonad) \ 'b \ 'a list \ ('s, ('e + 'b)) tmonad" + where + "foldME m a xs \ foldr (\p q. 
q >>=E swp m p) xs (returnOk a)" + +text \ + The sequence and map functions above for the exception monad, with and without + lists of return value\ +definition sequenceE_x :: "('s, 'e+'a) tmonad list \ ('s, 'e+unit) tmonad" where + "sequenceE_x xs \ foldr (\x y. doE _ <- x; y odE) xs (returnOk ())" + +definition mapME_x :: "('a \ ('s,'e+'b) tmonad) \ 'a list \ ('s,'e+unit) tmonad" where + "mapME_x f xs \ sequenceE_x (map f xs)" + +definition sequenceE :: "('s, 'e+'a) tmonad list \ ('s, 'e+'a list) tmonad" where + "sequenceE xs \ let mcons = (\p q. p >>=E (\x. q >>=E (\y. returnOk (x#y)))) + in foldr mcons xs (returnOk [])" + +definition mapME :: "('a \ ('s,'e+'b) tmonad) \ 'a list \ ('s,'e+'b list) tmonad" where + "mapME f xs \ sequenceE (map f xs)" + +text \Filtering a list using a monadic function as predicate:\ +primrec filterM :: "('a \ ('s, bool) tmonad) \ 'a list \ ('s, 'a list) tmonad" where + "filterM P [] = return []" +| "filterM P (x # xs) = do + b <- P x; + ys <- filterM P xs; + return (if b then (x # ys) else ys) + od" + +text \An alternative definition of @{term state_select}\ +lemma state_select_def2: + "state_select r \ (do + s \ get; + S \ return {s'. (s, s') \ r}; + assert (S \ {}); + s' \ select S; + put s' + od)" + apply (clarsimp simp add: state_select_def get_def return_def assert_def fail_def select_def + put_def bind_def fun_eq_iff default_elem_def + intro!: eq_reflection) + apply fastforce + done + + +section "Catching and Handling Exceptions" + +text \ + Turning an exception monad into a normal state monad + by catching and handling any potential exceptions:\ +definition catch :: + "('s, 'e + 'a) tmonad \ ('e \ ('s, 'a) tmonad) \ ('s, 'a) tmonad" (infix "" 10) + where + "f handler \ + do x \ f; + case x of + Inr b \ return b + | Inl e \ handler e + od" + +text \ + Handling exceptions, but staying in the exception monad. + The handler may throw a type of exceptions different from + the left side.\ +definition handleE' :: + "('s, 'e1 + 'a) tmonad \ ('e1 \ ('s, 'e2 + 'a) tmonad) \ ('s, 'e2 + 'a) tmonad" + (infix "" 10) where + "f handler \ + do + v \ f; + case v of + Inl e \ handler e + | Inr v' \ return (Inr v') + od" + +text \ + A type restriction of the above that is used more commonly in + practice: the exception handle (potentially) throws exception + of the same type as the left-hand side.\ +definition handleE :: + "('s, 'x + 'a) tmonad \ ('x \ ('s, 'x + 'a) tmonad) \ ('s, 'x + 'a) tmonad" (infix "" 10) + where + "handleE \ handleE'" + +text \ + Handling exceptions, and additionally providing a continuation + if the left-hand side throws no exception:\ +definition handle_elseE :: + "('s, 'e + 'a) tmonad \ ('e \ ('s, 'ee + 'b) tmonad) \ ('a \ ('s, 'ee + 'b) tmonad) \ + ('s, 'ee + 'b) tmonad" ("_ _ _" 10) + where + "f handler continue \ + do v \ f; + case v of Inl e \ handler e + | Inr v' \ continue v' + od" + +subsection "Loops" + +text \ + Loops are handled using the following inductive predicate; + non-termination is represented using the failure flag of the + monad. 
+FIXME: update comment about non-termination\ + +inductive_set whileLoop_results :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ (('r \ 's) \ ((tmid \ 's) list \ ('s, 'r) tmres)) set" + for C B where + "\ \ C r s \ \ ((r, s), ([], Result (r, s))) \ whileLoop_results C B" + | "\ C r s; (ts, Failed) \ B r s \ \ ((r, s), (ts, Failed)) \ whileLoop_results C B" + | "\ C r s; (ts, Incomplete) \ B r s \ \ ((r, s), (ts, Incomplete)) \ whileLoop_results C B" + | "\ C r s; (ts, Result (r', s')) \ B r s; ((r', s'), (ts',z)) \ whileLoop_results C B \ + \ ((r, s), (ts'@ts,z)) \ whileLoop_results C B" + +\ \FIXME: there are fewer lemmas here than in NonDetMonad and I don't understand this well enough + to know whether this is correct or not.\ +inductive_cases whileLoop_results_cases_result_end: "((x,y), ([],Result r)) \ whileLoop_results C B" +inductive_cases whileLoop_results_cases_fail: "((x,y), (ts, Failed)) \ whileLoop_results C B" +inductive_cases whileLoop_results_cases_incomplete: "((x,y), (ts, Incomplete)) \ whileLoop_results C B" + +inductive_simps whileLoop_results_simps_valid: "((x,y), ([], Result z)) \ whileLoop_results C B" + +inductive whileLoop_terminates :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ 's \ bool" + for C B where + "\ C r s \ whileLoop_terminates C B r s" + | "\ C r s; \(r', s') \ Result -` snd ` (B r s). whileLoop_terminates C B r' s' \ + \ whileLoop_terminates C B r s" + +inductive_cases whileLoop_terminates_cases: "whileLoop_terminates C B r s" +inductive_simps whileLoop_terminates_simps: "whileLoop_terminates C B r s" + +definition whileLoop :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ ('s, 'r) tmonad" + where + "whileLoop C B \ (\r s. {(ts, res). ((r,s), ts,res) \ whileLoop_results C B})" + +notation (output) + whileLoop ("(whileLoop (_)// (_))" [1000, 1000] 1000) + +\ \FIXME: why does this differ to Nondet_Monad?\ +definition whileLoopT :: + "('r \ 's \ bool) \ ('r \ ('s, 'r) tmonad) \ 'r \ ('s, 'r) tmonad" + where + "whileLoopT C B \ (\r s. {(ts, res). ((r,s), ts,res) \ whileLoop_results C B + \ whileLoop_terminates C B r s})" + +notation (output) + whileLoopT ("(whileLoopT (_)// (_))" [1000, 1000] 1000) + +definition whileLoopE :: + "('r \ 's \ bool) \ ('r \ ('s, 'e + 'r) tmonad) \ 'r \ ('s, ('e + 'r)) tmonad" + where + "whileLoopE C body \ + \r. whileLoop (\r s. (case r of Inr v \ C v s | _ \ False)) (lift body) (Inr r)" + +notation (output) + whileLoopE ("(whileLoopE (_)// (_))" [1000, 1000] 1000) + + +section "Combinators that have conditions with side effects" + +definition notM :: "('s, bool) tmonad \ ('s, bool) tmonad" where + "notM m = do c \ m; return (\ c) od" + +definition whileM :: "('s, bool) tmonad \ ('s, 'a) tmonad \ ('s, unit) tmonad" where + "whileM C B \ do + c \ C; + whileLoop (\c s. c) (\_. 
do B; C od) c; + return () + od" + +definition ifM :: "('s, bool) tmonad \ ('s, 'a) tmonad \ ('s, 'a) tmonad \ ('s, 'a) tmonad" where + "ifM test t f = do + c \ test; + if c then t else f + od" + +definition ifME :: + "('a, 'b + bool) tmonad \ ('a, 'b + 'c) tmonad \ ('a, 'b + 'c) tmonad \ ('a, 'b + 'c) tmonad" + where + "ifME test t f = doE + c \ test; + if c then t else f + odE" + +definition whenM :: "('s, bool) tmonad \ ('s, unit) tmonad \ ('s, unit) tmonad" where + "whenM t m = ifM t m (return ())" + +definition orM :: "('s, bool) tmonad \ ('s, bool) tmonad \ ('s, bool) tmonad" where + "orM a b = ifM a (return True) b" + +definition andM :: "('s, bool) tmonad \ ('s, bool) tmonad \ ('s, bool) tmonad" where + "andM a b = ifM a b (return False)" + + +section "Await command" + +text \@{term "Await c f"} blocks the execution until @{term "c"} is true, + and then atomically executes @{term "f"}.\ +definition Await :: "('s \ bool) \ ('s,unit) tmonad" where + "Await c \ + do + s \ get; + \ \Add unfiltered environment events, with the last one + satisfying the `c' state predicate\ + xs \ select {xs. c (last_st_tr (map (Pair Env) xs) s)}; + tr \ return (map (Pair Env) xs); + put_trace tr; + \ \Pick the last event of the trace\ + put (last_st_tr tr s) + od" + + +section "Parallel combinator" + +definition parallel :: "('s,'a) tmonad \ ('s,'a) tmonad \ ('s,'a) tmonad" where + "parallel f g = (\s. {(xs, rv). \f_steps. length f_steps = length xs + \ (map (\(f_step, (id, s)). (if f_step then id else Env, s)) (zip f_steps xs), rv) \ f s + \ (map (\(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps xs), rv) \ g s})" + +abbreviation(input) + "parallel_mrg \ ((\((idn, s), (idn', _)). (if idn = Env then idn' else idn, s)))" + +lemma parallel_def2: + "parallel f g = (\s. {(xs, rv). \ys zs. (ys, rv) \ f s \ (zs, rv) \ g s + \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ys zs + \ xs = map parallel_mrg (zip ys zs)})" + apply (simp add: parallel_def fun_eq_iff set_eq_iff) + apply safe + apply (rule exI, rule conjI, assumption)+ + apply (simp add: list_all2_conv_all_nth list_eq_iff_nth_eq split_def prod_eq_iff) + apply clarsimp + apply (rule_tac x="map (((\) Env) o fst) ys" in exI) + apply (simp add: zip_map1 o_def split_def) + apply (strengthen subst[where P="\xs. (xs, v) \ S" for v S, mk_strg I _ E]) + apply (clarsimp simp: list_all2_conv_all_nth list_eq_iff_nth_eq + split_def prod_eq_iff + split del: if_split cong: if_cong) + apply auto + done + +lemma parallel_def3: + "parallel f g = (\s. (\(ys, zs, rv). (map parallel_mrg (zip ys zs), rv)) + ` {(ys, zs, rv). (ys, rv) \ f s \ (zs, rv) \ g s + \ list_all2 (\y z. (fst y = Env \ fst z = Env) \ snd y = snd z) ys zs})" + by (simp add: parallel_def2, rule ext, auto simp: image_def) + +end diff --git a/lib/Monads/trace/Trace_Monad_Equations.thy b/lib/Monads/trace/Trace_Monad_Equations.thy new file mode 100644 index 0000000000..00156438b8 --- /dev/null +++ b/lib/Monads/trace/Trace_Monad_Equations.thy @@ -0,0 +1,577 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Equations between monads. Conclusions of the form "f = g" where f and g are monads. + Should not be Hoare triples (those go into a different theory). 
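+   For example, "exec_modify" below is such an equation: "(modify f >>= g) s = g () (f s)".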
*) + +theory Trace_Monad_Equations + imports + Trace_No_Fail +begin + +lemmas assertE_assert = assertE_liftE + +lemma assert_def2: + "assert v = assert_opt (if v then Some () else None)" + by (cases v; simp add: assert_def assert_opt_def) + +lemma return_returnOk: + "return (Inr x) = returnOk x" + unfolding returnOk_def by simp + +lemma exec_modify: + "(modify f >>= g) s = g () (f s)" + by (simp add: bind_def simpler_modify_def) + +lemma bind_return_eq: + "(a >>= return) = (b >>= return) \ a = b" + by clarsimp + +lemmas bind_then_eq = arg_cong2[where f=bind, OF _ refl] + +lemma bindE_bind_linearise: + "((f >>=E g) >>= h) = + (f >>= case_sum (h o Inl) (\rv. g rv >>= h))" + apply (simp add: bindE_def bind_assoc) + apply (rule ext, rule bind_apply_cong, rule refl) + apply (simp add: lift_def throwError_def split: sum.split) + done + +lemma throwError_bind: + "(throwError e >>= f) = (f (Inl e))" + by (simp add: throwError_def) + +lemma bind_bindE_assoc: + "((f >>= g) >>=E h) + = f >>= (\rv. g rv >>=E h)" + by (simp add: bindE_def bind_assoc) + +lemma returnOk_bind: + "returnOk v >>= f = (f (Inr v))" + by (simp add: returnOk_def) + +lemma liftE_bind: + "(liftE m >>= m') = (m >>= (\rv. m' (Inr rv)))" + by (simp add: liftE_def) + +lemma catch_throwError: "catch (throwError ft) g = g ft" + by (simp add: catch_def throwError_bind) + +lemma cart_singleton_image: + "S \ {s} = (\v. (v, s)) ` S" + by auto + +lemma liftE_bindE_handle: + "((liftE f >>=E (\x. g x)) h) + = f >>= (\x. g x h)" + by (simp add: liftE_bindE handleE_def handleE'_def + bind_assoc) + +lemma catch_liftE: + "catch (liftE g) h = g" + by (simp add: catch_def liftE_def) + +lemma catch_liftE_bindE: + "catch (liftE g >>=E (\x. f x)) h = g >>= (\x. catch (f x) h)" + by (simp add: liftE_bindE catch_def bind_assoc) + +lemma returnOk_catch_bind: + "catch (returnOk v) h >>= g = g v" + by (simp add: returnOk_liftE catch_liftE) + +lemma liftE_bindE_assoc: + "(liftE f >>=E g) >>= h = f >>= (\x. g x >>= h)" + by (simp add: liftE_bindE bind_assoc) + +lemma unlessE_throw_catch_If: + "catch (unlessE P (throwError e) >>=E f) g + = (if P then catch (f ()) g else g e)" + by (simp add: unlessE_def catch_throwError split: if_split) + +lemma whenE_bindE_throwError_to_if: + "whenE P (throwError e) >>=E (\_. b) = (if P then (throwError e) else b)" + unfolding whenE_def bindE_def + by (auto simp: lift_def throwError_def returnOk_def) + +lemma alternative_liftE_returnOk: + "(liftE m \ returnOk v) = liftE (m \ return v)" + by (simp add: liftE_def alternative_def returnOk_def bind_def return_def) + +lemma gets_the_return: + "(return x = gets_the f) = (\s. f s = Some x)" + apply (subst fun_eq_iff) + apply (simp add: return_def gets_the_def exec_gets + assert_opt_def fail_def + split: option.split) + apply auto + done + +lemma gets_the_returns: + "(return x = gets_the f) = (\s. f s = Some x)" + "(returnOk x = gets_the g) = (\s. g s = Some (Inr x))" + "(throwError x = gets_the h) = (\s. h s = Some (Inl x))" + by (simp_all add: returnOk_def throwError_def + gets_the_return) + +lemma all_rv_choice_fn_eq_pred: + "\ \rv. P rv \ \fn. f rv = g fn \ \ \fn. \rv. P rv \ f rv = g (fn rv)" + apply (rule_tac x="\rv. SOME h. f rv = g h" in exI) + apply (clarsimp split: if_split) + by (meson someI_ex) + +lemma all_rv_choice_fn_eq: + "\ \rv. \fn. f rv = g fn \ + \ \fn. f = (\rv. g (fn rv))" + using all_rv_choice_fn_eq_pred[where f=f and g=g and P=\] + by (simp add: fun_eq_iff) + +lemma gets_the_eq_bind: + "\ \fn. f = gets_the (fn o fn'); \rv. \fn. 
g rv = gets_the (fn o fn') \ + \ \fn. (f >>= g) = gets_the (fn o fn')" + apply (clarsimp dest!: all_rv_choice_fn_eq) + apply (rule_tac x="\s. case (fn s) of None \ None | Some v \ fna v s" in exI) + apply (simp add: gets_the_def bind_assoc exec_gets + assert_opt_def fun_eq_iff + split: option.split) + done + +lemma gets_the_eq_bindE: + "\ \fn. f = gets_the (fn o fn'); \rv. \fn. g rv = gets_the (fn o fn') \ + \ \fn. (f >>=E g) = gets_the (fn o fn')" + apply (simp add: bindE_def) + apply (erule gets_the_eq_bind) + apply (simp add: lift_def gets_the_returns split: sum.split) + apply fastforce + done + +lemma gets_the_fail: + "(fail = gets_the f) = (\s. f s = None)" + by (simp add: gets_the_def exec_gets assert_opt_def + fail_def return_def fun_eq_iff + split: option.split) + +lemma gets_the_asserts: + "(fail = gets_the f) = (\s. f s = None)" + "(assert P = gets_the g) = (\s. g s = (if P then Some () else None))" + "(assertE P = gets_the h) = (\s. h s = (if P then Some (Inr ()) else None))" + by (simp add: assert_def assertE_def gets_the_fail gets_the_returns + split: if_split)+ + +lemma ex_const_function: + "\f. \s. f (f' s) = v" + by force + +lemma gets_the_condsE: + "(\fn. whenE P f = gets_the (fn o fn')) + = (P \ (\fn. f = gets_the (fn o fn')))" + "(\fn. unlessE P g = gets_the (fn o fn')) + = (\ P \ (\fn. g = gets_the (fn o fn')))" + by (simp add: whenE_def unlessE_def gets_the_returns ex_const_function + split: if_split)+ + +lemma let_into_return: + "(let f = x in m f) = (do f \ return x; m f od)" + by simp + +lemma liftME_return: + "liftME f (returnOk v) = returnOk (f v)" + by (simp add: liftME_def) + +lemma fold_bindE_into_list_case: + "(doE v \ f; case_list (g v) (h v) x odE) + = (case_list (doE v \ f; g v odE) (\x xs. doE v \ f; h v x xs odE) x)" + by (simp split: list.split) + +lemma whenE_liftE: + "whenE P (liftE f) = liftE (when P f)" + by (simp add: whenE_def when_def returnOk_liftE) + +lemma whenE_whenE_body: + "whenE P (throwError f) >>=E (\_. whenE Q (throwError f) >>=E r) = whenE (P \ Q) (throwError f) >>=E r" + apply (cases P) + apply (simp add: whenE_def) + apply simp + done + +lemma whenE_whenE_same: + "whenE P (throwError f) >>=E (\_. whenE P (throwError g) >>=E r) = whenE P (throwError f) >>=E r" + apply (cases P) + apply (simp add: whenE_def) + apply simp + done + +lemma maybe_fail_bind_fail: + "unless P fail >>= (\_. fail) = fail" + "when P fail >>= (\_. fail) = fail" + by (clarsimp simp: bind_def fail_def return_def + unless_def when_def)+ + +lemma select_singleton[simp]: + "select {x} = return x" + by (simp add: select_def return_def) + +lemma return_modify: + "return () = modify id" + by (simp add: return_def simpler_modify_def) + +lemma liftE_liftM_liftME: + "liftE (liftM f m) = liftME f (liftE m)" + by (simp add: liftE_liftM liftME_liftM liftM_def) + +lemma bind_return_unit: + "f = (f >>= (\x. return ()))" + by simp + +lemma modify_id_return: + "modify id = return ()" + by (simp add: simpler_modify_def return_def) + +lemma liftE_bind_return_bindE_returnOk: + "liftE (v >>= (\rv. return (f rv))) + = (liftE v >>=E (\rv. returnOk (f rv)))" + by (simp add: liftE_bindE, simp add: liftE_def returnOk_def) + +lemma bind_eqI: + "g = g' \ f >>= g = f >>= g'" by simp + +lemma unlessE_throwError_returnOk: + "(if P then returnOk v else throwError x) + = (unlessE P (throwError x) >>=E (\_. 
returnOk v))" + by (cases P, simp_all add: unlessE_def) + +lemma gets_the_bind_eq: + "\ f s = Some x; g x s = h s \ + \ (gets_the f >>= g) s = h s" + by (simp add: gets_the_def bind_assoc exec_gets assert_opt_def) + +lemma zipWithM_x_modify: + "zipWithM_x (\a b. modify (f a b)) as bs + = modify (\s. foldl (\s (a, b). f a b s) s (zip as bs))" + apply (simp add: zipWithM_x_def zipWith_def sequence_x_def) + apply (induct ("zip as bs")) + apply (simp add: simpler_modify_def return_def) + apply (rule ext) + apply (simp add: simpler_modify_def bind_def split_def) + done + +lemma assert2: + "(do v1 \ assert P; v2 \ assert Q; c od) + = (do v \ assert (P \ Q); c od)" + by (simp add: assert_def split: if_split) + +lemma assert_opt_def2: + "assert_opt v = (do assert (v \ None); return (the v) od)" + by (simp add: assert_opt_def split: option.split) + +lemma gets_assert: + "(do v1 \ assert v; v2 \ gets f; c v1 v2 od) + = (do v2 \ gets f; v1 \ assert v; c v1 v2 od)" + by (simp add: simpler_gets_def return_def assert_def fail_def bind_def + split: if_split) + +lemma modify_assert: + "(do v2 \ modify f; v1 \ assert v; c v1 od) + = (do v1 \ assert v; v2 \ modify f; c v1 od)" + by (simp add: simpler_modify_def return_def assert_def fail_def bind_def + split: if_split) + +lemma gets_fold_into_modify: + "do x \ gets f; modify (g x) od = modify (\s. g (f s) s)" + "do x \ gets f; _ \ modify (g x); h od + = do modify (\s. g (f s) s); h od" + by (simp_all add: fun_eq_iff modify_def bind_assoc exec_gets + exec_get exec_put) + +lemma gets_return_gets_eq: + "gets f >>= (\g. return (h g)) = gets (\s. h (f s))" + by (simp add: simpler_gets_def bind_def return_def) + +lemma gets_prod_comp: + "gets (case x of (a, b) \ f a b) = (case x of (a, b) \ gets (f a b))" + by (auto simp: split_def) + +lemma bind_assoc2: + "(do x \ a; _ \ b; c x od) = (do x \ (do x' \ a; _ \ b; return x' od); c x od)" + by (simp add: bind_assoc) + +lemma bind_assoc_return_reverse: + "do x \ f; + _ \ g x; + h x + od = + do x \ do x \ f; + _ \ g x; + return x + od; + h x + od" + by (simp only: bind_assoc return_bind) + +lemma if_bind: + "(if P then (a >>= (\_. b)) else return ()) = + (if P then a else return ()) >>= (\_. if P then b else return ())" + by (cases P; simp) + +lemma bind_liftE_distrib: "(liftE (A >>= (\x. B x))) = (liftE A >>=E (\x. liftE (\s. B x s)))" + by (clarsimp simp: liftE_def bindE_def lift_def bind_assoc) + +lemma if_catch_distrib: + "((if P then f else g) h) = (if P then f h else g h)" + by (simp split: if_split) + +lemma will_throw_and_catch: + "f = throwError e \ (f (\_. g)) = g" + by (simp add: catch_def throwError_def) + +lemma catch_is_if: + "(doE x <- f; g x odE h) = + do + rv <- f; + if sum.isl rv then h (projl rv) else g (projr rv) h + od" + apply (simp add: bindE_def catch_def bind_assoc cong: if_cong) + apply (rule bind_cong, rule refl) + apply (clarsimp simp: lift_def throwError_def split: sum.splits) + done + +lemma liftE_K_bind: "liftE ((K_bind (\s. A s)) x) = K_bind (liftE (\s. 
A s)) x" + by clarsimp + +lemma monad_eq_split_tail: + "\f = g; a s = b s\ \ (a >>= f) s = ((b >>= g) s)" + by (simp add:bind_def) + +lemma assert_opt_If: + "assert_opt v = If (v = None) fail (return (the v))" + by (simp add: assert_opt_def split: option.split) + +lemma if_to_top_of_bind: + "(bind (If P x y) z) = If P (bind x z) (bind y z)" + by (simp split: if_split) + +lemma if_to_top_of_bindE: + "(bindE (If P x y) z) = If P (bindE x z) (bindE y z)" + by (simp split: if_split) + +lemma modify_modify: + "(do x \ modify f; modify (g x) od) = modify (g () o f)" + by (simp add: bind_def simpler_modify_def) + +lemmas modify_modify_bind = + arg_cong2[where f=bind, OF modify_modify refl, simplified bind_assoc] + +lemma put_then_get[unfolded K_bind_def]: + "do put s; get od = do put s; return s od" + by (simp add: put_def bind_def get_def return_def) + +lemmas put_then_get_then = + put_then_get[THEN bind_then_eq, simplified bind_assoc return_bind] + +lemma select_empty_bind[simp]: + "select {} >>= f = select {}" + by (simp add: select_def bind_def) + + +subsection \Alternative env_steps with repeat\ + +lemma mapM_Cons: + "mapM f (x # xs) = do + y \ f x; + ys \ mapM f xs; + return (y # ys) + od" + and mapM_Nil: + "mapM f [] = return []" + by (simp_all add: mapM_def sequence_def) + +lemma mapM_x_Cons: + "mapM_x f (x # xs) = do + y \ f x; + mapM_x f xs + od" + and mapM_x_Nil: + "mapM_x f [] = return ()" + by (simp_all add: mapM_x_def sequence_x_def) + +lemma mapM_append: + "mapM f (xs @ ys) = (do + fxs \ mapM f xs; + fys \ mapM f ys; + return (fxs @ fys) + od)" + by (induct xs, simp_all add: mapM_Cons mapM_Nil bind_assoc) + +lemma mapM_x_append: + "mapM_x f (xs @ ys) = (do + x \ mapM_x f xs; + mapM_x f ys + od)" + by (induct xs, simp_all add: mapM_x_Cons mapM_x_Nil bind_assoc) + +lemma mapM_map: + "mapM f (map g xs) = mapM (f o g) xs" + by (induct xs; simp add: mapM_Nil mapM_Cons) + +lemma mapM_x_map: + "mapM_x f (map g xs) = mapM_x (f o g) xs" + by (induct xs; simp add: mapM_x_Nil mapM_x_Cons) + +primrec repeat_n :: "nat \ ('s, unit) tmonad \ ('s, unit) tmonad" where + "repeat_n 0 f = return ()" + | "repeat_n (Suc n) f = do f; repeat_n n f od" + +lemma repeat_n_mapM_x: + "repeat_n n f = mapM_x (\_. f) (replicate n ())" + by (induct n, simp_all add: mapM_x_Cons mapM_x_Nil) + +definition repeat :: "('s, unit) tmonad \ ('s, unit) tmonad" where + "repeat f = do n \ select UNIV; repeat_n n f od" + +definition env_step :: "('s,unit) tmonad" where + "env_step = + do + s' \ select UNIV; + put_trace_elem (Env, s'); + put s' + od" + +abbreviation + "env_n_steps n \ repeat_n n env_step" + +lemma elem_select_bind: + "(tr, res) \ (do x \ select S; f x od) s + = (\x \ S. (tr, res) \ f x s)" + by (simp add: bind_def select_def) + +lemma select_bind_UN: + "(do x \ select S; f x od) = (\s. \x \ S. f x s)" + by (rule ext, auto simp: elem_select_bind) + +lemma select_early: + "S \ {} + \ do x \ f; y \ select S; g x y od + = do y \ select S; x \ f; g x y od" + apply (simp add: bind_def select_def Sigma_def) + apply (rule ext) + apply (fastforce elim: rev_bexI image_eqI[rotated] split: tmres.split_asm) + done + +lemma repeat_n_choice: + "S \ {} + \ repeat_n n (do x \ select S; f x od) + = (do xs \ select {xs. set xs \ S \ length xs = n}; mapM_x f xs od)" + apply (induct n; simp?) 
+ apply (simp add: select_def bind_def mapM_x_Nil cong: conj_cong) + apply (simp add: select_early bind_assoc) + apply (subst select_early) + apply simp + apply (auto intro: exI[where x="replicate n xs" for n xs])[1] + apply (simp(no_asm) add: fun_eq_iff set_eq_iff elem_select_bind) + apply (simp only: conj_comms[where Q="length xs = n" for xs n]) + apply (simp only: ex_simps[symmetric] conj_assoc length_Suc_conv, simp) + apply (auto simp: mapM_x_Cons) + done + +lemma repeat_choice: + "S \ {} + \ repeat (do x \ select S; f x od) + = (do xs \ select {xs. set xs \ S}; mapM_x f xs od)" + apply (simp add: repeat_def repeat_n_choice) + apply (simp(no_asm) add: fun_eq_iff set_eq_iff elem_select_bind) + done + +lemma put_trace_append: + "put_trace (xs @ ys) = do put_trace ys; put_trace xs od" + by (induct xs; simp add: bind_assoc) + +lemma put_trace_elem_put_comm: + "do y \ put_trace_elem x; put s od + = do y \ put s; put_trace_elem x od" + by (simp add: put_def put_trace_elem_def bind_def insert_commute) + +lemma put_trace_put_comm: + "do y \ put_trace xs; put s od + = do y \ put s; put_trace xs od" + apply (rule sym; induct xs; simp) + apply (simp add: bind_assoc put_trace_elem_put_comm) + apply (simp add: bind_assoc[symmetric]) + done + +lemma mapM_x_comm: + "(\x \ set xs. do y \ g; f x od = do y \ f x; g od) + \ do y \ g; mapM_x f xs od = do y \ mapM_x f xs; g od" + apply (induct xs; simp add: mapM_x_Nil mapM_x_Cons) + apply (simp add: bind_assoc[symmetric], simp add: bind_assoc) + done + +lemma mapM_x_split: + "(\x \ set xs. \y \ set xs. do z \ g y; f x od = do (z :: unit) \ f x; g y od) + \ mapM_x (\x. do z \ f x; g x od) xs = do y \ mapM_x f xs; mapM_x g xs od" + apply (induct xs; simp add: mapM_x_Nil mapM_x_Cons bind_assoc) + apply (subst bind_assoc[symmetric], subst mapM_x_comm[where f=f and g="g x" for x]) + apply simp + apply (simp add: bind_assoc) + done + +lemma mapM_x_put: + "mapM_x put xs = unless (xs = []) (put (last xs))" + apply (induct xs rule: rev_induct) + apply (simp add: mapM_x_Nil unless_def when_def) + apply (simp add: mapM_x_append mapM_x_Cons mapM_x_Nil) + apply (simp add: bind_def unless_def when_def put_def return_def) + done + +lemma put_trace_mapM_x: + "put_trace xs = mapM_x put_trace_elem (rev xs)" + by (induct xs; simp add: mapM_x_Nil mapM_x_append mapM_x_Cons) + +lemma rev_surj: + "surj rev" + by (rule_tac f=rev in surjI, simp) + +lemma select_image: + "select (f ` S) = do x \ select S; return (f x) od" + by (auto simp add: bind_def select_def return_def Sigma_def) + +lemma env_steps_repeat: + "env_steps = repeat env_step" + apply (simp add: env_step_def repeat_choice env_steps_def + select_early) + apply (simp add: put_trace_elem_put_comm) + apply (simp add: mapM_x_split put_trace_elem_put_comm put_trace_put_comm + mapM_x_put) + apply (simp add: put_trace_mapM_x rev_map mapM_x_map o_def) + apply (subst rev_surj[symmetric], simp add: select_image bind_assoc) + apply (rule arg_cong2[where f=bind, OF refl ext]) + apply (simp add: bind_def get_def put_def unless_def when_def return_def) + apply (simp add: last_st_tr_def hd_map hd_rev) + done + +lemma repeat_n_plus: + "repeat_n (n + m) f = do repeat_n n f; repeat_n m f od" + by (induct n; simp add: bind_assoc) + +lemma repeat_eq_twice[simp]: + "(do x \ repeat f; repeat f od) = repeat f" + apply (simp add: repeat_def select_early) + apply (simp add: bind_assoc repeat_n_plus[symmetric, simplified]) + apply (simp add: bind_def select_def Sigma_def) + apply (rule ext, fastforce intro: exI[where x=0]) + done + +lemmas 
repeat_eq_twice_then[simp] = + repeat_eq_twice[THEN bind_then_eq, simplified bind_assoc] + +lemmas env_steps_eq_twice[simp] = + repeat_eq_twice[where f=env_step, folded env_steps_repeat] +lemmas env_steps_eq_twice_then[simp] = + env_steps_eq_twice[THEN bind_then_eq, simplified bind_assoc] + +lemmas mapM_collapse_append = + mapM_append[symmetric, THEN bind_then_eq, simplified bind_assoc, simplified] + +end \ No newline at end of file diff --git a/lib/Monads/More_NonDetMonadVCG.thy b/lib/Monads/trace/Trace_More_VCG.thy similarity index 76% rename from lib/Monads/More_NonDetMonadVCG.thy rename to lib/Monads/trace/Trace_More_VCG.thy index ea9c9caf61..927bb04272 100644 --- a/lib/Monads/More_NonDetMonadVCG.thy +++ b/lib/Monads/trace/Trace_More_VCG.thy @@ -5,17 +5,18 @@ * SPDX-License-Identifier: BSD-2-Clause *) -(* Partial correctness Hoare logic lemmas over the nondet monad. Hoare triples, lifting lemmas, etc. +(* Partial correctness Hoare logic lemmas over the trace monad. Hoare triples, lifting lemmas, etc. If it doesn't contain a Hoare triple it likely doesn't belong in here. *) -theory More_NonDetMonadVCG +theory Trace_More_VCG imports - NonDetMonadVCG + Trace_VCG + Trace_In_Monad begin lemma hoare_take_disjunct: "\P\ f \\rv s. P' rv s \ (False \ P'' rv s)\ - \ \P\ f \P''\" + \ \P\ f \P''\" by (erule hoare_strengthen_post, simp) lemma hoare_post_add: @@ -36,14 +37,6 @@ lemma hoare_pre_addE: apply (subst iff_conv_conj_imp) by(intro conjI impI; rule hoare_weaken_preE, assumption, clarsimp) -lemma hoare_disjI1: - "\R\ f \P\ \ \R\ f \\r s. P r s \ Q r s\" - by (erule hoare_post_imp [rotated]) simp - -lemma hoare_disjI2: - "\R\ f \Q\ \ \R\ f \\r s. P r s \ Q r s \" - by (rule hoare_post_imp [OF _ hoare_disjI1, where P1=Q], auto) - lemma hoare_name_pre_state: "\ \s. P s \ \(=) s\ f \Q\ \ \ \P\ f \Q\" by (clarsimp simp: valid_def) @@ -52,18 +45,6 @@ lemma hoare_name_pre_stateE: "\\s. P s \ \(=) s\ f \Q\, \E\\ \ \P\ f \Q\, \E\" by (clarsimp simp: validE_def2) -lemma valid_prove_more: (* FIXME: duplicate *) - "\P\ f \\rv s. Q rv s \ Q' rv s\ \ \P\ f \Q'\" - by (rule hoare_post_add) - -lemma hoare_vcg_if_lift: - "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ - \R\ f \\rv s. if P then X rv s else Y rv s\" - - "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ - \R\ f \\rv. if P then X rv else Y rv\" - by (auto simp: valid_def split_def) - lemma hoare_vcg_if_lift_strong: "\ \P'\ f \P\; \\s. \ P' s\ f \\rv s. \ P rv s\; \Q'\ f \Q\; \R'\ f \R\ \ \ \\s. if P' s then Q' s else R' s\ f \\rv s. if P rv s then Q rv s else R rv s\" @@ -105,12 +86,12 @@ lemmas hoare_lift_Pf_pre_conj' = hoare_lift_Pf2_pre_conj[where Q=P and P=P for P lemma hoare_if_r_and: "\P\ f \\r. if R r then Q r else Q' r\ - = \P\ f \\r s. (R r \ Q r s) \ (\R r \ Q' r s)\" + = \P\ f \\r s. (R r \ Q r s) \ (\R r \ Q' r s)\" by (fastforce simp: valid_def) lemma hoare_convert_imp: - "\ \\s. \ P s\ f \\rv s. \ Q s\; \R\ f \S\ \ \ - \\s. P s \ R s\ f \\rv s. Q s \ S rv s\" + "\ \\s. \ P s\ f \\rv s. \ Q s\; \R\ f \S\ \ + \ \\s. P s \ R s\ f \\rv s. Q s \ S rv s\" apply (simp only: imp_conv_disj) apply (erule(1) hoare_vcg_disj_lift) done @@ -123,8 +104,8 @@ lemma hoare_vcg_ex_lift_R: done lemma hoare_case_option_wpR: - "\\P\ f None \Q\,-; \x. \P' x\ f (Some x) \Q' x\,-\ \ - \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\,-" + "\\P\ f None \Q\,-; \x. \P' x\ f (Some x) \Q' x\,-\ + \ \case_option P P' v\ f v \\rv. 
case v of None \ Q rv | Some x \ Q' x rv\,-" by (cases v) auto lemma hoare_vcg_conj_liftE_R: @@ -137,12 +118,6 @@ lemma K_valid[wp]: "\K P\ f \\_. K P\" by (simp add: valid_def) -lemma hoare_vcg_exI: - "\P\ f \Q x\ \ \P\ f \\rv s. \x. Q x rv s\" - apply (simp add: valid_def split_def) - apply blast - done - lemma hoare_exI_tuple: "\P\ f \\(rv,rv') s. Q x rv rv' s\ \ \P\ f \\(rv,rv') s. \x. Q x rv rv' s\" by (fastforce simp: valid_def) @@ -190,7 +165,7 @@ lemma gets_inv [simp]: lemma select_inv: "\ P \ select S \ \r. P \" - by (simp add: select_def valid_def) + by wpsimp lemmas return_inv = hoare_return_drop_var @@ -231,9 +206,10 @@ lemma list_cases_weak_wp: assumes "\x xs. \P_B\ b x xs \Q\" shows "\P_A and P_B\ - case ts of - [] \ a - | x#xs \ b x xs \Q\" + case ts of + [] \ a + | x#xs \ b x xs + \Q\" apply (cases ts) apply (simp, rule hoare_weaken_pre, rule assms, simp)+ done @@ -242,27 +218,20 @@ lemmas hoare_FalseE_R = hoare_FalseE[where E="\\", folded validE_R_def lemma hoare_vcg_if_lift2: "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\ \ - \R\ f \\rv s. if P rv s then X rv s else Y rv s\" + \R\ f \\rv s. if P rv s then X rv s else Y rv s\" "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\ \ - \R\ f \\rv. if P' rv then X rv else Y rv\" + \R\ f \\rv. if P' rv then X rv else Y rv\" by (auto simp: valid_def split_def) lemma hoare_vcg_if_lift_ER: (* Required because of lack of rv in lifting rules *) "\R\ f \\rv s. (P rv s \ X rv s) \ (\ P rv s \ Y rv s)\, - \ - \R\ f \\rv s. if P rv s then X rv s else Y rv s\, -" + \R\ f \\rv s. if P rv s then X rv s else Y rv s\, -" "\R\ f \\rv s. (P' rv \ X rv s) \ (\ P' rv \ Y rv s)\, - \ - \R\ f \\rv. if P' rv then X rv else Y rv\, -" + \R\ f \\rv. if P' rv then X rv else Y rv\, -" by (auto simp: valid_def validE_R_def validE_def split_def) -lemma hoare_vcg_imp_liftE: - "\ \P'\ f \\rv s. \ P rv s\, \A\; \Q'\ f \Q\, \A\ \ - \ \\s. \ P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, \A\" - apply (simp only: imp_conv_disj) - apply (clarsimp simp: validE_def valid_def split_def sum.case_eq_if) - done - lemma hoare_list_all_lift: "(\r. r \ set xs \ \Q r\ f \\rv. Q r\) \ \\s. list_all (\r. Q r s) xs\ f \\rv s. list_all (\r. Q r s) xs\" @@ -311,7 +280,7 @@ lemma set_shrink_proof: assumes x: "\x. \\s. x \ S s\ f \\rv s. x \ S s\" shows "\\s. \S'. S' \ S s \ P S'\ - f + f \\rv s. P (S s)\" apply (clarsimp simp: valid_def) apply (drule spec, erule mp) @@ -339,7 +308,7 @@ lemma shrinks_proof: by (metis use_valid w z) lemma use_validE_R: - "\ (Inr r, s') \ fst (f s); \P\ f \Q\,-; P s \ \ Q r s'" + "\ (Inr r, s') \ mres (f s); \P\ f \Q\,-; P s \ \ Q r s'" unfolding validE_R_def validE_def by (frule(2) use_valid, simp) @@ -353,8 +322,6 @@ lemma valid_preservation_ex: apply simp done -lemmas valid_prove_more' = valid_prove_more[where Q="\rv. Q" for Q] - lemma whenE_inv: assumes a: "\P\ f \\_. P\" shows "\P\ whenE Q f \\_. P\" @@ -366,9 +333,12 @@ lemma whenE_throwError_wp: lemma ifM_throwError_returnOk: "\Q\ test \\c s. \ c \ P s\ \ \Q\ ifM test (throwError e) (returnOk ()) \\_. P\, -" - by (fastforce simp: ifM_def returnOk_def throwError_def return_def validE_R_def valid_def - validE_def bind_def - split: if_splits) + unfolding ifM_def + apply (fold liftE_bindE) + apply wpsimp + apply assumption + apply simp + done lemma ifME_liftE: "ifME (liftE test) a b = ifM test a b" @@ -376,14 +346,10 @@ lemma ifME_liftE: lemma gets_the_inv: "\P\ gets_the V \\rv. P\" by wpsimp -lemma select_f_inv: - "\P\ select_f S \\_. 
P\" - by (simp add: select_f_def valid_def) - lemmas state_unchanged = in_inv_by_hoareD [THEN sym] lemma validI: - assumes rl: "\s r s'. \ P s; (r, s') \ fst (S s) \ \ Q r s'" + assumes rl: "\s r s'. \ P s; (r, s') \ mres (S s) \ \ Q r s'" shows "\P\ S \Q\" unfolding valid_def using rl by safe @@ -394,23 +360,13 @@ lemma opt_return_pres_lift: lemma valid_return_unit: "\P\ f >>= (\_. return ()) \\r. Q\ \ \P\ f \\r. Q\" - apply (rule validI) - apply (fastforce simp: valid_def return_def bind_def split_def) - done + by (auto simp: valid_def in_bind in_return Ball_def) -lemma static_imp_wp: - "\Q\ m \R\ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\" - by (cases P, simp_all add: valid_def) - -lemma static_imp_wpE : - "\Q\ m \R\,- \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" - by (cases P, simp_all) - -lemma static_imp_conj_wp: +lemma hoare_weak_lift_imp_conj: "\ \Q\ m \Q'\; \R\ m \R'\ \ - \ \\s. (P \ Q s) \ R s\ m \\rv s. (P \ Q' rv s) \ R' rv s\" + \ \\s. (P \ Q s) \ R s\ m \\rv s. (P \ Q' rv s) \ R' rv s\" apply (rule hoare_vcg_conj_lift) - apply (rule static_imp_wp) + apply (rule hoare_weak_lift_imp) apply assumption+ done @@ -423,23 +379,13 @@ lemma hoare_validE_R_conj: "\\P\ f \Q\, -; \P\ f \R\, -\ \ \P\ f \Q and R\, -" by (simp add: valid_def validE_def validE_R_def Let_def split_def split: sum.splits) -lemma hoare_vcg_const_imp_lift_R: - "\P\ f \Q\,- \ \\s. F \ P s\ f \\rv s. F \ Q rv s\,-" - by (cases F, simp_all) - -lemma hoare_vcg_disj_lift_R: - assumes x: "\P\ f \Q\,-" - assumes y: "\P'\ f \Q'\,-" - shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" - using assms - by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) - lemmas throwError_validE_R = throwError_wp [where E="\\", folded validE_R_def] lemma valid_case_option_post_wp: - "(\x. \P x\ f \\rv. Q x\) \ - \\s. case ep of Some x \ P x s | _ \ True\ - f \\rv s. case ep of Some x \ Q x s | _ \ True\" + "\\x. \P x\ f \\rv. Q x\\ \ + \\s. case ep of Some x \ P x s | _ \ True\ + f + \\rv s. case ep of Some x \ Q x s | _ \ True\" by (cases ep, simp_all add: hoare_vcg_prop) lemma P_bool_lift: @@ -462,7 +408,7 @@ lemma gets_sp: "\P\ gets f \\rv. P and (\ \P\ f \Q\; (r, s') \ fst (f s); P s \ \ Q r s'" + "\ \P\ f \Q\; (r, s') \ mres (f s); P s \ \ Q r s'" by (rule post_by_hoare, assumption+) lemma hoare_Ball_helper: @@ -519,19 +465,19 @@ lemma hoare_ex_pre: (* safe, unlike hoare_vcg_ex_lift *) by (fastforce simp: valid_def) lemma hoare_ex_pre_conj: - "(\x. \\s. P x s \ P' s\ f \Q\) - \ \\s. (\x. P x s) \ P' s\ f \Q\" + "\\x. \\s. P x s \ P' s\ f \Q\\ + \ \\s. (\x. P x s) \ P' s\ f \Q\" by (fastforce simp: valid_def) lemma hoare_conj_lift_inv: "\\P\ f \Q\; \\s. P' s \ I s\ f \\rv. I\; - \s. P s \ P' s\ + \s. P s \ P' s\ \ \\s. P s \ I s\ f \\rv s. Q rv s \ I s\" by (fastforce simp: valid_def) -lemma hoare_in_monad_post : +lemma hoare_in_monad_post: assumes x: "\P. \P\ f \\x. P\" - shows "\\\ f \\rv s. (rv, s) \ fst (f s)\" + shows "\\\ f \\rv s. (rv, s) \ mres (f s)\" apply (clarsimp simp: valid_def) apply (subgoal_tac "s = b", simp) apply (simp add: state_unchanged [OF x]) @@ -552,7 +498,7 @@ lemma validE_R_sp: lemma valid_set_take_helper: "\P\ f \\rv s. \x \ set (xs rv s). Q x rv s\ - \ \P\ f \\rv s. \x \ set (take (n rv s) (xs rv s)). Q x rv s\" + \ \P\ f \\rv s. \x \ set (take (n rv s) (xs rv s)). Q x rv s\" apply (erule hoare_strengthen_post) apply (clarsimp dest!: in_set_takeD) done @@ -584,29 +530,9 @@ lemma wp_split_const_if_R: shows "\\s. (G \ P s) \ (\ G \ P' s)\ f \\rv s. 
(G \ Q rv s) \ (\ G \ Q' rv s)\,-" by (case_tac G, simp_all add: x y) -lemma wp_throw_const_imp: - assumes x: "\P\ f \Q\" - shows "\\s. G \ P s\ f \\rv s. G \ Q rv s\" - by (case_tac G, simp_all add: x hoare_vcg_prop) - -lemma wp_throw_const_impE: - assumes x: "\P\ f \Q\,\E\" - shows "\\s. G \ P s\ f \\rv s. G \ Q rv s\,\\rv s. G \ E rv s\" - apply (case_tac G, simp_all add: x) - apply wp - done - -lemma hoare_const_imp_R: - "\Q\ f \R\,- \ \\s. P \ Q s\ f \\rv s. P \ R rv s\,-" - by (cases P, simp_all) - -lemma hoare_vcg_imp_lift_R: - "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" - by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) - lemma hoare_disj_division: "\ P \ Q; P \ \R\ f \S\; Q \ \T\ f \S\ \ - \ \\s. (P \ R s) \ (Q \ T s)\ f \S\" + \ \\s. (P \ R s) \ (Q \ T s)\ f \S\" apply safe apply (rule hoare_pre_imp) prefer 2 @@ -623,8 +549,8 @@ lemma hoare_grab_asm: by (cases G, simp+) lemma hoare_grab_asm2: - "(P' \ \\s. P s \ R s\ f \Q\) - \ \\s. P s \ P' \ R s\ f \Q\" + "\P' \ \\s. P s \ R s\ f \Q\\ + \ \\s. P s \ P' \ R s\ f \Q\" by (fastforce simp: valid_def) lemma hoare_grab_exs: @@ -639,8 +565,8 @@ lemma hoare_prop_E: "\\rv. P\ f -,\\rv s by (rule hoare_pre, wp, simp) lemma hoare_vcg_conj_lift_R: - "\ \P\ f \Q\,-; \R\ f \S\,- \ \ - \\s. P s \ R s\ f \\rv s. Q rv s \ S rv s\,-" + "\ \P\ f \Q\,-; \R\ f \S\,- \ + \ \\s. P s \ R s\ f \\rv s. Q rv s \ S rv s\,-" apply (simp add: validE_R_def validE_def) apply (drule(1) hoare_vcg_conj_lift) apply (erule hoare_strengthen_post) @@ -657,12 +583,12 @@ lemma hoare_walk_assmsE: done lemma univ_wp: - "\\s. \(rv, s') \ fst (f s). Q rv s'\ f \Q\" + "\\s. \(rv, s') \ mres (f s). Q rv s'\ f \Q\" by (simp add: valid_def) lemma univ_get_wp: assumes x: "\P. \P\ f \\rv. P\" - shows "\\s. \(rv, s') \ fst (f s). s = s' \ Q rv s'\ f \Q\" + shows "\\s. \(rv, s') \ mres (f s). s = s' \ Q rv s'\ f \Q\" apply (rule hoare_pre_imp [OF _ univ_wp]) apply clarsimp apply (drule bspec, assumption, simp) @@ -670,16 +596,11 @@ lemma univ_get_wp: apply (simp add: state_unchanged [OF x]) done -lemma result_in_set_wp : - assumes x: "\P. \P\ fn \\rv. P\" - shows "\\s. True\ fn \\v s'. (v, s') \ fst (fn s')\" - by (rule hoare_pre_imp [OF _ univ_get_wp], simp_all add: x split_def) clarsimp - -lemma other_result_in_set_wp: +lemma other_hoare_in_monad_post: assumes x: "\P. \P\ fn \\rv. P\" - shows "\\s. \(v, s) \ fst (fn s). F v = v\ fn \\v s'. (F v, s') \ fst (fn s')\" + shows "\\s. \(v, s) \ mres (fn s). F v = v\ fn \\v s'. (F v, s') \ mres (fn s')\" proof - - have P: "\v s. (F v = v) \ (v, s) \ fst (fn s) \ (F v, s) \ fst (fn s)" + have P: "\v s. (F v = v) \ (v, s) \ mres (fn s) \ (F v, s) \ mres (fn s)" by simp show ?thesis apply (rule hoare_post_imp [OF P], assumption) @@ -687,7 +608,7 @@ lemma other_result_in_set_wp: defer apply (rule hoare_vcg_conj_lift) apply (rule univ_get_wp [OF x]) - apply (rule result_in_set_wp [OF x]) + apply (rule hoare_in_monad_post [OF x]) apply clarsimp apply (drule bspec, assumption, simp) done @@ -695,7 +616,7 @@ lemma other_result_in_set_wp: lemma weak_if_wp: "\ \P\ f \Q\; \P'\ f \Q'\ \ \ - \P and P'\ f \\r. if C r then Q r else Q' r\" + \P and P'\ f \\r. if C r then Q r else Q' r\" by (auto simp add: valid_def split_def) lemma weak_if_wp': @@ -704,12 +625,12 @@ lemma weak_if_wp': by (auto simp add: valid_def split_def) lemma bindE_split_recursive_asm: - assumes x: "\x s'. \ (Inr x, s') \ fst (f s) \ \ \\s. B x s \ s = s'\ g x \C\, \E\" + assumes x: "\x s'. 
\ (Inr x, s') \ mres (f s) \ \ \\s. B x s \ s = s'\ g x \C\, \E\" shows "\A\ f \B\, \E\ \ \\st. A st \ st = s\ f >>=E g \C\, \E\" - apply (clarsimp simp: validE_def valid_def bindE_def bind_def lift_def) + apply (clarsimp simp: validE_def valid_def bindE_def in_bind lift_def) apply (erule allE, erule(1) impE) apply (drule(1) bspec, simp) - apply (case_tac a, simp_all add: throwError_def return_def) + apply (case_tac x, simp_all add: in_throwError) apply (drule x) apply (clarsimp simp: validE_def valid_def) apply (drule(1) bspec, simp) @@ -721,7 +642,7 @@ lemma validE_R_abstract_rv: lemma validE_cases_valid: "\P\ f \\rv s. Q (Inr rv) s\,\\rv s. Q (Inl rv) s\ - \ \P\ f \Q\" + \ \P\ f \Q\" apply (simp add: validE_def) apply (erule hoare_strengthen_post) apply (simp split: sum.split_asm) @@ -746,12 +667,10 @@ lemma hoare_gen_asm_conj: "(P \ \P'\ f \Q\) \ \\s. P' s \ P\ f \Q\" by (fastforce simp: valid_def) - lemma hoare_add_K: "\P\ f \Q\ \ \\s. P s \ I\ f \\rv s. Q rv s \ I\" by (fastforce simp: valid_def) - lemma valid_rv_lift: "\P'\ f \\rv s. rv \ Q rv s\ \ \\s. P \ P' s\ f \\rv s. rv \ P \ Q rv s\" by (fastforce simp: valid_def) @@ -762,20 +681,19 @@ lemma valid_imp_ex: lemma valid_rv_split: "\\P\ f \\rv s. rv \ Q s\; \P\ f \\rv s. \rv \ Q' s\\ - \ - \P\ f \\rv s. if rv then Q s else Q' s\" + \ \P\ f \\rv s. if rv then Q s else Q' s\" by (fastforce simp: valid_def) lemma hoare_rv_split: "\\P\ f \\rv s. rv \ (Q rv s)\; \P\ f \\rv s. (\rv) \ (Q rv s)\\ - \ \P\ f \Q\" + \ \P\ f \Q\" apply (clarsimp simp: valid_def) apply (case_tac a, fastforce+) done -lemma combine_validE: "\ \ P \ x \ Q \,\ E \; - \ P' \ x \ Q' \,\ E' \ \ \ - \ P and P' \ x \ \r. (Q r) and (Q' r) \,\\r. (E r) and (E' r) \" +lemma combine_validE: + "\ \ P \ x \ Q \,\ E \; \ P' \ x \ Q' \,\ E' \ \ + \ \ P and P' \ x \ \r. (Q r) and (Q' r) \,\\r. (E r) and (E' r) \" apply (clarsimp simp: validE_def valid_def split: sum.splits) apply (erule allE, erule (1) impE)+ apply (drule (1) bspec)+ @@ -827,9 +745,9 @@ lemma hoare_name_pre_state2: by (auto simp: valid_def intro: hoare_name_pre_state) lemma returnOk_E': "\P\ returnOk r -,\E\" - by (clarsimp simp: returnOk_def validE_E_def validE_def valid_def return_def) + by wpsimp lemma throwError_R': "\P\ throwError e \Q\,-" - by (clarsimp simp:throwError_def validE_R_def validE_def valid_def return_def) + by wpsimp end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_No_Fail.thy b/lib/Monads/trace/Trace_No_Fail.thy new file mode 100644 index 0000000000..0c0d958edf --- /dev/null +++ b/lib/Monads/trace/Trace_No_Fail.thy @@ -0,0 +1,231 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Lemmas about the no_fail predicate. *) + +theory Trace_No_Fail + imports + Trace_In_Monad + Trace_VCG + WPSimp +begin + +subsection "Non-Failure" + +text \ + With the failure result, we can formulate non-failure separately from validity. + A monad @{text m} does not fail under precondition @{text P}, if for no start + state that satisfies the precondition it returns a @{term Failed} result. +\ +definition no_fail :: "('s \ bool) \ ('s,'a) tmonad \ bool" where + "no_fail P m \ \s. P s \ Failed \ snd ` (m s)" + + +subsection \@{method wpc} setup\ + +lemma no_fail_pre[wp_pre]: + "\ no_fail P f; \s. 
Q s \ P s\ \ no_fail Q f" + by (simp add: no_fail_def) + +lemma wpc_helper_no_fail_final: + "no_fail Q f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_fail P f)" + by (clarsimp simp: wpc_helper_def elim!: no_fail_pre) + +wpc_setup "\m. no_fail P m" wpc_helper_no_fail_final + + +subsection \Bundles\ + +bundle no_pre = hoare_pre [wp_pre del] no_fail_pre [wp_pre del] + +bundle classic_wp_pre = + hoare_pre [wp_pre del] + all_classic_wp_combs[wp_comb del] + all_classic_wp_combs[wp_comb] + + +subsection \Lemmas\ + +lemma no_failD: + "\ no_fail P m; P s \ \ Failed \ snd ` m s" + by (simp add: no_fail_def) + +lemma no_fail_modify[wp,simp]: + "no_fail \ (modify f)" + by (simp add: no_fail_def modify_def get_def put_def bind_def) + +lemma no_fail_gets_simp[simp]: + "no_fail P (gets f)" + unfolding no_fail_def gets_def get_def return_def bind_def + by simp + +lemma no_fail_gets[wp]: + "no_fail \ (gets f)" + by simp + +lemma no_fail_select[simp]: + "no_fail \ (select S)" + by (simp add: no_fail_def select_def image_def) + +lemma no_fail_alt[wp]: + "\ no_fail P f; no_fail Q g \ \ no_fail (P and Q) (f \ g)" + by (auto simp: no_fail_def alternative_def) + +lemma no_fail_return[simp, wp]: + "no_fail \ (return x)" + by (simp add: return_def no_fail_def) + +lemma no_fail_get[simp, wp]: + "no_fail \ get" + by (simp add: get_def no_fail_def) + +lemma no_fail_put[simp, wp]: + "no_fail \ (put s)" + by (simp add: put_def no_fail_def) + +lemma no_fail_when[wp]: + "(P \ no_fail Q f) \ no_fail (if P then Q else \) (when P f)" + by (simp add: when_def) + +lemma no_fail_unless[wp]: + "(\P \ no_fail Q f) \ no_fail (if P then \ else Q) (unless P f)" + by (simp add: unless_def when_def) + +lemma no_fail_fail[simp, wp]: + "no_fail \ fail" + by (simp add: fail_def no_fail_def) + +lemma no_fail_assert[simp, wp]: + "no_fail (\_. P) (assert P)" + by (simp add: assert_def) + +lemma no_fail_assert_opt[simp, wp]: + "no_fail (\_. P \ None) (assert_opt P)" + by (simp add: assert_opt_def split: option.splits) + +lemma no_fail_case_option[wp]: + assumes f: "no_fail P f" + assumes g: "\x. no_fail (Q x) (g x)" + shows "no_fail (if x = None then P else Q (the x)) (case_option f g x)" + by (clarsimp simp add: f g) + +lemma no_fail_if[wp]: + "\ P \ no_fail Q f; \P \ no_fail R g \ \ no_fail (if P then Q else R) (if P then f else g)" + by simp + +lemma no_fail_apply[wp]: + "no_fail P (f (g x)) \ no_fail P (f $ g x)" + by simp + +lemma no_fail_undefined[simp, wp]: + "no_fail \ undefined" + by (simp add: no_fail_def) + +lemma no_fail_returnOK[simp, wp]: + "no_fail \ (returnOk x)" + by (simp add: returnOk_def) + +lemma no_fail_bind[wp]: + "\ no_fail P f; \x. no_fail (R x) (g x); \Q\ f \R\ \ \ no_fail (P and Q) (f >>= (\rv. g rv))" + apply (simp add: no_fail_def bind_def' image_Un image_image + in_image_constant) + apply (intro allI conjI impI) + apply (fastforce simp: image_def) + apply clarsimp + apply (drule(1) post_by_hoare, erule in_mres) + apply (fastforce simp: image_def) + done + +lemma no_fail_assume_pre: + "(\s. P s \ no_fail P f) \ no_fail P f" + by (simp add: no_fail_def) + +\ \lemma no_fail_liftM_eq[simp]: + "no_fail P (liftM f m) = no_fail P m" + by (auto simp: liftM_def no_fail_def bind_def return_def)\ + +lemma no_fail_liftM[wp]: + "no_fail P m \ no_fail P (liftM f m)" + unfolding liftM_def + by wpsimp + +lemma no_fail_pre_and: + "no_fail P f \ no_fail (P and Q) f" + by (erule no_fail_pre) simp + +lemma no_fail_spec: + "\ \s. 
no_fail (((=) s) and P) f \ \ no_fail P f" + by (simp add: no_fail_def) + +lemma no_fail_assertE[wp]: + "no_fail (\_. P) (assertE P)" + by (simp add: assertE_def split: if_split) + +lemma no_fail_spec_pre: + "\ no_fail (((=) s) and P') f; \s. P s \ P' s \ \ no_fail (((=) s) and P) f" + by (erule no_fail_pre, simp) + +lemma no_fail_whenE[wp]: + "\ G \ no_fail P f \ \ no_fail (\s. G \ P s) (whenE G f)" + by (simp add: whenE_def split: if_split) + +lemma no_fail_unlessE[wp]: + "\ \ G \ no_fail P f \ \ no_fail (\s. \ G \ P s) (unlessE G f)" + by (simp add: unlessE_def split: if_split) + +lemma no_fail_throwError[wp]: + "no_fail \ (throwError e)" + by (simp add: throwError_def) + +lemma no_fail_liftE[wp]: + "no_fail P f \ no_fail P (liftE f)" + unfolding liftE_def by wpsimp + +lemma no_fail_gets_the[wp]: + "no_fail (\s. f s \ None) (gets_the f)" + unfolding gets_the_def + by wpsimp + +lemma no_fail_lift: + "(\y. x = Inr y \ no_fail P (f y)) \ no_fail (\s. \isl x \ P s) (lift f x)" + unfolding lift_def + by (wpsimp wp: no_fail_throwError split: sum.splits | assumption)+ + +lemma validE_R_valid_eq: + "\Q\ f \R\, - = \Q\ f \\rv s. \ isl rv \ R (projr rv) s\" + unfolding validE_R_def validE_def valid_def + by (fastforce split: sum.splits prod.split) + +lemma no_fail_bindE[wp]: + "\ no_fail P f; \rv. no_fail (R rv) (g rv); \Q\ f \R\,- \ + \ no_fail (P and Q) (f >>=E g)" + unfolding bindE_def + by (wpsimp wp: no_fail_lift simp: validE_R_valid_eq | assumption)+ + +lemma no_fail_False[simp]: + "no_fail (\_. False) X" + by (clarsimp simp: no_fail_def) + +lemma no_fail_gets_map[wp]: + "no_fail (\s. f s p \ None) (gets_map f p)" + unfolding gets_map_def by wpsimp + +lemma no_fail_or: + "\no_fail P a; no_fail Q a\ \ no_fail (P or Q) a" + by (clarsimp simp: no_fail_def) + +lemma no_fail_state_assert[wp]: + "no_fail P (state_assert P)" + unfolding state_assert_def + by wpsimp + +lemma no_fail_condition: + "\no_fail Q A; no_fail R B\ \ no_fail (\s. (C s \ Q s) \ (\ C s \ R s)) (condition C A B)" + unfolding condition_def no_fail_def + by clarsimp + +end diff --git a/lib/Monads/trace/Trace_No_Throw.thy b/lib/Monads/trace/Trace_No_Throw.thy new file mode 100644 index 0000000000..f5174f1ecd --- /dev/null +++ b/lib/Monads/trace/Trace_No_Throw.thy @@ -0,0 +1,102 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Lemmas about no_throw. Usually should have a conclusion "no_throw P m". + Includes some monad equations that have no_throw as a main assumption. *) + +theory Trace_No_Throw + imports + Trace_VCG +begin + +section "Basic exception reasoning" + +text \ + The predicates @{text no_throw} and @{text no_return} allow us to reason about functions in + the exception monad that never throw an exception or never return normally.\ + +definition no_throw :: "('s \ bool) \ ('s, 'e + 'a) tmonad \ bool" where + "no_throw P A \ \ P \ A \ \_ _. True \,\ \_ _. False \" + +definition no_return :: "('a \ bool) \ ('a, 'b + 'c) tmonad \ bool" where + "no_return P A \ \ P \ A \\_ _. False\,\\_ _. True \" + +(* Alternative definition of no_throw; easier to work with than unfolding validE. *) +lemma no_throw_def': + "no_throw P A = (\s. P s \ (\(r, t) \ mres (A s). (\x. 
r = Inr x)))" + by (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits) + + +subsection \no_throw rules\ + +lemma no_throw_returnOk[simp]: + "no_throw P (returnOk a)" + unfolding no_throw_def + by wp + +lemma no_throw_liftE[simp]: + "no_throw P (liftE x)" + by (wpsimp simp: liftE_def no_throw_def validE_def) + +lemma no_throw_bindE: + "\ no_throw A X; \a. no_throw B (Y a); \ A \ X \ \_. B \,\ \_ _. True \ \ + \ no_throw A (X >>=E Y)" + unfolding no_throw_def + using hoare_validE_cases seqE by blast + +lemma no_throw_bindE_simple: + "\ no_throw \ L; \x. no_throw \ (R x) \ \ no_throw \ (L >>=E R)" + using hoareE_TrueI no_throw_bindE by blast + +lemma no_throw_handleE_simple: + "\ \x. no_throw \ L \ no_throw \ (R x) \ \ no_throw \ (L R)" + by (fastforce simp: no_throw_def' handleE_def handleE'_def validE_def valid_def bind_def return_def + mres_def image_def + split: sum.splits tmres.splits) + +lemma no_throw_handle2: + "\ \a. no_throw Y (B a); \ X \ A \ \_ _. True \,\ \_. Y \ \ \ no_throw X (A B)" + by (fastforce simp: no_throw_def' handleE'_def validE_def valid_def bind_def return_def mres_def + image_def + split: sum.splits tmres.splits) + +lemma no_throw_handle: + "\ \a. no_throw Y (B a); \ X \ A \ \_ _. True \,\ \_. Y \ \ \ no_throw X (A B)" + unfolding handleE_def + by (rule no_throw_handle2) + +lemma no_throw_fail[simp]: + "no_throw P fail" + by (clarsimp simp: no_throw_def) + +lemma handleE'_nothrow_lhs: + "no_throw \ L \ no_throw \ (L R)" + unfolding no_throw_def + using handleE'_wp[rotated] by fastforce + +lemma handleE'_nothrow_rhs: + "\ \x. no_throw \ (R x) \ \ no_throw \ (L R)" + unfolding no_throw_def + by (metis hoareE_TrueI no_throw_def no_throw_handle2) + +lemma handleE_nothrow_lhs: + "no_throw \ L \ no_throw \ (L R)" + by (metis handleE'_nothrow_lhs handleE_def) + +lemma handleE_nothrow_rhs: + "\ \x. no_throw \ (R x) \ \ no_throw \ (L R)" + by (metis no_throw_handleE_simple) + +lemma condition_nothrow: + "\ no_throw \ L; no_throw \ R \ \ no_throw \ (condition C L R)" + by (clarsimp simp: condition_def no_throw_def validE_def2) + +lemma no_throw_Inr: + "\ x \ mres (A s); no_throw P A; P s \ \ \y. fst x = Inr y" + by (fastforce simp: no_throw_def' split: sum.splits) + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_No_Trace.thy b/lib/Monads/trace/Trace_No_Trace.thy new file mode 100644 index 0000000000..5b59c87d72 --- /dev/null +++ b/lib/Monads/trace/Trace_No_Trace.thy @@ -0,0 +1,77 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_No_Trace + imports + Trace_Monad + WPSimp +begin + +subsection "No Trace" + +text \ + A monad of type @{text tmonad} does not have a trace iff for all starting + states, all of the potential outcomes have the empty list as a trace and do + not return an @{term Incomplete} result.\ +definition no_trace :: "('s,'a) tmonad \ bool" where + "no_trace f = (\tr res s. (tr, res) \ f s \ tr = [] \ res \ Incomplete)" + +lemmas no_traceD = no_trace_def[THEN iffD1, rule_format] + +lemma no_trace_emp: + "\no_trace f; (tr, r) \ f s\ \ tr = []" + by (simp add: no_traceD) + +lemma no_trace_Incomplete_mem: + "no_trace f \ (tr, Incomplete) \ f s" + by (auto dest: no_traceD) + +lemma no_trace_Incomplete_eq: + "\no_trace f; (tr, res) \ f s\ \ res \ Incomplete" + by (auto dest: no_traceD) + + +subsection \Set up for @{method wp}\ + +lemma no_trace_is_triple[wp_trip]: + "no_trace f = triple_judgement \ f (\() f. 
id no_trace f)" + by (simp add: triple_judgement_def split: unit.split) + + +subsection \Rules\ + +lemma no_trace_prim: + "no_trace get" + "no_trace (put s)" + "no_trace (modify f)" + "no_trace (return v)" + "no_trace fail" + by (simp_all add: no_trace_def get_def put_def modify_def bind_def + return_def select_def fail_def) + +lemma no_trace_select: + "no_trace (select S)" + by (clarsimp simp add: no_trace_def select_def) + +lemma no_trace_bind: + "no_trace f \ \rv. no_trace (g rv) + \ no_trace (do rv \ f; g rv od)" + apply (subst no_trace_def) + apply (clarsimp simp add: bind_def split: tmres.split_asm; + auto dest: no_traceD[rotated]) + done + +lemma no_trace_extra: + "no_trace (gets f)" + "no_trace (assert P)" + "no_trace (assert_opt Q)" + by (simp_all add: gets_def assert_def assert_opt_def no_trace_bind no_trace_prim + split: option.split) + +lemmas no_trace_all[wp, iff] = no_trace_prim no_trace_select no_trace_extra + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_RG.thy b/lib/Monads/trace/Trace_RG.thy new file mode 100644 index 0000000000..3dec5f51b0 --- /dev/null +++ b/lib/Monads/trace/Trace_RG.thy @@ -0,0 +1,761 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_RG + imports + Trace_VCG + Trace_Monad_Equations + Trace_No_Trace +begin + +section \Rely-Guarantee Logic\ + +subsection \Validity\ + +text \ + This section defines a Rely_Guarantee logic for partial correctness for + the interference trace monad. + + The logic is defined semantically. Rules work directly on the + validity predicate. + + In the interference trace monad, RG validity is a quintuple of precondition, + rely, monad, guarantee, and postcondition. The precondition is a function + from initial state to current state to bool (a state predicate), the rely and + guarantee are functions from state before to state after to bool, and the + postcondition is a function from return value to last state in the trace to + final state to bool. A quintuple is valid if for all initial and current + states that satisfy the precondition and all traces that satisfy the rely, + all of the pssible self steps performed by the monad satisfy the guarantee + and all of the result values and result states that are returned by the monad + satisfy the postcondition. Note that if the computation returns an empty + trace and no successful results then the quintuple is trivially valid. This + means @{term "assert P"} does not require us to prove that @{term P} holds, + but rather allows us to assume @{term P}! Proving non-failure is done via a + separate predicate and calculus (see Trace_No_Fail).\ + +primrec trace_steps :: "(tmid \ 's) list \ 's \ (tmid \ 's \ 's) set" where + "trace_steps (elem # trace) s0 = {(fst elem, s0, snd elem)} \ trace_steps trace (snd elem)" + | "trace_steps [] s0 = {}" + +lemma trace_steps_nth: + "trace_steps xs s0 = (\i. (fst (xs ! i), (if i = 0 then s0 else snd (xs ! (i - 1))), snd (xs ! 
i))) ` {..< length xs}" +proof (induct xs arbitrary: s0) + case Nil + show ?case by simp +next + case (Cons a xs) + show ?case + apply (simp add: lessThan_Suc_eq_insert_0 Cons image_image nth_Cons') + apply (intro arg_cong2[where f=insert] refl image_cong) + apply simp + done +qed + +text \rg_pred type: Rely-Guaranty predicates (state before => state after => bool)\ +type_synonym 's rg_pred = "'s \ 's \ bool" + +text \Abbreviations for trivial postconditions (taking three arguments):\ +abbreviation(input) + toptoptop :: "'a \ 'b \ 'b \ bool" ("\\\") where + "\\\ \ \_ _ _. True" + +abbreviation(input) + botbotbot :: "'a \ 'b \ 'b \ bool" ("\\\") where + "\\\ \ \_ _ _. False" + +text \ + Test whether the environment steps in @{text tr} satisfy the rely condition @{text R}, + assuming that @{text s0s} was the initial state before the first step in the trace.\ +definition rely_cond :: "'s rg_pred \ 's \ (tmid \ 's) list \ bool" where + "rely_cond R s0s tr = (\(ident, s0, s) \ trace_steps (rev tr) s0s. ident = Env \ R s0 s)" + +text \ + Test whether the self steps in @{text tr} satisfy the guarantee condition @{text G}, + assuming that @{text s0s} was the initial state before the first step in the trace.\ +definition guar_cond :: "'s rg_pred \ 's \ (tmid \ 's) list \ bool" where + "guar_cond G s0s tr = (\(ident, s0, s) \ trace_steps (rev tr) s0s. ident = Me \ G s0 s)" + +lemma rg_empty_conds[simp]: + "rely_cond R s0s []" + "guar_cond G s0s []" + by (simp_all add: rely_cond_def guar_cond_def) + +text \ + @{text "rely f R s0s"} constructs a new function from @{text f}, where the environment + steps are constrained by @{text R} and @{text s0s} was the initial state before the first + step in the trace.\ +definition rely :: "('s, 'a) tmonad \ 's rg_pred \ 's \ ('s, 'a) tmonad" where + "rely f R s0s \ (\s. f s \ ({tr. rely_cond R s0s tr} \ UNIV))" + +definition prefix_closed :: "('s, 'a) tmonad \ bool" where + "prefix_closed f = (\s. \x xs. (x # xs) \ fst ` f s \ (xs, Incomplete) \ f s)" + +definition validI :: + "('s \ 's \ bool) \ 's rg_pred \ ('s,'a) tmonad \ 's rg_pred \ ('a \ 's \ 's \ bool) \ bool" + ("(\_\,/ \_\)/ _ /(\_\,/ \_\)") where + "\P\,\R\ f \G\,\Q\ \ + prefix_closed f + \ (\s0 s tr res. P s0 s \ (tr, res) \ (rely f R s0 s) + \ guar_cond G s0 tr + \ (\rv s'. res = Result (rv, s') \ Q rv (last_st_tr tr s0) s'))" + +(* +text \Validity for exception monad with interferences. Not as easy to phrase + as we need to \ +definition validIE :: "('s, 'a + 'b) tmonad \ + 's rg_pred \ + 's rg_pred \ 's rg_pred \ + ('b \ 's rg_pred) \ + ('a \ 's rg_pred) \ bool" + ("_ //PRE _//RELY _//GUAR _//POST _//EXC _" [59,0,0,0,0,0] 60) where + "validIE f P R G Q E \ f SAT [P,R,G,\v. case v of Inr r \ Q r | Inl e \ E e]" + +abbreviation (input) + validIEsat :: "('s, 'a + 'b) tmonad \ + 's rg_pred \ + 's rg_pred \ 's rg_pred \ + ('b \ 's rg_pred) \ + ('a \ 's rg_pred) \ bool" + ("_ //SAT [_, _, _, _, _]" [59,0,0,0,0,0] 60) + where + "validIEsat f P R G Q E \ validIE f P R G Q E" + *) + +lemma in_rely: + "\ (tr, res) \ f s; rely_cond R s0s tr \ \ (tr, res) \ rely f R s0s s" + by (simp add: rely_def) + +lemmas validI_D = + validI_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct2, rule_format, + OF _ conjI, OF _ _ in_rely] +lemmas validI_GD = validI_D[THEN conjunct1] +lemmas validI_rvD = validI_D[THEN conjunct2, rule_format, rotated -1, OF refl] +lemmas validI_prefix_closed = validI_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct1] +lemmas validI_prefix_closed_T = + validI_prefix_closed[where P="\_ _. 
False" and R="\_ _. False" and G="\_ _. True" + and Q="\_ _ _. True"] + +lemmas prefix_closedD1 = prefix_closed_def[THEN iffD1, rule_format] + +lemma in_fst_snd_image_eq: + "x \ fst ` S = (\y. (x, y) \ S)" + "y \ snd ` S = (\x. (x, y) \ S)" + by (auto elim: image_eqI[rotated]) + +lemma in_fst_snd_image: + "(x, y) \ S \ x \ fst ` S" + "(x, y) \ S \ y \ snd ` S" + by (auto simp: in_fst_snd_image_eq) + +lemmas prefix_closedD = prefix_closedD1[OF _ in_fst_snd_image(1)] + + +section \Lemmas\ + +lemma validI_weaken_pre: + "\\P'\,\R\ f \G\,\Q\; \s0 s. P s0 s \ P' s0 s\ + \ \P\,\R\ f \G\,\Q\" + by (simp add: validI_def, blast) + +lemma rely_weaken: + "\\s0 s. R s0 s \ R' s0 s; (tr, res) \ rely f R s s0\ + \ (tr, res) \ rely f R' s s0" + by (simp add: rely_def rely_cond_def, blast) + +lemma validI_weaken_rely: + "\\P\,\R'\ f \G\,\Q\; \s0 s. R s0 s \ R' s0 s\ + \ \P\,\R\ f \G\,\Q\" + apply (simp add: validI_def) + by (metis rely_weaken) + +lemmas validI_pre[wp_pre] = + validI_weaken_pre + validI_weaken_rely + +lemma validI_strengthen_post: + "\\P\,\R\ f \G\,\Q'\; \v s0 s. Q' v s0 s \ Q v s0 s\ + \ \P\,\R\ f \G\,\Q\" + by (simp add: validI_def) + +lemma validI_strengthen_guar: + "\\P\, \R\ f \G'\, \Q\; \s0 s. G' s0 s \ G s0 s\ + \ \P\, \R\ f \G\, \Q\" + by (force simp: validI_def guar_cond_def) + + +subsection \Setting up the precondition case splitter.\ + +(* FIXME: this needs adjustment, case_prod Q is unlikely to unify *) +lemma wpc_helper_validI: + "(\Q\,\R\ g \G\,\S\) \ wpc_helper (P, P', P'') (case_prod Q, Q', Q'') (\curry P\,\R\ g \G\,\S\)" + by (clarsimp simp: wpc_helper_def elim!: validI_weaken_pre) + +wpc_setup "\m. \P\,\R\ m \G\,\S\" wpc_helper_validI + + +subsection \RG Logic Rules\ + +lemma trace_steps_append: + "trace_steps (xs @ ys) s + = trace_steps xs s \ trace_steps ys (last_st_tr (rev xs) s)" + by (induct xs arbitrary: s, simp_all add: last_st_tr_def hd_append) + +lemma rely_cond_append: + "rely_cond R s (xs @ ys) = (rely_cond R s ys \ rely_cond R (last_st_tr ys s) xs)" + by (simp add: rely_cond_def trace_steps_append ball_Un conj_comms) + +lemma guar_cond_append: + "guar_cond G s (xs @ ys) = (guar_cond G s ys \ guar_cond G (last_st_tr ys s) xs)" + by (simp add: guar_cond_def trace_steps_append ball_Un conj_comms) + +lemma prefix_closed_bind: + "\prefix_closed f; \x. prefix_closed (g x)\ \ prefix_closed (f >>= g)" + apply (subst prefix_closed_def, clarsimp simp: bind_def) + apply (auto simp: Cons_eq_append_conv split: tmres.split_asm + dest!: prefix_closedD[rotated]; + fastforce elim: rev_bexI) + done + +lemmas prefix_closed_bind[rule_format, wp_split] + +lemma last_st_tr_append[simp]: + "last_st_tr (tr @ tr') s = last_st_tr tr (last_st_tr tr' s)" + by (simp add: last_st_tr_def hd_append) + +lemma last_st_tr_Nil[simp]: + "last_st_tr [] s = s" + by (simp add: last_st_tr_def) + +lemma last_st_tr_Cons[simp]: + "last_st_tr (x # xs) s = snd x" + by (simp add: last_st_tr_def) + +lemma no_trace_last_st_tr: + "\no_trace f; (tr, res) \ f s\ \ last_st_tr tr s0 = s0" + by (fastforce simp: no_trace_def) + +lemma no_trace_rely_cond: + "\no_trace f; (tr, res) \ f s\ \ rely_cond R s0 tr" + by (fastforce simp: no_trace_def rely_cond_def) + +lemma bind_twp[wp_split]: + "\ \r. \Q' r\,\R\ g r \G\,\Q\; \P\,\R\ f \G\,\Q'\ \ + \ \P\,\R\ f >>= (\r. 
g r) \G\,\Q\" + apply (subst validI_def, rule conjI) + apply (blast intro: prefix_closed_bind validI_prefix_closed) + apply (clarsimp simp: bind_def rely_def) + apply (drule(2) validI_D) + apply (clarsimp simp: rely_cond_append split: tmres.split_asm) + apply (clarsimp split: tmres.split_asm) + apply (drule meta_spec, frule(2) validI_D) + apply (clarsimp simp: rely_cond_append split: if_split_asm) + apply (clarsimp simp: guar_cond_append) + done + +lemma trace_steps_rev_drop_nth: + "trace_steps (rev (drop n tr)) s + = (\i. (fst (rev tr ! i), (if i = 0 then s else snd (rev tr ! (i - 1))), + snd (rev tr ! i))) ` {..< length tr - n}" + apply (simp add: trace_steps_nth) + apply (intro image_cong refl) + apply (simp add: rev_nth) + done + +lemma prefix_closed_drop: + "\(tr, res) \ f s; prefix_closed f\ \ \res'. (drop n tr, res') \ f s" +proof (induct n arbitrary: res) + case 0 then show ?case by auto +next + case (Suc n) + have drop_1: "\tr res. (tr, res) \ f s \ \res'. (drop 1 tr, res') \ f s" + by (case_tac tr; fastforce dest: prefix_closedD[rotated] intro: Suc) + show ?case + using Suc.hyps[OF Suc.prems] + by (metis drop_1[simplified] drop_drop add_0 add_Suc) +qed + +lemma validI_GD_drop: + "\ \P\, \R\ f \G\, \Q\; P s0 s; (tr, res) \ f s; + rely_cond R s0 (drop n tr) \ + \ guar_cond G s0 (drop n tr)" + apply (drule prefix_closed_drop[where n=n], erule validI_prefix_closed) + apply (auto dest: validI_GD) + done + +lemma parallel_prefix_closed[wp_split]: + "\prefix_closed f; prefix_closed g\ + \ prefix_closed (parallel f g)" + apply (subst prefix_closed_def, clarsimp simp: parallel_def) + apply (case_tac f_steps; clarsimp) + apply (drule(1) prefix_closedD)+ + apply fastforce + done + +lemma rely_cond_drop: + "rely_cond R s0 xs \ rely_cond R s0 (drop n xs)" + using rely_cond_append[where xs="take n xs" and ys="drop n xs"] + by simp + +lemma rely_cond_is_drop: + "\rely_cond R s0 xs; (ys = drop (length xs - length ys) xs)\ + \ rely_cond R s0 ys" + by (metis rely_cond_drop) + +lemma bounded_rev_nat_induct: + "\(\n. N \ n \ P n); \n. \n < N; P (Suc n)\ \ P n\ + \ P n" + by (induct diff\"N - n" arbitrary: n; simp) + +lemma drop_n_induct: + "\P []; \n. \n < length xs; P (drop (Suc n) xs)\ \ P (drop n xs)\ + \ P (drop n xs)" + by (induct len\"length (drop n xs)" arbitrary: n xs; simp) + +lemma guar_cond_Cons_eq: + "guar_cond R s0 (x # xs) + = (guar_cond R s0 xs \ (fst x \ Env \ R (last_st_tr xs s0) (snd x)))" + by (cases "fst x"; simp add: guar_cond_def trace_steps_append conj_comms) + +lemma guar_cond_Cons: + "\guar_cond R s0 xs; fst x \ Env \ R (last_st_tr xs s0) (snd x)\ + \ guar_cond R s0 (x # xs)" + by (simp add: guar_cond_Cons_eq) + +lemma guar_cond_drop_Suc_eq: + "n < length xs + \ guar_cond R s0 (drop n xs) = (guar_cond R s0 (drop (Suc n) xs) + \ (fst (xs ! n) \ Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))))" + apply (rule trans[OF _ guar_cond_Cons_eq]) + apply (simp add: Cons_nth_drop_Suc) + done + +lemma guar_cond_drop_Suc: + "\guar_cond R s0 (drop (Suc n) xs); + fst (xs ! n) \ Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! 
n))\ + \ guar_cond R s0 (drop n xs)" + by (case_tac "n < length xs"; simp add: guar_cond_drop_Suc_eq) + +lemma rely_cond_Cons_eq: + "rely_cond R s0 (x # xs) + = (rely_cond R s0 xs \ (fst x = Env \ R (last_st_tr xs s0) (snd x)))" + by (simp add: rely_cond_def trace_steps_append conj_comms) + +lemma rely_cond_Cons: + "\rely_cond R s0 xs; fst x = Env \ R (last_st_tr xs s0) (snd x)\ + \ rely_cond R s0 (x # xs)" + by (simp add: rely_cond_Cons_eq) + +lemma rely_cond_drop_Suc_eq: + "n < length xs + \ rely_cond R s0 (drop n xs) = (rely_cond R s0 (drop (Suc n) xs) + \ (fst (xs ! n) = Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))))" + apply (rule trans[OF _ rely_cond_Cons_eq]) + apply (simp add: Cons_nth_drop_Suc) + done + +lemma rely_cond_drop_Suc: + "\rely_cond R s0 (drop (Suc n) xs); + fst (xs ! n) = Env \ R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))\ + \ rely_cond R s0 (drop n xs)" + by (cases "n < length xs"; simp add: rely_cond_drop_Suc_eq) + +lemma last_st_tr_map_zip_hd: + "\length flags = length tr; tr \ [] \ snd (f (hd flags, hd tr)) = snd (hd tr)\ + \ last_st_tr (map f (zip flags tr)) = last_st_tr tr" + apply (cases tr; simp) + apply (cases flags; simp) + apply (simp add: fun_eq_iff) + done + +lemma last_st_tr_drop_map_zip_hd: + "\length flags = length tr; + n < length tr \ snd (f (flags ! n, tr ! n)) = snd (tr ! n)\ + \ last_st_tr (drop n (map f (zip flags tr))) = last_st_tr (drop n tr)" + apply (simp add: drop_map drop_zip) + apply (rule last_st_tr_map_zip_hd; clarsimp) + apply (simp add: hd_drop_conv_nth) + done + +lemma last_st_tr_map_zip: + "\length flags = length tr; \fl tmid s. snd (f (fl, (tmid, s))) = s\ + \ last_st_tr (map f (zip flags tr)) = last_st_tr tr" + apply (erule last_st_tr_map_zip_hd) + apply (clarsimp simp: neq_Nil_conv) + done + +lemma parallel_rely_induct: + assumes preds: "(tr, v) \ parallel f g s" "Pf s0 s" "Pg s0 s" + and validI: "\Pf\,\Rf\ f' \Gf\,\Qf\" + "\Pg\,\Rg\ g' \Gg\,\Qg\" + "f s \ f' s" "g s \ g' s" + and compat: "R \ Rf" "R \ Rg" "Gf \ G" "Gg \ G" "Gf \ Rg" "Gg \ Rf" + and rely: "rely_cond R s0 (drop n tr)" + shows + "\tr_f tr_g. (tr_f, v) \ f s \ (tr_g, v) \ g s + \ rely_cond Rf s0 (drop n tr_f) \ rely_cond Rg s0 (drop n tr_g) + \ map snd tr_f = map snd tr \ map snd tr_g = map snd tr + \ (\i \ length tr. last_st_tr (drop i tr_g) s0 = last_st_tr (drop i tr_f) s0) + \ guar_cond G s0 (drop n tr)" + (is "\ys zs. _ \ _ \ ?concl ys zs") +proof - + obtain ys zs where tr: "tr = map parallel_mrg (zip ys zs)" + and all2: "list_all2 (\y z. 
(fst y = Env \ fst z = Env) \ snd y = snd z) ys zs" + and ys: "(ys, v) \ f s" and zs: "(zs, v) \ g s" + using preds + by (clarsimp simp: parallel_def2) + note len[simp] = list_all2_lengthD[OF all2] + + have ys': "(ys, v) \ f' s" and zs': "(zs, v) \ g' s" + using ys zs validI by auto + + note rely_f_step = validI_GD_drop[OF validI(1) preds(2) ys' rely_cond_drop_Suc] + note rely_g_step = validI_GD_drop[OF validI(2) preds(3) zs' rely_cond_drop_Suc] + + note snd[simp] = list_all2_nthD[OF all2, THEN conjunct2] + + have "?concl ys zs" + using rely tr all2 rely_f_step rely_g_step + apply (induct n rule: bounded_rev_nat_induct) + apply (subst drop_all, assumption) + apply clarsimp + apply (simp add: list_all2_conv_all_nth last_st_tr_def drop_map[symmetric] + hd_drop_conv_nth hd_append) + apply (fastforce simp: split_def intro!: nth_equalityI) + apply clarsimp + apply (erule_tac x=n in meta_allE)+ + apply (drule meta_mp, erule rely_cond_is_drop, simp) + apply (subst(asm) rely_cond_drop_Suc_eq[where xs="map f xs" for f xs], simp) + apply (clarsimp simp: last_st_tr_drop_map_zip_hd if_split[where P="\x. x = Env"] + split_def) + apply (intro conjI; (rule guar_cond_drop_Suc rely_cond_drop_Suc, assumption)) + apply (auto simp: guar_cond_drop_Suc_eq last_st_tr_drop_map_zip_hd + intro: compat[THEN predicate2D]) + done + + thus ?thesis + using ys zs + by auto +qed + +lemmas parallel_rely_induct0 = parallel_rely_induct[where n=0, simplified] + +lemma rg_validI: + assumes validI: "\Pf\,\Rf\ f \Gf\,\Qf\" + "\Pg\,\Rg\ g \Gg\,\Qg\" + and compat: "R \ Rf" "R \ Rg" "Gf \ G" "Gg \ G" "Gf \ Rg" "Gg \ Rf" + shows "\Pf and Pg\,\R\ parallel f g \G\,\\rv. Qf rv and Qg rv\" + apply (clarsimp simp: validI_def rely_def pred_conj_def + parallel_prefix_closed validI[THEN validI_prefix_closed]) + apply (drule(3) parallel_rely_induct0[OF _ _ _ validI order_refl order_refl compat]) + apply clarsimp + apply (drule(2) validI[THEN validI_rvD])+ + apply (simp add: last_st_tr_def) + done + +lemma rely_prim[simp]: + "rely (\s. insert (v s) (f s)) R s0 = (\s. {x. x = v s \ rely_cond R s0 (fst x)} \ (rely f R s0 s))" + "rely (\s. {}) R s0 = (\_. {})" + by (auto simp: rely_def prod_eq_iff) + +lemma prefix_closed_put_trace_elem[iff]: + "prefix_closed (put_trace_elem x)" + by (clarsimp simp: prefix_closed_def put_trace_elem_def) + +lemma prefix_closed_return[iff]: + "prefix_closed (return x)" + by (simp add: prefix_closed_def return_def) + +lemma prefix_closed_put_trace[iff]: + "prefix_closed (put_trace tr)" + by (induct tr; clarsimp simp: prefix_closed_bind) + +lemma put_trace_eq_drop: + "put_trace xs s + = ((\n. (drop n xs, if n = 0 then Result ((), s) else Incomplete)) ` {.. length xs})" + apply (induct xs) + apply (clarsimp simp: return_def) + apply (clarsimp simp: put_trace_elem_def bind_def) + apply (simp add: atMost_Suc_eq_insert_0 image_image) + apply (rule equalityI; clarsimp) + apply (split if_split_asm; clarsimp) + apply (auto intro: image_eqI[where x=0])[1] + apply (rule rev_bexI, simp) + apply clarsimp + done + +lemma put_trace_res: + "(tr, res) \ put_trace xs s + \ \n. tr = drop n xs \ n \ length xs + \ res = (case n of 0 \ Result ((), s) | _ \ Incomplete)" + apply (clarsimp simp: put_trace_eq_drop) + apply (case_tac n; auto intro: exI[where x=0]) + done + +lemma put_trace_twp[wp]: + "\\s0 s. (\n. 
rely_cond R s0 (drop n xs) \ guar_cond G s0 (drop n xs)) + \ (rely_cond R s0 xs \ Q () (last_st_tr xs s0) s)\,\R\ + put_trace xs + \G\,\Q\" + apply (clarsimp simp: validI_def rely_def) + apply (drule put_trace_res) + apply (clarsimp; clarsimp split: nat.split_asm) + done + +lemmas put_trace_elem_twp = put_trace_twp[where xs="[x]" for x, simplified] + +lemma prefix_closed_select[iff]: + "prefix_closed (select S)" + by (simp add: prefix_closed_def select_def image_def) + +lemma rely_cond_rtranclp: + "rely_cond R s (map (Pair Env) xs) \ rtranclp R s (last_st_tr (map (Pair Env) xs) s)" + apply (induct xs arbitrary: s rule: rev_induct) + apply simp + apply (clarsimp simp add: rely_cond_def) + apply (erule converse_rtranclp_into_rtranclp) + apply simp + done + + +subsection \Setting up the @{method wp} method\ + +(* Attempt to define triple_judgement to use valid_validI_wp as wp_comb rule. + It doesn't work. It seems that wp_comb rules cannot take more than 1 assumption *) +lemma validI_is_triple[wp_trip]: + "\P\,\R\ f \G\,\Q\ + = triple_judgement (\(s0, s). prefix_closed f \ P s0 s) f + (\(s0,s) f. prefix_closed f \ (\tr res. (tr, res) \ rely f R s0 s + \ guar_cond G s0 tr + \ (\rv s'. res = Result (rv, s') \ Q rv (last_st_tr tr s0) s')))" + apply (simp add: triple_judgement_def validI_def ) + apply (cases "prefix_closed f"; fastforce) + done + +lemma no_trace_prefix_closed: + "no_trace f \ prefix_closed f" + by (auto simp add: prefix_closed_def dest: no_trace_emp) + +lemma validI_valid_no_trace_eq: + "no_trace f \ \P\,\R\ f \G\,\Q\ = (\s0. \P s0\ f \\v. Q v s0\)" + apply (rule iffI) + apply (fastforce simp: rely_def validI_def valid_def mres_def + dest: no_trace_emp) + apply (clarsimp simp: rely_def validI_def valid_def mres_def no_trace_prefix_closed) + apply (fastforce simp: eq_snd_iff dest: no_trace_emp) + done + +lemma valid_validI_wp[wp_comb]: + "\no_trace f; \s0. \P s0\ f \\v. Q v s0 \\ + \ \P\,\R\ f \G\,\Q\" + by (clarsimp simp: validI_valid_no_trace_eq) + + +lemma env_steps_twp[wp]: + "\\s0 s. (\s'. R\<^sup>*\<^sup>* s0 s' \ Q () s' s') \ Q () s0 s\,\R\ env_steps \G\,\Q\" + apply (simp add: interference_def env_steps_def) + apply wp + apply (clarsimp simp: guar_cond_def trace_steps_rev_drop_nth rev_nth) + apply (drule rely_cond_rtranclp) + apply (clarsimp simp add: last_st_tr_def hd_append) + done + +lemma interference_twp[wp]: + "\\s0 s. (\s'. R\<^sup>*\<^sup>* s s' \ Q () s' s') \ G s0 s\,\R\ interference \G\,\Q\" + apply (simp add: interference_def commit_step_def del: put_trace.simps) + apply wp + apply clarsimp + apply (simp add: drop_Cons nat.split rely_cond_def guar_cond_def) + done + +(* what Await does if we haven't committed our step is a little + strange. this assumes we have, which means s0 = s. we should + revisit this if we find a use for Await when this isn't the + case *) +lemma Await_sync_twp: + "\\s0 s. s = s0 \ (\x. R\<^sup>*\<^sup>* s0 x \ c x \ Q () x x)\,\R\ Await c \G\,\Q\" + apply (simp add: Await_def split_def) + apply wp + apply clarsimp + apply (clarsimp simp: guar_cond_def trace_steps_rev_drop_nth rev_nth) + apply (drule rely_cond_rtranclp) + apply (simp add: o_def) + done + +lemma mres_union: + "mres (a \ b) = mres a \ mres b" + by (simp add: mres_def image_Un) + +lemma mres_Failed_empty: + "mres ((\xs. (xs, Failed)) ` X ) = {}" + "mres ((\xs. (xs, Incomplete)) ` X ) = {}" + by (auto simp add: mres_def image_def) + +lemma det_set_option_eq: + "(\a\m. 
set_option (snd a)) = {(r, s')} \ + (ts, Some (rr, ss)) \ m \ rr = r \ ss = s'" + by (metis UN_I option.set_intros prod.inject singleton_iff snd_conv) + +lemma det_set_option_eq': + "(\a\m. set_option (snd a)) = {(r, s')} \ + Some (r, s') \ snd ` m" + using image_iff by fastforce + +lemma validI_name_pre: + "prefix_closed f \ + (\s0 s. P s0 s \ \\s0' s'. s0' = s0 \ s' = s\,\R\ f \G\,\Q\) + \ \P\,\R\ f \G\,\Q\" + unfolding validI_def + by metis + +lemma validI_well_behaved': + "\prefix_closed f; \P\,\R'\ f \G'\,\Q\; R \ R'; G' \ G\ + \ \P\,\R\ f \G\,\Q\" + apply (subst validI_def, clarsimp) + apply (clarsimp simp add: rely_def) + apply (drule (2) validI_D) + apply (fastforce simp: rely_cond_def guar_cond_def)+ + done + +lemmas validI_well_behaved = validI_well_behaved'[unfolded le_fun_def, simplified] + +lemma prefix_closed_mapM[rule_format, wp_split]: + "(\x \ set xs. prefix_closed (f x)) \ prefix_closed (mapM f xs)" + apply (induct xs) + apply (simp add: mapM_def sequence_def) + apply (clarsimp simp: mapM_Cons) + apply (intro prefix_closed_bind allI; clarsimp) + done + +lemmas bind_promote_If = + if_distrib[where f="\f. bind f g" for g] + if_distrib[where f="\g. bind f g" for f] + +lemma bind_promote_If2: + "do x \ f; if P then g x else h x od + = (if P then bind f g else bind f h)" + by simp + +lemma exec_put_trace[unfolded K_bind_def]: + "(do put_trace xs; f od) s + = (\n \ {n. 0 < n \ n \ length xs}. {(drop n xs, Incomplete)}) + \ ((\(a, b). (a @ xs, b)) ` f s)" + apply (simp add: put_trace_eq_drop bind_def) + apply (safe; (clarsimp split: if_split_asm)?; + fastforce intro: bexI[where x=0] rev_bexI) + done + +lemma UN_If_distrib: + "(\x \ S. if P x then A x else B x) + = ((\x \ S \ {x. P x}. A x) \ (\x \ S \ {x. \ P x}. B x))" + by (fastforce split: if_split_asm) + +lemma Await_redef: + "Await P = do + s1 \ select {s. P s}; + env_steps; + s \ get; + select (if P s then {()} else {}) + od" + apply (simp add: Await_def env_steps_def bind_assoc) + apply (cases "{s. P s} = {}") + apply (simp add: select_def bind_def get_def) + apply (rule ext) + apply (simp add: exec_get select_bind_UN put_then_get_then) + apply (simp add: bind_promote_If2 if_distribR if_distrib[where f=select]) + apply (simp add: exec_put_trace cong: if_cong) + apply (simp add: put_def bind_def select_def cong: if_cong) + apply (strengthen equalityI) + apply clarsimp + apply (strengthen exI[where x="s # xs" for s xs]) + apply (strengthen exI[where x="Suc n" for n]) + apply simp + apply blast + done + +lemma eq_Me_neq_Env: + "(x = Me) = (x \ Env)" + by (cases x; simp) + +lemma validI_invariant_imp: + assumes v: "\P\,\R\ f \G\,\Q\" + and P: "\s0 s. P s0 s \ I s0" + and R: "\s0 s. I s0 \ R s0 s \ I s" + and G: "\s0 s. I s0 \ G s0 s \ I s" + shows "\P\,\R\ f \\s0 s. I s0 \ I s \ G s0 s\,\\rv s0 s. I s0 \ Q rv s0 s\" +proof - + { fix tr s0 i + assume r: "rely_cond R s0 tr" and g: "guar_cond G s0 tr" + and I: "I s0" + hence imp: "\(_, s, s') \ trace_steps (rev tr) s0. I s \ I s'" + apply (clarsimp simp: guar_cond_def rely_cond_def) + apply (drule(1) bspec)+ + apply (clarsimp simp: eq_Me_neq_Env) + apply (metis R G) + done + hence "i < length tr \ I (snd (rev tr ! 
i))" + using I + apply (induct i) + apply (clarsimp simp: neq_Nil_conv[where xs="rev tr" for tr, simplified]) + apply clarsimp + apply (drule bspec, fastforce simp add: trace_steps_nth) + apply simp + done + } + note I = this + show ?thesis + using v + apply (clarsimp simp: validI_def rely_def imp_conjL) + apply (drule spec2, drule(1) mp)+ + apply clarsimp + apply (frule P[rule_format]) + apply (clarsimp simp: guar_cond_def trace_steps_nth I last_st_tr_def + hd_append last_rev[symmetric] + last_conv_nth rev_map) + done +qed + +lemma validI_guar_post_conj_lift: + "\\P\,\R\ f \G1\,\Q1\; \P\,\R\ f \G2\,\Q2\\ + \ \P\,\R\ f \\s0 s. G1 s0 s \ G2 s0 s\,\\rv s0 s. Q1 rv s0 s \ Q2 rv s0 s\" + apply (frule validI_prefix_closed) + apply (subst validI_def, clarsimp simp: rely_def) + apply (drule(3) validI_D)+ + apply (auto simp: guar_cond_def) + done + +lemmas modify_prefix_closed[simp] = + modify_wp[THEN valid_validI_wp[OF no_trace_all(3)], THEN validI_prefix_closed] +lemmas await_prefix_closed[simp] = Await_sync_twp[THEN validI_prefix_closed] + +lemma repeat_prefix_closed[intro!]: + "prefix_closed f \ prefix_closed (repeat f)" + apply (simp add: repeat_def) + apply (rule prefix_closed_bind; clarsimp) + apply (rename_tac n) + apply (induct_tac n; simp) + apply (auto intro: prefix_closed_bind) + done + +lemma rely_cond_True[simp]: + "rely_cond \\ s0 tr = True" + by (clarsimp simp: rely_cond_def) + +lemma guar_cond_True[simp]: + "guar_cond \\ s0 tr = True" + by (clarsimp simp: guar_cond_def) + +lemma validI_valid_wp: + "\\P\,\\\\ f \G\,\\rv _ s. Q rv s\\ + \ \P s0\ f \Q\" + by (auto simp: rely_def validI_def valid_def mres_def) + +lemma validI_triv_valid_eq: + "prefix_closed f \ \P\,\\\\ f \\\\,\\rv _ s. Q rv s\ = (\s0. \\s. P s0 s\ f \Q\)" + by (fastforce simp: rely_def validI_def valid_def mres_def image_def) + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_Sat.thy b/lib/Monads/trace/Trace_Sat.thy new file mode 100644 index 0000000000..af8fcc8ce7 --- /dev/null +++ b/lib/Monads/trace/Trace_Sat.thy @@ -0,0 +1,155 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Sat + imports + Trace_Monad + WPSimp +begin + +section \Satisfiability\ + +text \ + The dual to validity: an existential instead of a universal quantifier for the post condition. + In refinement, it is often sufficient to know that there is one state that satisfies a condition.\ +definition exs_valid :: + "('a \ bool) \ ('a, 'b) tmonad \ ('b \ 'a \ bool) \ bool" + ("\_\ _ \\_\") where + "\P\ f \\Q\ \ \s. P s \ (\(rv, s') \ mres (f s). Q rv s')" + +text \The above for the exception monad\ +definition ex_exs_validE :: + "('a \ bool) \ ('a, 'e + 'b) tmonad \ ('b \ 'a \ bool) \ ('e \ 'a \ bool) \ bool" + ("\_\ _ \\_\, \_\") where + "\P\ f \\Q\, \E\ \ \P\ f \\\rv. case rv of Inl e \ E e | Inr v \ Q v\" + +text \ + Seen as predicate transformer, @{const exs_valid} is the so-called conjugate wp in the literature, + i.e. with + @{term "wp f Q \ \s. mres (f s) \ {(rv,s). Q rv s}"} and + @{term "cwp f Q \ not (wp f (not Q))"}, we get + @{prop "valid P f Q = (\s. P s \ wp f Q s)"} and + @{prop "exs_valid P f Q = (\s. P s \ cwp f Q s)"}. + + See also "Predicate Calculus and Program Semantics" by E. W. Dijkstra and C. S. Scholten.\ +experiment +begin + +definition + "wp f Q \ \s. mres (f s) \ {(rv,s). Q rv s}" + +definition + "cwp f Q \ not (wp f (not Q))" + +lemma + "exs_valid P f Q = (\s. 
P s \ cwp f Q s)" + unfolding exs_valid_def cwp_def wp_def by auto + +end + + +subsection \Set up for @{method wp}\ + +definition exs_postcondition where + "exs_postcondition P f \ \a b. \(rv, s) \ f a b. P rv s" + +lemma exs_valid_is_triple[wp_trip]: + "exs_valid P f Q = triple_judgement P f (exs_postcondition Q (\s f. mres (f s)))" + by (simp add: triple_judgement_def exs_postcondition_def exs_valid_def) + + +subsection \Rules\ + +lemma exs_hoare_post_imp: + "\\r s. Q r s \ R r s; \P\ a \\Q\\ \ \P\ a \\R\" + unfolding exs_valid_def by blast + +lemma use_exs_valid: + "\ \P\ f \\Q\; P s \ \ \(r, s') \ mres (f s). Q r s'" + by (simp add: exs_valid_def) + +lemma exs_valid_weaken_pre[wp_pre]: + "\ \P'\ f \\Q\; \s. P s \ P' s \ \ \P\ f \\Q\" + by (clarsimp simp: exs_valid_def) + +lemma exs_valid_chain: + "\ \P\ f \\Q\; \s. R s \ P s; \r s. Q r s \ S r s \ \ \R\ f \\S\" + by (fastforce simp: exs_valid_def Bex_def) + +lemma exs_valid_assume_pre: + "\ \s. P s \ \P\ f \\Q\ \ \ \P\ f \\Q\" + by (fastforce simp: exs_valid_def) + +lemma exs_valid_bind[wp_split]: + "\ \rv. \B rv\ g rv \\C\; \A\ f \\B\ \ \ \A\ f >>= (\rv. g rv) \\C\" + apply atomize + apply (clarsimp simp: exs_valid_def bind_def' mres_def) + apply (drule spec, drule(1) mp, clarsimp) + apply (drule spec2, drule(1) mp, clarsimp) + apply (simp add: image_def bex_Un) + apply (strengthen subst[where P="\x. x \ f s" for s, mk_strg I _ E], simp) + apply (fastforce elim: rev_bexI) + done + +lemma exs_valid_return[wp]: + "\Q v\ return v \\Q\" + by (clarsimp simp: exs_valid_def return_def mres_def) + +lemma exs_valid_select[wp]: + "\\s. \r \ S. Q r s\ select S \\Q\" + by (auto simp: exs_valid_def select_def mres_def image_def) + +lemma exs_valid_alt[wp]: + "\ \P\ f \\Q\; \P'\ g \\Q\ \ \ \P or P'\ f \ g \\Q\" + by (fastforce simp: exs_valid_def alternative_def mres_def image_def) + +lemma exs_valid_get[wp]: + "\\s. Q s s\ get \\ Q \" + by (clarsimp simp: exs_valid_def get_def mres_def) + +lemma exs_valid_gets[wp]: + "\\s. Q (f s) s\ gets f \\Q\" + by (clarsimp simp: gets_def) wp + +lemma exs_valid_put[wp]: + "\Q v\ put v \\Q\" + by (clarsimp simp: put_def exs_valid_def mres_def) + +lemma exs_valid_fail[wp]: + "\\s. False\ fail \\Q\" + unfolding fail_def exs_valid_def + by simp + +lemma exs_valid_assert[wp]: + "\\s. Q () s \ G\ assert G \\Q\" + unfolding assert_def + by (wpsimp | rule conjI)+ + +lemma exs_valid_state_assert[wp]: + "\\s. Q () s \ G s\ state_assert G \\Q\" + unfolding state_assert_def + by wp + +lemmas exs_valid_guard = exs_valid_state_assert + +lemma exs_valid_condition[wp]: + "\ \P\ l \\Q\; \P'\ r \\Q\ \ \ \\s. (C s \ P s) \ (\ C s \ P' s)\ condition C l r \\Q\" + by (clarsimp simp: condition_def exs_valid_def split: sum.splits) + +lemma gets_exs_valid: + "\(=) s\ gets f \\\r. (=) s\" + by (rule exs_valid_gets) + +lemma exs_valid_assert_opt[wp]: + "\\s. \x. G = Some x \ Q x s\ assert_opt G \\Q\" + by (clarsimp simp: assert_opt_def exs_valid_def return_def mres_def) + +lemma gets_the_exs_valid[wp]: + "\\s. \x. 
h s = Some x \ Q x s\ gets_the h \\Q\" + by (wpsimp simp: gets_the_def) + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_Strengthen_Setup.thy b/lib/Monads/trace/Trace_Strengthen_Setup.thy new file mode 100644 index 0000000000..4980330968 --- /dev/null +++ b/lib/Monads/trace/Trace_Strengthen_Setup.thy @@ -0,0 +1,82 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_Strengthen_Setup + imports + Strengthen + Trace_No_Fail + Trace_RG +begin + +section \Strengthen setup.\ + +context strengthen_implementation begin + +lemma strengthen_hoare[strg]: + "\\r s. st F (\) (Q r s) (R r s)\ + \ st F (\) (\P\ f \Q\) (\P\ f \R\)" + by (cases F, auto elim: hoare_strengthen_post) + +lemma strengthen_validE_R_cong[strg]: + "\\r s. st F (\) (Q r s) (R r s)\ + \ st F (\) (\P\ f \Q\, -) (\P\ f \R\, -)" + by (cases F, auto intro: hoare_post_imp_R) + +lemma strengthen_validE_cong[strg]: + "\\r s. st F (\) (Q r s) (R r s); \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f \Q\, \S\) (\P\ f \R\, \T\)" + by (cases F, auto elim: hoare_post_impErr) + +lemma strengthen_validE_E_cong[strg]: + "\\r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f -, \S\) (\P\ f -, \T\)" + by (cases F, auto elim: hoare_post_impErr simp: validE_E_def) + +lemma strengthen_validI[strg]: + "\\r s0 s. st F (\) (Q r s0 s) (Q' r s0 s)\ + \ st F (\) (\P\,\G\ f \R\,\Q\) (\P\,\G\ f \R\,\Q'\)" + by (cases F, auto elim: validI_strengthen_post) + +lemma wpfix_strengthen_hoare: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (Q' r s)\ + \ st F (\) (\P\ f \Q\) (\P'\ f \Q'\)" + by (cases F, auto elim: hoare_chain) + +lemma wpfix_strengthen_validE_R_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (Q' r s)\ + \ st F (\) (\P\ f \Q\, -) (\P'\ f \Q'\, -)" + by (cases F, auto elim: hoare_chainE simp: validE_R_def) + +lemma wpfix_strengthen_validE_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (Q r s) (R r s); + \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f \Q\, \S\) (\P'\ f \R\, \T\)" + by (cases F, auto elim: hoare_chainE) + +lemma wpfix_strengthen_validE_E_cong: + "\\s. st (\ F) (\) (P s) (P' s); \r s. st F (\) (S r s) (T r s)\ + \ st F (\) (\P\ f -, \S\) (\P'\ f -, \T\)" + by (cases F, auto elim: hoare_chainE simp: validE_E_def) + +lemma wpfix_no_fail_cong: + "\\s. st (\ F) (\) (P s) (P' s)\ + \ st F (\) (no_fail P f) (no_fail P' f)" + by (cases F, auto elim: no_fail_pre) + +lemmas nondet_wpfix_strgs = + wpfix_strengthen_validE_R_cong + wpfix_strengthen_validE_E_cong + wpfix_strengthen_validE_cong + wpfix_strengthen_hoare + wpfix_no_fail_cong + +end + +lemmas nondet_wpfix_strgs[wp_fix_strgs] + = strengthen_implementation.nondet_wpfix_strgs + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_Total.thy b/lib/Monads/trace/Trace_Total.thy new file mode 100644 index 0000000000..ea86da1600 --- /dev/null +++ b/lib/Monads/trace/Trace_Total.thy @@ -0,0 +1,353 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +(* Total correctness Hoare logic for the Trace_Monad (= valid + no_fail) *) + +theory Trace_Total + imports Trace_No_Fail +begin + +section \Total correctness for Trace_Monad and Trace_Monad with exceptions\ + +subsection Definitions + +text \ + It is often desired to prove non-failure and a Hoare triple simultaneously, as the reasoning + is often similar. 
The following definitions allow such reasoning to take place.\ + +definition validNF :: + "('s \ bool) \ ('s,'a) tmonad \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\!") where + "\P\ f \Q\! \ \P\ f \Q\ \ no_fail P f" + +lemma validNF_alt_def: + "\P\ f \Q\! = (\s. P s \ ((\(r', s') \ mres (f s). Q r' s') \ Failed \ snd ` (f s)))" + by (auto simp: validNF_def valid_def no_fail_def) + +definition validE_NF :: + "('s \ bool) \ ('s, 'a + 'b) tmonad \ ('b \ 's \ bool) \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /(\_\,/ \_\!)") where + "\P\ f \Q\, \E\! \ \P\ f \Q\, \E\ \ no_fail P f" + +lemma validE_NF_alt_def: + "\P\ f \Q\, \E\! = \P\ f \\v s. case v of Inl e \ E e s | Inr r \ Q r s\!" + by (clarsimp simp: validE_NF_def validE_def validNF_def) + + +subsection \@{method wpc} setup\ + +lemma wpc_helper_validNF: + "\Q\ g \S\! \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ g \S\!" + unfolding wpc_helper_def + by clarsimp (metis hoare_vcg_precond_imp no_fail_pre validNF_def) + +wpc_setup "\m. \P\ m \Q\!" wpc_helper_validNF + + +subsection \Basic @{const validNF} theorems\ + +lemma validNF_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \!) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ f \ Q' \!" + by (fastforce simp: valid_def validNF_def no_fail_def mres_def image_def + split: prod.splits) + +lemma validE_NF_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \!) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') + \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \!" + by (fastforce simp: validE_NF_def validE_def valid_def no_fail_def mres_def image_def + split: prod.splits sum.splits) + +lemma validNF_conjD1: + "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q \!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_conjD2: + "\ P \ f \ \rv s. Q rv s \ Q' rv s \! \ \ P \ f \ Q' \!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF[intro?]: (* FIXME lib: should be validNFI *) + "\ \ P \ f \ Q \; no_fail P f \ \ \ P \ f \ Q \!" + by (clarsimp simp: validNF_def) + +lemma validNFE: + "\ \ P \ f \ Q \!; \ \ P \ f \ Q \; no_fail P f \ \ R \ \ R" + by (clarsimp simp: validNF_def) + +lemma validNF_valid: + "\ \ P \ f \ Q \! \ \ \ P \ f \ Q \" + by (erule validNFE) + +lemma validNF_no_fail: + "\ \ P \ f \ Q \! \ \ no_fail P f" + by (erule validNFE) + +lemma validNF_not_failed: + "\ \ P \ f \ Q \!; P s \ \ Failed \ snd ` (f s)" + by (clarsimp simp: validNF_def no_fail_def) + +lemma use_validNF: + "\ (r', s') \ mres (f s); \ P \ f \ Q \!; P s \ \ Q r' s'" + by (fastforce simp: validNF_def valid_def) + + +subsection \@{const validNF} weakest precondition rules\ + +lemma validNF_return[wp]: + "\ P x \ return x \ P \!" + by (wp validNF)+ + +lemma validNF_get[wp]: + "\ \s. P s s \ get \ P \!" + by (wp validNF)+ + +lemma validNF_put[wp]: + "\ \s. P () x \ put x \ P \!" + by (wp validNF)+ + +lemma validNF_K_bind[wp]: + "\ P \ x \ Q \! \ \ P \ K_bind x f \ Q \!" + by simp + +lemma validNF_fail[wp]: + "\ \s. False \ fail \ Q \!" + by (clarsimp simp: validNF_def fail_def no_fail_def) + +lemma validNF_prop[wp_unsafe]: + "\ no_fail (\s. P) f \ \ \ \s. P \ f \ \rv s. P \!" + by (wp validNF)+ + +lemma validNF_post_conj[intro!]: + "\ \ P \ a \ Q \!; \ P \ a \ R \! \ \ \ P \ a \ Q and R \!" + by (auto simp: validNF_def) + +lemma validNF_pre_disj[intro!]: + "\ \ P \ a \ R \!; \ Q \ a \ R \! \ \ \ P or Q \ a \ R \!" 
+ by (rule validNF) (auto dest: validNF_valid validNF_no_fail intro: no_fail_or) + +text \ + Set up combination rules for @{method wp}, which also requires a @{text wp_trip} rule for + @{const validNF}.\ +definition validNF_property :: "('a \ 's \ bool) \ 's \ ('s,'a) tmonad \ bool" where + "validNF_property Q s b \ Failed \ snd ` (b s) \ (\(r', s') \ mres (b s). Q r' s')" + +lemma validNF_is_triple[wp_trip]: + "validNF P f Q = triple_judgement P f (validNF_property Q)" + by (auto simp: validNF_def triple_judgement_def validNF_property_def no_fail_def valid_def) + +lemma validNF_weaken_pre[wp_pre]: + "\\Q\ a \R\!; \s. P s \ Q s\ \ \P\ a \R\!" + by (metis hoare_pre_imp no_fail_pre validNF_def) + +lemma validNF_post_comb_imp_conj: + "\ \P'\ f \Q\!; \P\ f \Q'\!; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def) + +lemma validNF_post_comb_conj_L: + "\ \P'\ f \Q\!; \P\ f \Q'\ \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_post_comb_conj_R: + "\ \P'\ f \Q\; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_post_comb_conj: + "\ \P'\ f \Q\!; \P\ f \Q'\! \ \ \\s. P s \ P' s \ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce simp: validNF_def valid_def no_fail_def) + +lemma validNF_if_split[wp_split]: + "\P \ \Q\ f \S\!; \ P \ \R\ g \S\!\ \ + \\s. (P \ Q s) \ (\ P \ R s)\ if P then f else g \S\!" + by simp + +lemma validNF_vcg_conj_lift: + "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" + by (fastforce intro!: validNF_post_conj[unfolded pred_conj_def] intro: validNF_weaken_pre) + +lemma validNF_vcg_disj_lift: + "\ \P\ f \Q\!; \P'\ f \Q'\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\!" + by (auto simp: validNF_def no_fail_def intro!: hoare_vcg_disj_lift) + +lemma validNF_vcg_all_lift[wp]: + "\ \x. \P x\ f \Q x\! \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\!" + by (auto simp: validNF_def no_fail_def intro!: hoare_vcg_all_lift) + +lemma validNF_bind[wp_split]: + "\ \x. \B x\ g x \C\!; \A\ f \B\! \ \ \A\ do x \ f; g x od \C\!" + unfolding validNF_def + by (auto intro: hoare_seq_ext no_fail_bind[where P=Q and Q=Q for Q, simplified]) + +lemmas validNF_seq_ext = validNF_bind + + +subsection "validNF compound rules" + +lemma validNF_state_assert[wp]: + "\ \s. P () s \ G s \ state_assert G \ P \!" + by (rule validNF; wpsimp) + +lemma validNF_modify[wp]: + "\ \s. P () (f s) \ modify f \ P \!" + by (rule validNF; wpsimp) + +lemma validNF_gets[wp]: + "\\s. P (f s) s\ gets f \P\!" + by (rule validNF; wpsimp) + +lemma validNF_condition[wp]: + "\ \ Q \ A \P\!; \ R \ B \P\!\ \ \\s. if C s then Q s else R s\ condition C A B \P\!" + by (erule validNFE)+ + (rule validNF; wpsimp wp: no_fail_condition) + +lemma validNF_assert[wp]: + "\ (\s. P) and (R ()) \ assert P \ R \!" + by (rule validNF; wpsimp) + +lemma validNF_false_pre: + "\ \_. False \ P \ Q \!" + by (rule validNF; wpsimp) + +lemma validNF_chain: + "\\P'\ a \R'\!; \s. P s \ P' s; \r s. R' r s \ R r s\ \ \P\ a \R\!" + by (fastforce simp: validNF_def valid_def no_fail_def Ball_def) + +lemma validNF_case_prod[wp]: + "\\x y. \P x y\ B x y \Q\!\ \ \case v of (x, y) \ P x y\ case v of (x, y) \ B x y \Q\!" + by (metis prod.exhaust split_conv) + +lemma validE_NF_case_prod[wp]: + "\ \a b. \P a b\ f a b \Q\, \E\! \ \ + \case x of (a, b) \ P a b\ case x of (a, b) \ f a b \Q\, \E\!" 
+ unfolding validE_NF_alt_def + by (erule validNF_case_prod) + +lemma no_fail_is_validNF_True: + "no_fail P s = (\ P \ s \ \_ _. True \!)" + by (clarsimp simp: no_fail_def validNF_def valid_def) + + +subsection \@{const validNF} reasoning in the exception monad\ + +lemma validE_NF[intro?]: + "\ \ P \ f \ Q \,\ E \; no_fail P f \ \ \ P \ f \ Q \,\ E \!" + by (clarsimp simp: validE_NF_def) + +lemma validE_NFE: + "\ \ P \ f \ Q \,\ E \!; \ \ P \ f \ Q \,\ E \; no_fail P f \ \ R \ \ R" + by (clarsimp simp: validE_NF_def) + +lemma validE_NF_valid: + "\ \ P \ f \ Q \,\ E \! \ \ \ P \ f \ Q \,\ E \" + by (rule validE_NFE) + +lemma validE_NF_no_fail: + "\ \ P \ f \ Q \,\ E \! \ \ no_fail P f" + by (rule validE_NFE) + +lemma validE_NF_weaken_pre[wp_pre]: + "\\Q\ a \R\,\E\!; \s. P s \ Q s\ \ \P\ a \R\,\E\!" + by (simp add: validE_NF_alt_def validNF_weaken_pre) + +lemma validE_NF_post_comb_conj_L: + "\ \P\ f \Q\, \E\!; \P'\ f \Q'\, \\_ _. True\ \ \ + \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def + by (fastforce simp: validE_def validNF_def valid_def no_fail_def split: sum.splits) + +lemma validE_NF_post_comb_conj_R: + "\ \P\ f \Q\, \\_ _. True\; \P'\ f \Q'\, \E\! \ \ + \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def validE_def validNF_def valid_def no_fail_def + by (force split: sum.splits) + +lemma validE_NF_post_comb_conj: + "\ \P\ f \Q\, \E\!; \P'\ f \Q'\, \E\! \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\!" + unfolding validE_NF_alt_def validE_def validNF_def valid_def no_fail_def + by (force split: sum.splits) + +lemma validE_NF_chain: + "\ \P'\ a \R'\,\E'\!; \s. P s \ P' s; \r' s'. R' r' s' \ R r' s'; + \r'' s''. E' r'' s'' \ E r'' s''\ \ + \\s. P s \ a \\r' s'. R r' s'\,\\r'' s''. E r'' s''\!" + by (fastforce simp: validE_NF_def validE_def2 no_fail_def Ball_def split: sum.splits) + +lemma validE_NF_bind_wp[wp]: + "\\x. \B x\ g x \C\, \E\!; \A\ f \B\, \E\!\ \ \A\ f >>=E (\x. g x) \C\, \E\!" + by (blast intro: validE_NF hoare_vcg_seqE no_fail_pre no_fail_bindE validE_validE_R validE_weaken + elim!: validE_NFE) + +lemma validNF_catch[wp]: + "\\x. \E x\ handler x \Q\!; \P\ f \Q\, \E\!\ \ \P\ f (\x. handler x) \Q\!" + unfolding validE_NF_alt_def catch_def lift_def throwError_def + by (clarsimp simp: validNF_return split: sum.splits elim!: validNF_bind[rotated]) + +lemma validNF_throwError[wp]: + "\E e\ throwError e \P\, \E\!" + by (unfold validE_NF_alt_def throwError_def o_def) wpsimp + +lemma validNF_returnOk[wp]: + "\P e\ returnOk e \P\, \E\!" + by (clarsimp simp: validE_NF_alt_def returnOk_def) wpsimp + +lemma validNF_whenE[wp]: + "(P \ \Q\ f \R\, \E\!) \ \if P then Q else R ()\ whenE P f \R\, \E\!" + unfolding whenE_def by wpsimp + +lemma validNF_nobindE[wp]: + "\ \B\ g \C\,\E\!; \A\ f \\r s. B s\,\E\! \ \ \A\ doE f; g odE \C\,\E\!" + by wpsimp + +text \ + Set up triple rules for @{term validE_NF} so that we can use @{method wp} combinator rules.\ +definition validE_NF_property :: + "('a \ 's \ bool) \ ('c \ 's \ bool) \ 's \ ('s, 'c+'a) tmonad \ bool" + where + "validE_NF_property Q E s b \ + Failed \ snd ` (b s) \ (\(r', s') \ mres (b s). case r' of Inl x \ E x s' | Inr x \ Q x s')" + +lemma validE_NF_is_triple[wp_trip]: + "validE_NF P f Q E = triple_judgement P f (validE_NF_property Q E)" + by (fastforce simp: validE_NF_def validE_def2 no_fail_def triple_judgement_def + validE_NF_property_def + split: sum.splits) + +lemma validNF_cong: + "\ \s. P s = P' s; \s. P s \ m s = m' s; + \r' s' s. 
\ P s; (r', s') \ mres (m s) \ \ Q r' s' = Q' r' s' \ \ + (\P\ m \Q\!) = (\P'\ m' \Q'\!)" + by (fastforce simp: validNF_alt_def) + +lemma validE_NF_liftE[wp]: + "\P\ f \Q\! \ \P\ liftE f \Q\,\E\!" + by (wpsimp simp: validE_NF_alt_def liftE_def) + +lemma validE_NF_handleE'[wp]: + "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ + \P\ f (\x. handler x) \Q\,\E\!" + unfolding validE_NF_alt_def handleE'_def + apply (erule validNF_bind[rotated]) + apply (clarsimp split: sum.splits) + apply wpsimp + done + +lemma validE_NF_handleE[wp]: + "\ \x. \F x\ handler x \Q\,\E\!; \P\ f \Q\,\F\! \ \ + \P\ f handler \Q\,\E\!" + unfolding handleE_def + by (metis validE_NF_handleE') + +lemma validE_NF_condition[wp]: + "\ \ Q \ A \P\,\ E \!; \ R \ B \P\,\ E \!\ \ + \\s. if C s then Q s else R s\ condition C A B \P\,\ E \!" + by (erule validE_NFE)+ (wpsimp wp: no_fail_condition validE_NF) + +lemma hoare_assume_preNF: + "(\s. P s \ \P\ f \Q\!) \ \P\ f \Q\!" + by (simp add: validNF_alt_def) + +end \ No newline at end of file diff --git a/lib/Monads/trace/Trace_VCG.thy b/lib/Monads/trace/Trace_VCG.thy new file mode 100644 index 0000000000..f3fca936c0 --- /dev/null +++ b/lib/Monads/trace/Trace_VCG.thy @@ -0,0 +1,1458 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory Trace_VCG + imports + Trace_Lemmas + WPSimp +begin + +section \Hoare Logic\ + +subsection \Validity\ + +text \ + This section defines a Hoare logic for partial correctness for + the interference trace monad as well as the exception monad. + The logic talks only about the behaviour part of the monad and ignores + failures and the trace. + + The logic is defined semantically. Rules work directly on the + validity predicate. + + In the interference trace monad, validity is a triple of precondition, + monad, and postcondition. The precondition is a function from state to + bool (a state predicate), the postcondition is a function from return value + to state to bool. A triple is valid if for all states that satisfy the + precondition, all result values and result states that are returned by + the monad satisfy the postcondition. Note that if the result of the + computation is the empty set then the triple is trivially valid. This means + @{term "assert P"} does not require us to prove that @{term P} holds, but + rather allows us to assume @{term P}! Proving non-failure is done via a + separate predicate and calculus (see Trace_No_Fail).\ +definition valid :: + "('s \ bool) \ ('s,'a) tmonad \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /\_\") where + "\P\ f \Q\ \ \s. P s \ (\(r,s') \ mres (f s). Q r s')" + +text \ + We often reason about invariant predicates. The following provides shorthand syntax + that avoids repeating potentially long predicates.\ +abbreviation (input) invariant :: "('s,'a) tmonad \ ('s \ bool) \ bool" ("_ \_\" [59,0] 60) where + "invariant f P \ \P\ f \\_. P\" + +text \ + Validity for the exception monad is similar and build on the standard + validity above. Instead of one postcondition, we have two: one for + normal and one for exceptional results.\ +definition validE :: + "('s \ bool) \ ('s, 'a + 'b) tmonad \ ('b \ 's \ bool) \ ('a \ 's \ bool) \ bool" + ("\_\/ _ /(\_\,/ \_\)" ) where + "\P\ f \Q\,\E\ \ \P\ f \ \v s. case v of Inr r \ Q r s | Inl e \ E e s \" + +lemma validE_def2: + "\P\ f \Q\,\E\ \ \s. P s \ (\(r,s') \ mres (f s). 
case r of Inr b \ Q b s' | Inl a \ E a s')" + by (unfold valid_def validE_def) + +text \ + The following two instantiations are convenient to separate reasoning for exceptional and + normal case.\ +(* Narrator: they are in fact not convenient, and are now considered a mistake that should have + been an abbreviation instead. *) +definition validE_R :: (* FIXME lib: this should be an abbreviation *) + "('s \ bool) \ ('s, 'e + 'a) tmonad \ ('a \ 's \ bool) \ bool" ("\_\/ _ /\_\, -") where + "\P\ f \Q\,- \ validE P f Q (\x y. True)" + +definition validE_E :: (* FIXME lib: this should be an abbreviation *) + "('s \ bool) \ ('s, 'e + 'a) tmonad \ ('e \ 's \ bool) \ bool" ("\_\/ _ /-, \_\") where + "\P\ f -,\Q\ \ validE P f (\x y. True) Q" + +(* These lemmas are useful to apply to rules to convert valid rules into a format suitable for wp. *) +lemma valid_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') \ f \ Q' \" + by (auto simp add: valid_def split: prod.splits) + +lemma validE_make_schematic_post: + "(\s0. \ \s. P s0 s \ f \ \rv s. Q s0 rv s \, \ \rv s. E s0 rv s \) \ + \ \s. \s0. P s0 s \ (\rv s'. Q s0 rv s' \ Q' rv s') + \ (\rv s'. E s0 rv s' \ E' rv s') \ f \ Q' \, \ E' \" + by (auto simp add: validE_def valid_def split: prod.splits sum.splits) + + +section \Lemmas\ + +lemma hoare_pre_imp: + "\ \s. P s \ Q s; \Q\ a \R\ \ \ \P\ a \R\" + by (fastforce simp: valid_def) + +lemmas hoare_weaken_pre = hoare_pre_imp[rotated] + +lemma hoare_vcg_precond_impE: (* FIXME lib: eliminate in favour of hoare_weaken_preE *) + "\ \Q\ f \R\,\E\; \s. P s \ Q s \ \ \P\ f \R\,\E\" + by (fastforce simp add:validE_def2) + +lemmas hoare_weaken_preE = hoare_vcg_precond_impE + +lemma hoare_vcg_precond_impE_R: (* FIXME lib: rename to hoare_weaken_preE_R *) + "\ \P'\ f \Q\,-; \s. P s \ P' s \ \ \P\ f \Q\,-" + unfolding validE_R_def + by (rule hoare_vcg_precond_impE) + +lemma hoare_weaken_preE_E: + "\ \P'\ f -,\Q\; \s. P s \ P' s \ \ \P\ f -,\Q\" + by (fastforce simp add: validE_E_def validE_def valid_def) + +lemmas hoare_pre [wp_pre] = + hoare_weaken_pre + hoare_weaken_preE + hoare_vcg_precond_impE_R + hoare_weaken_preE_E + + +subsection \Setting up the precondition case splitter.\ + +lemma wpc_helper_valid: + "\Q\ g \S\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ g \S\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validE: + "\Q\ f \R\,\E\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f \R\,\E\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validE_R: + "\Q\ f \R\,- \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f \R\,-" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + +lemma wpc_helper_validR_R: + "\Q\ f -,\E\ \ wpc_helper (P, P', P'') (Q, Q', Q'') \P\ f -,\E\" + by (clarsimp simp: wpc_helper_def elim!: hoare_pre) + + +wpc_setup "\m. \P\ m \Q\" wpc_helper_valid +wpc_setup "\m. \P\ m \Q\,\E\" wpc_helper_validE +wpc_setup "\m. \P\ m \Q\,-" wpc_helper_validE_R +wpc_setup "\m. \P\ m -,\E\" wpc_helper_validR_R + + +subsection "Hoare Logic Rules" + +lemma bind_wp[wp_split]: + "\ \r. \Q' r\ g r \Q\; \P\f \Q'\ \ \ \P\ f >>= (\rv. g rv) \Q\" + by (fastforce simp: valid_def bind_def' mres_def intro: image_eqI[rotated]) + +lemma seq': + "\ \A\ f \B\; \x. P x \ \C\ g x \D\; \x s. 
B x s \ P x \ C s \ \ + \A\ do x \ f; g x od \D\" + apply (erule bind_wp[rotated]) + apply (clarsimp simp: valid_def) + apply (fastforce elim: rev_bexI image_eqI[rotated]) + done + +lemma seq: + assumes f_valid: "\A\ f \B\" + assumes g_valid: "\x. P x \ \C\ g x \D\" + assumes bind: "\x s. B x s \ P x \ C s" + shows "\A\ do x \ f; g x od \D\" + apply (insert f_valid g_valid bind) + apply (blast intro: seq') + done + +lemma seq_ext': + "\ \A\ f \B\; \x. \B x\ g x \C\ \ \ + \A\ do x \ f; g x od \C\" + by (metis bind_wp) + +lemma seq_ext: + "\ \A\ f \B\; \x. \B x\ g x \C\ \ \ \A\ do x \ f; g x od \C\" + by (rule bind_wp) + +lemma seqE': + "\ \A\ f \B\,\E\; \x. \B x\ g x \C\,\E\ \ \ + \A\ doE x \ f; g x odE \C\,\E\" + apply (simp add: bindE_def validE_def) + apply (erule seq_ext') + apply (auto simp: lift_def valid_def throwError_def return_def mres_def + split: sum.splits) + done + +lemma seqE: + assumes f_valid: "\A\ f \B\,\E\" + assumes g_valid: "\x. \B x\ g x \C\,\E\" + shows "\A\ doE x \ f; g x odE \C\,\E\" + apply (insert f_valid g_valid) + apply (blast intro: seqE') + done + +lemma hoare_TrueI: + "\P\ f \\_. \\" + by (simp add: valid_def) + +lemma hoareE_TrueI: + "\P\ f \\_. \\, \\r. \\" + by (simp add: validE_def valid_def) + +lemma hoare_True_E_R[simp]: + "\P\ f \\_ s. True\, -" + by (auto simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_post_conj[intro]: + "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \Q and R\" + by (fastforce simp: valid_def) + +lemma hoare_pre_disj[intro]: + "\ \P\ f \R\; \Q\ f \R\ \ \ \P or Q\ f \R\" + by (simp add:valid_def pred_disj_def) + +lemma hoare_conj: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \P and P'\ f \Q and Q'\" + unfolding valid_def by auto + +lemmas hoare_post_taut = hoare_TrueI (* FIXME lib: eliminate *) + +lemmas wp_post_taut = hoare_TrueI[where P=\] +lemmas wp_post_tautE = hoareE_TrueI[where P=\] + +lemma hoare_pre_cont[simp]: + "\\\ f \P\" + by (simp add:valid_def) + +lemma hoare_return_drop_var[iff]: + "\Q\ return x \\r. Q\" + by (simp add: valid_def return_def mres_def) + +lemma hoare_gets[intro]: + "\ \s. P s \ Q (f s) s \ \ \P\ gets f \Q\" + by (simp add:valid_def gets_def get_def bind_def return_def mres_def) + +lemma hoare_modifyE_var: + "\ \s. P s \ Q (f s) \ \ \P\ modify f \\_ s. Q s\" + by(simp add: valid_def modify_def put_def get_def bind_def mres_def) + +lemma hoare_if: + "\ P \ \Q\ a \R\; \ P \ \Q\ b \R\ \ \ \Q\ if P then a else b \R\" + by (simp add: valid_def) + +lemma hoare_pre_subst: + "\ A = B; \A\ a \C\ \ \ \B\ a \C\" + by (erule subst) + +lemma hoare_post_subst: + "\ B = C; \A\ a \B\ \ \ \A\ a \C\" + by (erule subst) + +lemma hoare_post_imp: + "\ \rv s. Q rv s \ R rv s; \P\ a \Q\ \ \ \P\ a \R\" + by(fastforce simp:valid_def split_def) + +lemma hoare_post_impErr': (* FIXME lib: eliminate *) + "\ \P\ a \Q\,\E\; \rv s. Q rv s \ R rv s; \e s. E e s \ F e s \ \ \P\ a \R\,\F\" + unfolding validE_def valid_def + by (fastforce split: sum.splits) + +lemma hoare_post_impErr: + "\ \P\ a \Q\,\E\; \rv s. Q rv s \ R rv s; \e s. E e s \ F e s \ \ \P\ a \R\,\F\" + by (blast intro: hoare_post_impErr') + +lemma hoare_validE_cases: + "\ \P\ f \Q\, \\_ _. True\; \P\ f \\_ _. True\, \R\ \ \ \P\ f \Q\, \R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. R\, \\_. R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. R\, \\_. 
\\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2E: + "\\P\ a \\_. Q\; \s. Q s \ R s\ \ \P\ a \\_. \\, \\_. R\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_dc2_actual: + "\P\ a \\_. R\ \ \P\ a \\_. R\, \\_. \\" + by (rule hoare_post_imp_dc2) + +lemma hoare_post_imp_dc2E_actual: + "\P\ a \\_. R\ \ \P\ a \\_. \\, \\_. R\" + by (rule hoare_post_imp_dc2E) + +lemmas hoare_post_impE = hoare_post_imp (* FIXME lib: eliminate; probably should be on validE *) + +lemma hoare_conjD1: + "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. Q rv\" + unfolding valid_def by auto + +lemma hoare_conjD2: + "\P\ f \\rv. Q rv and R rv\ \ \P\ f \\rv. R rv\" + unfolding valid_def by auto + +lemma hoare_post_disjI1: + "\P\ f \\rv. Q rv\ \ \P\ f \\rv. Q rv or R rv\" + unfolding valid_def by auto + +lemma hoare_post_disjI2: + "\P\ f \\rv. R rv\ \ \P\ f \\rv. Q rv or R rv\" + unfolding valid_def by auto + +lemmas hoare_strengthen_post = hoare_post_imp[rotated] + +lemma use_valid: + "\(r, s') \ mres (f s); \P\ f \Q\; P s \ \ Q r s'" + unfolding valid_def by blast + +lemmas post_by_hoare = use_valid[rotated] + +lemma use_valid_inv: + assumes step: "(r, s') \ mres (f s)" + assumes pres: "\N. \\s. N (P s) \ E s\ f \\rv s. N (P s)\" + shows "E s \ P s = P s'" + using use_valid[where f=f, OF step pres[where N="\p. p = P s"]] by simp + +lemma use_validE_norm: + "\ (Inr r', s') \ mres (B s); \P\ B \Q\,\ E \; P s \ \ Q r' s'" + unfolding validE_def valid_def by force + +lemma use_validE_except: + "\ (Inl r', s') \ mres (B s); \P\ B \Q\,\ E \; P s \ \ E r' s'" + unfolding validE_def valid_def by force + +lemma in_inv_by_hoareD: + "\ \P. f \P\; (x,s') \ mres (f s) \ \ s' = s" + by (auto simp add: valid_def) blast + + +subsection \Misc\ + +lemma hoare_return_simp: + "\P\ return x \Q\ = (\s. P s \ Q x s)" + by (simp add: valid_def return_def mres_def) + +lemma hoare_gen_asm: + "(P \ \P'\ f \Q\) \ \P' and K P\ f \Q\" + by (fastforce simp add: valid_def) + +lemmas hoare_gen_asm_single = hoare_gen_asm[where P'="\", simplified pred_conj_def simp_thms] + +lemma hoare_gen_asm_lk: + "(P \ \P'\ f \Q\) \ \K P and P'\ f \Q\" + by (fastforce simp add: valid_def) + +\ \Useful for forward reasoning, when P is known. + The first version allows weakening the precondition.\ +lemma hoare_gen_asm_spec': + "\ \s. P s \ S \ R s; S \ \R\ f \Q\ \ \ \P\ f \Q\" + by (fastforce simp: valid_def) + +lemma hoare_gen_asm_spec: + "\ \s. P s \ S; S \ \P\ f \Q\ \ \ \P\ f \Q\" + by (rule hoare_gen_asm_spec'[where S=S and R=P]) simp + +lemma hoare_conjI: + "\ \P\ f \Q\; \P\ f \R\ \ \ \P\ f \\r s. Q r s \ R r s\" + unfolding valid_def by blast + +lemma hoare_disjI1: + "\ \P\ f \Q\ \ \ \P\ f \\rv s. Q rv s \ R rv s \" + unfolding valid_def by blast + +lemma hoare_disjI2: + "\ \P\ f \R\ \ \ \P\ f \\rv s. Q rv s \ R rv s \" + unfolding valid_def by blast + +lemma hoare_assume_pre: + "(\s. P s \ \P\ f \Q\) \ \P\ f \Q\" + by (auto simp: valid_def) + +lemma hoare_assume_preE: + "(\s. P s \ \P\ f \Q\,\R\) \ \P\ f \Q\,\R\" + by (auto simp: valid_def validE_def) + +lemma hoare_allI: + "(\x. \P\f\Q x\) \ \P\f\\rv s. \x. Q x rv s\" + by (simp add: valid_def) blast + +lemma validE_allI: + "(\x. \P\ f \\r s. Q x r s\,\E\) \ \P\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: valid_def validE_def split: sum.splits) + +lemma hoare_exI: + "\P\ f \Q x\ \ \P\ f \\rv s. \x. Q x rv s\" + by (simp add: valid_def) blast + +lemma hoare_impI: + "(R \ \P\ f \Q\) \ \P\ f \\rv s. 
R \ Q rv s\" + by (simp add: valid_def) blast + +lemma validE_impI: + "\\E. \P\ f \\_ _. True\,\E\; (P' \ \P\ f \Q\,\E\)\ \ + \P\ f \\rv s. P' \ Q rv s\, \E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_case_option_wp: + "\ \P\ f None \Q\; \x. \P' x\ f (Some x) \Q' x\ \ + \ \case_option P P' v\ f v \\rv. case v of None \ Q rv | Some x \ Q' x rv\" + by (cases v) auto + +lemma hoare_case_option_wp2: + "\ \P\ f None \Q\; \x. \P' x\ f (Some x) \Q' x\ \ + \ \case_option P P' v\ f v \\rv s. case v of None \ Q rv s | Some x \ Q' x rv s\" + by (cases v) auto + +(* Might be useful for forward reasoning, when P is known. *) +lemma hoare_when_cases: + "\\s. \\B; P s\ \ Q s; B \ \P\ f \\_. Q\\ \ \P\ when B f \\_. Q\" + by (cases B; simp add: valid_def return_def mres_def) + +lemma hoare_vcg_prop: + "\\s. P\ f \\rv s. P\" + by (simp add: valid_def) + +lemma validE_eq_valid: + "\P\ f \\rv. Q\,\\rv. Q\ = \P\ f \\rv. Q\" + by (simp add: validE_def) + + +subsection \@{const valid} and @{const validE}, @{const validE_R}, @{const validE_E}\ + +lemma valid_validE: + "\P\ f \\_. Q\ \ \P\ f \\_. Q\, \\_. Q\" + by (rule hoare_post_imp_dc) + +lemma valid_validE2: + "\ \P\ f \\_. Q'\; \s. Q' s \ Q s; \s. Q' s \ E s \ \ \P\ f \\_. Q\, \\_. E\" + unfolding valid_def validE_def + by (clarsimp split: sum.splits) blast + +lemma validE_valid: + "\P\ f \\_. Q\, \\_. Q\ \ \P\ f \\_. Q\" + unfolding validE_def valid_def + by fastforce + +lemma valid_validE_R: + "\P\ f \\_. Q\ \ \P\ f \\_. Q\,-" + by (simp add: validE_R_def hoare_post_impErr [OF valid_validE]) + +lemma valid_validE_E: + "\P\ f \\_. Q\ \ \P\ f -,\\_. Q\" + by (simp add: validE_E_def hoare_post_impErr [OF valid_validE]) + +lemma validE_validE_R: + "\P\ f \Q\,\\\\ \ \P\ f \Q\,-" + by (simp add: validE_R_def) + +lemma validE_R_validE: + "\P\ f \Q\,- \ \P\ f \Q\,\\\\" + by (simp add: validE_R_def) + +lemma validE_validE_E: + "\P\ f \\\\, \E\ \ \P\ f -, \E\" + by (simp add: validE_E_def) + +lemma validE_E_validE: + "\P\ f -, \E\ \ \P\ f \\\\, \E\" + by (simp add: validE_E_def) + + +subsection \@{const liftM}\ + +lemma in_image_constant: + "(x \ (\_. v) ` S) = (x = v \ S \ {})" + by (simp add: image_constant_conv) + +lemma hoare_liftM_subst: + "\P\ liftM f m \Q\ = \P\ m \Q \ f\" + apply (simp add: liftM_def bind_def' return_def split_def) + apply (simp add: valid_def Ball_def mres_def image_Un) + apply (simp add: image_image in_image_constant) + apply force + done + +lemma hoare_liftME_subst: + "\P\ liftME f m \Q\, \E\ = \P\ m \Q \ f\, \E\" + unfolding validE_def liftME_liftM hoare_liftM_subst o_def + by (fastforce intro!: arg_cong[where f="valid P m"] split: sum.splits) + +lemma liftE_validE[simp]: + "\P\ liftE f \Q\, \E\ = \P\ f \Q\" + by (simp add: liftE_liftM validE_def hoare_liftM_subst o_def) + + +subsection \Operator lifting/splitting\ + +lemma hoare_vcg_if_split: + "\ P \ \Q\ f \S\; \P \ \R\ g \S\ \ \ \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\" + by simp + +lemma hoare_vcg_if_splitE: + "\ P \ \Q\ f \S\,\E\; \P \ \R\ g \S\,\E\ \ \ + \\s. (P \ Q s) \ (\P \ R s)\ if P then f else g \S\,\E\" + by simp + +lemma hoare_vcg_split_case_option: + "\ \x. x = None \ \P x\ f x \R x\; \x y. x = Some y \ \Q x y\ g x y \R x\ \ \ + \\s. (x = None \ P x s) \ (\y. x = Some y \ Q x y s)\ + case x of None \ f x | Some y \ g x y + \R x\" + by (cases x; simp) + +lemma hoare_vcg_split_case_optionE: + "\ \x. x = None \ \P x\ f x \R x\,\E x\; \x y. x = Some y \ \Q x y\ g x y \R x\,\E x\ \ \ + \\s. (x = None \ P x s) \ (\y. 
x = Some y \ Q x y s)\ + case x of None \ f x | Some y \ g x y + \R x\, \E x\" + by (cases x; simp) + +lemma hoare_vcg_split_case_sum: + "\ \x a. x = Inl a \ \P x a\ f x a \R x\; \x b. x = Inr b \ \Q x b\ g x b \R x\ \ \ + \\s. (\a. x = Inl a \ P x a s) \ (\b. x = Inr b \ Q x b s) \ + case x of Inl a \ f x a | Inr b \ g x b + \R x\" + by (cases x; simp) + +lemmas hoare_vcg_precond_imp = hoare_weaken_pre (* FIXME lib: eliminate *) + +lemmas hoare_seq_ext = seq_ext[rotated] +lemmas hoare_vcg_seqE = seqE[rotated] + +lemma hoare_seq_ext_nobind: + "\ \B\ g \C\; \A\ f \\_. B\ \ \ \A\ do f; g od \C\" + by (erule seq_ext) (clarsimp simp: valid_def) + +lemma hoare_seq_ext_nobindE: + "\ \B\ g \C\, \E\; \A\ f \\_. B\, \E\ \ \ \A\ doE f; g odE \C\, \E\" + by (erule seqE) (clarsimp simp: validE_def) + +lemmas hoare_seq_ext_skip' = hoare_seq_ext[where B=C and C=C for C] + +lemma hoare_chain: + "\ \P\ f \Q\; \s. R s \ P s; \rv s. Q rv s \ S rv s \ \ \R\ f \S\" + by (wp_pre, rule hoare_post_imp) + +lemma validE_weaken: (* FIXME lib: eliminate in favour of hoare_chainE *) + "\ \P'\ A \Q'\,\E'\; \s. P s \ P' s; \rv s. Q' rv s \ Q rv s; \rv s. E' rv s \ E rv s \ + \ \P\ A \Q\,\E\" + by wp_pre (rule hoare_post_impErr) + +lemmas hoare_chainE = validE_weaken + +lemma hoare_vcg_conj_lift: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" + unfolding valid_def + by fastforce + +\ \A variant which works nicely with subgoals that do not contain schematics\ +lemmas hoare_vcg_conj_lift_pre_fix = hoare_vcg_conj_lift[where P=R and P'=R for R, simplified] + +lemma hoare_vcg_conj_liftE1: + "\ \P\ f \Q\,-; \P'\ f \Q'\,\E\ \ \ \P and P'\ f \\rv s. Q rv s \ Q' rv s\,\E\" + unfolding valid_def validE_R_def validE_def + by (fastforce simp: split_def split: sum.splits) + +lemma hoare_vcg_conj_liftE_weaker: + assumes "\P\ f \Q\, \E\" + assumes "\P'\ f \Q'\, \E\" + shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\, \E\" + apply (rule hoare_pre) + apply (fastforce intro: assms hoare_vcg_conj_liftE1 validE_validE_R hoare_post_impErr) + apply simp + done + +lemma hoare_vcg_disj_lift: + "\ \P\ f \Q\; \P'\ f \Q'\ \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\" + unfolding valid_def + by fastforce + +lemma hoare_vcg_const_Ball_lift: + "\ \x. x \ S \ \P x\ f \Q x\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\" + by (fastforce simp: valid_def) + +lemma hoare_vcg_const_Ball_lift_R: + "\ \x. x \ S \ \P x\ f \Q x\,- \ \ \\s. \x \ S. P x s\ f \\rv s. \x \ S. Q x rv s\,-" + unfolding validE_R_def validE_def + by (rule hoare_strengthen_post) + (fastforce intro!: hoare_vcg_const_Ball_lift split: sum.splits)+ + +lemma hoare_vcg_all_lift: + "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" + by (fastforce simp: valid_def) + +lemma hoare_vcg_all_lift_R: + "(\x. \P x\ f \Q x\, -) \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\, -" + by (rule hoare_vcg_const_Ball_lift_R[where S=UNIV, simplified]) + +lemma hoare_vcg_imp_lift: + "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" + by (simp only: imp_conv_disj) (rule hoare_vcg_disj_lift) + +lemma hoare_vcg_imp_lift': + "\ \P'\ f \\rv s. \ P rv s\; \Q'\ f \Q\ \ \ \\s. \ P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\" + by (wpsimp wp: hoare_vcg_imp_lift) + +lemma hoare_vcg_imp_liftE: + "\ \P'\ f \\rv s. \ P rv s\, \A\; \Q'\ f \Q\, \A\ \ + \ \\s. \ P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, \A\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_imp_lift_R: + "\ \P'\ f \\rv s. 
\ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" + by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_lift_R': + "\ \P'\ f \\rv s. \ P rv s\, -; \Q'\ f \Q\, - \ \ \\s. \P' s \ Q' s\ f \\rv s. P rv s \ Q rv s\, -" + by (auto simp add: valid_def validE_R_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_imp_conj_lift[wp_comb]: + "\ \P\ f \\rv s. Q rv s \ Q' rv s\; \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ \ \ + \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" + by (auto simp: valid_def) + +lemmas hoare_vcg_imp_conj_lift'[wp_unsafe] = hoare_vcg_imp_conj_lift[where Q'''="\\", simplified] + +lemma hoare_absorb_imp: + "\ P \ f \\rv s. Q rv s \ R rv s\ \ \ P \ f \\rv s. Q rv s \ R rv s\" + by (erule hoare_post_imp[rotated], blast) + +lemma hoare_weaken_imp: + "\ \rv s. Q rv s \ Q' rv s ; \P\ f \\rv s. Q' rv s \ R rv s\ \ + \ \P\ f \\rv s. Q rv s \ R rv s\" + by (clarsimp simp: valid_def split_def) + +lemma hoare_vcg_const_imp_lift: + "\ P \ \Q\ m \R\ \ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\" + by (cases P, simp_all add: hoare_vcg_prop) + +lemma hoare_vcg_const_imp_lift_E: + "(P \ \Q\ f -, \R\) \ \\s. P \ Q s\ f -, \\rv s. P \ R rv s\" + by (fastforce simp: validE_E_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_vcg_const_imp_lift_R: + "(P \ \Q\ m \R\,-) \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" + by (fastforce simp: validE_R_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_weak_lift_imp: + "\P'\ f \Q\ \ \\s. P \ P' s\ f \\rv s. P \ Q rv s\" + by (auto simp add: valid_def split_def) + +lemma hoare_weak_lift_impE: + "\Q\ m \R\,\E\ \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,\\rv s. P \ E rv s\" + by (cases P; simp add: validE_def hoare_vcg_prop) + +lemma hoare_weak_lift_imp_R: + "\Q\ m \R\,- \ \\s. P \ Q s\ m \\rv s. P \ R rv s\,-" + by (cases P, simp_all) + +lemmas hoare_vcg_weaken_imp = hoare_weaken_imp (* FIXME lib: eliminate *) + +lemma hoare_vcg_ex_lift: + "\ \x. \P x\ f \Q x\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\" + by (clarsimp simp: valid_def, blast) + +lemma hoare_vcg_ex_lift_R1: + "(\x. \P x\ f \Q\, -) \ \\s. \x. P x s\ f \Q\, -" + by (fastforce simp: valid_def validE_R_def validE_def split: sum.splits) + +lemma hoare_liftP_ext: + assumes "\P x. m \\s. P (f s x)\" + shows "m \\s. P (f s)\" + unfolding valid_def + apply clarsimp + apply (erule subst[rotated, where P=P]) + apply (rule ext) + apply (drule use_valid, rule assms, rule refl) + apply simp + done + +(* for instantiations *) +lemma hoare_triv: "\P\f\Q\ \ \P\f\Q\" . +lemma hoare_trivE: "\P\ f \Q\,\E\ \ \P\ f \Q\,\E\" . +lemma hoare_trivE_R: "\P\ f \Q\,- \ \P\ f \Q\,-" . +lemma hoare_trivR_R: "\P\ f -,\E\ \ \P\ f -,\E\" . + +lemma hoare_vcg_E_conj: + "\ \P\ f -,\E\; \P'\ f \Q'\,\E'\ \ \ \\s. P s \ P' s\ f \Q'\, \\rv s. E rv s \ E' rv s\" + unfolding validE_def validE_E_def + by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) + +lemma hoare_vcg_E_elim: + "\ \P\ f -,\E\; \P'\ f \Q\,- \ \ \\s. P s \ P' s\ f \Q\,\E\" + by (rule hoare_post_impErr[OF hoare_vcg_E_conj]) (simp add: validE_R_def)+ + +lemma hoare_vcg_R_conj: + "\ \P\ f \Q\,-; \P'\ f \Q'\,- \ \ \\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" + unfolding validE_R_def validE_def + by (rule hoare_post_imp[OF _ hoare_vcg_conj_lift]; simp split: sum.splits) + +lemma hoare_lift_Pf_E_R: + "\ \x. \P x\ m \\_. P x\, -; \P. \\s. P (f s)\ m \\_ s. P (f s)\, - \ \ + \\s. P (f s) s\ m \\_ s. 
P (f s) s\, -" + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_lift_Pf_E_E: + "\ \x. \P x\ m -, \\_. P x\; \P. \\s. P (f s)\ m -, \\_ s. P (f s)\ \ \ + \\s. P (f s) s\ m -, \\_ s. P (f s) s\" + by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_const_Ball_lift_E_E: + "(\x. x \ S \ \P x\ f -,\Q x\) \ \\s. \x \ S. P x s\ f -,\\rv s. \x \ S. Q x rv s\" + unfolding validE_E_def validE_def valid_def + by (fastforce split: sum.splits) + +lemma hoare_vcg_all_liftE_E: + "(\x. \P x\ f -, \Q x\) \ \\s. \x. P x s\ f -,\\rv s. \x. Q x rv s\" + by (rule hoare_vcg_const_Ball_lift_E_E[where S=UNIV, simplified]) + +lemma hoare_vcg_imp_liftE_E: + "\\P'\ f -, \\rv s. \ P rv s\; \Q'\ f -, \Q\\ \ + \\s. \ P' s \ Q' s\ f -, \\rv s. P rv s \ Q rv s\" + by (auto simp add: valid_def validE_E_def validE_def split_def split: sum.splits) + +lemma hoare_vcg_ex_liftE: + "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_ex_liftE_E: + "\ \x. \P x\ f -,\E x\ \ \ \\s. \x. P x s\ f -,\\rv s. \x. E x rv s\" + by (fastforce simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_post_imp_R: + "\ \P\ f \Q'\,-; \rv s. Q' rv s \ Q rv s \ \ \P\ f \Q\,-" + unfolding validE_R_def + by (erule hoare_post_impErr) + +lemma hoare_post_imp_E: + "\ \P\ f -,\Q'\; \rv s. Q' rv s \ Q rv s \ \ \P\ f -,\Q\" + unfolding validE_E_def + by (rule hoare_post_impErr) + +lemma hoare_post_comb_imp_conj: + "\ \P'\ f \Q\; \P\ f \Q'\; \s. P s \ P' s \ \ \P\ f \\rv s. Q rv s \ Q' rv s\" + by (wpsimp wp: hoare_vcg_conj_lift) + +lemma hoare_vcg_if_lift: + "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ + \R\ f \\rv s. if P then X rv s else Y rv s\" + + "\R\ f \\rv s. (P \ X rv s) \ (\P \ Y rv s)\ \ + \R\ f \\rv. if P then X rv else Y rv\" + by (auto simp: valid_def split_def) + +lemma hoare_vcg_disj_lift_R: + assumes x: "\P\ f \Q\,-" + assumes y: "\P'\ f \Q'\,-" + shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,-" + using assms + by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_all_liftE: + "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_const_Ball_liftE: + "\ \x. x \ S \ \P x\ f \Q x\,\E\; \\s. True\ f \\r s. True\, \E\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. 
Q x rv s\,\E\" + by (fastforce simp: validE_def valid_def split: sum.splits) + +lemma hoare_vcg_split_lift[wp]: + "\P\ f x y \Q\ \ \P\ case (x, y) of (a, b) \ f a b \Q\" + by simp + +named_theorems hoare_vcg_op_lift +lemmas [hoare_vcg_op_lift] = + hoare_vcg_const_imp_lift + hoare_vcg_const_imp_lift_E + hoare_vcg_const_imp_lift_R + (* leaving out hoare_vcg_conj_lift*, because that is built into wp *) + hoare_vcg_disj_lift + hoare_vcg_disj_lift_R + hoare_vcg_ex_lift + hoare_vcg_ex_liftE + hoare_vcg_ex_liftE_E + hoare_vcg_all_lift + hoare_vcg_all_liftE + hoare_vcg_all_liftE_E + hoare_vcg_all_lift_R + hoare_vcg_const_Ball_lift + hoare_vcg_const_Ball_lift_R + hoare_vcg_const_Ball_lift_E_E + hoare_vcg_split_lift + hoare_vcg_if_lift + hoare_vcg_imp_lift' + hoare_vcg_imp_liftE + hoare_vcg_imp_lift_R + hoare_vcg_imp_liftE_E + + +subsection \Weakest Precondition Rules\ + +lemma fail_wp: + "\\\ fail \Q\" + by (simp add: valid_def fail_def mres_def vimage_def) + +lemma return_wp: + "\P x\ return x \P\" + by(simp add: valid_def return_def mres_def) + +lemma get_wp: + "\\s. P s s\ get \P\" + by (simp add: valid_def get_def mres_def) + +lemma gets_wp: + "\\s. P (f s) s\ gets f \P\" + by(simp add: valid_def split_def gets_def return_def get_def bind_def mres_def) + +lemma put_wp: + "\\_. Q () s\ put s \Q\" + by (simp add: put_def valid_def mres_def) + +lemma modify_wp: + "\\s. Q () (f s)\ modify f \Q\" + unfolding modify_def + by (wp put_wp get_wp) + +lemma failE_wp: + "\\\ fail \Q\, \E\" + by (simp add: validE_def fail_wp) + +lemma returnOk_wp: + "\P x\ returnOk x \P\,\E\" + by (simp add: validE_def2 returnOk_def return_def mres_def) + +lemma throwError_wp: + "\E e\ throwError e \P\,\E\" + by(simp add: validE_def2 throwError_def return_def mres_def) + +lemma returnOKE_R_wp: + "\P x\ returnOk x \P\, -" + by (simp add: validE_R_def validE_def valid_def returnOk_def return_def mres_def) + +lemma liftE_wp: + "\P\ f \Q\ \ \P\ liftE f \Q\,\E\" + by simp + +lemma catch_wp: + "\ \x. \E x\ handler x \Q\; \P\ f \Q\,\E\ \ \ \P\ catch f handler \Q\" + apply (unfold catch_def validE_def) + apply (erule seq_ext) + apply (simp add: return_wp split: sum.splits) + done + +lemma handleE'_wp: + "\ \x. \F x\ handler x \Q\,\E\; \P\ f \Q\,\F\ \ \ \P\ f handler \Q\,\E\" + apply (unfold handleE'_def validE_def) + apply (erule seq_ext) + apply (clarsimp split: sum.splits) + apply (simp add: valid_def return_def mres_def) + done + +lemma handleE_wp: + assumes x: "\x. \F x\ handler x \Q\,\E\" + assumes y: "\P\ f \Q\,\F\" + shows "\P\ f handler \Q\,\E\" + by (simp add: handleE_def handleE'_wp [OF x y]) + +lemma liftM_wp: + "\P\ m \Q \ f\ \ \P\ liftM f m \Q\" + by (simp add: hoare_liftM_subst) + +lemma liftME_wp: + "\P\ m \Q \ f\,\E\ \ \P\ liftME f m \Q\,\E\" + by (simp add: hoare_liftME_subst) + +lemma assert_wp: + "\\s. P \ Q () s\ assert P \Q\" + unfolding assert_def + by (wpsimp wp: return_wp fail_wp | rule conjI)+ + +lemma list_cases_wp: + assumes a: "\P_A\ a \Q\" + assumes b: "\x xs. ts = x#xs \ \P_B x xs\ b x xs \Q\" + shows "\case_list P_A P_B ts\ case ts of [] \ a | x # xs \ b x xs \Q\" + by (cases ts, auto simp: a b) + +lemma hoare_vcg_handle_elseE: + "\ \P\ f \Q\,\E\; \e. \E e\ g e \R\,\F\; \x. 
\Q x\ h x \R\,\F\ \ \ + \P\ f g h \R\,\F\" + unfolding handle_elseE_def validE_def + by (wpsimp wp: seq_ext | assumption | rule conjI)+ + +lemma alternative_wp: + assumes x: "\P\ f \Q\" + assumes y: "\P'\ f' \Q\" + shows "\P and P'\ f \ f' \Q\" + unfolding valid_def alternative_def mres_def + using post_by_hoare[OF x _ in_mres] post_by_hoare[OF y _ in_mres] + by fastforce + +lemma alternativeE_wp: + assumes "\P\ f \Q\,\E\" + assumes "\P'\ f' \Q\,\E\" + shows "\P and P'\ f \ f' \Q\,\E\" + unfolding validE_def + by (wpsimp wp: assms alternative_wp | fold validE_def)+ + +lemma alternativeE_R_wp: + "\ \P\ f \Q\,-; \P'\ f' \Q\,- \ \ \P and P'\ f \ f' \Q\,-" + unfolding validE_R_def + by (rule alternativeE_wp) + +lemma alternativeE_E_wp: + "\ \P\ f -,\Q\; \P'\ g -,\Q\ \ \ \P and P'\ f \ g -, \Q\" + unfolding validE_E_def + by (rule alternativeE_wp) + +lemma select_wp: + "\\s. \x \ S. Q x s\ select S \Q\" + by (simp add: select_def valid_def mres_def image_def) + +lemma state_select_wp: + "\\s. \t. (s, t) \ f \ P () t\ state_select f \P\" + unfolding state_select_def2 + by (wpsimp wp: put_wp select_wp return_wp get_wp assert_wp) + +lemma condition_wp: + "\ \Q\ A \P\; \R\ B \P\ \ \ \\s. if C s then Q s else R s\ condition C A B \P\" + by (clarsimp simp: condition_def valid_def) + +lemma conditionE_wp: + "\ \P\ A \Q\,\R\; \P'\ B \Q\,\R\ \ \ \\s. if C s then P s else P' s\ condition C A B \Q\,\R\" + by (clarsimp simp: condition_def validE_def valid_def) + +lemma state_assert_wp: + "\\s. f s \ P () s\ state_assert f \P\" + unfolding state_assert_def + by (wp seq_ext get_wp assert_wp) + +lemma when_wp[wp_split]: + "\ P \ \Q\ f \R\ \ \ \if P then Q else R ()\ when P f \R\" + by (clarsimp simp: when_def valid_def return_def mres_def) + +lemma unless_wp[wp_split]: + "(\P \ \Q\ f \R\) \ \if P then R () else Q\ unless P f \R\" + unfolding unless_def by wp auto + +lemma whenE_wp: + "(P \ \Q\ f \R\, \E\) \ \if P then Q else R ()\ whenE P f \R\, \E\" + unfolding whenE_def by clarsimp (wp returnOk_wp) + +lemma unlessE_wp: + "(\ P \ \Q\ f \R\, \E\) \ \if P then R () else Q\ unlessE P f \R\, \E\" + unfolding unlessE_def + by (wpsimp wp: returnOk_wp) + +lemma maybeM_wp: + "(\x. y = Some x \ \P x\ m x \Q\) \ + \\s. (\x. y = Some x \ P x s) \ (y = None \ Q () s)\ maybeM m y \Q\" + unfolding maybeM_def by (wpsimp wp: return_wp) auto + +lemma notM_wp: + "\P\ m \\c. Q (\ c)\ \ \P\ notM m \Q\" + unfolding notM_def by (wpsimp wp: return_wp) + +lemma ifM_wp: + assumes [wp]: "\Q\ f \S\" "\R\ g \S\" + assumes [wp]: "\A\ P \\c s. c \ Q s\" "\B\ P \\c s. \c \ R s\" + shows "\A and B\ ifM P f g \S\" + unfolding ifM_def + by (wpsimp wp: hoare_vcg_if_split hoare_vcg_conj_lift) + +lemma andM_wp: + assumes [wp]: "\Q'\ B \Q\" + assumes [wp]: "\P\ A \\c s. c \ Q' s\" "\P'\ A \\c s. \ c \ Q False s\" + shows "\P and P'\ andM A B \Q\" + unfolding andM_def by (wp ifM_wp return_wp) + +lemma orM_wp: + assumes [wp]: "\Q'\ B \Q\" + assumes [wp]: "\P\ A \\c s. c \ Q True s\" "\P'\ A \\c s. \ c \ Q' s\" + shows "\P and P'\ orM A B \Q\" + unfolding orM_def by (wp ifM_wp return_wp) + +lemma whenM_wp: + assumes [wp]: "\Q\ f \S\" + assumes [wp]: "\A\ P \\c s. c \ Q s\" "\B\ P \\c s. 
\c \ S () s\" + shows "\A and B\ whenM P f \S\" + unfolding whenM_def by (wp ifM_wp return_wp) + +lemma hoare_K_bind[wp_split]: + "\P\ f \Q\ \ \P\ K_bind f x \Q\" + by simp + +lemma validE_K_bind[wp_split]: + "\ P \ x \ Q \, \ E \ \ \ P \ K_bind x f \ Q \, \ E \" + by simp + +lemma hoare_fun_app_wp: + "\P\ f' x \Q'\ \ \P\ f' $ x \Q'\" + "\P\ f x \Q\,\E\ \ \P\ f $ x \Q\,\E\" + "\P\ f x \Q\,- \ \P\ f $ x \Q\,-" + "\P\ f x -,\E\ \ \P\ f $ x -,\E\" + by simp+ + +lemma liftE_validE_E: + "\\\ liftE f -, \Q\" + by (clarsimp simp: validE_E_def valid_def) + +lemma returnOk_E: + "\\\ returnOk r -, \Q\" + by (simp add: validE_E_def) (wp returnOk_wp) + +lemma case_option_wp: + "\ \x. \P x\ m x \Q\; \P'\ m' \Q\ \ \ + \\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ case_option m' m x \Q\" + by (cases x; simp) + +lemma case_option_wpE: + "\ \x. \P x\ m x \Q\,\E\; \P'\ m' \Q\,\E\ \ \ + \\s. (x = None \ P' s) \ (x \ None \ P (the x) s)\ case_option m' m x \Q\,\E\" + by (cases x; simp) + +lemmas liftME_E_E_wp[wp_split] = validE_validE_E [OF liftME_wp, simplified, OF validE_E_validE] + +lemma assert_opt_wp: + "\\s. x \ None \ Q (the x) s\ assert_opt x \Q\" + unfolding assert_opt_def + by (case_tac x; wpsimp wp: fail_wp return_wp) + +lemma gets_the_wp: + "\\s. (f s \ None) \ Q (the (f s)) s\ gets_the f \Q\" + unfolding gets_the_def + by (wp seq_ext gets_wp assert_opt_wp) + +lemma gets_the_wp': (* FIXME: should prefer this one in [wp] *) + "\\s. \rv. f s = Some rv \ Q rv s\ gets_the f \Q\" + unfolding gets_the_def + by (wpsimp wp: seq_ext gets_wp assert_opt_wp) + +lemma gets_map_wp: + "\\s. f s p \ None \ Q (the (f s p)) s\ gets_map f p \Q\" + unfolding gets_map_def + by (wpsimp wp: seq_ext gets_wp assert_opt_wp) + +lemma gets_map_wp': + "\\s. \rv. f s p = Some rv \ Q rv s\ gets_map f p \Q\" + unfolding gets_map_def + by (wpsimp wp: seq_ext gets_wp assert_opt_wp) + +(* FIXME: make wp *) +lemma whenE_throwError_wp: + "\\s. \Q \ P s\ whenE Q (throwError e) \\rv. P\, -" + by (simp add: whenE_def returnOk_def throwError_def return_def validE_R_def validE_def valid_def + mres_def) + +lemma select_throwError_wp: + "\\s. \x\S. Q x s\ select S >>= throwError -, \Q\" + by (clarsimp simp: bind_def throwError_def return_def select_def validE_E_def + validE_def valid_def mres_def) + + +subsection \Setting up the @{method wp} method\ + +lemma valid_is_triple: + "valid P f Q = triple_judgement P f (postcondition Q (\s f. mres (f s)))" + by (simp add: triple_judgement_def valid_def postcondition_def) + +lemma validE_is_triple: + "validE P f Q E = + triple_judgement P f + (postconditions (postcondition Q (\s f. {(rv, s'). (Inr rv, s') \ mres (f s)})) + (postcondition E (\s f. {(rv, s'). (Inl rv, s') \ mres (f s)})))" + by (fastforce simp: validE_def triple_judgement_def valid_def postcondition_def postconditions_def + split: sum.split) + +lemma validE_R_is_triple: + "validE_R P f Q = + triple_judgement P f (postcondition Q (\s f. {(rv, s'). (Inr rv, s') \ mres (f s)}))" + by (simp add: validE_R_def validE_is_triple postconditions_def postcondition_def) + +lemma validE_E_is_triple: + "validE_E P f E = + triple_judgement P f (postcondition E (\s f. {(rv, s'). 
(Inl rv, s') \ mres (f s)}))" + by (simp add: validE_E_def validE_is_triple postconditions_def postcondition_def) + +lemmas hoare_wp_combs = hoare_vcg_conj_lift + +lemmas hoare_wp_combsE = + validE_validE_R + hoare_vcg_R_conj + hoare_vcg_E_elim + hoare_vcg_E_conj + +lemmas hoare_wp_state_combsE = + valid_validE_R + hoare_vcg_R_conj[OF valid_validE_R] + hoare_vcg_E_elim[OF valid_validE_E] + hoare_vcg_E_conj[OF valid_validE_E] + +lemmas hoare_classic_wp_combs = hoare_post_comb_imp_conj hoare_vcg_precond_imp hoare_wp_combs +lemmas hoare_classic_wp_combsE = hoare_vcg_precond_impE hoare_vcg_precond_impE_R hoare_wp_combsE + +lemmas hoare_classic_wp_state_combsE = + hoare_vcg_precond_impE[OF valid_validE] + hoare_vcg_precond_impE_R[OF valid_validE_R] + hoare_wp_state_combsE + +lemmas all_classic_wp_combs = + hoare_classic_wp_state_combsE + hoare_classic_wp_combsE + hoare_classic_wp_combs + +lemmas hoare_wp_splits[wp_split] = + hoare_seq_ext hoare_vcg_seqE handleE'_wp handleE_wp + validE_validE_R [OF hoare_vcg_seqE [OF validE_R_validE]] + validE_validE_R [OF handleE'_wp [OF validE_R_validE]] + validE_validE_R [OF handleE_wp [OF validE_R_validE]] + catch_wp hoare_vcg_if_split hoare_vcg_if_splitE + validE_validE_R [OF hoare_vcg_if_splitE [OF validE_R_validE validE_R_validE]] + liftM_wp liftME_wp + validE_validE_R [OF liftME_wp [OF validE_R_validE]] + validE_valid + +lemmas [wp_comb] = hoare_wp_state_combsE hoare_wp_combsE hoare_wp_combs + +(* rules towards the bottom will be matched first *) +lemmas [wp] = hoare_vcg_prop + wp_post_taut + hoare_fun_app_wp + returnOk_E + liftE_validE_E + put_wp + get_wp + gets_wp + modify_wp + return_wp + returnOk_wp + throwError_wp + fail_wp + failE_wp + assert_wp + state_assert_wp + assert_opt_wp + gets_the_wp + gets_map_wp' + liftE_wp + alternative_wp + alternativeE_R_wp + alternativeE_E_wp + alternativeE_wp + select_wp + state_select_wp + condition_wp + conditionE_wp + maybeM_wp notM_wp ifM_wp andM_wp orM_wp whenM_wp + +lemmas [wp_trip] = valid_is_triple validE_is_triple validE_E_is_triple validE_R_is_triple + +lemmas validE_E_combs[wp_comb] = + hoare_vcg_E_conj[where Q'="\\", folded validE_E_def] + valid_validE_E + hoare_vcg_E_conj[where Q'="\\", folded validE_E_def, OF valid_validE_E] + + +text \Simplifications on conjunction\ + +lemma hoare_post_eq: + "\ Q = Q'; \P\ f \Q'\ \ \ \P\ f \Q\" + by simp + +lemma hoare_post_eqE1: + "\ Q = Q'; \P\ f \Q'\,\E\ \ \ \P\ f \Q\,\E\" + by simp + +lemma hoare_post_eqE2: + "\ E = E'; \P\ f \Q\,\E'\ \ \ \P\ f \Q\,\E\" + by simp + +lemma hoare_post_eqE_R: + "\ Q = Q'; \P\ f \Q'\,- \ \ \P\ f \Q\,-" + by simp + +lemma pred_conj_apply_elim: + "(\rv. Q rv and Q' rv) = (\rv s. Q rv s \ Q' rv s)" + by (simp add: pred_conj_def) + +lemma pred_conj_conj_elim: + "(\rv s. (Q rv and Q' rv) s \ Q'' rv s) = (\rv s. Q rv s \ Q' rv s \ Q'' rv s)" + by simp + +lemma conj_assoc_apply: + "(\rv s. (Q rv s \ Q' rv s) \ Q'' rv s) = (\rv s. Q rv s \ Q' rv s \ Q'' rv s)" + by simp + +lemma all_elim: + "(\rv s. \x. P rv s) = P" + by simp + +lemma all_conj_elim: + "(\rv s. (\x. P rv s) \ Q rv s) = (\rv s. 
P rv s \ Q rv s)" + by simp + +lemmas vcg_rhs_simps = + pred_conj_apply_elim pred_conj_conj_elim conj_assoc_apply all_elim all_conj_elim + +lemma if_apply_reduct: + "\P\ If P' (f x) (g x) \Q\ \ \P\ If P' f g x \Q\" + by (cases P'; simp) + +lemma if_apply_reductE: + "\P\ If P' (f x) (g x) \Q\,\E\ \ \P\ If P' f g x \Q\,\E\" + by (cases P'; simp) + +lemma if_apply_reductE_R: + "\P\ If P' (f x) (g x) \Q\,- \ \P\ If P' f g x \Q\,-" + by (cases P'; simp) + +lemmas hoare_wp_simps [wp_split] = + vcg_rhs_simps [THEN hoare_post_eq] vcg_rhs_simps [THEN hoare_post_eqE1] + vcg_rhs_simps [THEN hoare_post_eqE2] vcg_rhs_simps [THEN hoare_post_eqE_R] + if_apply_reduct if_apply_reductE if_apply_reductE_R TrueI + +schematic_goal if_apply_test: + "\?Q\ (if A then returnOk else K fail) x \P\,\E\" + by wpsimp + +lemma hoare_elim_pred_conj: + "\P\ f \\rv s. Q rv s \ Q' rv s\ \ \P\ f \\rv. Q rv and Q' rv\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE1: + "\P\ f \\rv s. Q rv s \ Q' rv s\,\E\ \ \P\ f \\rv. Q rv and Q' rv\,\E\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE2: + "\P\ f \Q\, \\rv s. E rv s \ E' rv s\ \ \P\ f \Q\,\\rv. E rv and E' rv\" + by (unfold pred_conj_def) + +lemma hoare_elim_pred_conjE_R: + "\P\ f \\rv s. Q rv s \ Q' rv s\,- \ \P\ f \\rv. Q rv and Q' rv\,-" + by (unfold pred_conj_def) + +lemmas hoare_wp_pred_conj_elims = + hoare_elim_pred_conj hoare_elim_pred_conjE1 + hoare_elim_pred_conjE2 hoare_elim_pred_conjE_R + + +subsection \Bundles\ + +bundle no_pre = hoare_pre [wp_pre del] + +bundle classic_wp_pre = hoare_pre [wp_pre del] + all_classic_wp_combs[wp_comb del] all_classic_wp_combs[wp_comb] + + +text \Miscellaneous lemmas on hoare triples\ + +lemma hoare_pre_cases: + "\ \\s. R s \ P s\ f \Q\; \\s. \R s \ P' s\ f \Q\ \ \ \P and P'\ f \Q\" + unfolding valid_def by fastforce + +lemma hoare_vcg_mp: + "\ \P\ f \Q\; \P\ f \\r s. Q r s \ Q' r s\ \ \ \P\ f \Q'\" + by (auto simp: valid_def split_def) + +(* note about this precond stuff: rules get a chance to bind directly + before any of their combined forms. As a result, these precondition + implication rules are only used when needed. *) +lemma hoare_add_post: + "\ \P'\ f \Q'\; \s. P s \ P' s; \P\ f \\rv s. Q' rv s \ Q rv s\ \ \ \P\ f \Q\" + unfolding valid_def + by fastforce + +lemma hoare_gen_asmE: + "(P \ \P'\ f \Q\,-) \ \P' and K P\ f \Q\, -" + by (simp add: validE_R_def validE_def valid_def) blast + +lemma hoare_list_case: + "\ \P1\ f f1 \Q\; \y ys. xs = y#ys \ \P2 y ys\ f (f2 y ys) \Q\ \ \ + \case xs of [] \ P1 | y#ys \ P2 y ys\ f (case xs of [] \ f1 | y#ys \ f2 y ys) \Q\" + by (cases xs; simp) + +lemmas whenE_wps[wp_split] = + whenE_wp whenE_wp[THEN validE_validE_R] whenE_wp[THEN validE_validE_E] + +lemmas unlessE_wps[wp_split] = + unlessE_wp unlessE_wp[THEN validE_validE_R] unlessE_wp[THEN validE_validE_E] + +lemma hoare_use_eq: + assumes "\P. \\s. P (f s)\ m \\_ s. P (f s)\" + assumes "\f. \\s. P f s\ m \\_ s. Q f s\" + shows "\\s. P (f s) s\ m \\_ s. Q (f s) s \" + apply (rule hoare_post_imp[where Q="\_ s. \y. 
y = f s \ Q y s"], simp) + apply (wpsimp wp: hoare_vcg_ex_lift assms) + done + +lemma hoare_fail_any[simp]: + "\P\ fail \Q\" + by wp + +lemma hoare_failE[simp]: + "\P\ fail \Q\, \E\" + by wp + +lemma hoare_FalseE[simp]: + "\\\ f \Q\, \E\" + by (simp add: valid_def validE_def) + +lemma hoare_validE_pred_conj: + "\ \P\ f \Q\, \E\; \P\ f \R\, \E\ \ \ \P\ f \Q and R\, \E\" + unfolding valid_def validE_def + by (simp add: split_def split: sum.splits) + +lemma hoare_validE_conj: + "\ \P\ f \Q\, \E\; \P\ f \R\, \E\ \ \ \P\ f \\rv s. Q rv s \ R rv s\, \E\" + unfolding valid_def validE_def + by (simp add: split_def split: sum.splits) + +lemmas hoare_valid_validE = valid_validE (* FIXME lib: eliminate one *) + +declare validE_validE_E[wp_comb] + +lemmas if_validE_E[wp_split] = + validE_validE_E[OF hoare_vcg_if_splitE[OF validE_E_validE validE_E_validE]] + +lemma hoare_drop_imp: + "\P\ f \Q\ \ \P\ f \\rv s. R rv s \ Q rv s\" + by (auto simp: valid_def) + +lemma hoare_drop_impE: + "\\P\ f \\r. Q\, \E\\ \ \P\ f \\rv s. R rv s \ Q s\, \E\" + by (simp add: validE_weaken) + +lemma hoare_drop_impE_R: + "\P\ f \Q\,- \ \P\ f \\rv s. R rv s \ Q rv s\, -" + by (auto simp: validE_R_def validE_def valid_def split_def split: sum.splits) + +lemma hoare_drop_impE_E: + "\P\ f -,\Q\ \ \P\ f -, \\rv s. R rv s \ Q rv s\" + by (auto simp: validE_E_def validE_def valid_def split_def split: sum.splits) + +lemmas hoare_drop_imps = hoare_drop_imp hoare_drop_impE_R hoare_drop_impE_E + +(*This is unsafe, but can be very useful when supplied as a comb rule.*) +lemma hoare_drop_imp_conj[wp_unsafe]: + "\ \P\ f \Q'\; \P'\ f \\rv s. (Q rv s \ Q'' rv s) \ Q''' rv s\ \ \ + \P and P'\ f \\rv s. (Q rv s \ Q' rv s \ Q'' rv s) \ Q''' rv s\" + by (auto simp: valid_def) + +lemmas hoare_drop_imp_conj'[wp_unsafe] = hoare_drop_imp_conj[where Q'''="\\", simplified] + +lemmas bindE_E_wp[wp_split] = validE_validE_E[OF hoare_vcg_seqE [OF validE_E_validE]] + +lemma True_E_E[wp]: + "\\\ f -,\\\\" + by (auto simp: validE_E_def validE_def valid_def split: sum.splits) + +lemma hoare_vcg_set_pred_lift: + assumes "\P x. m \ \s. P (f x s) \" + shows "m \ \s. P {x. f x s} \" + using assms[where P="\x . x"] assms[where P=Not] use_valid + by (fastforce simp: valid_def elim!: subst[rotated, where P=P]) + +lemma hoare_vcg_set_pred_lift_mono: + assumes f: "\x. m \ f x \" + assumes mono: "\A B. A \ B \ P A \ P B" + shows "m \ \s. P {x. f x s} \" + by (fastforce simp: valid_def elim!: mono[rotated] dest: use_valid[OF _ f]) + +text \If a function contains an @{term assert}, or equivalent, then it might be + possible to strengthen the precondition of an already-proven hoare triple + @{text pos}, by additionally proving a side condition @{text neg}, that + violating some condition causes failure. The stronger hoare triple produced + by this theorem allows the precondition to assume that the condition is + satisfied.\ +lemma hoare_strengthen_pre_via_assert_forward: + assumes pos: "\ P \ f \ Q \" + assumes rel: "\s. S s \ P s \ N s" + assumes neg: "\ N \ f \ \\ \" + shows "\ S \ f \ Q \" + apply (rule hoare_weaken_pre) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_disj_lift[OF pos neg]) + by (auto simp: rel) + +text \Like @{thm hoare_strengthen_pre_via_assert_forward}, strengthen a precondition + by proving a side condition that the negation of that condition would cause + failure. This version is intended for backward reasoning. 
Apply it to a goal to + obtain a stronger precondition after proving the side condition.\ +lemma hoare_strengthen_pre_via_assert_backward: + assumes neg: "\ Not \ E \ f \ \\ \" + assumes pos: "\ P and E \ f \ Q \" + shows "\ P \ f \ Q \" + by (rule hoare_strengthen_pre_via_assert_forward[OF pos _ neg], simp) + + +subsection \Strongest postcondition rules\ + +lemma get_sp: + "\P\ get \\rv s. s = rv \ P s\" + by(simp add:get_def valid_def mres_def) + +lemma put_sp: + "\\\ put a \\_ s. s = a\" + by(simp add:put_def valid_def mres_def) + +lemma return_sp: + "\P\ return a \\rv s. rv = a \ P s\" + by(simp add:return_def valid_def mres_def) + +lemma hoare_return_sp: (* FIXME lib: eliminate *) + "\P\ return x \\rv. P and K (rv = x)\" + by (simp add: valid_def return_def mres_def) + +lemma assert_sp: + "\P\ assert Q \\_ s. P s \ Q \" + by (simp add: assert_def fail_def return_def valid_def mres_def) + +lemma hoare_gets_sp: + "\P\ gets f \\rv s. rv = f s \ P s\" + by (simp add: valid_def simpler_gets_def mres_def) + +lemma hoare_returnOk_sp: + "\P\ returnOk x \\rv s. rv = x \ P s\, \Q\" + by (simp add: valid_def validE_def returnOk_def return_def mres_def) + +\ \For forward reasoning in Hoare proofs, these lemmas allow us to step over the + left-hand-side of monadic bind, while keeping the same precondition.\ + +named_theorems forward_inv_step_rules + +lemmas hoare_forward_inv_step_nobind[forward_inv_step_rules] = + hoare_seq_ext_nobind[where B=A and A=A for A, rotated] + +lemmas hoare_seq_ext_skip[forward_inv_step_rules] = + hoare_seq_ext[where B="\_. A" and A=A for A, rotated] + +lemmas hoare_forward_inv_step_nobindE_valid[forward_inv_step_rules] = + hoare_seq_ext_nobindE[where B=A and A=A and E="\_. C" and C="\_. C" for A C, + simplified validE_eq_valid, rotated] + +lemmas hoare_forward_inv_step_valid[forward_inv_step_rules] = + hoare_vcg_seqE[where B="\_. A" and A=A and E="\_. C" and C="\_. C" for A C, + simplified validE_eq_valid, rotated] + +lemmas hoare_forward_inv_step_nobindE[forward_inv_step_rules] = + hoare_seq_ext_nobindE[where B=A and A=A for A, rotated] + +lemmas hoare_seq_ext_skipE[forward_inv_step_rules] = + hoare_vcg_seqE[where B="\_. 
A" and A=A for A, rotated] + +lemmas hoare_forward_inv_step_nobindE_validE_E[forward_inv_step_rules] = + hoare_forward_inv_step_nobindE[where C="\\", simplified validE_E_def[symmetric]] + +lemmas hoare_forward_inv_step_validE_E[forward_inv_step_rules] = + hoare_seq_ext_skipE[where C="\\", simplified validE_E_def[symmetric]] + +lemmas hoare_forward_inv_step_nobindE_validE_R[forward_inv_step_rules] = + hoare_forward_inv_step_nobindE[where E="\\", simplified validE_R_def[symmetric]] + +lemmas hoare_forward_inv_step_validE_R[forward_inv_step_rules] = + hoare_seq_ext_skipE[where E="\\", simplified validE_R_def[symmetric]] + +method forward_inv_step uses wp simp = + rule forward_inv_step_rules, solves \wpsimp wp: wp simp: simp\ + +end diff --git a/lib/Monads/wp/Eisbach_WP.thy b/lib/Monads/wp/Eisbach_WP.thy index cba0f20dd0..e2ac244ee0 100644 --- a/lib/Monads/wp/Eisbach_WP.thy +++ b/lib/Monads/wp/Eisbach_WP.thy @@ -9,7 +9,7 @@ theory Eisbach_WP imports Eisbach_Tools.Eisbach_Methods - NonDetMonadVCG + Nondet_VCG Eisbach_Tools.Conjuncts Eisbach_Tools.Rule_By_Method WPI diff --git a/lib/Monads/wp/WP-method.ML b/lib/Monads/wp/WP-method.ML index d25d8274cb..67a5f45b4e 100644 --- a/lib/Monads/wp/WP-method.ML +++ b/lib/Monads/wp/WP-method.ML @@ -287,7 +287,9 @@ let val trace' = trace orelse Config.get ctxt WP_Pre.wp_trace val used_thms_ref = Unsynchronized.ref [] : (string * string * term) list Unsynchronized.ref val rules = get_rules ctxt extras - val wp_pre_tac = TRY (WP_Pre.tac trace' used_thms_ref ctxt 1) + val wp_pre_tac = TRY (WP_Pre.pre_tac trace' ctxt + (Named_Theorems.get ctxt \<^named_theorems>\wp_pre\) + used_thms_ref 1) val wp_fix_tac = TRY (WPFix.both_tac ctxt 1) val cleanup_tac = TRY (REPEAT (resolve_tac ctxt [@{thm TrueI}, @{thm conj_TrueI}, @{thm conj_TrueI2}] 1 diff --git a/lib/Monads/wp/WPBang.thy b/lib/Monads/wp/WPBang.thy index 74bafa697a..3330c59adb 100644 --- a/lib/Monads/wp/WPBang.thy +++ b/lib/Monads/wp/WPBang.thy @@ -8,7 +8,7 @@ theory WPBang imports WP Eisbach_Tools.ProvePart - NonDetMonadVCG + Nondet_VCG begin lemma conj_meta_forward: @@ -20,7 +20,7 @@ ML \ structure WP_Safe = struct fun check_has_frees_tac Ps (_ : int) thm = let - val fs = Term.add_frees (Thm.prop_of thm) [] |> filter (member (=) Ps) + val fs = Term.add_frees (Thm.prop_of thm) [] |> filter (member (op =) Ps) in if null fs then Seq.empty else Seq.single thm end fun wp_bang wp_safe_rules ctxt = let diff --git a/lib/Monads/wp/WPC.thy b/lib/Monads/wp/WPC.thy index cd19db0e4f..417e236ece 100644 --- a/lib/Monads/wp/WPC.thy +++ b/lib/Monads/wp/WPC.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause @@ -7,61 +8,64 @@ theory WPC imports "WP_Pre" keywords "wpc_setup" :: thy_decl - begin -definition - wpc_helper :: "(('a \ bool) \ 'b set) - \ (('a \ bool) \ 'b set) \ bool \ bool" where - "wpc_helper \ \(P, P') (Q, Q') R. ((\s. P s \ Q s) \ P' \ Q') \ R" +(* Case splitting method producing independent guards (preconditions) for each case in a + datatype case split. The current setup can handle judgements such as valid, corres, or ccorres + with up to two independent predicate guards and one independent set-type guard. Unneeded guards + can be ignored in setup. + + The helper predicate unifies the treatment of guards in the proof method. The P guards will be + transformed into Q guards in each branch of the case. The R is the judgement (valid, corres, etc). 
+ + The helper predicate encodes that the judgement supports a standard guard weakening rule, + from which rules for conjunction-lifting and forall-lifting follow below. These are then used + by the tactic to generate assumptions of the form "\y. x = SomeConstructor y \ P y". + + If more or other types of guards are needed, add them to the helper predicate and re-prove the + processing rules below. *) +definition wpc_helper :: + "('a \ bool) \ ('b \ bool) \ 'c set \ ('a \ bool) \ ('b \ bool) \ 'c set \ bool \ bool" + where + "wpc_helper \ \(P, P', P'') (Q, Q', Q'') R. + (\s. P s \ Q s) \ (\s. P' s \ Q' s) \ P'' \ Q'' \ R" lemma wpc_conj_process: - "\ wpc_helper (P, P') (A, A') C; wpc_helper (P, P') (B, B') D \ - \ wpc_helper (P, P') (\s. A s \ B s, A' \ B') (C \ D)" + "\ wpc_helper (P, P', P'') (A, A', A'') C; wpc_helper (P, P', P'') (B, B', B'') D \ + \ wpc_helper (P, P', P'') (\s. A s \ B s, \s. A' s \ B' s, A'' \ B'') (C \ D)" by (clarsimp simp add: wpc_helper_def) lemma wpc_all_process: - "\ \x. wpc_helper (P, P') (Q x, Q' x) (R x) \ - \ wpc_helper (P, P') (\s. \x. Q x s, {s. \x. s \ Q' x}) (\x. R x)" + "\ \x. wpc_helper (P, P', P'') (Q x, Q' x, Q'' x) (R x) \ + \ wpc_helper (P, P', P'') (\s. \x. Q x s, \s. \x. Q' x s, {s. \x. s \ Q'' x}) (\x. R x)" by (clarsimp simp: wpc_helper_def subset_iff) lemma wpc_all_process_very_weak: - "\ \x. wpc_helper (P, P') (Q, Q') (R x) \ \ wpc_helper (P, P') (Q, Q') (\x. R x)" + "\ \x. wpc_helper (P, P', P'') (Q, Q', Q'') (R x) \ + \ wpc_helper (P, P', P'') (Q, Q', Q'') (\x. R x)" by (clarsimp simp: wpc_helper_def) lemma wpc_imp_process: - "\ Q \ wpc_helper (P, P') (R, R') S \ - \ wpc_helper (P, P') (\s. Q \ R s, {s. Q \ s \ R'}) (Q \ S)" + "\ Q \ wpc_helper (P, P', P'') (R, R', R'') S \ + \ wpc_helper (P, P', P'') (\s. Q \ R s, \s. Q \ R' s, {s. Q \ s \ R''}) (Q \ S)" by (clarsimp simp add: wpc_helper_def subset_iff) lemma wpc_imp_process_weak: - "\ wpc_helper (P, P') (R, R') S \ \ wpc_helper (P, P') (R, R') (Q \ S)" + "\ wpc_helper (P, P', P'') (R, R', R'') S \ \ wpc_helper (P, P', P'') (R, R', R'') (Q \ S)" by (clarsimp simp add: wpc_helper_def) -lemmas wpc_processors - = wpc_conj_process wpc_all_process wpc_imp_process -lemmas wpc_weak_processors - = wpc_conj_process wpc_all_process wpc_imp_process_weak -lemmas wpc_vweak_processors - = wpc_conj_process wpc_all_process_very_weak wpc_imp_process_weak +lemmas wpc_processors = wpc_conj_process wpc_all_process wpc_imp_process +lemmas wpc_weak_processors = wpc_conj_process wpc_all_process wpc_imp_process_weak +lemmas wpc_vweak_processors = wpc_conj_process wpc_all_process_very_weak wpc_imp_process_weak lemma wpc_helperI: - "wpc_helper (P, P') (P, P') Q \ Q" + "wpc_helper (P, P', P'') (P, P', P'') Q \ Q" by (simp add: wpc_helper_def) lemma wpc_foo: "\ undefined x; False \ \ P x" by simp -lemma foo: - assumes foo_elim: "\P Q h. \ foo Q h; \s. P s \ Q s \ \ foo P h" - shows - "\ \x. foo (Q x) (f x); foo R g \ \ - foo (\s. (\x. 
Q x s) \ (y = None \ R s)) - (case y of Some x \ f x | None \ g)" - by (auto split: option.split intro: foo_elim) - ML \ - signature WPC = sig exception WPCFailed of string * term list * thm list; @@ -176,13 +180,9 @@ let val subst = split RS iffd2_thm; val subst2 = instantiate_concl_pred ctxt pred subst; in - (resolve_tac ctxt [subst2]) - THEN' - (resolve_tac ctxt [wpc_helperI]) - THEN' - (REPEAT_ALL_NEW (resolve_tac ctxt processors) - THEN_ALL_NEW - resolve_single_tac ctxt [fin]) + resolve_tac ctxt [subst2] + THEN' resolve_tac ctxt [wpc_helperI] + THEN' (REPEAT_ALL_NEW (resolve_tac ctxt processors) THEN_ALL_NEW resolve_single_tac ctxt [fin]) end; (* n.b. need to concretise the lazy sequence via a list to ensure exceptions @@ -213,67 +213,87 @@ end; val _ = Outer_Syntax.command @{command_keyword "wpc_setup"} - "Add wpc stuff" + "Add new WPC term and helper rule" (P.term -- P.name >> (fn (tm, thm) => Toplevel.local_theory NONE NONE (add_wpc tm thm))) end; end; - \ ML \ - -val wp_cases_tactic_weak = WeakestPreCases.wp_cases_tac @{thms wpc_weak_processors}; +val wp_cases_tactic_weak = WeakestPreCases.wp_cases_tac @{thms wpc_weak_processors}; val wp_cases_method_strong = WeakestPreCases.wp_cases_method @{thms wpc_processors}; val wp_cases_method_weak = WeakestPreCases.wp_cases_method @{thms wpc_weak_processors}; val wp_cases_method_vweak = WeakestPreCases.wp_cases_method @{thms wpc_vweak_processors}; - \ +(* Main proof methods: *) method_setup wpc0 = \wp_cases_method_strong\ "case splitter for weakest-precondition proofs" method_setup wpcw0 = \wp_cases_method_weak\ "weak-form case splitter for weakest-precondition proofs" +(* Instances specifically for wp (introducing schematic guards automatically): *) method wpc = (wp_pre, wpc0) method wpcw = (wp_pre, wpcw0) -definition - wpc_test :: "'a set \ ('a \ 'b) set \ 'b set \ bool" - where - "wpc_test P R S \ (R `` P) \ S" +(* Test and example *) +experiment +begin +(* Assume some kind of judgement wpc_test with a precondition P of type set and a + precondition Q of type 'a \ bool: *) +definition wpc_test :: "'a set \ ('a \ bool) \ ('a \ 'b) set \ 'b set \ bool" where + "wpc_test P Q R S \ (R `` P) \ S" + +(* Weakening rule to introduce schematics for the two guards *) lemma wpc_test_weaken: - "\ wpc_test Q R S; P \ Q \ \ wpc_test P R S" + "\ wpc_test Q X' R S; P \ Q; \s. X s \ X' s \ \ wpc_test P X R S" by (simp add: wpc_test_def, blast) -lemma wpc_helper_validF: - "wpc_test Q' R S \ wpc_helper (P, P') (Q, Q') (wpc_test P' R S)" - by (simp add: wpc_test_def wpc_helper_def, blast) +(* Setup rule, establishes connection between wpc_helper and judgment wpc_test. The precondition has + the judgement with transformed (Q) guards, the conclusion has the helper predicate with the + judgement applied to the original (P) guards. The guard arguments of wpc_helper must be in the + form below (no arguments or patterns) for the method to work properly. -setup \ -let - val tm = Thm.cterm_of @{context} (Logic.varify_global @{term "\R. wpc_test P R S"}); - val thm = @{thm wpc_helper_validF}; -in - WPCPredicateAndFinals.map (fn xs => (tm, thm) :: xs) -end -\ + Note that this example ignores the first predicate guard P, and only uses P'/P''. Use/leave out + guards as needed. *) +lemma wpc_helper_validF: + "wpc_test Q'' Q' R S \ wpc_helper (P, P', P'') (Q, Q', Q'') (wpc_test P'' P' R S)" + by (simp add: wpc_test_def wpc_helper_def) blast -lemma set_conj_Int_simp: - "{s \ S. P s} = S \ {s. P s}" - by auto +(* Set up the proof method for wpc_test. 
First parameter is a function that takes the argument + position on which the case split happens (here R) and returns the judgement. Second parameter + is the setup rule. *) +wpc_setup "\R. wpc_test P X R S" wpc_helper_validF +(* Demo for weak form (wpcw), produces a separate guard for each branch, no implications. *) lemma case_options_weak_wp: - "\ wpc_test P R S; \x. wpc_test P' (R' x) S \ - \ wpc_test (P \ P') (case opt of None \ R | Some x \ R' x) S" + "\ wpc_test P X R S; \x. wpc_test P' X' (R' x) S \ + \ wpc_test (P \ P') (\s. X s \ X' s) (case opt of None \ R | Some x \ R' x) S" apply (rule wpc_test_weaken) - apply wpcw + apply wpcw + apply assumption apply assumption - apply assumption + apply simp apply simp done +(* Demo for strong form (wpc), produces a separate guard for each branch with implications. *) +lemma + "\ wpc_test P X R S; \x. wpc_test (P' x) (X' x) (R' x) S \ + \ wpc_test (P \ {s. \x. opt = Some x \ s \ P' x}) + (\s. X s \ (\x. X' x s)) + (case opt of None \ R | Some x \ R' x) S" + apply (rule wpc_test_weaken) + apply wpc + apply assumption + apply assumption + apply fastforce + apply clarsimp + done + +end end diff --git a/lib/Monads/wp/WPEx.thy b/lib/Monads/wp/WPEx.thy index d24804f13b..877b2ab3a9 100644 --- a/lib/Monads/wp/WPEx.thy +++ b/lib/Monads/wp/WPEx.thy @@ -6,8 +6,8 @@ theory WPEx imports - In_Monad - NonDetMonadVCG + Nondet_In_Monad + Nondet_VCG Strengthen begin diff --git a/lib/Monads/wp/WPI.thy b/lib/Monads/wp/WPI.thy index 9d6b2ea219..3b67c283ed 100644 --- a/lib/Monads/wp/WPI.thy +++ b/lib/Monads/wp/WPI.thy @@ -33,7 +33,7 @@ theory WPI imports Eisbach_Tools.Eisbach_Methods - NonDetMonadLemmas + Nondet_Lemmas WPEx begin diff --git a/lib/Monads/wp/WP_Pre.thy b/lib/Monads/wp/WP_Pre.thy index aa997af361..827771e642 100644 --- a/lib/Monads/wp/WP_Pre.thy +++ b/lib/Monads/wp/WP_Pre.thy @@ -1,4 +1,5 @@ (* + * Copyright 2023, Proofcraft Pty Ltd * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) * * SPDX-License-Identifier: BSD-2-Clause @@ -11,8 +12,6 @@ imports "HOL-Eisbach.Eisbach_Tools" begin -named_theorems wp_pre - ML \ structure WP_Pre = struct @@ -51,21 +50,22 @@ fun pre_tac trace ctxt pre_rules used_thms_ref i t = let then Seq.empty else Seq.single t2 end handle Option => Seq.empty -fun tac trace used_thms_ref ctxt = let - val pres = Named_Theorems.get ctxt @{named_theorems wp_pre} - in pre_tac trace ctxt pres used_thms_ref end - val method = let val used_thms_ref = Unsynchronized.ref [] : (string * string * term) list Unsynchronized.ref in - Args.context >> (fn _ => fn ctxt => - Method.SIMPLE_METHOD' (tac (Config.get ctxt wp_trace) used_thms_ref ctxt)) + Attrib.thms >> (fn thms => fn ctxt => + Method.SIMPLE_METHOD' (pre_tac (Config.get ctxt wp_trace) ctxt thms used_thms_ref)) end end \ -method_setup wp_pre0 = \WP_Pre.method\ +(* This method takes a list of theorems as parameter. + See wp_pre definition below for an example use. *) +method_setup pre_tac = \WP_Pre.method\ + +named_theorems wp_pre +method wp_pre0 = pre_tac wp_pre method wp_pre = wp_pre0? 
definition diff --git a/lib/NonDetMonadLemmaBucket.thy b/lib/NonDetMonadLemmaBucket.thy index 8c9315c1d8..f89dd7f5e1 100644 --- a/lib/NonDetMonadLemmaBucket.thy +++ b/lib/NonDetMonadLemmaBucket.thy @@ -8,18 +8,18 @@ theory NonDetMonadLemmaBucket imports Lib - Monads.More_NonDetMonadVCG + Monads.Nondet_More_VCG Monad_Lists - Monads.Monad_Equations + Monads.Nondet_Monad_Equations Monad_Commute - Monads.No_Fail - Monads.No_Throw + Monads.Nondet_No_Fail + Monads.Nondet_No_Throw CutMon Oblivious Injection_Handler - Monads.WhileLoopRulesCompleteness + Monads.Nondet_While_Loop_Rules_Completeness "Word_Lib.Distinct_Prop" (* for distinct_tuple_helper *) - Monads.OptionMonadWP + Monads.Reader_Option_VCG begin lemma distinct_tuple_helper: diff --git a/lib/Oblivious.thy b/lib/Oblivious.thy index 8618f44c58..7cc15fa478 100644 --- a/lib/Oblivious.thy +++ b/lib/Oblivious.thy @@ -12,8 +12,8 @@ theory Oblivious imports - Monads.In_Monad - Monads.NonDetMonadVCG + Monads.Nondet_In_Monad + Monads.Nondet_VCG begin diff --git a/lib/Qualify.thy b/lib/Qualify.thy index f8353cf152..4e73330173 100644 --- a/lib/Qualify.thy +++ b/lib/Qualify.thy @@ -110,7 +110,7 @@ val _ = Toplevel.theory (set_global_qualify {name = str, target_name = case target of SOME (nm, _) => nm | _ => str}))); fun syntax_alias global_alias local_alias b name = - Local_Theory.declaration {syntax = true, pervasive = true} (fn phi => + Local_Theory.declaration {syntax = true, pos = Position.none, pervasive = true} (fn phi => let val b' = Morphism.binding phi b in Context.mapping (global_alias b' name) (local_alias b' name) end); diff --git a/lib/ROOT b/lib/ROOT index 687e845c0a..9ff4b9c4a1 100644 --- a/lib/ROOT +++ b/lib/ROOT @@ -34,6 +34,7 @@ session Lib (lib) = Word_Lib + Crunch_Instances_Trace StateMonad Corres_UL + Corres_Method Find_Names LemmaBucket Try_Methods @@ -58,7 +59,7 @@ session Lib (lib) = Word_Lib + DataMap FastMap RangeMap - Corres_Method + CorresK_Method DetWPLib Guess_ExI GenericTag @@ -103,6 +104,7 @@ session LibTest (lib) in test = Refine + ASpec ExecSpec theories + Corres_Test Crunch_Test_NonDet Crunch_Test_Qualified_NonDet Crunch_Test_Qualified_Trace @@ -122,9 +124,10 @@ session LibTest (lib) in test = Refine + Value_Type_Test Named_Eta_Test Rules_Tac_Test + MonadicRewrite_Test (* use virtual memory function as an example, only makes sense on ARM: *) theories [condition = "L4V_ARCH_IS_ARM"] - Corres_Test + CorresK_Test session SepTactics (lib) in Hoare_Sep_Tactics = Sep_Algebra + theories diff --git a/lib/Requalify.thy b/lib/Requalify.thy index 34facce853..b06fdb3719 100644 --- a/lib/Requalify.thy +++ b/lib/Requalify.thy @@ -49,7 +49,7 @@ in end fun syntax_alias global_alias local_alias b (name : string) = - Local_Theory.declaration {syntax = false, pervasive = true} (fn phi => + Local_Theory.declaration {syntax = false, pos = Position.none, pervasive = true} (fn phi => let val b' = Morphism.binding phi b in Context.mapping (global_alias b' name) (local_alias b' name) end); diff --git a/lib/SubMonadLib.thy b/lib/SubMonadLib.thy index fe30d2ef48..3546b9e157 100644 --- a/lib/SubMonadLib.thy +++ b/lib/SubMonadLib.thy @@ -6,7 +6,7 @@ theory SubMonadLib imports - Monads.Empty_Fail + Monads.Nondet_Empty_Fail Corres_UL begin diff --git a/lib/Word_Lib/Bitwise.thy b/lib/Word_Lib/Bitwise.thy index 4c5ffdfbeb..55e363a291 100644 --- a/lib/Word_Lib/Bitwise.thy +++ b/lib/Word_Lib/Bitwise.thy @@ -365,7 +365,7 @@ lemma upt_eq_list_intros: by (simp_all add: upt_eq_Cons_conv) -subsection \Tactic definition\ +text \Tactic definition\ lemma 
if_bool_simps: "If p True y = (p \ y) \ If p False y = (\ p \ y) \ diff --git a/lib/Word_Lib/More_Word.thy b/lib/Word_Lib/More_Word.thy index e4823f5085..6dd119ef44 100644 --- a/lib/Word_Lib/More_Word.thy +++ b/lib/Word_Lib/More_Word.thy @@ -1872,14 +1872,14 @@ lemma nth_0: "\ bit (0 :: 'a::len word) n" lemma nth_minus1: "bit (-1 :: 'a::len word) n \ n < LENGTH('a)" by transfer simp -lemma nth_ucast: +lemma nth_ucast_weak: "bit (ucast w::'a::len word) n = (bit w n \ n < LENGTH('a))" by transfer (simp add: bit_take_bit_iff ac_simps) -lemma drop_bit_numeral_bit0_1 [simp]: - \drop_bit (Suc 0) (numeral k) = - (word_of_int (drop_bit (Suc 0) (take_bit LENGTH('a) (numeral k))) :: 'a::len word)\ - by (metis Word_eq_word_of_int drop_bit_word.abs_eq of_int_numeral) +lemma nth_ucast: + "bit (ucast (w::'a::len word)::'b::len word) n = + (bit w n \ n < min LENGTH('a) LENGTH('b))" + by (auto simp: not_le nth_ucast_weak dest: bit_imp_le_length) lemma nth_mask: \bit (mask n :: 'a::len word) i \ i < n \ i < size (mask n :: 'a word)\ diff --git a/lib/Word_Lib/More_Word_Operations.thy b/lib/Word_Lib/More_Word_Operations.thy index 820adfaf8b..4cd8f6a1ca 100644 --- a/lib/Word_Lib/More_Word_Operations.thy +++ b/lib/Word_Lib/More_Word_Operations.thy @@ -302,13 +302,21 @@ lemma alignUp_not_aligned_eq: and sz: "n < LENGTH('a)" shows "alignUp a n = (a div 2 ^ n + 1) * 2 ^ n" proof - + from \n < LENGTH('a)\ have \(2::int) ^ n < 2 ^ LENGTH('a)\ + by simp + with take_bit_int_less_exp [of n] + have *: \take_bit n k < 2 ^ LENGTH('a)\ for k :: int + by (rule less_trans) have anz: "a mod 2 ^ n \ 0" by (rule not_aligned_mod_nz) fact+ - - then have um: "unat (a mod 2 ^ n - 1) div 2 ^ n = 0" using sz - by (meson Euclidean_Division.div_eq_0_iff le_m1_iff_lt measure_unat order_less_trans - unat_less_power word_less_sub_le word_mod_less_divisor) - + then have um: "unat (a mod 2 ^ n - 1) div 2 ^ n = 0" + apply (transfer fixing: n) using sz + apply (simp flip: take_bit_eq_mod add: div_eq_0_iff) + apply (subst take_bit_int_eq_self) + using * + apply (auto simp add: diff_less_eq intro: less_imp_le) + apply (simp add: less_le) + done have "a + 2 ^ n - 1 = (a div 2 ^ n) * 2 ^ n + (a mod 2 ^ n) + 2 ^ n - 1" by (simp add: word_mod_div_equality) also have "\ = (a mod 2 ^ n - 1) + (a div 2 ^ n + 1) * 2 ^ n" diff --git a/lib/Word_Lib/Signed_Division_Word.thy b/lib/Word_Lib/Signed_Division_Word.thy index 14a7ab54cf..f5d9445df5 100644 --- a/lib/Word_Lib/Signed_Division_Word.thy +++ b/lib/Word_Lib/Signed_Division_Word.thy @@ -10,6 +10,12 @@ theory Signed_Division_Word imports "HOL-Library.Signed_Division" "HOL-Library.Word" begin +text \ + The following specification of division follows ISO C99, which in turn adopted the typical + behavior of hardware modern in the beginning of the 1990ies. + The underlying integer division is named ``T-division'' in \cite{leijen01}. 
+\ + instantiation word :: (len) signed_division begin diff --git a/lib/Word_Lib/Word_Lemmas.thy b/lib/Word_Lib/Word_Lemmas.thy index 354d430417..01662c2f81 100644 --- a/lib/Word_Lib/Word_Lemmas.thy +++ b/lib/Word_Lib/Word_Lemmas.thy @@ -153,8 +153,8 @@ lemma sshiftr_n1: "-1 >>> n = -1" lemma nth_sshiftr: "bit (w >>> m) n = (n < size w \ (if n + m \ size w then bit w (size w - 1) else bit w (n + m)))" - apply (clarsimp simp add: bit_simps word_size ac_simps not_less) - apply (metis add.commute bit_imp_le_length bit_shiftr_word_iff le_diff_conv not_le) + apply (auto simp add: bit_simps word_size ac_simps not_less) + apply (meson bit_imp_le_length bit_shiftr_word_iff leD) done lemma sshiftr_numeral: @@ -508,8 +508,9 @@ next also have \\ \ unat x < 2 ^ n div 2 ^ y\ using * by (simp add: less_le) finally show ?thesis - using that \x \ 0\ by (simp flip: push_bit_eq_mult drop_bit_eq_div - add: shiftr_def shiftl_def unat_drop_bit_eq word_less_iff_unsigned [where ?'a = nat]) + using that \x \ 0\ + by (simp flip: push_bit_eq_mult drop_bit_eq_div + add: shiftr_def shiftl_def unat_drop_bit_eq word_less_iff_unsigned [where ?'a = nat]) qed qed qed @@ -716,7 +717,8 @@ lemma word_and_notzeroD: lemma shiftr_le_0: "unat (w::'a::len word) < 2 ^ n \ w >> n = (0::'a::len word)" by (auto simp add: take_bit_word_eq_self_iff word_less_nat_alt shiftr_def - simp flip: take_bit_eq_self_iff_drop_bit_eq_0 intro: ccontr) + simp flip: take_bit_eq_self_iff_drop_bit_eq_0 + intro: ccontr) lemma of_nat_shiftl: "(of_nat x << n) = (of_nat (x * 2 ^ n) :: ('a::len) word)" @@ -1466,9 +1468,9 @@ lemma mask_shift_sum: "\ a \ b; unat n = unat (p AND mask b) \ \ (p AND NOT(mask a)) + (p AND mask a >> b) * (1 << b) + n = (p :: 'a :: len word)" apply (simp add: shiftl_def shiftr_def flip: push_bit_eq_mult take_bit_eq_mask word_unat_eq_iff) - apply (subst disjunctive_add, clarsimp simp add: bit_simps)+ + apply (subst disjunctive_add, fastforce simp: bit_simps)+ apply (rule bit_word_eqI) - apply (auto simp add: bit_simps) + apply (fastforce simp: bit_simps)[1] done lemma is_up_compose: @@ -1583,10 +1585,7 @@ next apply (rule impI) apply (subst bit_eq_iff) apply (simp add: bit_take_bit_iff bit_signed_take_bit_iff min_def) - apply (auto simp add: Suc_le_eq) - using less_imp_le_nat apply blast - using less_imp_le_nat apply blast - done + by (auto simp add: Suc_le_eq) (meson dual_order.strict_iff_not)+ qed lemma scast_ucast_mask_compare: @@ -1820,11 +1819,7 @@ proof (rule classical) apply (insert sdiv_int_range [where a="sint a" and b="sint b"])[1] apply (clarsimp simp: word_size) apply (insert sdiv_int_range [where a="sint a" and b="sint b"])[1] - apply auto - apply (cases \size a\) - apply simp_all - apply (smt (z3) One_nat_def diff_Suc_1 signed_word_eqI sint_int_min sint_range_size wsst_TYs(3)) - done + by (smt (verit, best) One_nat_def signed_word_eqI sint_greater_eq sint_int_min sint_less wsst_TYs(3)) have result_range_simple: "(sint a sdiv sint b \ ?range) \ ?thesis" apply (insert sdiv_int_range [where a="sint a" and b="sint b"]) diff --git a/lib/Word_Lib/Word_Lemmas_Internal.thy b/lib/Word_Lib/Word_Lemmas_Internal.thy index 2f652bf30c..367d0c137b 100644 --- a/lib/Word_Lib/Word_Lemmas_Internal.thy +++ b/lib/Word_Lib/Word_Lemmas_Internal.thy @@ -772,4 +772,11 @@ lemma aligned_mask_le_mask_minus: by (metis and_mask_less' is_aligned_after_mask is_aligned_neg_mask_eq' mask_2pm1 mask_sub neg_mask_mono_le word_less_sub_le) +lemma shiftr_anti_mono: + "m \ n \ w >> n \ w >> m" for w :: "'a::len word" + apply transfer + apply (simp add: 
take_bit_drop_bit) + apply (simp add: drop_bit_eq_div zdiv_mono2) + done + end diff --git a/lib/Word_Lib/Word_Lib_Sumo.thy b/lib/Word_Lib/Word_Lib_Sumo.thy index c2eef56648..b12d5acdf5 100644 --- a/lib/Word_Lib/Word_Lib_Sumo.thy +++ b/lib/Word_Lib/Word_Lib_Sumo.thy @@ -131,10 +131,4 @@ notation (input) lemmas cast_simps = cast_simps ucast_down_bl -(* shadows the slightly weaker Word.nth_ucast *) -lemma nth_ucast: - "(ucast (w::'a::len word)::'b::len word) !! n = - (w !! n \ n < min LENGTH('a) LENGTH('b))" - by (auto simp: not_le dest: bit_imp_le_length) - end diff --git a/lib/clib/CCorresLemmas.thy b/lib/clib/CCorresLemmas.thy index 3618371aee..8f712fb50a 100644 --- a/lib/clib/CCorresLemmas.thy +++ b/lib/clib/CCorresLemmas.thy @@ -518,7 +518,7 @@ lemma lift_t_super_update: and eu: "export_uinfo s = typ_uinfo_t TYPE('b)" and lp: "lift_t g (h, d) p = Some v'" shows "lift_t g (heap_update (Ptr &(p\f)) v h, d) - = lift_t g (h, d)(p \ field_update (field_desc s) (to_bytes_p v) v')" + = (lift_t g (h, d)) (p \ field_update (field_desc s) (to_bytes_p v) v')" using fl eu lp apply - apply (rule trans [OF lift_t_super_field_update super_field_update_lookup]) @@ -630,6 +630,13 @@ lemma ccorres_liftE: by (fastforce split: xstate.splits simp: liftE_def ccorres_underlying_def bind_def' return_def unif_rrel_def) +lemma ccorres_liftE': + fixes \ + assumes cc: "ccorresG sr \ (r \ Inr) xf P P' hs a c" + shows "ccorresG sr \ r xf P P' hs (liftE a) c" + using cc + by (auto intro!: ccorres_liftE cong: ccorres_context_cong) + lemma ccorres_if_bind: "ccorres_underlying sr Gamm r xf arrel axf G G' hs (if a then (b >>= f) else (c >>= f)) d \ ccorres_underlying sr Gamm r xf arrel axf G G' hs ((if a then b else c) >>= f) d" @@ -871,9 +878,9 @@ proof - qed thus ?thesis using lxs j pn apply (auto simp: init_xs_def word_less_nat_alt neq_Nil_conv unat_word_ariths unat_of_nat push_mods - simp del: unsigned_of_nat elim!: ccorres_guard_imp2 - dest!: spec[where x=Nil]) + dest!: spec[where x=Nil] + cong: ccorres_all_cong) done qed @@ -1151,4 +1158,23 @@ qed lemmas ccorres_While' = ccorres_While[where C'=UNIV, simplified] + +\ \simp rules for rewriting common patterns in the return relations\ +lemma ccorres_dc_o_simp[simp]: + "ccorres_underlying srel \ (dc \ f) xf ar axf P Q hs m c + = ccorres_underlying srel \ dc xf ar axf P Q hs m c" + "ccorres_underlying srel \ r xf (dc \ f) axf P Q hs m c + = ccorres_underlying srel \ r xf dc axf P Q hs m c" + by (simp cong: ccorres_all_cong)+ + +lemma ccorres_inl_rrel_inl_rrel[simp]: + "ccorres_underlying srel \ r xf (inl_rrel (inl_rrel ar)) axf P Q hs m c + = ccorres_underlying srel \ r xf (inl_rrel ar) axf P Q hs m c" + by (simp add: inl_rrel_inl_rrel cong: ccorres_all_cong)+ + +lemma ccorres_inr_rrel_Inr[simp]: + "ccorres_underlying srel \ (inr_rrel r \ Inr) xf ar axf P Q hs m c + = ccorres_underlying srel \ r xf ar axf P Q hs m c" + by (simp cong: ccorres_context_cong)+ + end diff --git a/lib/clib/CCorres_Rewrite.thy b/lib/clib/CCorres_Rewrite.thy index 86455e345b..6fd0f053f7 100644 --- a/lib/clib/CCorres_Rewrite.thy +++ b/lib/clib/CCorres_Rewrite.thy @@ -17,7 +17,7 @@ lemma ccorres_com_eq_hom: elim: ccorres_semantic_equivD2) method ccorres_rewrite declares C_simp C_simp_pre C_simp_simps C_simp_throws - = simpl_rewrite hom: ccorres_com_eq_hom + = (simpl_rewrite hom: ccorres_com_eq_hom, no_name_eta) lemma hoarep_com_eq_hom: "com_eq_hom \ (\c. 
hoarep \ {} F P c Q A)" diff --git a/lib/clib/Corres_UL_C.thy b/lib/clib/Corres_UL_C.thy index a8ab1afb83..2a5c678be8 100644 --- a/lib/clib/Corres_UL_C.thy +++ b/lib/clib/Corres_UL_C.thy @@ -14,7 +14,7 @@ imports CParser.LemmaBucket_C Lib.LemmaBucket SIMPL_Lemmas - Monads.OptionMonadWP + Monads.Reader_Option_VCG begin declare word_neq_0_conv [simp del] @@ -48,7 +48,7 @@ lemma exec_handlers_use_hoare_nothrow: apply - apply (drule hoare_sound) apply (clarsimp elim: exec_Normal_elim_cases - simp: NonDetMonad.bind_def cvalid_def split_def HoarePartialDef.valid_def) + simp: Nondet_Monad.bind_def cvalid_def split_def HoarePartialDef.valid_def) apply (erule exec_handlers.cases) apply clarsimp apply (drule spec, drule spec, drule (1) mp) @@ -1045,7 +1045,7 @@ lemma ccorres_liftM_simp [simp]: apply (rule ccorresI') apply simp apply (erule (5) ccorresE) - apply (simp add: liftM_def NonDetMonad.bind_def return_def) + apply (simp add: liftM_def Nondet_Monad.bind_def return_def) apply (erule bexI [rotated]) apply (simp add: unif_rrel_def split: if_split_asm) done @@ -1719,4 +1719,110 @@ lemma ccorres_grab_asm: ccorres_underlying sr G rr xf ar ax (P and K Q) P' hs f g" by (fastforce simp: ccorres_underlying_def) + +\ \ An experimental cong rule for rewriting everywhere reasonable, with full context. + Can cause problems when there are schematic variables or when one of the return relations + takes a pair as a parameter. \ +lemma ccorres_context_cong_helper': + assumes c: "ccorres_underlying sr \ r xf ar axf P Q hs a c" + assumes "\s. P s = P' s" + \ \Don't use membership equality when rewriting Q, as the LHS can be simplified into something + that is unable to unify with the RHS. + assumes "\s s'. \ (s,s') \ sr; P' s \ \ s' \ Q = (s' \ Q')"\ + assumes "\s s'. \ (s, s') \ sr; P' s \ \ Q = Q'" + assumes "hs = hs'" + assumes "\s s'. \ (s, s') \ sr; P' s; s' \ Q' \ \ a s = a' s" + assumes "\s s' s''. \ (s, s') \ sr; P' s; s' \ Q' \ \ semantic_equiv \ s' s'' c c'" + assumes "\s s' t'. + \ (s, s') \ sr; P' s; s' \ Q'; \ \\<^sub>h \c' # hs', s'\ \ (size hs', Normal t') \ \ + xf t' = xf' t'" + assumes "\x s t s' t'. + \ (s, s') \ sr; P' s; s' \ Q'; (t, t') \ sr; (x, t) \ fst (a' s); + \ \\<^sub>h \c' # hs', s'\ \ (size hs', Normal t') \ \ + r x (xf' t') = r' x (xf' t')" + assumes "\s s' t' n. + \ (s, s') \ sr; P' s; s' \ Q'; n \ size hs'; \ \\<^sub>h \c' # hs', s'\ \ (n, Normal t') \ \ + axf t' = axf' t'" + assumes "\x s t s' t' n. + \ (s, s') \ sr; P' s; s' \ Q'; (t, t') \ sr; (x, t) \ fst (a' s); n \ size hs'; + \ \\<^sub>h \c' # hs', s'\ \ (n, Normal t') \ \ + ar x (axf' t') = ar' x (axf' t')" + shows "ccorres_underlying sr \ r' xf' ar' axf' P' Q' hs' a' c'" + using c + apply - + apply (rule ccorresI') + apply (erule (1) ccorresE) + apply (force simp: assms) + apply (force simp: assms) + apply (force simp: assms) + apply (clarsimp simp: assms) + apply (erule exec_handlers_semantic_equivD2) + apply (force simp: assms) + apply (fastforce simp: unif_rrel_def assms) + done + +lemma ccorres_context_cong_helper: + assumes "\s. P s = P' s" + assumes "\s s'. \ (s, s') \ sr; P' s \ \ Q = Q'" + assumes "hs = hs'" + assumes "\s s'. \ (s, s') \ sr; P' s; s' \ Q' \ \ a s = a' s" + assumes "\s s' s''. \ (s, s') \ sr; P' s; s' \ Q' \ \ semantic_equiv \ s' s'' c c'" + assumes "\s s' t'. + \ (s, s') \ sr; P' s; s' \ Q'; \ \\<^sub>h \c' # hs', s'\ \ (size hs', Normal t') \ \ + xf t' = xf' t'" + assumes "\x s t s' t'. 
+ \ (s, s') \ sr; P' s; s' \ Q'; (t, t') \ sr; (x, t) \ fst (a' s); + \ \\<^sub>h \c' # hs', s'\ \ (size hs', Normal t') \ \ + r x (xf' t') = r' x (xf' t')" + assumes "\s s' t' n. + \ (s, s') \ sr; P' s; s' \ Q'; n \ size hs'; \ \\<^sub>h \c' # hs', s'\ \ (n, Normal t') \ \ + axf t' = axf' t'" + assumes "\x s t s' t' n. + \ (s, s') \ sr; P' s; s' \ Q'; (t, t') \ sr; (x, t) \ fst (a' s); n \ size hs'; + \ \\<^sub>h \c' # hs', s'\ \ (n, Normal t') \ \ + ar x (axf' t') = ar' x (axf' t')" + shows "ccorres_underlying sr \ r xf ar axf P Q hs a c + = ccorres_underlying sr \ r' xf' ar' axf' P' Q' hs' a' c'" + using assms + apply - + apply rule + apply (erule ccorres_context_cong_helper'; assumption) + apply (erule ccorres_context_cong_helper') + by (fastforce simp: semantic_equiv_sym exec_handlers_semantic_equiv[where a=c and b=c'])+ + +lemmas ccorres_context_cong = ccorres_context_cong_helper[OF _ _ _ _ semantic_equivI] + +\ \ Only rewrite guards, the handler stack and function bodies, with context. + This is often more useful, as we generally want the return relations and extraction + functions to be stable while working with a ccorres_underlying statement. \ +lemma ccorres_context_weak_cong: + assumes "\s. P s = P' s" + assumes "\s s'. \ (s, s') \ sr; P' s \ \ Q = Q'" + assumes "\s s'. \ (s, s') \ sr; P' s; s' \ Q' \ \ a s = a' s" + assumes "\s s' s''. \ (s, s') \ sr; P' s; s' \ Q' \ \ \\ \c,Normal s'\ \ s'' = \\ \c',Normal s'\ \ s''" + shows "ccorres_underlying sr \ r xf ar axf P Q hs a c + = ccorres_underlying sr \ r xf ar axf P' Q' hs a' c'" + by (clarsimp simp: assms cong: ccorres_context_cong) + +\ \ Even more restrictive: only rewrite the abstract monad. \ +lemma ccorres_abstract_cong: + "\ \s s'. \ (s, s') \ sr; P s ; s' \ P' \ \ a s = b s \ \ + ccorres_underlying sr G r xf ar axf P P' hs a c + = ccorres_underlying sr G r xf ar axf P P' hs b c" + by (clarsimp cong: ccorres_context_weak_cong) + +\ \ Rewrite almost everywhere, without context. This should behave the same as with normal + term rewriting with no cong rule, except it will not rewrite the state relation or function + environment. \ +lemma ccorres_all_cong: + "\ r=r'; xf=xf'; ar=ar'; axf=axf'; P=P'; Q=Q'; hs=hs'; m=m'; c=c' \ \ + ccorres_underlying srel \ r xf ar axf P Q hs m c + = ccorres_underlying srel \ r' xf' ar' axf' P' Q' hs' m' c'" + by (simp cong: ccorres_context_cong) + +\ \ Only rewrite guards, the handler stack and function bodies, without context. + We make this the default behaviour, so that the the return relations and extraction + functions are stable under simplification. \ +lemmas ccorres_weak_cong = ccorres_all_cong[OF refl refl refl refl, cong] + end diff --git a/lib/concurrency/Atomicity_Lib.thy b/lib/concurrency/Atomicity_Lib.thy index 728dbdac88..8f50ac0e02 100644 --- a/lib/concurrency/Atomicity_Lib.thy +++ b/lib/concurrency/Atomicity_Lib.thy @@ -5,8 +5,9 @@ *) theory Atomicity_Lib -imports "Prefix_Refinement" - +imports + Prefix_Refinement + Monads.Trace_Det begin text \This library introduces a number of proofs about the question of @@ -186,7 +187,7 @@ lemma repeat_n_nothing: lemma repeat_nothing: "repeat (\_. 
{}) = return ()" by (simp add: repeat_def bind_def select_def repeat_n_nothing - Sigma_def if_fun_lift UN_If_distrib return_def + Sigma_def if_distribR UN_If_distrib return_def cong del: image_cong_simp) lemma detrace_env_steps: diff --git a/lib/concurrency/Prefix_Refinement.thy b/lib/concurrency/Prefix_Refinement.thy index 336f673b89..20f0e5bf9b 100644 --- a/lib/concurrency/Prefix_Refinement.thy +++ b/lib/concurrency/Prefix_Refinement.thy @@ -7,8 +7,6 @@ theory Prefix_Refinement imports Triv_Refinement - "Monads.TraceMonadLemmas" - begin section \Definition of prefix fragment refinement.\ @@ -1242,7 +1240,7 @@ lemma prefix_refinement_repeat: apply simp apply (rule prefix_refinement_repeat_n, assumption+) apply (rule validI_weaken_pre, assumption, simp) - apply (wp select_wp) + apply wp apply wp apply clarsimp apply clarsimp diff --git a/lib/concurrency/Triv_Refinement.thy b/lib/concurrency/Triv_Refinement.thy index 85ef2fd96f..b7dcb869b4 100644 --- a/lib/concurrency/Triv_Refinement.thy +++ b/lib/concurrency/Triv_Refinement.thy @@ -6,8 +6,8 @@ theory Triv_Refinement imports - "Monads.TraceMonadVCG" - "Monads.Strengthen" + "Monads.Trace_RG" + "Monads.Trace_Strengthen_Setup" begin @@ -23,7 +23,7 @@ lemma triv_refinement_mono_bind: "(\x. triv_refinement (b x) (d x)) \ triv_refinement (a >>= b) (a >>= d)" apply (simp add: triv_refinement_def bind_def) apply (intro allI UN_mono; simp) - apply (simp only: triv_refinement_def bind_def2 split_def) + apply (simp only: triv_refinement_def bind_def' split_def) apply (intro Un_mono allI order_refl UN_mono image_mono) apply simp done diff --git a/lib/defs.ML b/lib/defs.ML index 324867ed36..57e72712bd 100644 --- a/lib/defs.ML +++ b/lib/defs.ML @@ -29,7 +29,7 @@ val opt_unchecked_overloaded = @{keyword "overloaded"} >> K (false, true)) --| @{keyword ")"})) (false, false); fun syntax_alias global_alias local_alias b name = - Local_Theory.declaration {syntax = true, pervasive = true} (fn phi => + Local_Theory.declaration {syntax = true, pos = Position.none, pervasive = true} (fn phi => let val b' = Morphism.binding phi b in Context.mapping (global_alias b' name) (local_alias b' name) end); diff --git a/lib/test/CorresK_Test.thy b/lib/test/CorresK_Test.thy new file mode 100644 index 0000000000..c13943c6c4 --- /dev/null +++ b/lib/test/CorresK_Test.thy @@ -0,0 +1,409 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Test proofs for corres methods. Builds on AInvs image. +*) + +theory CorresK_Test +imports "Refine.VSpace_R" "Lib.CorresK_Method" +begin + +chapter \The Corres Method\ + +section \Introduction\ + +text \The @{method corresK} method tries to do for corres-style refinement proofs what +@{method wp} did for hoare logic proofs. The intention is to automate the application +of corres calculational rules, so that the bulk of the manual proof is now handling +a verification condition. In general refinement proofs are difficult to automate, so here we +exploit the fact that in l4v the abstract and executable specifications tend to be structurally +similar. Corres proofs are based on the @{const corres_underlying} constant, which takes a number +of parameters that allow it to be specialized for different flavours of refinement. 
+ +A corres statement has the following form: @{term "corres_underlying sr nf nf' r P P' f f'"}, where +@{term sr} is a state-relation, @{term nf} and @{term nf'} refer to whether or not the left and +right hand functions may fail, @{term r} is a return value relation between the functions, @{term P} +and @{term P'} are preconditions for the functions @{term f} and @{term f'} respectively. Informally +the statement says that: under the given preconditions, for every execution of @{term f'} there exists +an execution of @{term f} that is related by the given state relation @{term sr} and return-value +relation @{term r}. + +If the left and right side of a corres statement share similar structure, we can "unzip" the function +into one corres obligation for each atomic function. This is done through the application of + @{thm corres_split}. +\ + +thm corres_split[no_vars] + +text \Briefly this states that: given a corres goal proving refinement between @{term "a >>= b"} and + @{term "c >>= d"}, we can decompose this into a proof showing refinement between @{term a} and +@{term c}, and between @{term a} and @{term c}. Additionally @{term a} and @{term c} must establish +appropriate postconditions to satisfy the obligations of proving refinement between @{term b} and @{term d}. + +The first subgoal that is produced has an important characteristic: the preconditions for each +side may only discuss the return value of its respective side. This means that rules such as +@{term "corres_underlying sr nf nf' r (\s. x = x') (\_. True) (f x) (f' x')"} will not apply to a goal + if @{term x} and @{term x'} are variables generated by applying @{thm corres_split} (i.e. the +return values of functions). + +This means that any such conditions must instead be phrased as an assumption to the rule, and our rule must be +rephrased as follows: + @{term "x = x' \ corres_underlying sr nf nf' r (\_. True) (\_. True) (f x) (f' x')"}. +The result is that we must solve @{term "x = x'"} immediately after applying our rule. While this +is not a major concern for a manual proof, it proves to be a significant obstacle if we're trying +to focus on automating the "corres" part of the refinement. +\ + +section \corres_underlyingK and corres_rv\ + +text \To remedy this situation, we augment the @{const corres_underlying} definition to include +yet another flag: a single boolean. This new constant: @{const corres_underlyingK}, +will form the basis of the calculus for our corres method.\ + +thm corres_underlyingK_def[no_vars] + +text \The boolean in @{const corres_underlyingK} can be thought of as a stateless precondition. It +is used to propagate additional proof obligations for rules that either do not need to discuss +either the left or right hand state, or must discuss bound variables from both sides.\ + +thm corresK_split[no_vars] + +text \In this split rule for @{const corres_underlyingK} we see that the additional precondition @{term F'} +may discuss both @{term rv} and @{term rv'}. To show that this condition is satisified, however, +we can't use hoare logic and instead need a new definition: @{const corres_rv}.\ + +thm corres_rv_def_I_know_what_I'm_doing[no_vars] + +text \This is a weaker form of @{const corres_underlying} that is only interested in the return value +of the functions. 
In essence, it states the given functions will establish @{term Q} after executing, +assuming the given return-value relation @{term r} holds, along with the given stateless precondition +@{term F} and left/right preconditions @{term P} and @{term P'}. + +The assumption in general is that corresK_rv rules should never be written, instead corresK_rv obligations +should be propagated into either the stateless precondition (@{term F} from @{term corres_underlyingK}), +the left precondition (@{term P}) or the right precondition @{term P'}. This is implicitly handled +by @{method corresK_rv} (called from @{method corresK}) by applying one of the following rules to each conjunct:\ + +thm corres_rv_defer +thm corres_rv_wp_left +thm corres_rv_wp_right + +text \If none of these rules can be safely applied, then @{method corresK_rv} will leave the + obligation untouched. The user can manually apply one of them if desired, but this is liable to + create unsolvable proof obligations. In the worst case, the user may manually solve the goal in-place.\ + +thm corres_rv_proveT[no_vars] + +section \The corres method\ + +text \The core algorithm of the corres method is simple: + 1) start by applying any necessary weakening rules to ensure the goal has schematic preconditions + 2) apply a known @{thm corres} or @{thm corresK} rule (see next section) + 3) if unsuccessful, apply a split rule (i.e. @{thm corresK_split}) and go to 2 + +Importantly, @{method corresK} will not split a goal if it ultimately is not able to apply at least +one @{thm corres} or @{thm corresK} rule. +\ + +subsection \The corres and corresK named_theorems\ + +text \To address the fact that existing refinement rules are phrased as @{const corres_underlying} +and not @{const corres_underlyingK} there are two different named_theorems that are used for different +kind of rules @{thm corres} and @{thm corresK}. A @{thm corres} rule is understood to be phrased +with @{const corres_underlying} and may have additional assumptions. These assumptions will be +propagated through the additional @{term F} flag in @{const corres_underlyingK}, rather than presented +as proof obligations immediately. A @{thm corresK} rule is understood to be phrased with +@{const corres_underlyingK}, and is meant for calculational rules which may have proper assumptions that +should not be propagated. +\ +thm corresK +thm corres + +subsection \The corresc method\ + +text \Similar to @{method wpc}, @{method corresKc} can handle case statements in @{const corres_underlyingK} +proof goals. Importantly, however, it is split into two sub-methods @{method corresKc_left} and +@{method corresKc_right}, which perform case-splitting on each side respectively. The combined method +@{method corresKc}, however, attempts to discharge the contradictions that arise from the quadratic +blowup of a case analysis on both the left and right sides.\ + +subsection \corres_concrete_r, corres_concrete_rE\ + +text \Some @{thm corresK} rules should only be applied if certain variables are concrete +(i.e. not schematic) in the goal. These are classified separately with the named_theorems +@{thm corresK_concrete_r} and @{thm corresK_concrete_rER}. 
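+(A typical reason, sketched here for illustration rather than taken from any particular rule, is
+that applying such a rule against a still-schematic relation would commit the schematic too
+early, for instance by instantiating it to equality before the rest of the goal has had a chance
+to constrain it.)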
The first +indicates that the return value relation of the goal must be concrete, the second indicates that +only the left side of the error relation must be concrete.\ + +thm corresK_concrete_r +thm corresK_concrete_rER + +subsection \The corresK_search method\ + +text \The purpose of @{method corresK_search} is to address cases where there is non-trivial control flow. +In particular: in the case where there is an "if" statement or either side needs to be symbolically +executed. The core idea is that corresK_search should be provided with a "search" rule that acts +as an anchoring point. Symbolic execution and control flow is decomposed until either the given +rule is successfully applied or all search branches are exhausted.\ + +subsubsection \Symbolic Execution\ + +text \Symbolic execution is handled by two named theorems: + @{thm corresK_symb_exec_ls} and @{thm corresK_symb_exec_rs}, which perform symbolic execution on +the left and right hand sides of a corres goal.\ + +thm corresK_symb_exec_ls +thm corresK_symb_exec_rs + +text \A function may be symbolically executed if it does not modify the state, i.e. its only purpose +is to compute some value and return it. After being symbolically executed, +this value can only be discussed by the precondition of the associated side or the stateless +precondition of corresK. The resulting @{const corres_rv} goal has @{const corres_noop} as the +function on the alternate side. This gives @{method corresK_rv} a hint that the resulting obligation +should be aggressively re-written into a hoare triple over @{term m} if it can't be propagated +back statelessly safely. +\ + + +section \Demo\ + + +context begin interpretation Arch . + +(* VSpace_R *) + + +lemmas load_hw_asid_corres_args[corres] = + loadHWASID_corres[@lift_corres_args] + +lemmas invalidate_asid_corres_args[corres] = + invalidateASID_corres[@lift_corres_args] + +lemmas invalidate_hw_asid_entry_corres_args[corres] = + invalidateHWASIDEntry_corres[@lift_corres_args] + +lemma invalidateASIDEntry_corres: + "corres dc (valid_vspace_objs and valid_asid_map + and K (asid \ mask asid_bits \ asid \ 0) + and vspace_at_asid asid pd and valid_vs_lookup + and unique_table_refs o caps_of_state + and valid_global_objs and valid_arch_state + and pspace_aligned and pspace_distinct) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (invalidate_asid_entry asid) (invalidateASIDEntry asid)" + apply (simp add: invalidate_asid_entry_def invalidateASIDEntry_def) + apply_debug (trace) (* apply_trace between steps *) + (tags "corres") (* break at breakpoints labelled "corres" *) + corresK (* weaken precondition *) + continue (* split *) + continue (* solve load_hw_asid *) + continue (* split *) + continue (* apply corres_when *) + continue (* trivial simplification *) + continue (* invalidate _hw_asid_entry *) + finish (* invalidate_asid *) + + apply (corresKsimp wp: load_hw_asid_wp)+ + apply (fastforce simp: pd_at_asid_uniq) + done + + +crunch typ_at'[wp]: invalidateASIDEntry, flushSpace "typ_at' T t" +crunch ksCurThread[wp]: invalidateASIDEntry, flushSpace "\s. P (ksCurThread s)" +crunch obj_at'[wp]: invalidateASIDEntry, flushSpace "obj_at' P p" + +lemmas flush_space_corres_args[corres] = + flushSpace_corres[@lift_corres_args] + +lemmas invalidate_asid_entry_corres_args[corres] = + invalidateASIDEntry_corres[@lift_corres_args] + + +lemma corres_inst_eq_ext: + "(\x. 
corres_inst_eq (f x) (f' x)) \ corres_inst_eq f f'" + by (auto simp add: corres_inst_eq_def) + +lemma delete_asid_corresb: + notes [corres] = corres_gets_asid getCurThread_corres setObject_ASIDPool_corres and + [@lift_corres_args, corres] = get_asid_pool_corres_inv' + invalidateASIDEntry_corres + setVMRoot_corres + notes [wp] = set_asid_pool_asid_map_unmap set_asid_pool_vs_lookup_unmap' + set_asid_pool_vspace_objs_unmap' + invalidate_asid_entry_invalidates + getASID_wp + notes if_weak_cong[cong] option.case_cong_weak[cong] + shows + "corres dc + (invs and valid_etcbs and K (asid \ mask asid_bits \ asid \ 0)) + (pspace_aligned' and pspace_distinct' and no_0_obj' + and valid_arch_state' and cur_tcb') + (delete_asid asid pd) (deleteASID asid pd)" + apply (simp add: delete_asid_def deleteASID_def) + apply_debug (trace) (* apply_trace between steps *) + (tags "corres") (* break at breakpoints labelled "corres" *) + corresK (* weaken precondition *) + continue (* split *) + continue (* gets rule *) + continue (* corresc *) + continue (* return rule *) + continue (* split *) + continue (* function application *) + continue (* liftM rule *) + continue (* get_asid_pool_corres_inv' *) + continue (* function application *) + continue (* function application *) + continue (* corresK_when *) + continue (* split *) + continue (* flushSpace_corres *) + continue (* K_bind *) + continue (* K_bind *) + continue (* split *) + continue (* invalidateASIDEntry_corres *) + continue (* K_bind *) + continue (* return bind *) + continue (* K_bind *) + continue (* split *) + continue (* backtracking *) + continue (* split *) + continue (* function application *) + continue (* setObject_ASIDPool_corres *) + continue (* K_bind *) + continue (* K_bind *) + continue (* split *) + continue (* getCurThread_corres *) + continue (* setVMRoot_corres *) + finish (* backtracking? *) + apply (corresKsimp simp: mask_asid_low_bits_ucast_ucast + | fold cur_tcb_def | wps)+ + apply (frule arm_asid_table_related,clarsimp) + apply (rule conjI) + apply (intro impI allI) + apply (rule conjI) + apply (safe; assumption?) + apply (rule ext) + apply (fastforce simp: inv_def dest: ucast_ucast_eq) + apply (rule context_conjI) + apply (fastforce simp: o_def dest: valid_asid_tableD invs_valid_asid_table) + apply (intro allI impI) + apply (subgoal_tac "vspace_at_asid asid pd s") + prefer 2 + apply (simp add: vspace_at_asid_def) + apply (rule vs_lookupI) + apply (simp add: vs_asid_refs_def) + apply (rule image_eqI[OF refl]) + apply (rule graph_ofI) + apply fastforce + apply (rule r_into_rtrancl) + apply simp + apply (rule vs_lookup1I [OF _ _ refl], assumption) + apply (simp add: vs_refs_def) + apply (rule image_eqI[rotated], erule graph_ofI) + apply (simp add: mask_asid_low_bits_ucast_ucast) + prefer 2 + apply (intro allI impI context_conjI; assumption?) + apply (rule aligned_distinct_relation_asid_pool_atI'; fastforce?) + apply (fastforce simp: o_def dest: valid_asid_tableD invs_valid_asid_table) + apply (simp add: cur_tcb'_def) + apply (safe; assumption?) + apply (erule ko_at_weakenE) + apply (clarsimp simp: graph_of_def) + apply (fastforce split: if_split_asm) + apply (frule invs_vspace_objs) + apply (drule (2) valid_vspace_objsD) + apply (erule ranE) + apply (fastforce split: if_split_asm) + apply (erule ko_at_weakenE) + apply (clarsimp simp: graph_of_def) + apply (fastforce split: if_split_asm) + done + +lemma cte_wp_at_ex: + "cte_wp_at (\_. True) p s \ (\cap. 
cte_wp_at ((=) cap) p s)" + by (simp add: cte_wp_at_def) + +(* Sadly broken: +lemma setVMRootForFlush_corres: + notes [corres] = getCurThread_corres getSlotCap_corres + shows + "corres (=) + (cur_tcb and vspace_at_asid asid pd + and K (asid \ 0 \ asid \ mask asid_bits) + and valid_asid_map and valid_vs_lookup + and valid_vspace_objs and valid_global_objs + and unique_table_refs o caps_of_state + and valid_arch_state + and pspace_aligned and pspace_distinct) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (set_vm_root_for_flush pd asid) + (setVMRootForFlush pd asid)" + apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) + apply corres + apply_debug (trace) (tags "corresK_search") (corresK_search search: armv_contextSwitch_corres) + continue (* step left *) + continue (* if rule *) + continue (* failed corres on first subgoal, trying next *) + continue (* fail corres on last subgoal, trying reverse if rule *) + continue (* can't make corres progress here, trying other goal *) + finish (* successful goal discharged by corres *) + + apply (corresKsimp wp: get_cap_wp getSlotCap_wp)+ + apply (rule context_conjI) + subgoal by (simp add: cte_map_def objBits_simps tcb_cnode_index_def + tcbVTableSlot_def to_bl_1 cte_level_bits_def) + apply (rule context_conjI) + subgoal by (fastforce simp: cur_tcb_def intro!: tcb_at_cte_at_1[simplified]) + apply (rule conjI) + subgoal by (fastforce simp: isCap_simps) + apply (drule cte_wp_at_ex) + apply clarsimp + apply (drule (1) pspace_relation_cte_wp_at[rotated 1]; (assumption | clarsimp)?) + apply (drule cte_wp_at_norm') + apply clarsimp + apply (rule_tac x="cteCap cte" in exI) + apply (auto elim: cte_wp_at_weakenE' dest!: curthread_relation) + done + +text \Note we can wrap it all up in corresKsimp\ + +lemma setVMRootForFlush_corres': + notes [corres] = getCurThread_corres getSlotCap_corres + shows + "corres (=) + (cur_tcb and vspace_at_asid asid pd + and K (asid \ 0 \ asid \ mask asid_bits) + and valid_asid_map and valid_vs_lookup + and valid_vspace_objs and valid_global_objs + and unique_table_refs o caps_of_state + and valid_arch_state + and pspace_aligned and pspace_distinct) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (set_vm_root_for_flush pd asid) + (setVMRootForFlush pd asid)" + apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) + apply (corresKsimp search: armv_contextSwitch_corres + wp: get_cap_wp getSlotCap_wp + simp: isCap_simps) + apply (rule context_conjI) + subgoal by (simp add: cte_map_def objBits_simps tcb_cnode_index_def + tcbVTableSlot_def to_bl_1 cte_level_bits_def) + apply (rule context_conjI) + subgoal by (fastforce simp: cur_tcb_def intro!: tcb_at_cte_at_1[simplified]) + apply (rule conjI) + subgoal by (fastforce) + apply (drule cte_wp_at_ex) + apply clarsimp + apply (drule (1) pspace_relation_cte_wp_at[rotated 1]; (assumption | clarsimp)?) 
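+  (* presumably: cte_wp_at_norm' exposes an explicit cte so that its cap can be supplied as
+     the existential witness in the following steps *)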
+ apply (drule cte_wp_at_norm') + apply clarsimp + apply (rule_tac x="cteCap cte" in exI) + apply (auto elim: cte_wp_at_weakenE' dest!: curthread_relation) + done +*) + +end +end diff --git a/lib/test/Corres_Test.thy b/lib/test/Corres_Test.thy old mode 100755 new mode 100644 index b58b898a68..eec44b9e1e --- a/lib/test/Corres_Test.thy +++ b/lib/test/Corres_Test.thy @@ -1,409 +1,312 @@ (* - * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * Copyright 2023, Proofcraft Pty Ltd * - * SPDX-License-Identifier: GPL-2.0-only + * SPDX-License-Identifier: BSD-2-Clause *) -(* - Test proofs for corres methods. Builds on AInvs image. -*) - theory Corres_Test -imports "Refine.VSpace_R" "Lib.Corres_Method" +imports Lib.Corres_Method begin -chapter \The Corres Method\ - -section \Introduction\ - -text \The @{method corres} method tries to do for corres-style refinement proofs what -@{method wp} did for hoare logic proofs. The intention is to automate the application -of corres calculational rules, so that the bulk of the manual proof is now handling -a verification condition. In general refinement proofs are difficult to automate, so here we -exploit the fact that in l4v the abstract and executable specifications tend to be structurally -similar. Corres proofs are based on the @{const corres_underlying} constant, which takes a number -of parameters that allow it to be specialized for different flavours of refinement. - -A corres statement has the following form: @{term "corres_underlying sr nf nf' r P P' f f'"}, where -@{term sr} is a state-relation, @{term nf} and @{term nf'} refer to whether or not the left and -right hand functions may fail, @{term r} is a return value relation between the functions, @{term P} -and @{term P'} are preconditions for the functions @{term f} and @{term f'} respectively. Informally -the statement says that: under the given preconditions, for every execution of @{term f'} there exists -an execution of @{term f} that is related by the given state relation @{term sr} and return-value -relation @{term r}. - -If the left and right side of a corres statement share similar structure, we can "unzip" the function -into one corres obligation for each atomic function. This is done through the application of - @{thm corres_split}. -\ - -thm corres_split[no_vars] - -text \Briefly this states that: given a corres goal proving refinement between @{term "a >>= b"} and - @{term "c >>= d"}, we can decompose this into a proof showing refinement between @{term a} and -@{term c}, and between @{term a} and @{term c}. Additionally @{term a} and @{term c} must establish -appropriate postconditions to satisfy the obligations of proving refinement between @{term b} and @{term d}. - -The first subgoal that is produced has an important characteristic: the preconditions for each -side may only discuss the return value of its respective side. This means that rules such as -@{term "corres_underlying sr nf nf' r (\s. x = x') (\_. True) (f x) (f' x')"} will not apply to a goal - if @{term x} and @{term x'} are variables generated by applying @{thm corres_split} (i.e. the -return values of functions). - -This means that any such conditions must instead be phrased as an assumption to the rule, and our rule must be -rephrased as follows: - @{term "x = x' \ corres_underlying sr nf nf' r (\_. True) (\_. True) (f x) (f' x')"}. -The result is that we must solve @{term "x = x'"} immediately after applying our rule. 
While this -is not a major concern for a manual proof, it proves to be a significant obstacle if we're trying -to focus on automating the "corres" part of the refinement. -\ +(* Test cases and tutorial/docs for Corres_Method *) -section \corres_underlyingK and corres_rv\ -text \To remedy this situation, we augment the @{const corres_underlying} definition to include -yet another flag: a single boolean. This new constant: @{const corres_underlyingK}, -will form the basis of the calculus for our corres method.\ - -thm corres_underlyingK_def[no_vars] - -text \The boolean in @{const corres_underlyingK} can be thought of as a stateless precondition. It -is used to propagate additional proof obligations for rules that either do not need to discuss -either the left or right hand state, or must discuss bound variables from both sides.\ +section "Setup" -thm corresK_split[no_vars] +(* Setting up some monads and lemmas to play with later *) +experiment + fixes sr nf nf' -text \In this split rule for @{const corres_underlyingK} we see that the additional precondition @{term F'} -may discuss both @{term rv} and @{term rv'}. To show that this condition is satisified, however, -we can't use hoare logic and instead need a new definition: @{const corres_rv}.\ + fixes f f' :: "('s, nat) nondet_monad" + assumes f: "corres_underlying sr nf nf' (=) \ \ f f'" -thm corres_rv_def_I_know_what_I'm_doing[no_vars] + fixes Q g g' t + assumes g: "\x x'::nat. x = t x' \ corres_underlying sr nf nf' (=) Q \ (g x) (g' x')" + assumes t: "\x. t x = x" -text \This is a weaker form of @{const corres_underlying} that is only interested in the return value -of the functions. In essence, it states the given functions will establish @{term Q} after executing, -assuming the given return-value relation @{term r} holds, along with the given stateless precondition -@{term F} and left/right preconditions @{term P} and @{term P'}. + fixes P + assumes Q: "\P\ f \\_. Q\" -The assumption in general is that corres_rv rules should never be written, instead corres_rv obligations -should be propagated into either the stateless precondition (@{term F} from @{term corres_underlyingK}), -the left precondition (@{term P}) or the right precondition @{term P'}. This is implicitly handled -by @{method corres_rv} (called from @{method corres}) by applying one of the following rules to each conjunct:\ - -thm corres_rv_defer -thm corres_rv_wp_left -thm corres_rv_wp_right - -text \If none of these rules can be safely applied, then @{method corres_rv} will leave the - obligation untouched. The user can manually apply one of them if desired, but this is liable to - create unsolvable proof obligations. In the worst case, the user may manually solve the goal in-place.\ - -thm corres_rv_proveT[no_vars] - -section \The corres method\ + fixes h h' + assumes h: "corres_underlying sr nf nf' (=) \ \ h h'" +begin -text \The core algorithm of the corres method is simple: - 1) start by applying any necessary weakening rules to ensure the goal has schematic preconditions - 2) apply a known @{thm corres} or @{thm corresK} rule (see next section) - 3) if unsuccessful, apply a split rule (i.e. @{thm corresK_split}) and go to 2 - -Importantly, @{method corres} will not split a goal if it ultimately is not able to apply at least -one @{thm corres} or @{thm corresK} rule. 
-\ - -subsection \The corres and corresK named_theorems\ - -text \To address the fact that existing refinement rules are phrased as @{const corres_underlying} -and not @{const corres_underlyingK} there are two different named_theorems that are used for different -kind of rules @{thm corres} and @{thm corresK}. A @{thm corres} rule is understood to be phrased -with @{const corres_underlying} and may have additional assumptions. These assumptions will be -propagated through the additional @{term F} flag in @{const corres_underlyingK}, rather than presented -as proof obligations immediately. A @{thm corresK} rule is understood to be phrased with -@{const corres_underlyingK}, and is meant for calculational rules which may have proper assumptions that -should not be propagated. -\ -thm corresK -thm corres - -subsection \The corresc method\ - -text \Similar to @{method wpc}, @{method corresc} can handle case statements in @{const corres_underlyingK} -proof goals. Importantly, however, it is split into two sub-methods @{method corresc_left} and -@{method corresc_right}, which perform case-splitting on each side respectively. The combined method -@{method corresc}, however, attempts to discharge the contradictions that arise from the quadratic -blowup of a case analysis on both the left and right sides.\ +abbreviation "corres \ corres_underlying sr nf nf'" -subsection \corres_concrete_r, corres_concrete_rE\ - -text \Some @{thm corresK} rules should only be applied if certain variables are concrete -(i.e. not schematic) in the goal. These are classified separately with the named_theorems -@{thm corres_concrete_r} and @{thm corres_concrete_rER}. The first -indicates that the return value relation of the goal must be concrete, the second indicates that -only the left side of the error relation must be concrete.\ -thm corres_concrete_r -thm corres_concrete_rER - -subsection \The corres_search method\ +section "Examples" -text \The purpose of @{method corres_search} is to address cases where there is non-trivial control flow. -In particular: in the case where there is an "if" statement or either side needs to be symbolically -executed. The core idea is that corres_search should be provided with a "search" rule that acts -as an anchoring point. Symbolic execution and control flow is decomposed until either the given -rule is successfully applied or all search branches are exhausted.\ +(* The purpose of the corres method is to make progres on easy corres steps, where things + "obviously" match up on the concrete and abstract side. You can provide basic terminal + corres rules like f and g to try. You can provide simp rules to rewrite corres goals + and to solve side conditions of terminal rules such as the rule for g above. Finally, + you can provide wp rules to solve or make progress on the final wp goals that a corres + proof produces. 
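+   A typical invocation therefore has the shape
+     apply (corres corres: <terminal rules> simp: <rewrites> wp: <wp rules>)
+   where the three sections can be combined freely and any of them can be left out; the
+   angle-bracket placeholders are descriptions, not method syntax. The lemma below is a
+   concrete instance.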
*) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + by (corres corres: f g wp: Q simp: t) -subsubsection \Symbolic Execution\ +(* All of these can be declared globally and will be picked up by the method *) +context + notes [corres] = f g + notes [wp] = Q + notes [simp] = t +begin -text \Symbolic execution is handled by two named theorems: - @{thm corres_symb_exec_ls} and @{thm corres_symb_exec_rs}, which perform symbolic execution on -the left and right hand sides of a corres goal.\ +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + by corres -thm corres_symb_exec_ls -thm corres_symb_exec_rs +end -text \A function may be symbolically executed if it does not modify the state, i.e. its only purpose -is to compute some value and return it. After being symbolically executed, -this value can only be discussed by the precondition of the associated side or the stateless -precondition of corresK. The resulting @{const corres_rv} goal has @{const corres_noop} as the -function on the alternate side. This gives @{method corres_rv} a hint that the resulting obligation -should be aggressively re-written into a hoare triple over @{term m} if it can't be propagated -back statelessly safely. -\ +(* During development, the rules needed are often not declared [corres] yet or the right + simp rules for side conditions etc have yet to be figured out. The following proof + demonstrates this process. *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + (* We begin by invoking "corres" *) + apply corres + (* In this case, not much has happened yet, corres has only produced schematic preconditions. + However, we can see that f and f' are the heads of both sides, and searching with find_theorems + for a corres rule that mentions those two turns up the rule "f", which we provided to the corres + method. At this point we can either go back and add it to the previous line, or we + add a new invocation. The process is very similar to using wpsimp. *) + apply (corres corres: f) + (* We see that f has been split off, and we now have a goal for g. Same process as above finds + the corresponding rule. *) + apply (corres corres: g) + (* This solves the corres goal but leaves the side condition of the "g" rule. We can + now either solve it manually with "apply (simp add: t)" and then continue, or, if it really + is as simple as a few simp rules, we can tell the corres method to apply it directly *) + apply (corres simp: t) + (* We now have only wp goals and the final implication left. *) + apply (wp Q) + apply wp + apply simp + apply simp + done +(* Once we have found this proof, we can roll it up, and merge eg. the "simp: t" into the corres + line before. *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + apply corres + apply (corres corres: f) + apply (corres corres: g simp: t) + (* Adding "wp: Q" to the previous line does not help at this stage, because this wp goal + is produced in the (corres corres: f) line above. We could do + apply (corres corres: g simp: t wp: Q)+ + above, which *would* solve the rest of the goals, but using + in an uncontrolled way + is not very stable and therefore not recommended style. *) + apply (wp Q) + apply wp + apply simp + apply simp + done -section \Demo\ +(* Merging the g and f corres lines does enable us to prove the Q wp rule. 
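+   (This works because the wp goal for Q is produced by the same invocation that splits off f,
+   as noted above, so the wp: Q fact is available when that goal is attempted.)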
*) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + apply corres + apply (corres corres: f g simp: t wp: Q) + (* This will still leave the final implication, because we have produced that implication + outside this subgoal. Merging the two corres invocations above will attempt the final + implications automatically as well. *) + apply simp + apply simp + done -context begin interpretation Arch . - -(* VSpace_R *) +section "More controlled single-stepping" + +(* Sometimes invoking "corres" does too much or too little. + Too much can occur when the method applies a rule we didn't know is in the [corres] set and + which leaves us with a strange side condition to solve. Or we may have added an unsafe, + not-really-terminal rule to [corres] and now we are getting an unprovable goal. Too little + can occur when the method refuses to split off the head terms even though it looks like a + terminal corres rule should apply. For these cases, we can take apart some of the internal + steps like this: *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + (* Controlled way to only introduce schematic preconditions and the final implication *) + apply corres_pre + (* Invoking "corres" would now fail. Maybe we are convinced that the "f" rule is declared + [corres] and we want to figure out why it does not apply. Invoking the corres_split method + will give us the goal the terminal corres rule is tried on: *) + apply corres_split + (* Trying out "rule f" does work now -- if it didn't we could debug that and find out why *) + apply (succeeds \rule f\) + (* Turns out we forgot to declare it, so we add it manually, and the corres method now + succeeds on the subgoal *) + apply (corres corres: f) + (* For the next goal, we have only g. Maybe we want to debug why corres doesn't solve the + application of the "g" rule automatically, or where the "x = t x" side condition comes from. + To do that, we can apply the rule manually: *) + apply (rule g) + (* Now it is clear where that side condition comes from, and we can look for rules to solve + it. 
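+   find_theorems on the shape of the side condition, or a quick try0, is usually enough to
+   locate a suitable fact; here the assumed rewrite t does the job.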
*) + apply (simp add: t) + apply (wpsimp wp: Q)+ + done -lemmas load_hw_asid_corres_args[corres] = - loadHWASID_corres[@lift_corres_args] - -lemmas invalidate_asid_corres_args[corres] = - invalidateASID_corres[@lift_corres_args] - -lemmas invalidate_hw_asid_entry_corres_args[corres] = - invalidateHWASIDEntry_corres[@lift_corres_args] - -lemma invalidateASIDEntry_corres: - "corres dc (valid_vspace_objs and valid_asid_map - and K (asid \ mask asid_bits \ asid \ 0) - and vspace_at_asid asid pd and valid_vs_lookup - and unique_table_refs o caps_of_state - and valid_global_objs and valid_arch_state - and pspace_aligned and pspace_distinct) - (pspace_aligned' and pspace_distinct' and no_0_obj') - (invalidate_asid_entry asid) (invalidateASIDEntry asid)" - apply (simp add: invalidate_asid_entry_def invalidateASIDEntry_def) - apply_debug (trace) (* apply_trace between steps *) - (tags "corres") (* break at breakpoints labelled "corres" *) - corres (* weaken precondition *) - continue (* split *) - continue (* solve load_hw_asid *) - continue (* split *) - continue (* apply corres_when *) - continue (* trivial simplification *) - continue (* invalidate _hw_asid_entry *) - finish (* invalidate_asid *) - - apply (corressimp wp: load_hw_asid_wp)+ - apply (fastforce simp: pd_at_asid_uniq) +(* Using apply_debug *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + (* The corres method declares a "corres" breakpoint tag that can be used with apply_debug to + step through what it does. This is useful if the method goes too far or applies rules we + didn't expect. The (trace) option to apply_debug allows us to see which rules were applied. *) + apply_debug (trace) (tags "corres") (corres corres: f g simp: t wp: Q) + continue (* guard implication *) + continue (* application of f *) + continue (* application of g, including solved side condition for t *) + continue (* wpsimp+, which happens to solve all remaining goals *) + finish done +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + (* There is also a corres_cleanup breakpoint for further data *) + apply_debug (trace) (tags "corres", "corres_cleanup") (corres corres: f g simp: t wp: Q) + continue (* guard implication *) + continue (* application of f *) + continue (* application of g, showing side condition *) + continue (* solve side condition (separate goal) *) + continue (* wpsimp+, which happens to solve all remaining goals *) + finish + done -crunch typ_at'[wp]: invalidateASIDEntry, flushSpace "typ_at' T t" -crunch ksCurThread[wp]: invalidateASIDEntry, flushSpace "\s. P (ksCurThread s)" -crunch obj_at'[wp]: invalidateASIDEntry, flushSpace "obj_at' P p" - -lemmas flush_space_corres_args[corres] = - flushSpace_corres[@lift_corres_args] - -lemmas invalidate_asid_entry_corres_args[corres] = - invalidateASIDEntry_corres[@lift_corres_args] - - -lemma corres_inst_eq_ext: - "(\x. 
corres_inst_eq (f x) (f' x)) \ corres_inst_eq f f'" - by (auto simp add: corres_inst_eq_def) - -lemma delete_asid_corresb: - notes [corres] = corres_gets_asid getCurThread_corres setObject_ASIDPool_corres and - [@lift_corres_args, corres] = get_asid_pool_corres_inv' - invalidateASIDEntry_corres - setVMRoot_corres - notes [wp] = set_asid_pool_asid_map_unmap set_asid_pool_vs_lookup_unmap' - set_asid_pool_vspace_objs_unmap' - invalidate_asid_entry_invalidates - getASID_wp - notes if_weak_cong[cong] option.case_cong_weak[cong] - shows - "corres dc - (invs and valid_etcbs and K (asid \ mask asid_bits \ asid \ 0)) - (pspace_aligned' and pspace_distinct' and no_0_obj' - and valid_arch_state' and cur_tcb') - (delete_asid asid pd) (deleteASID asid pd)" - apply (simp add: delete_asid_def deleteASID_def) - apply_debug (trace) (* apply_trace between steps *) - (tags "corres") (* break at breakpoints labelled "corres" *) - corres (* weaken precondition *) - continue (* split *) - continue (* gets rule *) - continue (* corresc *) - continue (* return rule *) - continue (* split *) - continue (* function application *) - continue (* liftM rule *) - continue (* get_asid_pool_corres_inv' *) - continue (* function application *) - continue (* function application *) - continue (* corresK_when *) - continue (* split *) - continue (* flushSpace_corres *) - continue (* K_bind *) - continue (* K_bind *) - continue (* split *) - continue (* invalidateASIDEntry_corres *) - continue (* K_bind *) - continue (* return bind *) - continue (* K_bind *) - continue (* split *) - continue (* backtracking *) - continue (* split *) - continue (* function application *) - continue (* setObject_ASIDPool_corres *) - continue (* K_bind *) - continue (* K_bind *) - continue (* split *) - continue (* getCurThread_corres *) - continue (* setVMRoot_corres *) - finish (* backtracking? *) - apply (corressimp simp: mask_asid_low_bits_ucast_ucast - | fold cur_tcb_def | wps)+ - apply (frule arm_asid_table_related,clarsimp) - apply (rule conjI) - apply (intro impI allI) - apply (rule conjI) - apply (safe; assumption?) - apply (rule ext) - apply (fastforce simp: inv_def dest: ucast_ucast_eq) - apply (rule context_conjI) - apply (fastforce simp: o_def dest: valid_asid_tableD invs_valid_asid_table) - apply (intro allI impI) - apply (subgoal_tac "vspace_at_asid asid pd s") - prefer 2 - apply (simp add: vspace_at_asid_def) - apply (rule vs_lookupI) - apply (simp add: vs_asid_refs_def) - apply (rule image_eqI[OF refl]) - apply (rule graph_ofI) - apply fastforce - apply (rule r_into_rtrancl) - apply simp - apply (rule vs_lookup1I [OF _ _ refl], assumption) - apply (simp add: vs_refs_def) - apply (rule image_eqI[rotated], erule graph_ofI) - apply (simp add: mask_asid_low_bits_ucast_ucast) - prefer 2 - apply (intro allI impI context_conjI; assumption?) - apply (rule aligned_distinct_relation_asid_pool_atI'; fastforce?) - apply (fastforce simp: o_def dest: valid_asid_tableD invs_valid_asid_table) - apply (simp add: cur_tcb'_def) - apply (safe; assumption?) 
- apply (erule ko_at_weakenE) - apply (clarsimp simp: graph_of_def) - apply (fastforce split: if_split_asm) - apply (frule invs_vspace_objs) - apply (drule (2) valid_vspace_objsD) - apply (erule ranE) - apply (fastforce split: if_split_asm) - apply (erule ko_at_weakenE) - apply (clarsimp simp: graph_of_def) - apply (fastforce split: if_split_asm) +(* Rewriting corres terms *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ liftM t f'; g' y od)" + (* In this goal, corres will stop at liftM without finding a rule to apply. Unfolding + liftM_def exposes the bare f' to the toplevel and lets it apply the existing "f" rule. + The "t" rewrite happens to solve the now more complex side condition for g. + Unfolding liftM_def is generally preferred to the liftM corres simp rules, because + these transform schematic guards in ways that later hinder unification. *) + by (corres corres: f g simp: liftM_def t wp: Q) + +(* Rewriting corres terms more carefully *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ liftM t f'; g' y od)" + (* "term_simp" tells corres to apply the following simp rules only to the side conditions + of terminal corres steps, not to the corres terms themselves. Usually those simp rules + are fairly distinct and side-condition rules don't do anything to the corres terms, so + it's fine to put them in the "simp:" section, but occasionally we want more control. *) + by (corres corres: f g simp: liftM_def term_simp: t wp: Q) + +(* Dealing with asserts and symbolic execution *) +lemma "corres (=) P \ (do s \ get; assert (P s); x \ f; g x od) (do y \ f'; g' y od)" + (* Here we'd like to do symbolic execution on "get" and then use the unsafe rule + corres_assert_gen_asm_l for the assert. Often it is good enough to locally + provide such rules as [corres], but adding corres_symb_exec_l here for instance will + go too far. It will try to execute all of get, assert, and f: *) + apply (corres corres: corres_symb_exec_l[where P=P]) + (* unsolvable *) + oops + +lemma "corres (=) P \ (do s \ get; assert (P s); x \ f; g x od) (do y \ f'; g' y od)" + (* We can provide the same rule as a fallback rule. This means it will be tried only when + no other rule has worked. This lets f and corres_assert_gen_asm_l go first. *) + by (corres corres: corres_assert_gen_asm_l f g + fallback: corres_symb_exec_l[where P=P] + simp: t wp: Q) + +lemma "corres (=) P \ (do s \ get; assert (P s); x \ f; g x od) (do y \ f'; g' y od)" + (* For even more control, we can instantiate the rule further: *) + by (corres corres: corres_assert_gen_asm_l f g + fallback: corres_symb_exec_l[where P=P and m=get] + simp: t wp: Q) + + +section "@{method corres'} and @{method corres_cleanup} parameter methods" + +(* First with corres only, no cleanup method: *) +lemma "corres (=) P \ (do x \ f; g x; h od) (do y \ f'; g' y; h' od)" + apply (corres corres: f g) + (* Imagine we get here, and (simp add: t) wasn't strong enough to solve the side condition. + Maybe we needed fastforce for it: *) + apply (fastforce simp: t) + (* It is absolutely fine to leave this fastforce here, and continue the corres proof *) + apply (corres corres: h) + apply (wpsimp wp: Q)+ done -lemma cte_wp_at_ex: - "cte_wp_at (\_. True) p s \ (\cap. 
cte_wp_at ((=) cap) p s)" - by (simp add: cte_wp_at_def) - -(* Sadly broken: -lemma setVMRootForFlush_corres: - notes [corres] = getCurThread_corres getSlotCap_corres - shows - "corres (=) - (cur_tcb and vspace_at_asid asid pd - and K (asid \ 0 \ asid \ mask asid_bits) - and valid_asid_map and valid_vs_lookup - and valid_vspace_objs and valid_global_objs - and unique_table_refs o caps_of_state - and valid_arch_state - and pspace_aligned and pspace_distinct) - (pspace_aligned' and pspace_distinct' and no_0_obj') - (set_vm_root_for_flush pd asid) - (setVMRootForFlush pd asid)" - apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) - apply corres - apply_debug (trace) (tags "corres_search") (corres_search search: armv_contextSwitch_corres) - continue (* step left *) - continue (* if rule *) - continue (* failed corres on first subgoal, trying next *) - continue (* fail corres on last subgoal, trying reverse if rule *) - continue (* can't make corres progress here, trying other goal *) - finish (* successful goal discharged by corres *) - - apply (corressimp wp: get_cap_wp getSlotCap_wp)+ - apply (rule context_conjI) - subgoal by (simp add: cte_map_def objBits_simps tcb_cnode_index_def - tcbVTableSlot_def to_bl_1 cte_level_bits_def) - apply (rule context_conjI) - subgoal by (fastforce simp: cur_tcb_def intro!: tcb_at_cte_at_1[simplified]) - apply (rule conjI) - subgoal by (fastforce simp: isCap_simps) - apply (drule cte_wp_at_ex) - apply clarsimp - apply (drule (1) pspace_relation_cte_wp_at[rotated 1]; (assumption | clarsimp)?) - apply (drule cte_wp_at_norm') - apply clarsimp - apply (rule_tac x="cteCap cte" in exI) - apply (auto elim: cte_wp_at_weakenE' dest!: curthread_relation) +(* Sometimes that one fastforce is the only thing standing in the way of full automation. Providing + the fastforce as a cleanup method can help here. *) +lemma "corres (=) P \ (do x \ f; g x; h od) (do y \ f'; g' y; h' od)" + by (corres' \fastforce simp: t\ corres: f g h wp: Q) + +(* Providing "succeed" will stop at any side condition without solving it. 
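+   (succeed is the built-in method that always succeeds without changing the goal, so passing it
+   as the cleanup method effectively switches the cleanup step off.)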
Occasionally useful for + debugging: *) +lemma "corres (=) P \ (do x \ f; g x; h od) (do y \ f'; g' y; h' od)" + apply (corres' \succeed\ corres: f g h term_simp: t) + (* stops at side condition for g, even though t was available in term_simp *) + apply (simp add: t) + apply (corres corres: h) + apply (wpsimp wp: Q)+ done -text \Note we can wrap it all up in corressimp\ - -lemma setVMRootForFlush_corres': - notes [corres] = getCurThread_corres getSlotCap_corres - shows - "corres (=) - (cur_tcb and vspace_at_asid asid pd - and K (asid \ 0 \ asid \ mask asid_bits) - and valid_asid_map and valid_vs_lookup - and valid_vspace_objs and valid_global_objs - and unique_table_refs o caps_of_state - and valid_arch_state - and pspace_aligned and pspace_distinct) - (pspace_aligned' and pspace_distinct' and no_0_obj') - (set_vm_root_for_flush pd asid) - (setVMRootForFlush pd asid)" - apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) - apply (corressimp search: armv_contextSwitch_corres - wp: get_cap_wp getSlotCap_wp - simp: isCap_simps) - apply (rule context_conjI) - subgoal by (simp add: cte_map_def objBits_simps tcb_cnode_index_def - tcbVTableSlot_def to_bl_1 cte_level_bits_def) - apply (rule context_conjI) - subgoal by (fastforce simp: cur_tcb_def intro!: tcb_at_cte_at_1[simplified]) - apply (rule conjI) - subgoal by (fastforce) - apply (drule cte_wp_at_ex) - apply clarsimp - apply (drule (1) pspace_relation_cte_wp_at[rotated 1]; (assumption | clarsimp)?) - apply (drule cte_wp_at_norm') - apply clarsimp - apply (rule_tac x="cteCap cte" in exI) - apply (auto elim: cte_wp_at_weakenE' dest!: curthread_relation) +(* Providing something like fastforce can lead to non-termination or slowdown, because the method + will be tried for any side condition. If there is a distinctive goal pattern that can + distinguish when the cleanup method should be run, you can use "match" to restrict the method: *) +lemma "corres (=) P \ (do x \ f; g x; h od) (do y \ f'; g' y; h' od)" + by (corres' \match conclusion in "x = t y" for x y \ \fastforce simp: t\\ corres: f g h wp: Q) + + +section "Form of [@{attribute corres}] rules" + +(* The method expects terminal corres rules to instantiate return relation and guards. + It also expects distinct variables for the abstract and concrete side and tries hard to + not accidentally mix these by rewriting corres terms with assumptions. + + For instance, it would be tempting to write the "g" rule as follows: *) +lemma g': "corres (=) Q \ (g x) (g' x)" + by (simp add: g t) + +(* This will usually not apply in the corres proof, because the goal will tend to have + the form "corres (=) Q \ (g x) (g' y)" with a side condition connecting x and y, and not + "corres (=) Q \ (g x) (g' x)" *) +lemma "corres (=) P \ (do x \ f; g x od) (do y \ f'; g' y od)" + apply (corres corres: f g') + (* \x y. x = y \ corres (=) (?R2 x) (?R'2 y) (g x) (g' y) *) + apply (fails \rule g'\) + (* The original "g" rule from the top of this file works, because it has separate x and y *) + apply (rule g) + apply (wpsimp wp: Q simp: t)+ done + +(* The corres method refuses to rewrite guards for the same reason. 
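+   If a guard really does need to change shape, do that in an explicit separate step, for
+   instance via a guard-weakening rule such as corres_guard_imp if it is available in your
+   context, rather than expecting the simp: rules of the method to touch it.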
+ Because corres is careful with keeping abstract and concrete variables separate, + it is usually safe to interleave corres with corres_cases or corres_cases_both *) +lemma "corres (=) P \ + (do x \ case z of None \ f | Some x' \ do return x'; f od; g x od) + (do y \ f'; g' y od)" + by (corres corres: f g simp: t wp: Q | corres_cases)+ + +(* It is usually safe to interleave corres with methods that solve their goal, such as + fastforce, blast, etc. + + It is *not* generally safe to interleave corres with simp or clarsimp. It can occasionally be + useful to invoke simp or clarsimp manually on corres terms with schematics, but + generally it is unsafe and should be avoided. Use the "simp:" facility of the corres method + instead wherever possible, because it provides some protection against common pitfalls. + + Occasionally it is useful to interleave with tactics that work on specific kinds of goals + only, e.g. a clarsimp on goals that are not corres goals. For this, the predicate methods + is_corres, is_wp, and is_safe_wp are available. These do not change the proof state, but they + fail when their predicate does not hold. + + is_corres succeeds on corres goals only + is_wp succeeds on wp goals only (valid, validE, no_fail) + is_safe_wp succeeds only on wp goals without a schematic post condition (where wpsimp is not safe) + + Boolean combinations of predicates can be obtained with "," "|" and "fails" for "and", "or", and + "not". *) +(* Example of predicate methods *) +lemma "corres (=) P \ + (do x \ case z of None \ f | Some x' \ do return x'; f od; g x od) + (do y \ f'; g' y od)" + (* Do case distinction and apply the corres method only to the corres goals: *) + apply (corres_cases; (is_corres, corres corres: f g)?) + (* Find all safe wp goals and run wpsimp on them *) + apply (all \(is_safe_wp, wpsimp wp: Q)?\) + (* Only non-corres and non-wp should remain -- fail if that is not the case *) + apply (all \fails \is_corres | is_wp\, simp add: t\) + done + end + end diff --git a/lib/test/Crunch_Test_NonDet.thy b/lib/test/Crunch_Test_NonDet.thy index a0880ad4af..1961523b7a 100644 --- a/lib/test/Crunch_Test_NonDet.thy +++ b/lib/test/Crunch_Test_NonDet.thy @@ -26,7 +26,7 @@ definition crunch_foo1 13 od" -crunch_ignore (valid, empty_fail, no_fail) (add: NonDetMonad.bind) +crunch_ignore (valid, empty_fail, no_fail) (add: Nondet_Monad.bind) crunch (empty_fail) empty_fail: crunch_foo2 diff --git a/lib/test/Match_Abbreviation_Test.thy b/lib/test/Match_Abbreviation_Test.thy index 1e682b137c..0ebf750674 100644 --- a/lib/test/Match_Abbreviation_Test.thy +++ b/lib/test/Match_Abbreviation_Test.thy @@ -7,7 +7,7 @@ theory Match_Abbreviation_Test imports Lib.Match_Abbreviation - Monads.NonDetMonad + Monads.Nondet_Monad begin experiment diff --git a/lib/test/MonadicRewrite_Test.thy b/lib/test/MonadicRewrite_Test.thy new file mode 100644 index 0000000000..5ec082106c --- /dev/null +++ b/lib/test/MonadicRewrite_Test.thy @@ -0,0 +1,270 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: BSD-2-Clause + *) + +theory MonadicRewrite_Test +imports Lib.MonadicRewrite +begin + +(* in order to see the way bound variables are handled with bind/bindE by various rules, show etas + in this file *) +declare [[eta_contract=false]] + +section \Function definitions to use in examples\ + +definition + "example_k x \ gets (K x)" +definition + "example_f \ example_k 2" + +(* linear examples in normal and error monad *) + +definition + "example_add = do + a \ example_f; + b \ example_k a; + c \ 
example_f; + return (a+b+c) + od" + +definition + "example_addE = doE + a \ liftE example_f; + b \ liftE (example_k a); + c \ liftE example_f; + returnOk (a+b+c) + odE" + +section \Sanity checks\ + +(* pass through entire LHS while doing nothing, should get exact same state out *) + +lemma + "monadic_rewrite True False \ example_add example_add" + unfolding example_add_def + apply monadic_rewrite_pre + apply (rule monadic_rewrite_trans) \ \schematise RHS\ + apply (rule monadic_rewrite_step_l)+ + apply (rule monadic_rewrite_refl) + apply wp+ + (* the terms on both sides should be identical, including bound names *) + apply (rule monadic_rewrite_refl) + apply simp + done + +lemma + "monadic_rewrite True False \ example_addE example_addE" + unfolding example_addE_def + apply monadic_rewrite_pre + apply (rule monadic_rewrite_trans) \ \schematise RHS\ + apply (rule monadic_rewrite_step_l)+ + apply (rule monadic_rewrite_refl) + apply wp+ + (* the terms on both sides should be identical, including bound names *) + apply (rule monadic_rewrite_refl) + apply simp + done + +(* now do the same using automation (note automation needs a specific target to hit, as achieving + nothing is considered a failure *) + +lemma + "monadic_rewrite True False \ example_add example_add" + unfolding example_add_def + apply (monadic_rewrite_l monadic_rewrite_refl[where f="return (a+b+c)" for a b c]) + (* the terms on both sides should be identical, including bound names *) + apply (rule monadic_rewrite_refl) + apply simp + done + +lemma + "monadic_rewrite True False \ example_addE example_addE" + unfolding example_addE_def + apply (monadic_rewrite_l monadic_rewrite_refl[where f="returnOk (a+b+c)" for a b c]) + (* the terms on both sides should be identical, including bound names *) + apply (rule monadic_rewrite_refl) + apply simp + done + +section \Example of rewriting with a matching rule: selecting branches of if statements\ + +(* in this example, we know we'll always take the left branch because b will be 2 *) + +definition + "example_if = do + a \ example_f; + b \ example_k a; + if (b = 2) + then do + c \ example_f; + return (a+2+c) + od + else do + c \ example_f; + return (a+b+c) + od + od" + +definition + "example_removed_if = do + a \ example_f; + b \ example_k a; + c \ example_f; + return (a+2+c) + od" + +lemma example_k_wp: + "\K (a = n)\ example_k a \\rv s. rv = n\" + unfolding example_k_def + by wpsimp + +lemma example_f_wp_2: + "\\\ example_f \\rv s. rv = 2\" + unfolding example_f_def + by (wpsimp wp: example_k_wp) + +lemma example_f_wp: + "\K (n = 2)\ example_f \\rv s. 
rv = n\" + unfolding example_f_def + by (wpsimp wp: example_k_wp) + +(* rewrite the if, but use succeed to show remaining wp goals *) +lemma + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + apply (monadic_rewrite_l monadic_rewrite_if_l_True succeed) + apply (wpsimp wp: example_k_wp) + apply (wpsimp wp: example_f_wp_2) + (* note: bound names are preserved *) + apply (rule monadic_rewrite_refl) + apply simp + done + +(* RHS version: rewrite the if, but use succeed to show remaining wp goals *) +lemma + "monadic_rewrite True False \ example_removed_if example_if" + unfolding example_if_def example_removed_if_def + apply (monadic_rewrite_r monadic_rewrite_if_r_True succeed) + apply (wpsimp wp: example_k_wp) + apply (wpsimp wp: example_f_wp_2) + (* note: bound names are preserved *) + apply (rule monadic_rewrite_refl) + apply simp + done + +(* rewrite the if completely automatically *) +lemma (* on left *) + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + by (monadic_rewrite_l monadic_rewrite_if_l_True \wpsimp wp: example_k_wp example_f_wp\) + (rule monadic_rewrite_refl, simp) +lemma (* on right *) + "monadic_rewrite True False \ example_removed_if example_if" + unfolding example_if_def example_removed_if_def + by (monadic_rewrite_r monadic_rewrite_if_r_True \wpsimp wp: example_k_wp example_f_wp\) + (rule monadic_rewrite_refl, simp) + +(* if the required rules are already present in the environment, no need to specify a method *) +lemma (* on left *) + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + supply example_k_wp[wp] example_f_wp[wp] + by (monadic_rewrite_l monadic_rewrite_if_l_True) + (rule monadic_rewrite_refl, simp) + +section \Symbolic execution\ + +(* performing symbolic execution within a monadic_rewrite requires discharging no_fail/empty_fail + conditions depending on RHS/LHS and flags *) +crunches example_k, example_f + for inv[wp]: "P" + and (empty_fail) empty_fail[wp] + and (no_fail) no_fail[wp] + +(* If you know the value and can prove it later: monadic_rewrite_symb_exec_l/r_known *) + +lemma + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + supply example_k_wp[wp] example_f_wp[wp] + (* LHS: we know example_f will return 2, but will prove it later *) + apply (monadic_rewrite_symb_exec_l_known 2) + (* LHS: we know example_k 2 will return 2, but will prove it later *) + (* observe that symb_exec methods attempt to discharge inv/no_/empty_fail goals in the + background and optionally take a custom method; here we examine them with succeed *) + apply (monadic_rewrite_symb_exec_l_known 2 succeed) + prefer 2 apply wp (* inv *) + prefer 2 apply wp (* empty_fail *) + (* can simplify if condition normally *) + apply simp + (* we know the same return values occur on RHS, but that isn't very interesting as we won't + normally symbolically execute if we have the same term on both sides, so let's schematise + LHS and rewrite RHS to match it via symbolic execution *) + apply (monadic_rewrite_pre, rule monadic_rewrite_trans[rotated]) + (* RHS: we know example_f will return 2, but will prove it later *) + apply (monadic_rewrite_symb_exec_r_known 2) + (* RHS: we know example_k 2 will return 2, but will prove it later *) + apply (monadic_rewrite_symb_exec_r_known 2) + (* done with RHS rewrite *) + apply (rule monadic_rewrite_refl) + (* 
discharge RHS obligations of returning 2 that we deferred earlier *) + apply wpsimp+ + (* rewrite was successful, LHS = RHS *) + apply (rule monadic_rewrite_refl) + (* discharge LHS obligations of returning 2 that we deferred earlier *) + apply wpsimp+ + done + +(* The basic form of symbolic execution acts as one would expect: it does not specify any return + value, discharging any obligations later *) +lemma + "monadic_rewrite True False \ example_if example_removed_if" + unfolding example_if_def example_removed_if_def + (* let's rewrite the LHS as in previous example, but this time not knowing what the values will + be *) + apply monadic_rewrite_symb_exec_l+ + (* we still know we will take the first branch of the if, but we'll prove it later *) + apply (rule_tac P="b = 2" in monadic_rewrite_gen_asm) + (* we can simplify the if statement as usual *) + apply simp + + (* let's rewrite RHS into new LHS now, but with normal symbolic execution *) + apply (monadic_rewrite_pre, rule monadic_rewrite_trans[rotated]) + apply monadic_rewrite_symb_exec_r (* name collision: a \ aa *) + (* the rewrite is only true if the two "a" are the same, so assume that *) + apply (rule_tac P="aa = a" in monadic_rewrite_gen_asm, simp) + apply monadic_rewrite_symb_exec_r + (* done with RHS rewrite *) + apply (rule monadic_rewrite_refl) + (* discharge RHS obligations *) + apply (wpsimp wp: example_f_wp)+ + + (* rewrite was successful, LHS = RHS *) + apply no_name_eta + apply (rule monadic_rewrite_refl) + (* clear up LHS obligations w.r.t. precondition (bit fiddly due to equalities) *) + apply (clarsimp simp: pred_conj_def cong: conj_cong) + apply (wpsimp wp: example_k_wp example_f_wp)+ + done + +(* The "drop" form of symbolic execution is mainly used when a combination of rewrites and + assertions results in a state-invariant operation whose results are not used, such as + a number of getters whose results are used on branches not taken under the precondition. 
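+   Dropping still has to be justified: the dropped operation must leave the state unchanged and
+   the usual empty_fail/no_fail style side conditions must be dischargeable, which is what the
+   crunched facts declared [wp] above are for. The example below adds ten vacuous returns and
+   then removes them again in a single pass.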
*) +lemma + "monadic_rewrite True False \ example_if example_if" + unfolding example_if_def + apply (monadic_rewrite_pre, rule monadic_rewrite_trans) + (* we artificially add operations to LHS that are irrelevant *) + apply (repeat 10 \rule monadic_rewrite_add_return\) + (* done with rewriting *) + apply (rule monadic_rewrite_refl) + (* we can remove added operations in one pass *) + apply monadic_rewrite_symb_exec_l_drop+ + (* both sides equal again *) + apply (rule monadic_rewrite_refl) + apply simp + done + +end diff --git a/misc/jedit/macros/goto-error.bsh b/misc/jedit/macros/goto-error.bsh index d727d5dfb4..3fc260c0d8 100644 --- a/misc/jedit/macros/goto-error.bsh +++ b/misc/jedit/macros/goto-error.bsh @@ -25,8 +25,8 @@ import isabelle.jedit.*; msg(s) { Macros.message(view, s); } // isabelle setup -model = Document_Model.get(textArea.getBuffer()); -snapshot = model.get().snapshot(); +model = Document_Model.get_model(textArea.getBuffer()); +snapshot = Document_Model.snapshot(model.get()); class FirstError { public int first_error_pos = -1; diff --git a/proof/ROOT b/proof/ROOT index 42b1954cef..38e27435a3 100644 --- a/proof/ROOT +++ b/proof/ROOT @@ -56,6 +56,8 @@ session Refine in "refine" = BaseRefine + *) session RefineOrphanage in "refine/$L4V_ARCH/orphanage" = Refine + description \Proof that the kernel does not orphan threads.\ + theories [condition = "REFINE_QUICK_AND_DIRTY", quick_and_dirty] + "Orphanage" theories "Orphanage" diff --git a/proof/access-control/ADT_AC.thy b/proof/access-control/ADT_AC.thy index eb85735b24..f0c20cbbf2 100644 --- a/proof/access-control/ADT_AC.thy +++ b/proof/access-control/ADT_AC.thy @@ -95,7 +95,8 @@ lemma do_user_op_respects: apply (rule dmo_device_update_respects_Write) apply (wpsimp wp: dmo_um_upd_machine_state dmo_user_memory_update_respects_Write - hoare_vcg_all_lift hoare_vcg_imp_lift)+ + hoare_vcg_all_lift hoare_vcg_imp_lift + wp_del: select_wp)+ apply (rule hoare_pre_cont) apply (wpsimp wp: select_wp)+ apply (simp add: restrict_map_def split: if_splits) diff --git a/proof/access-control/ARM/ArchAccess.thy b/proof/access-control/ARM/ArchAccess.thy index 4538c300a7..f84b5dde16 100644 --- a/proof/access-control/ARM/ArchAccess.thy +++ b/proof/access-control/ARM/ArchAccess.thy @@ -196,10 +196,10 @@ lemmas integrity_asids_kh_upds = declare integrity_asids_def[simp] lemma integrity_asids_kh_upds': - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ CNode sz cs)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ TCB tcb)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ Endpoint ep)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ Notification ntfn)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ CNode sz cs)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ TCB tcb)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ Endpoint ep)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ Notification ntfn)\) s" by auto lemma integrity_asids_kh_update: diff --git a/proof/access-control/ARM/ArchAccess_AC.thy b/proof/access-control/ARM/ArchAccess_AC.thy index af0d42118d..39112a65cc 100644 --- a/proof/access-control/ARM/ArchAccess_AC.thy +++ b/proof/access-control/ARM/ArchAccess_AC.thy @@ -91,7 +91,7 @@ lemma integrity_asids_refl[Access_AC_assms, simp]: lemma integrity_asids_update_autarch[Access_AC_assms]: "\ \x a. integrity_asids aag subjects x a st s; is_subject aag ptr \ - \ \x a. 
integrity_asids aag subjects x a st (s\kheap := kheap s(ptr \ obj)\)" + \ \x a. integrity_asids aag subjects x a st (s\kheap := (kheap s)(ptr \ obj)\)" by simp end diff --git a/proof/access-control/ARM/ArchArch_AC.thy b/proof/access-control/ARM/ArchArch_AC.thy index f1c2b9c0d5..4f24903648 100644 --- a/proof/access-control/ARM/ArchArch_AC.thy +++ b/proof/access-control/ARM/ArchArch_AC.thy @@ -549,7 +549,7 @@ lemma perform_asid_control_invocation_respects: apply (rule hoare_pre) apply (wpc, simp) apply (wpsimp wp: set_cap_integrity_autarch cap_insert_integrity_autarch - retype_region_integrity[where sz=12] static_imp_wp) + retype_region_integrity[where sz=12] hoare_weak_lift_imp) apply (clarsimp simp: authorised_asid_control_inv_def ptr_range_def page_bits_def add.commute range_cover_def obj_bits_api_def default_arch_object_def @@ -576,12 +576,12 @@ lemma perform_asid_control_invocation_pas_refined [wp]: \\_. pas_refined aag\" apply (simp add: perform_asid_control_invocation_def) apply (rule hoare_pre) - apply (wp cap_insert_pas_refined' static_imp_wp + apply (wp cap_insert_pas_refined' hoare_weak_lift_imp | strengthen pas_refined_set_asid_strg | wpc | simp add: delete_objects_def2 fun_upd_def[symmetric])+ apply (wp retype_region_pas_refined'[where sz=pageBits] - hoare_vcg_ex_lift hoare_vcg_all_lift static_imp_wp hoare_wp_combs hoare_drop_imp + hoare_vcg_ex_lift hoare_vcg_all_lift hoare_weak_lift_imp hoare_wp_combs hoare_drop_imp retype_region_invs_extras(1)[where sz = pageBits] retype_region_invs_extras(4)[where sz = pageBits] retype_region_invs_extras(6)[where sz = pageBits] @@ -591,7 +591,7 @@ lemma perform_asid_control_invocation_pas_refined [wp]: max_index_upd_invs_simple max_index_upd_caps_overlap_reserved hoare_vcg_ex_lift set_cap_cte_wp_at hoare_vcg_disj_lift set_free_index_valid_pspace set_cap_descendants_range_in set_cap_no_overlap get_cap_wp set_cap_caps_no_overlap - hoare_vcg_all_lift static_imp_wp retype_region_invs_extras + hoare_vcg_all_lift hoare_weak_lift_imp retype_region_invs_extras set_cap_pas_refined_not_transferable | simp add: do_machine_op_def split_def cte_wp_at_neg2 region_in_kernel_window_def)+ apply (rename_tac frame slot parent base cap) @@ -826,8 +826,7 @@ lemma decode_arch_invocation_authorised: apply (rule hoare_pre) apply (simp add: split_def Let_def split del: if_split cong: cap.case_cong arch_cap.case_cong if_cong option.case_cong) - apply (wp select_wp whenE_throwError_wp check_vp_wpR - find_pd_for_asid_authority2 + apply (wp whenE_throwError_wp check_vp_wpR find_pd_for_asid_authority2 | wpc | simp add: authorised_asid_control_inv_def authorised_page_inv_def authorised_page_directory_inv_def diff --git a/proof/access-control/ARM/ArchCNode_AC.thy b/proof/access-control/ARM/ArchCNode_AC.thy index a385de3008..9c69e20017 100644 --- a/proof/access-control/ARM/ArchCNode_AC.thy +++ b/proof/access-control/ARM/ArchCNode_AC.thy @@ -75,17 +75,17 @@ crunches set_cdt crunches prepare_thread_delete, arch_finalise_cap for cur_domain[CNode_AC_assms, wp]:"\s. 
P (cur_domain s)" - (wp: crunch_wps select_wp hoare_vcg_if_lift2 simp: unless_def) + (wp: crunch_wps hoare_vcg_if_lift2 simp: unless_def) lemma state_vrefs_tcb_upd[CNode_AC_assms]: - "tcb_at t s \ state_vrefs (s\kheap := kheap s(t \ TCB tcb)\) = state_vrefs s" + "tcb_at t s \ state_vrefs (s\kheap := (kheap s)(t \ TCB tcb)\) = state_vrefs s" apply (rule ext) apply (auto simp: state_vrefs_def vs_refs_no_global_pts_def tcb_at_def dest!: get_tcb_SomeD) done lemma state_vrefs_simple_type_upd[CNode_AC_assms]: "\ ko_at ko ptr s; is_simple_type ko; a_type ko = a_type (f val) \ - \ state_vrefs (s\kheap := kheap s(ptr \ f val)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(ptr \ f val)\) = state_vrefs s" apply (rule ext) apply (auto simp: state_vrefs_def vs_refs_no_global_pts_def obj_at_def partial_inv_def a_type_def split: kernel_object.splits arch_kernel_obj.splits if_splits) diff --git a/proof/access-control/ARM/ArchDomainSepInv.thy b/proof/access-control/ARM/ArchDomainSepInv.thy index 73e9df6bee..eb3ac5ab85 100644 --- a/proof/access-control/ARM/ArchDomainSepInv.thy +++ b/proof/access-control/ARM/ArchDomainSepInv.thy @@ -49,7 +49,7 @@ lemma perform_page_invocation_domain_sep_inv: \\_. domain_sep_inv irqs st\" apply (rule hoare_pre) apply (wp mapM_wp[OF _ subset_refl] set_cap_domain_sep_inv mapM_x_wp[OF _ subset_refl] - perform_page_invocation_domain_sep_inv_get_cap_helper static_imp_wp + perform_page_invocation_domain_sep_inv_get_cap_helper hoare_weak_lift_imp | simp add: perform_page_invocation_def o_def | wpc)+ apply (clarsimp simp: valid_page_inv_def) apply (case_tac xa, simp_all add: domain_sep_inv_cap_def is_pg_cap_def) @@ -79,7 +79,7 @@ lemma perform_asid_control_invocation_domain_sep_inv: unfolding perform_asid_control_invocation_def apply (rule hoare_pre) apply (wp modify_wp cap_insert_domain_sep_inv' set_cap_domain_sep_inv - get_cap_domain_sep_inv_cap[where st=st] static_imp_wp + get_cap_domain_sep_inv_cap[where st=st] hoare_weak_lift_imp | wpc | simp )+ done diff --git a/proof/access-control/ARM/ArchFinalise_AC.thy b/proof/access-control/ARM/ArchFinalise_AC.thy index 901acccf2c..37a9bdfdd2 100644 --- a/proof/access-control/ARM/ArchFinalise_AC.thy +++ b/proof/access-control/ARM/ArchFinalise_AC.thy @@ -93,7 +93,7 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) qed lemma finalise_cap_caps_of_state_nullinv[Finalise_AC_assms]: - "\\s. P (caps_of_state s) \ (\p. P (caps_of_state s(p \ NullCap)))\ + "\\s. P (caps_of_state s) \ (\p. P ((caps_of_state s)(p \ NullCap)))\ finalise_cap cap final \\_ s. 
P (caps_of_state s)\" by (cases cap; diff --git a/proof/access-control/ARM/ArchIpc_AC.thy b/proof/access-control/ARM/ArchIpc_AC.thy index ccd47639e2..74034f569a 100644 --- a/proof/access-control/ARM/ArchIpc_AC.thy +++ b/proof/access-control/ARM/ArchIpc_AC.thy @@ -175,7 +175,7 @@ lemma handle_arch_fault_reply_respects[Ipc_AC_assms]: lemma auth_ipc_buffers_kheap_update[Ipc_AC_assms]: "\ x \ auth_ipc_buffers st thread; kheap st thread = Some (TCB tcb); kheap s thread = Some (TCB tcb'); tcb_ipcframe tcb = tcb_ipcframe tcb' \ - \ x \ auth_ipc_buffers (s\kheap := kheap s(thread \ TCB tcb)\) thread" + \ x \ auth_ipc_buffers (s\kheap := (kheap s)(thread \ TCB tcb)\) thread" by (clarsimp simp: auth_ipc_buffers_member_def get_tcb_def caps_of_state_tcb) lemma auth_ipc_buffers_machine_state_update[Ipc_AC_assms, simp]: diff --git a/proof/access-control/ARM/ArchTcb_AC.thy b/proof/access-control/ARM/ArchTcb_AC.thy index dd78504af0..1619cbdb92 100644 --- a/proof/access-control/ARM/ArchTcb_AC.thy +++ b/proof/access-control/ARM/ArchTcb_AC.thy @@ -45,7 +45,7 @@ lemma invoke_tcb_tc_respects_aag[Tcb_AC_assms]: | wp restart_integrity_autarch set_mcpriority_integrity_autarch as_user_integrity_autarch thread_set_integrity_autarch option_update_thread_integrity_autarch - opt_update_thread_valid_sched static_imp_wp + opt_update_thread_valid_sched hoare_weak_lift_imp cap_insert_integrity_autarch checked_insert_pas_refined cap_delete_respects' cap_delete_pas_refined' check_cap_inv2[where Q="\_. integrity aag X st"] diff --git a/proof/access-control/Access_AC.thy b/proof/access-control/Access_AC.thy index 95a7b899b0..1c061ed0d2 100644 --- a/proof/access-control/Access_AC.thy +++ b/proof/access-control/Access_AC.thy @@ -208,17 +208,17 @@ lemmas state_objs_to_policy_cases lemma tcb_states_of_state_preserved: "\ get_tcb thread s = Some tcb; tcb_state tcb' = tcb_state tcb \ - \ tcb_states_of_state (s\kheap := kheap s(thread \ TCB tcb')\) = tcb_states_of_state s" + \ tcb_states_of_state (s\kheap := (kheap s)(thread \ TCB tcb')\) = tcb_states_of_state s" by (auto split: option.splits simp: tcb_states_of_state_def get_tcb_def) lemma thread_st_auth_preserved: "\ get_tcb thread s = Some tcb; tcb_state tcb' = tcb_state tcb \ - \ thread_st_auth (s\kheap := kheap s(thread \ TCB tcb')\) = thread_st_auth s" + \ thread_st_auth (s\kheap := (kheap s)(thread \ TCB tcb')\) = thread_st_auth s" by (simp add: tcb_states_of_state_preserved thread_st_auth_def) lemma thread_bound_ntfns_preserved: "\ get_tcb thread s = Some tcb; tcb_bound_notification tcb' = tcb_bound_notification tcb \ - \ thread_bound_ntfns (s\kheap := kheap s(thread \ TCB tcb')\) = thread_bound_ntfns s" + \ thread_bound_ntfns (s\kheap := (kheap s)(thread \ TCB tcb')\) = thread_bound_ntfns s" by (auto simp: thread_bound_ntfns_def get_tcb_def split: option.splits) lemma is_transferable_null_filter[simp]: @@ -865,7 +865,7 @@ locale Access_AC_2 = Access_AC_1 + \ (\x a. integrity_asids aag subjects x a s s'')" and integrity_asids_update_autarch: "\ \x a. integrity_asids aag {pasSubject aag} x a s s'; is_subject aag ptr \ - \ \x a. integrity_asids aag {pasSubject aag} x a s (s'\kheap := kheap s'(ptr \ obj)\)" + \ \x a. 
integrity_asids aag {pasSubject aag} x a s (s'\kheap := (kheap s')(ptr \ obj)\)" begin section \Generic AC stuff\ @@ -980,7 +980,7 @@ lemma integrity_refl [simp]: lemma integrity_update_autarch: "\ integrity aag X st s; is_subject aag ptr \ - \ integrity aag X st (s\kheap := kheap s(ptr \ obj)\)" + \ integrity aag X st (s\kheap := (kheap s)(ptr \ obj)\)" unfolding integrity_subjects_def apply (intro conjI,simp_all) apply clarsimp diff --git a/proof/access-control/CNode_AC.thy b/proof/access-control/CNode_AC.thy index a1555b5970..a1d2631eec 100644 --- a/proof/access-control/CNode_AC.thy +++ b/proof/access-control/CNode_AC.thy @@ -56,11 +56,11 @@ locale CNode_AC_1 = \ state_asids_to_policy_arch aag (caps(ptr \ cap, ptr' \ cap')) as vrefs \ pasPolicy aag" and state_vrefs_tcb_upd: "\ pspace_aligned s; valid_vspace_objs s; valid_arch_state s; tcb_at tptr s \ - \ state_vrefs (s\kheap := kheap s(tptr \ TCB tcb)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(tptr \ TCB tcb)\) = state_vrefs s" and state_vrefs_simple_type_upd: "\ pspace_aligned s; valid_vspace_objs s; valid_arch_state s; ko_at ko p s; is_simple_type ko; a_type ko = a_type (f (val :: 'b)) \ - \ state_vrefs (s\kheap := kheap s(p \ f val)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(p \ f val)\) = state_vrefs s" and a_type_arch_object_not_tcb[simp]: "a_type (ArchObj arch_kernel_obj) \ ATCB" and set_cap_state_vrefs: @@ -727,7 +727,7 @@ lemmas[monad_commute_wp] = (* Sort-of VCG for monad_commute goals *) lemma wpc_helper_monad_commute: - "monad_commute P f g \ wpc_helper (P, P') (Q, Q') (monad_commute P f g)" + "monad_commute P f g \ wpc_helper (P, P', P'') (Q, Q', Q'') (monad_commute P f g)" by (clarsimp simp: wpc_helper_def) wpc_setup "\m. monad_commute P f m" wpc_helper_monad_commute @@ -969,10 +969,10 @@ lemma set_untyped_cap_as_full_is_transferable[wp]: using untyped_not_transferable max_free_index_update_preserve_untyped by simp lemma set_untyped_cap_as_full_is_transferable': - "\\s. is_transferable ((caps_of_state s(slot2 \ new_cap)) slot3) \ + "\\s. is_transferable (((caps_of_state s)(slot2 \ new_cap)) slot3) \ Some src_cap = (caps_of_state s slot)\ set_untyped_cap_as_full src_cap new_cap slot - \\_ s. is_transferable ((caps_of_state s(slot2 \ new_cap)) slot3)\" + \\_ s. is_transferable (((caps_of_state s)(slot2 \ new_cap)) slot3)\" apply (clarsimp simp: set_untyped_cap_as_full_def) apply safe apply (wp,fastforce)+ @@ -1522,10 +1522,10 @@ lemma post_cap_deletion_cur_domain[wp]: by (wpsimp simp: post_cap_deletion_def) crunch cur_domain[wp]: cap_swap_for_delete, empty_slot "\s. P (cur_domain s)" - (wp: crunch_wps select_wp hoare_vcg_if_lift2 simp: unless_def) + (wp: crunch_wps hoare_vcg_if_lift2 simp: unless_def) crunch cur_domain[wp]: finalise_cap "\s. P (cur_domain s)" - (wp: crunch_wps select_wp hoare_vcg_if_lift2 simp: unless_def) + (wp: crunch_wps hoare_vcg_if_lift2 simp: unless_def) lemma rec_del_cur_domain[wp]: "rec_del call \\s. P (cur_domain s)\" diff --git a/proof/access-control/DomainSepInv.thy b/proof/access-control/DomainSepInv.thy index 691ef30d7a..4ea7c3ce0a 100644 --- a/proof/access-control/DomainSepInv.thy +++ b/proof/access-control/DomainSepInv.thy @@ -133,7 +133,7 @@ crunch domain_sep_inv[wp]: set_extra_badge "domain_sep_inv irqs st" lemma set_cap_neg_cte_wp_at_other_helper': "\ oslot \ slot; ko_at (TCB x) (fst oslot) s; tcb_cap_cases (snd oslot) = Some (ogetF, osetF, orestr); - kheap (s\kheap := kheap s(fst oslot \ TCB (osetF (\ x. 
cap) x))\) (fst slot) = Some (TCB tcb); + kheap (s\kheap := (kheap s)(fst oslot \ TCB (osetF (\ x. cap) x))\) (fst slot) = Some (TCB tcb); tcb_cap_cases (snd slot) = Some (getF, setF, restr); P (getF tcb) \ \ cte_wp_at P slot s" apply (case_tac "fst oslot = fst slot") @@ -150,7 +150,7 @@ lemma set_cap_neg_cte_wp_at_other_helper': lemma set_cap_neg_cte_wp_at_other_helper: "\ \ cte_wp_at P slot s; oslot \ slot; ko_at (TCB x) (fst oslot) s; tcb_cap_cases (snd oslot) = Some (getF, setF, restr) \ - \ \ cte_wp_at P slot (s\kheap := kheap s(fst oslot \ TCB (setF (\ x. cap) x))\)" + \ \ cte_wp_at P slot (s\kheap := (kheap s)(fst oslot \ TCB (setF (\ x. cap) x))\)" apply (rule notI) apply (erule cte_wp_atE) apply (fastforce elim: notE intro: cte_wp_at_cteI split: if_splits) @@ -336,7 +336,7 @@ lemma empty_slot_domain_sep_inv: \\_ s. domain_sep_inv irqs (st :: 'state_ext state) (s :: det_ext state)\" unfolding empty_slot_def post_cap_deletion_def by (wpsimp wp: get_cap_wp set_cap_domain_sep_inv set_original_wp dxo_wp_weak - static_imp_wp deleted_irq_handler_domain_sep_inv) + hoare_weak_lift_imp deleted_irq_handler_domain_sep_inv) end @@ -439,7 +439,7 @@ lemma reply_cancel_ipc_domain_sep_inv[wp]: reply_cancel_ipc t \\_ s. domain_sep_inv irqs (st :: 'state_ext state) (s :: det_ext state)\" apply (simp add: reply_cancel_ipc_def) - apply (wp select_wp) + apply wp apply (rule hoare_strengthen_post[OF thread_set_tcb_fault_update_domain_sep_inv]) apply auto done @@ -553,7 +553,7 @@ lemma cap_revoke_domain_sep_inv': apply (wp drop_spec_validE[OF valid_validE[OF preemption_point_domain_sep_inv]] drop_spec_validE[OF valid_validE[OF cap_delete_domain_sep_inv]] drop_spec_validE[OF assertE_wp] drop_spec_validE[OF returnOk_wp] - drop_spec_validE[OF liftE_wp] select_wp + drop_spec_validE[OF liftE_wp] | simp | wp (once) hoare_drop_imps)+ done qed @@ -568,7 +568,7 @@ lemma cap_move_cte_wp_at_other: cap_move cap src_slot dest_slot \\_. 
cte_wp_at P slot\" unfolding cap_move_def - by (wpsimp wp: set_cdt_cte_wp_at set_cap_cte_wp_at' dxo_wp_weak static_imp_wp set_original_wp) + by (wpsimp wp: set_cdt_cte_wp_at set_cap_cte_wp_at' dxo_wp_weak hoare_weak_lift_imp set_original_wp) lemma cte_wp_at_weak_derived_ReplyCap: "cte_wp_at ((=) (ReplyCap x False R)) slot s @@ -1042,7 +1042,7 @@ lemma invoke_tcb_domain_sep_inv: apply (simp add: split_def cong: option.case_cong) apply (wp checked_cap_insert_domain_sep_inv hoare_vcg_all_lift_R hoare_vcg_all_lift hoare_vcg_const_imp_lift_R cap_delete_domain_sep_inv cap_delete_deletes - dxo_wp_weak cap_delete_valid_cap cap_delete_cte_at static_imp_wp + dxo_wp_weak cap_delete_valid_cap cap_delete_cte_at hoare_weak_lift_imp | wpc | strengthen | simp add: option_update_thread_def emptyable_def tcb_cap_cases_def tcb_cap_valid_def tcb_at_st_tcb_at @@ -1181,7 +1181,7 @@ lemma handle_event_domain_sep_inv: lemma schedule_domain_sep_inv: "(schedule :: (unit,det_ext) s_monad) \domain_sep_inv irqs (st :: 'state_ext state)\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp add: alternative_wp select_wp guarded_switch_to_lift hoare_drop_imps + apply (wp add: guarded_switch_to_lift hoare_drop_imps del: ethread_get_wp | wpc | clarsimp simp: get_thread_state_def thread_get_def trans_state_update'[symmetric] schedule_choose_new_thread_def)+ diff --git a/proof/access-control/Finalise_AC.thy b/proof/access-control/Finalise_AC.thy index c3ed88252c..e0d4d1e316 100644 --- a/proof/access-control/Finalise_AC.thy +++ b/proof/access-control/Finalise_AC.thy @@ -347,7 +347,7 @@ lemma reply_cancel_ipc_pas_refined[wp]: \\_. pas_refined aag\" apply (rule hoare_gen_asm) apply (simp add: reply_cancel_ipc_def) - apply (wp add: select_wp wp_transferable del: wp_not_transferable) + apply (wp add: wp_transferable del: wp_not_transferable) apply (rule hoare_strengthen_post[where Q="\_. invs and tcb_at t and pas_refined aag"]) apply (wpsimp wp: hoare_wp_combs thread_set_tcb_fault_reset_invs thread_set_pas_refined)+ apply (frule(1) reply_cap_descends_from_master0) @@ -368,7 +368,7 @@ crunches suspend for pspace_aligned[wp]: "\s :: det_ext state. pspace_aligned s" and valid_vspace_objs[wp]: "\s :: det_ext state. valid_vspace_objs s" and valid_arch_state[wp]: "\s :: det_ext state. valid_arch_state s" - (wp: dxo_wp_weak select_wp hoare_drop_imps simp: crunch_simps) + (wp: dxo_wp_weak hoare_drop_imps simp: crunch_simps) crunch pas_refined[wp]: suspend "pas_refined aag" @@ -528,12 +528,12 @@ lemma reply_cancel_ipc_respects[wp]: \\_. 
integrity aag X st\" apply (simp add: reply_cancel_ipc_def) apply (rule hoare_pre) - apply (wp add: select_wp wp_transferable del:wp_not_transferable) + apply (wp add: wp_transferable del:wp_not_transferable) apply simp apply (rule hoare_lift_Pf2[where f="cdt"]) apply (wpsimp wp: hoare_vcg_const_Ball_lift thread_set_integrity_autarch thread_set_invs_trivial[OF ball_tcb_cap_casesI] thread_set_tcb_state_trivial - thread_set_not_state_valid_sched static_imp_wp thread_set_cte_wp_at_trivial + thread_set_not_state_valid_sched hoare_weak_lift_imp thread_set_cte_wp_at_trivial thread_set_pas_refined simp: ran_tcb_cap_cases)+ apply (strengthen invs_psp_aligned invs_vspace_objs invs_arch_state, clarsimp) @@ -799,7 +799,7 @@ proof (induct arbitrary: st rule: rec_del.induct, simp_all only: rec_del_fails) apply (simp only: split_def) apply (rule hoare_pre_spec_validE) apply (rule split_spec_bindE) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (rule spec_strengthen_postE) apply (rule spec_valid_conj_liftE1) apply (rule valid_validE_R, rule rec_del_valid_list, rule preemption_point_inv'; @@ -816,7 +816,7 @@ next apply (subst rec_del.simps) apply (simp only: split_def) apply (rule hoare_pre_spec_validE) - apply (wp set_cap_integrity_autarch set_cap_pas_refined_not_transferable "2.hyps" static_imp_wp) + apply (wp set_cap_integrity_autarch set_cap_pas_refined_not_transferable "2.hyps" hoare_weak_lift_imp) apply ((wp preemption_point_inv' | simp add: integrity_subjects_def pas_refined_def)+)[1] apply (simp(no_asm)) apply (rule spec_strengthen_postE) @@ -833,7 +833,7 @@ next apply (simp add: conj_comms) apply (wp set_cap_integrity_autarch set_cap_pas_refined_not_transferable replace_cap_invs final_cap_same_objrefs set_cap_cte_cap_wp_to - set_cap_cte_wp_at hoare_vcg_const_Ball_lift static_imp_wp + set_cap_cte_wp_at hoare_vcg_const_Ball_lift hoare_weak_lift_imp | rule finalise_cap_not_reply_master | simp add: in_monad)+ apply (rule hoare_strengthen_post) @@ -848,7 +848,7 @@ next apply (wp finalise_cap_invs[where slot=slot] finalise_cap_replaceable[where sl=slot] finalise_cap_makes_halted[where slot=slot] - finalise_cap_auth' static_imp_wp)[1] + finalise_cap_auth' hoare_weak_lift_imp)[1] apply (rule finalise_cap_cases[where slot=slot]) apply (clarsimp simp: cte_wp_at_caps_of_state) apply (erule disjE) @@ -871,7 +871,7 @@ next case (3 ptr bits n slot s) show ?case apply (simp add: spec_validE_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply clarsimp done next @@ -889,7 +889,7 @@ next apply (wpsimp wp: rec_del_invs) apply (rule "4.hyps", assumption+) apply (wpsimp wp: set_cap_integrity_autarch set_cap_pas_refined_not_transferable - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: invs_psp_aligned invs_vspace_objs invs_arch_state cte_wp_at_caps_of_state clas_no_asid cli_no_irqs aag_cap_auth_def) apply (drule_tac auth=auth in sta_caps, simp+) @@ -958,13 +958,13 @@ lemma rec_del_respects_CTEDelete_transferable': apply (wp rec_del_respects'') apply (solves \simp\) apply (subst rec_del.simps[abs_def]) - apply (wp add: hoare_K_bind without_preemption_wp static_imp_wp wp_transferable + apply (wp add: hoare_K_bind without_preemption_wp hoare_weak_lift_imp wp_transferable rec_del_Finalise_transferable del: wp_not_transferable | wpc)+ apply (rule hoare_post_impErr,rule rec_del_Finalise_transferable) apply simp apply (elim conjE) apply simp apply simp - apply (wp add: hoare_K_bind without_preemption_wp static_imp_wp wp_transferable + apply (wp add: 
hoare_K_bind without_preemption_wp hoare_weak_lift_imp wp_transferable rec_del_Finalise_transferable del: wp_not_transferable | wpc)+ @@ -1085,7 +1085,7 @@ lemma empty_slot_cte_wp_at: by (wpsimp wp: empty_slot_caps_of_state) lemma deleting_irq_handler_caps_of_state_nullinv: - "\\s. \p. P (caps_of_state s(p \ NullCap))\ + "\\s. \p. P ((caps_of_state s)(p \ NullCap))\ deleting_irq_handler irq \\_ s. P (caps_of_state s)\" unfolding deleting_irq_handler_def @@ -1104,7 +1104,7 @@ locale Finalise_AC_2 = Finalise_AC_1 + \\_. (\s. trp \ integrity aag X st s) and pas_refined aag\, \\_. (\s. trp \ integrity aag X st s) and pas_refined aag\" and finalise_cap_caps_of_state_nullinv: - "\P. \\s :: det_ext state. P (caps_of_state s) \ (\p. P (caps_of_state s(p \ NullCap)))\ + "\P. \\s :: det_ext state. P (caps_of_state s) \ (\p. P ((caps_of_state s)(p \ NullCap)))\ finalise_cap cap final \\rv s. P (caps_of_state s)\" and finalise_cap_fst_ret: @@ -1144,7 +1144,7 @@ proof (induct rule: rec_del.induct, simp_all only: rec_del_fails) apply (insert P_Null) apply (subst rec_del.simps) apply (simp only: split_def) - apply (wp static_imp_wp | simp)+ + apply (wp hoare_weak_lift_imp | simp)+ apply (wp empty_slot_cte_wp_at)[1] apply (rule spec_strengthen_postE) apply (rule hoare_pre_spec_validE) @@ -1160,7 +1160,7 @@ next apply (subst rec_del.simps) apply (simp only: split_def without_preemption_def rec_del_call.simps) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp set_cap_cte_wp_at')[1] apply (wp "2.hyps"[simplified without_preemption_def rec_del_call.simps]) apply ((wp preemption_point_inv | simp)+)[1] @@ -1172,7 +1172,7 @@ next apply (rule_tac Q = "\rv' s. (slot \ p \ exposed \ cte_wp_at P p s) \ P (fst rv') \ cte_at slot s" in hoare_post_imp) apply (clarsimp simp: cte_wp_at_caps_of_state) - apply (wp static_imp_wp set_cap_cte_wp_at' finalise_cap_cte_wp_at_nullinv + apply (wp hoare_weak_lift_imp set_cap_cte_wp_at' finalise_cap_cte_wp_at_nullinv finalise_cap_fst_ret get_cap_wp | simp add: is_final_cap_def)+ apply (clarsimp simp add: P_Zombie is_cap_simps cte_wp_at_caps_of_state)+ @@ -1231,7 +1231,7 @@ proof (induct rule: cap_revoke.induct) apply (subst cap_revoke.simps) apply (unfold P_def) apply (wp "1.hyps"[unfolded P_def], simp+) - apply (wp preemption_point_inv hoare_drop_imps select_wp + apply (wp preemption_point_inv hoare_drop_imps rec_del_preserves_cte_zombie_null_insts[where P=Q] | simp add: Q_Null Q_Zombie)+ done diff --git a/proof/access-control/Ipc_AC.thy b/proof/access-control/Ipc_AC.thy index 512ce60b96..622495d2c9 100644 --- a/proof/access-control/Ipc_AC.thy +++ b/proof/access-control/Ipc_AC.thy @@ -31,13 +31,13 @@ lemma send_signal_caps_of_state[wp]: "send_signal ntfnptr badge \\s. P (caps_of_state s)\" apply (clarsimp simp: send_signal_def) apply (rule hoare_seq_ext[OF _ get_simple_ko_sp]) - apply (wpsimp wp: dxo_wp_weak cancel_ipc_receive_blocked_caps_of_state gts_wp static_imp_wp + apply (wpsimp wp: dxo_wp_weak cancel_ipc_receive_blocked_caps_of_state gts_wp hoare_weak_lift_imp simp: update_waiting_ntfn_def) apply (clarsimp simp: fun_upd_def[symmetric] st_tcb_def2) done crunch mdb[wp]: blocked_cancel_ipc, update_waiting_ntfn "\s. P (cdt (s :: det_ext state))" - (wp: crunch_wps unless_wp select_wp dxo_wp_weak simp: crunch_simps) + (wp: crunch_wps unless_wp dxo_wp_weak simp: crunch_simps) lemma cancel_ipc_receive_blocked_mdb: "\\s :: det_ext state. 
P (cdt s) \ st_tcb_at receive_blocked t s\ @@ -178,8 +178,8 @@ lemma send_upd_ctxintegrity: integrity aag X st s; st_tcb_at ((=) Running) thread s; get_tcb thread st = Some tcb; get_tcb thread s = Some tcb'\ \ integrity aag X st - (s\kheap := kheap s(thread \ - TCB (tcb'\tcb_arch := arch_tcb_context_set c' (tcb_arch tcb')\))\)" + (s\kheap := (kheap s) + (thread \ TCB (tcb'\tcb_arch := arch_tcb_context_set c' (tcb_arch tcb')\))\)" apply (clarsimp simp: integrity_def tcb_states_of_state_preserved st_tcb_def2) apply (rule conjI) prefer 2 @@ -423,7 +423,7 @@ lemma send_signal_respects: apply (rule hoare_pre) apply (wp set_notification_respects[where auth=Notify] as_user_set_register_respects_indirect[where ntfnptr=ntfnptr] - set_thread_state_integrity' sts_st_tcb_at' static_imp_wp + set_thread_state_integrity' sts_st_tcb_at' hoare_weak_lift_imp cancel_ipc_receive_blocked_respects[where ntfnptr=ntfnptr] gts_wp | wpc | simp)+ @@ -451,7 +451,7 @@ lemma send_signal_respects: sts_st_tcb_at' as_user_set_register_respects set_thread_state_pas_refined set_simple_ko_pas_refined set_thread_state_respects_in_signalling [where ntfnptr = ntfnptr] - set_ntfn_valid_objs_at hoare_vcg_disj_lift static_imp_wp + set_ntfn_valid_objs_at hoare_vcg_disj_lift hoare_weak_lift_imp | wpc | simp add: update_waiting_ntfn_def)+ apply clarsimp @@ -756,10 +756,10 @@ lemma transfer_caps_loop_presM_extended: apply (clarsimp simp add: Let_def split_def whenE_def cong: if_cong list.case_cong split del: if_split) apply (rule hoare_pre) - apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift static_imp_wp + apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift hoare_weak_lift_imp | assumption | simp split del: if_split)+ apply (rule cap_insert_assume_null) - apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at static_imp_wp)+ + apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at hoare_weak_lift_imp)+ apply (rule hoare_vcg_conj_liftE_R) apply (rule derive_cap_is_derived_foo') apply (rule_tac Q' ="\cap' s. 
(vo \ cap'\ NullCap \ @@ -1061,7 +1061,7 @@ lemma send_ipc_pas_refined: (pasObjectAbs aag x21, Reply, pasSubject aag) \ pasPolicy aag)" in hoare_strengthen_post[rotated]) apply simp - apply (wp set_thread_state_pas_refined do_ipc_transfer_pas_refined static_imp_wp gts_wp + apply (wp set_thread_state_pas_refined do_ipc_transfer_pas_refined hoare_weak_lift_imp gts_wp | wpc | simp add: hoare_if_r_and)+ apply (wp hoare_vcg_all_lift hoare_imp_lift_something | simp add: st_tcb_at_tcb_states_of_state_eq)+ @@ -1206,7 +1206,7 @@ lemma receive_ipc_base_pas_refined: aag_has_auth_to aag Reply (hd list))" in hoare_strengthen_post[rotated]) apply (fastforce simp: pas_refined_refl) - apply (wp static_imp_wp do_ipc_transfer_pas_refined set_simple_ko_pas_refined + apply (wp hoare_weak_lift_imp do_ipc_transfer_pas_refined set_simple_ko_pas_refined set_thread_state_pas_refined get_simple_ko_wp hoare_vcg_all_lift hoare_vcg_imp_lift [OF set_simple_ko_get_tcb, unfolded disj_not1] | wpc @@ -1365,7 +1365,7 @@ lemma do_normal_transfer_send_integrity_autarch: by (wpsimp wp: as_user_integrity_autarch set_message_info_integrity_autarch copy_mrs_pas_refined copy_mrs_integrity_autarch transfer_caps_integrity_autarch lookup_extra_caps_authorised lookup_extra_caps_length get_mi_length get_mi_valid' - static_imp_wp hoare_vcg_conj_lift hoare_vcg_ball_lift lec_valid_cap') + hoare_weak_lift_imp hoare_vcg_conj_lift hoare_vcg_ball_lift lec_valid_cap') crunch integrity_autarch: setup_caller_cap "integrity aag X st" @@ -1742,7 +1742,7 @@ locale Ipc_AC_2 = Ipc_AC_1 + and auth_ipc_buffers_kheap_update: "\ x \ auth_ipc_buffers st thread; kheap st thread = Some (TCB tcb); kheap s thread = Some (TCB tcb'); tcb_ipcframe tcb = tcb_ipcframe tcb' \ - \ x \ auth_ipc_buffers (s\kheap := kheap s(thread \ TCB tcb)\) thread" + \ x \ auth_ipc_buffers (s\kheap := (kheap s)(thread \ TCB tcb)\) thread" and auth_ipc_buffers_machine_state_update[simp]: "auth_ipc_buffers (machine_state_update f s) = auth_ipc_buffers (s :: det_ext state)" and empty_slot_extended_list_integ_lift_in_ipc: @@ -2365,7 +2365,7 @@ lemma send_ipc_integrity_autarch: apply (fastforce dest!: integrity_tcb_in_ipc_final elim!: integrity_trans) apply (wp setup_caller_cap_respects_in_ipc_reply set_thread_state_respects_in_ipc_autarch[where param_b = Inactive] - hoare_vcg_if_lift static_imp_wp possible_switch_to_respects_in_ipc_autarch + hoare_vcg_if_lift hoare_weak_lift_imp possible_switch_to_respects_in_ipc_autarch set_thread_state_running_respects_in_ipc do_ipc_transfer_respects_in_ipc thread_get_inv set_endpoint_integrity_in_ipc | wpc diff --git a/proof/access-control/RISCV64/ArchAccess.thy b/proof/access-control/RISCV64/ArchAccess.thy index 98caca94ee..8c48f69eb3 100644 --- a/proof/access-control/RISCV64/ArchAccess.thy +++ b/proof/access-control/RISCV64/ArchAccess.thy @@ -186,10 +186,10 @@ lemmas integrity_asids_kh_upds = declare integrity_asids_def[simp] lemma integrity_asids_kh_upds': - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ CNode sz cs)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ TCB tcb)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ Endpoint ep)\) s" - "integrity_asids aag subjects x a (s\kheap := kheap s(p \ Notification ntfn)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ CNode sz cs)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ TCB tcb)\) s" + "integrity_asids aag subjects x a (s\kheap := (kheap s)(p \ Endpoint ep)\) s" + "integrity_asids aag subjects x a (s\kheap := 
(kheap s)(p \ Notification ntfn)\) s" by (auto simp: opt_map_def split: option.splits) lemma integrity_asids_kh_update: diff --git a/proof/access-control/RISCV64/ArchAccess_AC.thy b/proof/access-control/RISCV64/ArchAccess_AC.thy index c07e39b7d6..0d5405dbed 100644 --- a/proof/access-control/RISCV64/ArchAccess_AC.thy +++ b/proof/access-control/RISCV64/ArchAccess_AC.thy @@ -82,7 +82,7 @@ lemma integrity_asids_refl[Access_AC_assms, simp]: lemma integrity_asids_update_autarch[Access_AC_assms]: "\ \x a. integrity_asids aag {pasSubject aag} x a st s; is_subject aag ptr \ - \ \x a. integrity_asids aag {pasSubject aag} x a st (s\kheap := kheap s(ptr \ obj)\)" + \ \x a. integrity_asids aag {pasSubject aag} x a st (s\kheap := (kheap s)(ptr \ obj)\)" by (auto simp: opt_map_def) end diff --git a/proof/access-control/RISCV64/ArchArch_AC.thy b/proof/access-control/RISCV64/ArchArch_AC.thy index e1735b0338..6389915bfa 100644 --- a/proof/access-control/RISCV64/ArchArch_AC.thy +++ b/proof/access-control/RISCV64/ArchArch_AC.thy @@ -541,7 +541,7 @@ lemma perform_pt_inv_unmap_pas_refined: lemma vs_lookup_PageTablePTE: "\ vs_lookup_table level asid vref s' = Some (lvl', pt); pspace_aligned s; valid_vspace_objs s; valid_asid_table s; - invalid_pte_at p s; ptes_of s' = ptes_of s (p \ pte); is_PageTablePTE pte; + invalid_pte_at p s; ptes_of s' = (ptes_of s)(p \ pte); is_PageTablePTE pte; asid_pools_of s' = asid_pools_of s; asid_table s' = asid_table s; vref \ user_region; pts_of s (the (pte_ref pte)) = Some empty_pt; pt \ pptr_from_pte pte \ @@ -584,7 +584,7 @@ lemma vs_lookup_PageTablePTE: lemma vs_lookup_PageTablePTE': "\ vs_lookup_table level asid vref s = Some (lvl', pt); pspace_aligned s; valid_vspace_objs s; valid_asid_table s; - invalid_pte_at p s; ptes_of s' = ptes_of s (p \ pte); is_PageTablePTE pte; + invalid_pte_at p s; ptes_of s' = (ptes_of s)(p \ pte); is_PageTablePTE pte; asid_pools_of s' = asid_pools_of s; asid_table s' = asid_table s; vref \ user_region \ \ \level' \ level. vs_lookup_table level' asid vref s' = Some (lvl', pt)" apply (induct level arbitrary: lvl' pt rule: bit0.from_top_full_induct[where y=max_pt_level]) @@ -915,7 +915,7 @@ lemma unmap_page_table_respects: unmap_page_table asid vaddr pt \\_. integrity aag X st\" apply (simp add: unmap_page_table_def sfence_def) - apply (wpsimp wp: pt_lookup_from_level_is_subject dmo_mol_respects hoare_vcg_conj_liftE + apply (wpsimp wp: pt_lookup_from_level_is_subject dmo_mol_respects hoare_vcg_conj_liftE_weaker store_pte_respects pt_lookup_from_level_wrp[where Q="\_. 
integrity aag X st"] | wp (once) hoare_drop_imps hoare_vcg_E_elim)+ apply (intro conjI; clarsimp) @@ -1237,7 +1237,7 @@ lemma perform_asid_control_invocation_respects: apply (wpc, simp) apply (wpsimp wp: set_cap_integrity_autarch cap_insert_integrity_autarch asid_table_entry_update_integrity retype_region_integrity[where sz=12] - static_imp_wp delete_objects_valid_vspace_objs delete_objects_valid_arch_state) + hoare_weak_lift_imp delete_objects_valid_vspace_objs delete_objects_valid_arch_state) apply (clarsimp simp: authorised_asid_control_inv_def ptr_range_def add.commute range_cover_def obj_bits_api_def default_arch_object_def pageBits_def word_bits_def) apply (subst is_aligned_neg_mask_eq[THEN sym], assumption) @@ -1318,9 +1318,9 @@ lemma perform_asid_control_invocation_pas_refined: apply (simp add: perform_asid_control_invocation_def ) apply wpc apply (rule pas_refined_asid_control_helper hoare_seq_ext hoare_K_bind)+ - apply (wp cap_insert_pas_refined' static_imp_wp | simp)+ + apply (wp cap_insert_pas_refined' hoare_weak_lift_imp | simp)+ apply ((wp retype_region_pas_refined'[where sz=pageBits] - hoare_vcg_ex_lift hoare_vcg_all_lift static_imp_wp hoare_wp_combs hoare_drop_imp + hoare_vcg_ex_lift hoare_vcg_all_lift hoare_weak_lift_imp hoare_wp_combs hoare_drop_imp retype_region_invs_extras(1)[where sz = pageBits] retype_region_invs_extras(4)[where sz = pageBits] retype_region_invs_extras(6)[where sz = pageBits] @@ -1329,7 +1329,7 @@ lemma perform_asid_control_invocation_pas_refined: max_index_upd_invs_simple max_index_upd_caps_overlap_reserved hoare_vcg_ex_lift set_cap_cte_wp_at hoare_vcg_disj_lift set_free_index_valid_pspace set_cap_descendants_range_in set_cap_no_overlap get_cap_wp set_cap_caps_no_overlap - hoare_vcg_all_lift static_imp_wp retype_region_invs_extras + hoare_vcg_all_lift hoare_weak_lift_imp retype_region_invs_extras set_cap_pas_refined_not_transferable arch_update_cap_valid_mdb | simp add: do_machine_op_def region_in_kernel_window_def cte_wp_at_neg2)+)[3] apply (rename_tac frame slot parent base ) diff --git a/proof/access-control/RISCV64/ArchCNode_AC.thy b/proof/access-control/RISCV64/ArchCNode_AC.thy index 245bc1ee9b..ee263cd699 100644 --- a/proof/access-control/RISCV64/ArchCNode_AC.thy +++ b/proof/access-control/RISCV64/ArchCNode_AC.thy @@ -97,18 +97,18 @@ crunches set_cdt crunches prepare_thread_delete, arch_finalise_cap for cur_domain[CNode_AC_assms, wp]:"\s. 
P (cur_domain s)" - (wp: crunch_wps select_wp hoare_vcg_if_lift2 simp: unless_def) + (wp: crunch_wps hoare_vcg_if_lift2 simp: unless_def) lemma state_vrefs_tcb_upd[CNode_AC_assms]: "\ pspace_aligned s; valid_vspace_objs s; valid_arch_state s; tcb_at t s \ - \ state_vrefs (s\kheap := kheap s(t \ TCB tcb)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(t \ TCB tcb)\) = state_vrefs s" apply (rule state_vrefs_eqI) by (fastforce simp: opt_map_def obj_at_def is_obj_defs valid_arch_state_def)+ lemma state_vrefs_simple_type_upd[CNode_AC_assms]: "\ pspace_aligned s; valid_vspace_objs s; valid_arch_state s; ko_at ko ptr s; is_simple_type ko; a_type ko = a_type (f val) \ - \ state_vrefs (s\kheap := kheap s(ptr \ f val)\) = state_vrefs s" + \ state_vrefs (s\kheap := (kheap s)(ptr \ f val)\) = state_vrefs s" apply (case_tac ko; case_tac "f val"; clarsimp) by (fastforce intro!: state_vrefs_eqI simp: opt_map_def obj_at_def is_obj_defs valid_arch_state_def)+ diff --git a/proof/access-control/RISCV64/ArchDomainSepInv.thy b/proof/access-control/RISCV64/ArchDomainSepInv.thy index 9c20d3ae96..442b1f0946 100644 --- a/proof/access-control/RISCV64/ArchDomainSepInv.thy +++ b/proof/access-control/RISCV64/ArchDomainSepInv.thy @@ -52,7 +52,7 @@ lemma perform_page_invocation_domain_sep_inv: \\_. domain_sep_inv irqs st\" apply (rule hoare_pre) apply (wp mapM_wp[OF _ subset_refl] set_cap_domain_sep_inv mapM_x_wp[OF _ subset_refl] - perform_page_invocation_domain_sep_inv_get_cap_helper static_imp_wp + perform_page_invocation_domain_sep_inv_get_cap_helper hoare_weak_lift_imp | simp add: perform_page_invocation_def o_def | wpc)+ done @@ -72,7 +72,7 @@ lemma perform_asid_control_invocation_domain_sep_inv: unfolding perform_asid_control_invocation_def apply (rule hoare_pre) apply (wp modify_wp cap_insert_domain_sep_inv' set_cap_domain_sep_inv - get_cap_domain_sep_inv_cap[where st=st] static_imp_wp + get_cap_domain_sep_inv_cap[where st=st] hoare_weak_lift_imp | wpc | simp )+ done diff --git a/proof/access-control/RISCV64/ArchFinalise_AC.thy b/proof/access-control/RISCV64/ArchFinalise_AC.thy index 821507266c..6068ca8b1f 100644 --- a/proof/access-control/RISCV64/ArchFinalise_AC.thy +++ b/proof/access-control/RISCV64/ArchFinalise_AC.thy @@ -172,7 +172,7 @@ crunches set_asid_pool lemma set_asid_pool_tcb_states_of_state[wp]: "set_asid_pool p pool \\s. P (tcb_states_of_state s)\" apply (wpsimp wp: set_object_wp_strong simp: obj_at_def set_asid_pool_def) - apply (prop_tac "\x. get_tcb x (s\kheap := kheap s(p \ ArchObj (ASIDPool pool))\) = get_tcb x s") + apply (prop_tac "\x. get_tcb x (s\kheap := (kheap s)(p \ ArchObj (ASIDPool pool))\) = get_tcb x s") apply (auto simp: tcb_states_of_state_def get_tcb_def) done @@ -266,7 +266,7 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) qed lemma finalise_cap_caps_of_state_nullinv[Finalise_AC_assms]: - "\\s. P (caps_of_state s) \ (\p. P (caps_of_state s(p \ NullCap)))\ + "\\s. P (caps_of_state s) \ (\p. P ((caps_of_state s)(p \ NullCap)))\ finalise_cap cap final \\_ s. 
P (caps_of_state s)\" by (cases cap; diff --git a/proof/access-control/RISCV64/ArchIpc_AC.thy b/proof/access-control/RISCV64/ArchIpc_AC.thy index 091abf877a..c3cd95d626 100644 --- a/proof/access-control/RISCV64/ArchIpc_AC.thy +++ b/proof/access-control/RISCV64/ArchIpc_AC.thy @@ -175,7 +175,7 @@ lemma handle_arch_fault_reply_respects[Ipc_AC_assms]: lemma auth_ipc_buffers_kheap_update[Ipc_AC_assms]: "\ x \ auth_ipc_buffers st thread; kheap st thread = Some (TCB tcb); kheap s thread = Some (TCB tcb'); tcb_ipcframe tcb = tcb_ipcframe tcb' \ - \ x \ auth_ipc_buffers (s\kheap := kheap s(thread \ TCB tcb)\) thread" + \ x \ auth_ipc_buffers (s\kheap := (kheap s)(thread \ TCB tcb)\) thread" by (clarsimp simp: auth_ipc_buffers_member_def get_tcb_def caps_of_state_tcb) lemma auth_ipc_buffers_machine_state_update[Ipc_AC_assms, simp]: diff --git a/proof/access-control/RISCV64/ArchTcb_AC.thy b/proof/access-control/RISCV64/ArchTcb_AC.thy index 703a1ae1f6..5a8b7a0f0b 100644 --- a/proof/access-control/RISCV64/ArchTcb_AC.thy +++ b/proof/access-control/RISCV64/ArchTcb_AC.thy @@ -45,7 +45,7 @@ lemma invoke_tcb_tc_respects_aag[Tcb_AC_assms]: | wp restart_integrity_autarch set_mcpriority_integrity_autarch as_user_integrity_autarch thread_set_integrity_autarch option_update_thread_integrity_autarch - opt_update_thread_valid_sched static_imp_wp + opt_update_thread_valid_sched hoare_weak_lift_imp cap_insert_integrity_autarch checked_insert_pas_refined cap_delete_respects' cap_delete_pas_refined' check_cap_inv2[where Q="\_. integrity aag X st"] diff --git a/proof/access-control/Retype_AC.thy b/proof/access-control/Retype_AC.thy index 70428e3ede..de013d7d0c 100644 --- a/proof/access-control/Retype_AC.thy +++ b/proof/access-control/Retype_AC.thy @@ -970,7 +970,7 @@ lemma reset_untyped_cap_valid_vspace_objs: \\_. valid_vspace_objs\" unfolding reset_untyped_cap_def apply (wpsimp wp: mapME_x_inv_wp preemption_point_inv) - apply (wp static_imp_wp delete_objects_valid_vspace_objs) + apply (wp hoare_weak_lift_imp delete_objects_valid_vspace_objs) apply (wpsimp wp: get_cap_wp)+ apply (cases src_slot) apply (auto simp: cte_wp_at_caps_of_state) @@ -1008,7 +1008,7 @@ lemma reset_untyped_cap_valid_arch_state: \\_. valid_arch_state\" unfolding reset_untyped_cap_def apply (wpsimp wp: mapME_x_inv_wp preemption_point_inv) - apply (wp static_imp_wp delete_objects_valid_arch_state) + apply (wp hoare_weak_lift_imp delete_objects_valid_arch_state) apply (wpsimp wp: get_cap_wp)+ apply (cases src_slot) apply (auto simp: cte_wp_at_caps_of_state) diff --git a/proof/access-control/Syscall_AC.thy b/proof/access-control/Syscall_AC.thy index b873a14e6c..472347ebbe 100644 --- a/proof/access-control/Syscall_AC.thy +++ b/proof/access-control/Syscall_AC.thy @@ -699,7 +699,7 @@ lemma handle_event_integrity: handle_reply_respects handle_fault_integrity_autarch handle_interrupt_integrity handle_vm_fault_integrity handle_reply_pas_refined handle_vm_fault_valid_fault - handle_reply_valid_sched alternative_wp select_wp + handle_reply_valid_sched hoare_vcg_conj_lift hoare_vcg_all_lift hoare_drop_imps simp: domain_sep_inv_def | rule dmo_wp hoare_vcg_E_elim @@ -899,7 +899,7 @@ lemma schedule_integrity: schedule \\_. 
integrity aag X st\" apply (simp add: schedule_def) - apply (wpsimp wp: alternative_wp switch_to_thread_respects' select_wp guarded_switch_to_lift + apply (wpsimp wp: switch_to_thread_respects' guarded_switch_to_lift switch_to_idle_thread_respects choose_thread_respects gts_wp hoare_drop_imps set_scheduler_action_cnt_valid_sched append_thread_queued enqueue_thread_queued tcb_sched_action_enqueue_valid_blocked_except tcb_sched_action_append_integrity' @@ -949,14 +949,14 @@ crunch pas_refined[wp]: choose_thread "pas_refined aag" lemma schedule_pas_refined: "schedule \pas_refined aag\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp add: alternative_wp guarded_switch_to_lift switch_to_thread_pas_refined select_wp - switch_to_idle_thread_pas_refined gts_wp - guarded_switch_to_lift switch_to_thread_respects_pasMayEditReadyQueues - choose_thread_respects_pasMayEditReadyQueues - next_domain_valid_sched next_domain_valid_queues gts_wp hoare_drop_imps - set_scheduler_action_cnt_valid_sched enqueue_thread_queued - tcb_sched_action_enqueue_valid_blocked_except - del: ethread_get_wp + apply (wp add: guarded_switch_to_lift switch_to_thread_pas_refined + switch_to_idle_thread_pas_refined gts_wp + guarded_switch_to_lift switch_to_thread_respects_pasMayEditReadyQueues + choose_thread_respects_pasMayEditReadyQueues + next_domain_valid_sched next_domain_valid_queues gts_wp hoare_drop_imps + set_scheduler_action_cnt_valid_sched enqueue_thread_queued + tcb_sched_action_enqueue_valid_blocked_except + del: ethread_get_wp | wpc | simp add: schedule_choose_new_thread_def)+ done @@ -983,7 +983,7 @@ lemma ct_active_update[simp]: lemma set_cap_ct_active[wp]: "set_cap ptr c \ct_active \" apply (rule hoare_pre) - apply (wps | wpsimp wp: select_wp sts_st_tcb_at_cases thread_set_no_change_tcb_state + apply (wps | wpsimp wp: sts_st_tcb_at_cases thread_set_no_change_tcb_state simp: crunch_simps ct_in_state_def)+ done @@ -1034,7 +1034,7 @@ crunch ct_active[wp]: post_cap_deletion, empty_slot "\s :: det_ext state wp: crunch_wps filterM_preserved unless_wp) crunch cur_thread[wp]: cap_swap_for_delete, finalise_cap "\s :: det_ext state. P (cur_thread s)" - (wp: select_wp dxo_wp_weak crunch_wps simp: crunch_simps) + (wp: dxo_wp_weak crunch_wps simp: crunch_simps) lemma rec_del_cur_thread[wp]: "rec_del a \\s :: det_ext state. P (cur_thread s)\" @@ -1139,8 +1139,7 @@ lemma call_kernel_integrity': apply (simp add: call_kernel_def) apply (simp only: spec_valid_def) apply (wpsimp wp: activate_thread_respects schedule_integrity_pasMayEditReadyQueues - handle_interrupt_integrity dmo_wp alternative_wp - select_wp handle_interrupt_pas_refined) + handle_interrupt_integrity dmo_wp handle_interrupt_pas_refined) apply (clarsimp simp: if_fun_split) apply (rule_tac Q="\rv ms. (rv \ None \ the rv \ non_kernel_IRQs) \ R True (domain_sep_inv (pasMaySendIrqs aag) st' s) rv ms" @@ -1182,7 +1181,7 @@ lemma call_kernel_pas_refined: \\_. 
pas_refined aag\" apply (simp add: call_kernel_def ) apply (wp activate_thread_pas_refined schedule_pas_refined handle_interrupt_pas_refined - do_machine_op_pas_refined dmo_wp alternative_wp select_wp hoare_drop_imps getActiveIRQ_inv + do_machine_op_pas_refined dmo_wp hoare_drop_imps getActiveIRQ_inv | simp add: if_fun_split | strengthen invs_psp_aligned invs_vspace_objs invs_arch_state)+ apply (wp he_invs handle_event_pas_refined) diff --git a/proof/access-control/Tcb_AC.thy b/proof/access-control/Tcb_AC.thy index d581b91d46..c57ff1efc8 100644 --- a/proof/access-control/Tcb_AC.thy +++ b/proof/access-control/Tcb_AC.thy @@ -60,7 +60,7 @@ lemmas itr_wps = restart_integrity_autarch as_user_integrity_autarch thread_set_integrity_autarch option_update_thread_integrity_autarch thread_set_pas_refined cap_insert_integrity_autarch cap_insert_pas_refined - hoare_vcg_all_liftE wp_throw_const_impE hoare_weak_lift_imp hoare_vcg_all_lift + hoare_vcg_all_liftE hoare_weak_lift_impE hoare_weak_lift_imp hoare_vcg_all_lift check_cap_inv[where P="valid_cap c" for c] check_cap_inv[where P="tcb_cap_valid c p" for c p] check_cap_inv[where P="cte_at p0" for p0] @@ -322,7 +322,7 @@ subsubsection\@{term "pas_refined"}\ lemmas ita_wps = as_user_pas_refined restart_pas_refined cap_insert_pas_refined thread_set_pas_refined cap_delete_pas_refined' check_cap_inv2 hoare_vcg_all_liftE - wp_throw_const_impE hoare_weak_lift_imp hoare_vcg_all_lift + hoare_weak_lift_impE hoare_weak_lift_imp hoare_vcg_all_lift lemma hoare_st_refl: "\ \st. \P st\ f \Q st\; \r s st. Q st r s \ Q' r s \ \ \\s. P s s\ f \Q'\" diff --git a/proof/bisim/Syscall_S.thy b/proof/bisim/Syscall_S.thy index 623a060613..f43a01ef99 100644 --- a/proof/bisim/Syscall_S.thy +++ b/proof/bisim/Syscall_S.thy @@ -699,7 +699,7 @@ lemma schedule_separate_state [wp]: "\separate_state\ schedule :: (unit,unit) s_monad \\_. separate_state\" unfolding schedule_def switch_to_thread_def arch_switch_to_thread_def switch_to_idle_thread_def arch_switch_to_idle_thread_def allActiveTCBs_def - by (wpsimp wp: select_inv separate_state_pres' alternative_wp + by (wpsimp wp: select_inv separate_state_pres' simp: arch_activate_idle_thread_def | strengthen imp_consequent)+ diff --git a/proof/capDL-api/Arch_DP.thy b/proof/capDL-api/Arch_DP.thy index 2f6f08c0fa..6543ac019b 100644 --- a/proof/capDL-api/Arch_DP.thy +++ b/proof/capDL-api/Arch_DP.thy @@ -25,7 +25,7 @@ lemma cdl_lookup_pt_slot_rv: apply (rule validE_validE_R) apply (clarsimp simp : cdl_lookup_pt_slot_def) apply (clarsimp simp: validE_def valid_def bindE_def - bind_def bind_assoc NonDetMonad.lift_def) + bind_def bind_assoc Nondet_Monad.lift_def) apply (case_tac a) apply (clarsimp simp:liftE_def bindE_def bind_def return_def) apply (clarsimp simp:liftE_def bindE_def bind_def return_def) @@ -65,9 +65,9 @@ lemma decode_page_map_intent_rv_20_24: \\r s. 
R r\, -" apply (simp add: decode_invocation_def get_index_def get_page_intent_def throw_opt_def cap_rights_def decode_page_invocation_def throw_on_none_def get_mapped_asid_def) - apply (wp alternativeE_wp select_wp | wpc)+ + apply (wp | wpc)+ apply (rule validE_validE_R) - apply (wp alternativeE_wp) + apply wp apply (simp add:cdl_page_mapping_entries_def split del:if_split | wp | wpc)+ apply auto done @@ -86,9 +86,9 @@ lemma decode_page_map_intent_rv_16_12: get_page_intent_def throw_opt_def cap_rights_def decode_page_invocation_def throw_on_none_def get_mapped_asid_def) - apply (wp alternativeE_wp select_wp) + apply wp apply (rule validE_validE_R) - apply (wp alternativeE_wp) + apply wp apply (simp add:cdl_page_mapping_entries_def) apply (wp cdl_lookup_pt_slot_rv | wpc | simp)+ apply auto @@ -130,13 +130,13 @@ lemma invoke_page_table_wp: done crunch cdl_cur_thread[wp]: invoke_page "\s. P (cdl_current_thread s)" -(wp: crunch_wps select_wp alternative_wp simp : swp_def ) + (wp: crunch_wps simp: swp_def) crunch cdl_cur_thread[wp]: invoke_page_table "\s. P (cdl_current_thread s)" -(wp: crunch_wps select_wp alternative_wp simp : swp_def ) + (wp: crunch_wps simp: swp_def) crunch cdl_cur_domain[wp]: invoke_page_table, invoke_page "\s. P (cdl_current_domain s)" -(wp: crunch_wps select_wp alternative_wp simp : swp_def unless_def) + (wp: crunch_wps simp: swp_def unless_def) lemmas cap_asid_simps[simp] = cap_asid_def[split_simps cdl_cap.split] lemmas cap_mapped_simps[simp] = cap_mapped_def[split_simps cdl_cap.split] @@ -153,7 +153,7 @@ lemma decode_page_table_rv: apply (simp add:decode_invocation_def get_page_table_intent_def throw_opt_def decode_page_table_invocation_def) apply (rule hoare_pre) - apply (wp alternativeE_wp throw_on_none_wp | wpc | simp)+ + apply (wp throw_on_none_wp | wpc | simp)+ apply (clarsimp split:option.splits simp:get_index_def cap_object_def cap_has_object_def get_mapped_asid_def) done @@ -564,7 +564,7 @@ lemma decode_invocation_asid_pool_assign: decode_asid_pool_invocation_def get_index_def throw_opt_def throw_on_none_def) apply (rule validE_validE_R) - apply (wp alternativeE_wp select_wp) + apply wp apply (clarsimp simp:cap_object_def cap_has_object_def) done diff --git a/proof/capDL-api/CNode_DP.thy b/proof/capDL-api/CNode_DP.thy index dbd2c762a6..fd1420c280 100644 --- a/proof/capDL-api/CNode_DP.thy +++ b/proof/capDL-api/CNode_DP.thy @@ -60,7 +60,7 @@ lemma invoke_cnode_insert_cdl_current_domain[wp]: \\_ s. P (cdl_current_domain s) \" apply (simp add: invoke_cnode_def) apply (rule hoare_pre) - apply (wp alternative_wp | wpc | clarsimp)+ + apply (wp | wpc | clarsimp)+ done lemma invoke_cnode_move_cdl_current_domain[wp]: diff --git a/proof/capDL-api/IRQ_DP.thy b/proof/capDL-api/IRQ_DP.thy index 0d0b6659e1..6f4a1ec470 100644 --- a/proof/capDL-api/IRQ_DP.thy +++ b/proof/capDL-api/IRQ_DP.thy @@ -46,7 +46,6 @@ lemma invoke_irq_handler_set_handler_wp: invoke_irq_handler (SetIrqHandler irq cap slot) \\_. 
< irq \irq obj \* (obj, 0) \c cap \* R> \" apply (clarsimp simp: invoke_irq_handler_def, wp) - apply (wp alternative_wp) apply (wp sep_wp: insert_cap_child_wp insert_cap_sibling_wp)+ apply (sep_wp delete_cap_simple_format[where cap=cap'])+ apply (safe) @@ -71,7 +70,7 @@ lemma decode_invocation_irq_ack_rv': decode_irq_handler_invocation cap cap_ref caps (IrqHandlerAckIntent) \P\, -" apply (clarsimp simp: decode_irq_handler_invocation_def) - apply (wp alternativeE_R_wp) + apply wp apply (clarsimp) done @@ -80,7 +79,7 @@ lemma decode_invocation_irq_clear_rv': decode_irq_handler_invocation cap cap_ref caps (IrqHandlerClearIntent) \P\, -" apply (clarsimp simp: decode_irq_handler_invocation_def) - apply (wp alternativeE_R_wp) + apply wp apply (clarsimp) done @@ -105,7 +104,7 @@ decode_irq_handler_invocation cap cap_ref caps (IrqHandlerSetEndpointIntent) \P\, -" apply (rule validE_R_gen_asm_conj) apply (clarsimp simp: decode_irq_handler_invocation_def) - apply (wp alternativeE_R_wp | wpc)+ + apply (wp | wpc)+ apply (clarsimp split: cdl_cap.splits, safe) apply ((wp throw_on_none_rv)+, clarsimp simp: get_index_def) apply simp @@ -117,7 +116,7 @@ lemma decode_irq_control_issue_irq_rv: <\ (r, (unat depth)) : root_cap index \u cap \* R> s\ decode_irq_control_invocation target target_ref caps (IrqControlIssueIrqHandlerIntent irq index depth) \P\, -" apply (clarsimp simp: decode_irq_control_invocation_def) - apply (wp alternativeE_R_wp lookup_slot_for_cnode_op_rvu'[where cap=cap and r=r] throw_on_none_rv) + apply (wp lookup_slot_for_cnode_op_rvu'[where cap=cap and r=r] throw_on_none_rv) apply (clarsimp simp: get_index_def) apply (sep_solve) done diff --git a/proof/capDL-api/Invocation_DP.thy b/proof/capDL-api/Invocation_DP.thy index 9b16571f1b..d1736068d1 100644 --- a/proof/capDL-api/Invocation_DP.thy +++ b/proof/capDL-api/Invocation_DP.thy @@ -12,10 +12,10 @@ crunch cdl_current_domain[wp]: update_available_range, generate_object_ids, upda mark_tcb_intent_error, corrupt_ipc_buffer, insert_cap_sibling, insert_cap_child, move_cap, invoke_irq_control, invoke_irq_handler "\s. P (cdl_current_domain s)" -(wp: crunch_wps select_wp alternative_wp alternativeE_wp unless_wp simp: split_def corrupt_intents_def) +(wp: crunch_wps unless_wp simp: split_def corrupt_intents_def) crunch cdl_irq_node [wp]: corrupt_ipc_buffer "\s. P (cdl_irq_node s)" -(wp: crunch_wps select_wp simp: corrupt_intents_def) +(wp: crunch_wps simp: corrupt_intents_def) crunch cdl_irq_node [wp]: mark_tcb_intent_error "\s. P (cdl_irq_node s)" (wp: crunch_wps) @@ -124,7 +124,7 @@ lemma corrupt_tcb_intent_sep_helper[wp]: \\rv s. A (object_at (\obj. P (object_clean obj)) ptr s)\" apply (simp add:corrupt_tcb_intent_def update_thread_def set_object_def) - apply (wp select_wp | wpc | simp add:set_object_def)+ + apply (wp | wpc | simp add:set_object_def)+ apply (clarsimp simp:object_at_def) apply (simp add:object_clean_def intent_reset_def object_slots_def asid_reset_def update_slots_def) @@ -141,7 +141,7 @@ lemma corrupt_frame_sep_helper[wp]: "\\s. A (object_at (\obj. P (object_clean obj)) ptr s)\ corrupt_frame a \\rv s. A (object_at (\obj. P (object_clean obj)) ptr s)\" apply (simp add:corrupt_frame_def) - apply (wp select_wp) + apply wp apply (clarsimp simp:corrupt_intents_def object_at_def map_add_def split:option.splits cdl_object.splits) apply (simp add:object_clean_def intent_reset_def @@ -157,7 +157,7 @@ lemma corrupt_ipc_buffer_sep_inv[wp]: \\rv s. 
< P > s\" apply (rule sep_nonimpact_valid_lift) apply (simp add:corrupt_ipc_buffer_def) - apply (wp select_wp hoare_drop_imps | wpc | simp)+ + apply (wp hoare_drop_imps | wpc | simp)+ done lemma update_thread_intent_update: @@ -231,55 +231,48 @@ lemma no_exception_conj': done crunch inv[wp]: decode_untyped_invocation P - (wp:crunch_wps alternativeE_wp mapME_x_inv_wp - unlessE_wp simp:crunch_simps throw_on_none_def) + (wp: crunch_wps mapME_x_inv_wp unlessE_wp simp: crunch_simps throw_on_none_def) crunch inv[wp]: decode_irq_handler_invocation P - (wp:crunch_wps alternativeE_wp - simp:liftE_bindE throw_on_none_def) + (wp: crunch_wps simp: liftE_bindE throw_on_none_def) crunch inv[wp]: decode_tcb_invocation P - (wp:crunch_wps alternativeE_wp - simp:liftE_bindE throw_on_none_def) + (wp: crunch_wps simp: liftE_bindE throw_on_none_def) crunch inv[wp]: decode_domain_invocation P - (wp:crunch_wps alternativeE_wp - simp:liftE_bindE throw_on_none_def) + (wp:crunch_wps simp: liftE_bindE throw_on_none_def) crunch inv[wp]: decode_irq_control_invocation P - (wp:crunch_wps alternativeE_wp select_wp - simp:liftE_bindE throw_on_none_def) + (wp: crunch_wps simp: liftE_bindE throw_on_none_def) crunch inv[wp]: decode_asid_control_invocation P - (wp:crunch_wps alternativeE_wp select_wp ignore:returnOk - simp:liftE_bindE throw_on_none_def) + (wp: crunch_wps ignore: returnOk simp: liftE_bindE throw_on_none_def) crunch inv[wp]: lookup_cap_and_slot P (wp:crunch_wps resolve_address_bits_wp) crunch inv[wp]: decode_page_invocation P - (wp:crunch_wps alternativeE_wp select_wp resolve_address_bits_wp - simp:throw_on_none_def) + (wp: crunch_wps resolve_address_bits_wp simp: throw_on_none_def) lemma decode_page_table_invocation_inv[wp]: "\P\ decode_page_table_invocation a b c d \\_. P\" apply (simp add:decode_page_table_invocation_def) apply (rule hoare_pre) - apply (wpc|wp alternativeE_wp select_wp |simp add:throw_on_none_def)+ + apply (wpc|wp |simp add:throw_on_none_def)+ done lemma decode_page_directory_invocation_inv[wp]: "\P\ decode_page_directory_invocation a b c d \\_. P\" apply (simp add:decode_page_directory_invocation_def) apply (rule hoare_pre) - apply (wpc|wp alternativeE_wp select_wp |simp add:throw_on_none_def)+ + apply (wpc|wp |simp add:throw_on_none_def)+ done lemma decode_asid_pool_invocation_inv[wp]: "\P\ decode_asid_pool_invocation a b c d \\_. P\" apply (simp add:decode_asid_pool_invocation_def) apply (rule hoare_pre) - apply (wpc|wp alternativeE_wp select_wp |simp add:throw_on_none_def)+ + apply (wpc|wp |simp add:throw_on_none_def)+ done lemma decode_invocation_inv[wp]: @@ -427,16 +420,15 @@ lemma handle_event_syscall_no_decode_exception: done crunch cdl_current_thread [wp]: delete_cap_simple "\s. P (cdl_current_thread s)" -(wp:crunch_wps select_wp simp:split_def unless_def) + (wp: crunch_wps simp: split_def unless_def) crunch cdl_current_thread [wp]: mark_tcb_intent_error "\s. P (cdl_current_thread s)" -(wp:crunch_wps select_wp simp:split_def unless_def) + (wp: crunch_wps simp: split_def unless_def) crunch cdl_current_thread [wp]: corrupt_ipc_buffer "\s. P (cdl_current_thread s)" -(wp:crunch_wps select_wp simp:split_def unless_def corrupt_frame_def corrupt_intents_def) + (wp: crunch_wps simp: split_def unless_def corrupt_frame_def corrupt_intents_def) crunch cdl_current_thread [wp]: invoke_irq_control, invoke_irq_handler "\s. P (cdl_current_thread s)" -(wp:alternative_wp) lemma corrupt_tcb_intent_all_active_tcbs[wp]: @@ -478,7 +470,7 @@ lemma send_signal_no_pending: \\r. 
P\" apply (simp add: send_signal_def send_signal_bound_def) apply (rule hoare_pre) - apply (wp alternative_wp | wpc)+ + apply (wp | wpc)+ apply (rule hoare_pre_cont) apply (rule_tac P = "waiters = {}" in hoare_gen_asm) apply (clarsimp simp: option_select_def) @@ -495,7 +487,7 @@ lemma send_signal_no_pending: done crunch invs[wp]: get_active_irq P - (wp: crunch_wps alternative_wp select_wp) + (wp: crunch_wps) lemma handle_pending_interrupts_no_ntf_cap: "\P and no_pending\ @@ -506,7 +498,7 @@ lemma handle_pending_interrupts_no_ntf_cap: apply (wp send_signal_no_pending | wpc | simp add: option_select_def handle_interrupt_def split del: if_split)+ - apply (wp alternative_wp select_wp hoare_drop_imps hoare_vcg_all_lift) + apply (wp hoare_drop_imps hoare_vcg_all_lift) apply simp done @@ -622,7 +614,7 @@ lemma invoke_cnode_insert_cap: apply (simp add:validE_def) apply (rule hoare_name_pre_state) apply (clarsimp simp:invoke_cnode_def liftE_bindE validE_def[symmetric]) - apply (wpsimp wp: alternative_wp insert_cap_sibling_wp insert_cap_child_wp) + apply (wpsimp wp: insert_cap_sibling_wp insert_cap_child_wp) done lemma invoke_cnode_move_wp: @@ -676,19 +668,17 @@ lemma cdl_cur_thread_detype: by (simp add:detype_def) crunch cdl_current_thread[wp]: reset_untyped_cap "\s. P (cdl_current_thread s)" - (wp: select_wp alternativeE_wp mapME_x_inv_wp whenE_wp - simp: cdl_cur_thread_detype crunch_simps) + (wp: mapME_x_inv_wp whenE_wp simp: cdl_cur_thread_detype crunch_simps) lemmas helper = valid_validE_E[OF reset_untyped_cap_cdl_current_thread] crunch cdl_current_thread[wp]: invoke_untyped "\s. P (cdl_current_thread s)" -(wp:select_wp mapM_x_wp' crunch_wps unless_wp alternativeE_wp - helper - simp:cdl_cur_thread_detype crunch_simps) + (wp: mapM_x_wp' crunch_wps unless_wp helper + simp:cdl_cur_thread_detype crunch_simps) crunch cdl_current_thread[wp]: move_cap "\s. P (cdl_current_thread s)" -(wp:select_wp mapM_x_wp' crunch_wps unless_wp - simp:crunch_simps) + (wp: mapM_x_wp' crunch_wps unless_wp + simp:crunch_simps) lemma cnode_insert_cap_cdl_current_thread: "\\s. 
P (cdl_current_thread s) \ @@ -698,7 +688,7 @@ lemma cnode_insert_cap_cdl_current_thread: apply (clarsimp simp: invoke_cnode_def liftE_bindE validE_def[symmetric]) apply (rule hoare_pre) - apply (wp alternative_wp | simp | wpc)+ + apply (wp | simp | wpc)+ done lemma cnode_move_cap_cdl_current_thread: @@ -709,7 +699,7 @@ lemma cnode_move_cap_cdl_current_thread: apply (clarsimp simp: invoke_cnode_def liftE_bindE validE_def[symmetric]) apply (rule hoare_pre) - apply (wp alternative_wp | simp | wpc)+ + apply (wp | simp | wpc)+ done lemma sep_any_imp_c'_conj: @@ -865,7 +855,7 @@ lemma tcb_has_error_set_cap: apply (simp add:set_cap_def gets_the_def set_object_def split_def) - apply (wp select_wp|wpc|simp)+ + apply (wp|wpc|simp)+ apply (clarsimp simp:tcb_has_error_def object_at_def,simp split:cdl_object.split_asm) apply (intro conjI impI) @@ -1130,7 +1120,7 @@ lemma invoke_cnode_insert_cap': apply (simp add:validE_def) apply (rule hoare_name_pre_state) apply (clarsimp simp:invoke_cnode_def liftE_bindE validE_def[symmetric]) - apply (wpsimp wp: alternative_wp insert_cap_sibling_wp insert_cap_child_wp + apply (wpsimp wp: insert_cap_sibling_wp insert_cap_child_wp simp: cap_of_insert_call_def) done @@ -1152,13 +1142,13 @@ lemma sep_map_c_asid_reset: apply clarsimp apply (case_tac "\ has_slots obj") apply simp - apply (rule_tac x = "update_slots (object_slots obj(snd ptr \ cap')) obj" + apply (rule_tac x = "update_slots ((object_slots obj)(snd ptr \ cap')) obj" in exI) apply (simp add:sep_map_general_def object_to_sep_state_slot) apply clarsimp apply (case_tac "\ has_slots obj") apply simp - apply (rule_tac x = "update_slots (object_slots obj(snd ptr \ cap)) obj" + apply (rule_tac x = "update_slots ((object_slots obj)(snd ptr \ cap)) obj" in exI) apply (simp add:sep_map_general_def object_to_sep_state_slot) done diff --git a/proof/capDL-api/KHeap_DP.thy b/proof/capDL-api/KHeap_DP.thy index 0e31d16019..f2bdcf33b3 100644 --- a/proof/capDL-api/KHeap_DP.thy +++ b/proof/capDL-api/KHeap_DP.thy @@ -343,7 +343,7 @@ lemma decode_tcb_invocation: "\P\decode_tcb_invocation cap cap_ref caps (TcbWriteRegistersIntent resume flags count regs) \\_. P\" apply (clarsimp simp: decode_tcb_invocation_def) -apply (wp alternative_wp) +apply wp apply (clarsimp) done @@ -373,7 +373,7 @@ lemma invoke_cnode_insert_wp: \\_. c cap \* R>\" apply (rule hoare_gen_asm) apply (clarsimp simp: invoke_cnode_def) - apply (wp insert_cap_sibling_wp insert_cap_child_wp alternative_wp) + apply (wp insert_cap_sibling_wp insert_cap_child_wp) apply (clarsimp) done @@ -580,21 +580,21 @@ lemma derive_cap_rv: derive_cap slot cap \\rv s. P s \ ( rv = cap \ rv = NullCap )\, \\_ _. True\" apply (clarsimp simp: derive_cap_def returnOk_def split: cdl_cap.splits,safe) - apply (wp return_rv whenE_wp alternativeE_wp | clarsimp simp: ensure_no_children_def)+ + apply (wp return_rv whenE_wp | clarsimp simp: ensure_no_children_def)+ done lemma derive_cap_wp [wp]: "\P\ derive_cap slot cap \\_. P\" apply (clarsimp simp: derive_cap_def returnOk_def split: cdl_cap.splits) apply (safe) - apply ((wp alternative_wp whenE_wp)|(clarsimp simp: ensure_no_children_def))+ + apply ((wp whenE_wp)|(clarsimp simp: ensure_no_children_def))+ done lemma derive_cap_wpE: "\P\ derive_cap slot cap \\_.P\,\\_.P\" apply (clarsimp simp: derive_cap_def) - apply (case_tac cap, (wp whenE_wp alternative_wp | + apply (case_tac cap, (wp whenE_wp | simp add: ensure_no_children_def)+) done @@ -710,7 +710,7 @@ lemma derive_cap_invE: "\P (derived_cap cap) and Q\ derive_cap slot cap \P\, \\r. 
Q\" apply (simp add:derive_cap_def) apply (rule hoare_pre) - apply (wp alternative_wp alternativeE_wp|wpc|simp)+ + apply (wp|wpc|simp)+ apply (auto simp:derived_cap_def) done @@ -759,7 +759,7 @@ lemma decode_cnode_move_rvu: crunch preserve [wp]: decode_cnode_invocation "P" -(wp: derive_cap_wpE unlessE_wp whenE_wp select_wp hoare_drop_imps simp: if_apply_def2 throw_on_none_def) + (wp: derive_cap_wpE unlessE_wp whenE_wp hoare_drop_imps simp: if_apply_def2 throw_on_none_def) lemma decode_invocation_wp: "\P\ decode_invocation (CNodeCap x y z sz) ref caps (CNodeIntent intent) \\_. P\, -" @@ -1151,14 +1151,14 @@ lemma has_restart_cap_sep_wp: lemma lift_do_kernel_op': "\\s'. P s'\ f \\_ s'. Q s'\ \ \\s. P (kernel_state s)\ do_kernel_op f \\_ s. Q (kernel_state s)\" apply (simp add: do_kernel_op_def split_def) - apply (wp select_wp) + apply wp apply (simp add: valid_def split_def) done lemma lift_do_kernel_op: "\\s. s = s'\ f \\_ s. s = s'\ \ \\s. (kernel_state s) = s'\ do_kernel_op f \\_ s. (kernel_state s) = s'\" apply (simp add: do_kernel_op_def split_def) - apply (wp select_wp) + apply wp apply (simp add: valid_def split_def) done @@ -1179,7 +1179,7 @@ lemma schedule_no_choice_wp: schedule \\r s. cdl_current_thread s = Some current_thread \ cdl_current_domain s = current_domain \ P s\" apply (simp add:schedule_def switch_to_thread_def change_current_domain_def) - apply (wp alternative_wp select_wp) + apply wp apply (case_tac s,clarsimp) done diff --git a/proof/capDL-api/ProofHelpers_DP.thy b/proof/capDL-api/ProofHelpers_DP.thy index bf1609f26d..2a9aa2dcd1 100644 --- a/proof/capDL-api/ProofHelpers_DP.thy +++ b/proof/capDL-api/ProofHelpers_DP.thy @@ -11,7 +11,7 @@ imports begin crunch_ignore (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE diff --git a/proof/capDL-api/Retype_DP.thy b/proof/capDL-api/Retype_DP.thy index 09363f4db8..fc420ae076 100644 --- a/proof/capDL-api/Retype_DP.thy +++ b/proof/capDL-api/Retype_DP.thy @@ -50,7 +50,7 @@ lemma create_objects_mapM_x': qed crunch inv[wp]: generate_object_ids P -(wp:crunch_wps select_wp) + (wp: crunch_wps) lemma pick_rev: assumes "target_object_ids = map (\x. {x}) ids" @@ -111,7 +111,7 @@ lemma generate_object_ids_rv: \\r s. r = map (\x. {x}) (map pick r) \ length r = n \ set (map pick r) \ obj_range \ distinct (map pick r) \" apply (clarsimp simp:generate_object_ids_def) - apply (wp select_wp) + apply wp apply clarsimp apply (simp add: distinct_map) apply (intro conjI) @@ -185,7 +185,7 @@ lemma update_available_range_wp: apply (rule_tac x = new_range in exI) apply (intro conjI,assumption+) apply (sep_select 2,assumption) - apply (wp select_wp) + apply wp apply clarsimp+ done @@ -254,10 +254,10 @@ lemma reset_untyped_cap_wp: apply (rule_tac P = "\fr. cap = UntypedCap dev obj_range fr \ (\fr\ set x. 
free_range \ fr \ fr \ obj_range)" in hoare_gen_asmE) apply clarsimp - apply (wp whenE_wp mapME_x_wp alternativeE_wp) + apply (wp whenE_wp mapME_x_wp) apply (rule ballI) apply (rule hoare_pre) - apply (wp alternative_wp) + apply wp apply simp apply (rule hoare_post_imp[OF _ set_cap_wp]) apply clarsimp @@ -265,7 +265,7 @@ lemma reset_untyped_cap_wp: apply ((rule conjI, fastforce)+, sep_solve) apply clarsimp apply sep_solve - apply (wp select_wp | clarsimp)+ + apply (wp | clarsimp)+ apply (subst dummy_detype_if_untyped) apply simp apply (sep_select_asm 2) @@ -292,12 +292,12 @@ lemma reset_untyped_cap_wp: done crunch cdl_current_domain[wp]: reset_untyped_cap "\s. P (cdl_current_domain s)" -(wp:select_wp mapM_x_wp' mapME_x_inv_wp alternativeE_wp crunch_wps unless_wp - simp: detype_def crunch_simps) + (wp: mapM_x_wp' mapME_x_inv_wp crunch_wps unless_wp + simp: detype_def crunch_simps) crunch cdl_current_domain[wp]: invoke_untyped "\s. P (cdl_current_domain s)" -(wp: select_wp mapM_x_wp' mapME_x_inv_wp alternativeE_wp crunch_wps unless_wp - simp: detype_def crunch_simps validE_E_def) + (wp: mapM_x_wp' mapME_x_inv_wp crunch_wps unless_wp + simp: detype_def crunch_simps validE_E_def) lemma invoke_untyped_wp: "\ K (default_object nt ts minBound = Some obj \ nt \ UntypedType @@ -416,7 +416,7 @@ lemma decode_untyped_invocation_rvu: get_index_def throw_on_none_def decode_untyped_invocation_def mapME_x_singleton) apply (rule hoare_pre) - apply (wp alternativeE_wp unlessE_wp + apply (wp unlessE_wp lookup_slot_for_cnode_op_rvu' | wpc | clarsimp)+ done @@ -439,17 +439,13 @@ abbreviation (input) "retype_with_kids uinv \ (case uinv of (InvokeUntyped (Retype uref nt ts dest has_kids n)) \ has_kids)" -crunch cdt[wp]: retype_region "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) - -crunch has_children[wp]: retype_region "has_children slot" -(wp:select_wp simp:crunch_simps corrupt_intents_def simp:has_children_def is_cdt_parent_def) - -crunch cdt[wp]: update_available_range "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) +crunches retype_region, update_available_range + for cdt[wp]: "\s. P (cdl_cdt s)" + (simp: crunch_simps corrupt_intents_def) -crunch has_children[wp]: update_available_range "has_children slot" -(wp:select_wp simp:crunch_simps corrupt_intents_def simp:has_children_def is_cdt_parent_def) +crunches retype_region, update_available_range + for has_children[wp]: "has_children slot" + (simp: crunch_simps corrupt_intents_def has_children_def is_cdt_parent_def) lemma invoke_untyped_one_has_children: "uinv = (Retype uref nt ts [slot] has_kids (Suc 0)) @@ -484,7 +480,7 @@ lemma invoke_untyped_exception: apply (rule hoare_name_pre_stateE) apply (cases uinv) apply clarsimp - apply (wp unlessE_wp alternative_wp + apply (wp unlessE_wp | wpc | simp add: reset_untyped_cap_def)+ apply (rule_tac P = "available_range cap = cap_objects cap" in hoare_gen_asmEx) apply (simp add: whenE_def) @@ -537,14 +533,9 @@ lemma mark_tcb_intent_error_has_children[wp]: by (wpsimp simp: has_children_def is_cdt_parent_def mark_tcb_intent_error_def update_thread_def set_object_def) -crunch cdt[wp]: corrupt_frame "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) - -crunch cdt[wp]: corrupt_tcb_intent "\s. P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) - -crunch cdt[wp]: corrupt_ipc_buffer "\s. 
P (cdl_cdt s)" -(wp:select_wp simp:crunch_simps corrupt_intents_def) +crunches corrupt_frame, corrupt_tcb_intent, corrupt_ipc_buffer + for cdt[wp]: "\s. P (cdl_cdt s)" + (simp: crunch_simps corrupt_intents_def) lemma corrupt_ipc_buffer_has_children[wp]: "\\s. P (has_children ptr s)\ @@ -703,10 +694,10 @@ lemma seL4_Untyped_Retype_sep: **********************************************************************) crunch cdt_inc[wp]: schedule "\s. cdl_cdt s child = parent" -(wp:select_wp alternative_wp crunch_wps simp:crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch cdt_inc[wp]: handle_pending_interrupts "\s. cdl_cdt s child = parent" -(wp:select_wp alternative_wp simp:crunch_simps) + (wp: simp: crunch_simps) lemmas gets_the_resolve_cap_sym = gets_the_resolve_cap[symmetric] @@ -795,7 +786,7 @@ lemma invoke_untyped_cdt_inc[wp]: apply (simp add: reset_untyped_cap_def validE_def sum.case_eq_if) apply (rule_tac Q = "\r s. cdl_cdt s child = Some parent" in hoare_post_imp) apply simp - apply (wp whenE_wp alternativeE_wp mapME_x_inv_wp select_wp | simp)+ + apply (wp whenE_wp mapME_x_inv_wp | simp)+ apply (clarsimp simp:detype_def) done @@ -839,10 +830,9 @@ lemma lookup_cap_rvu': done crunch cdl_current_thread [wp]: handle_pending_interrupts "\s. P (cdl_current_thread s)" -(wp: alternative_wp select_wp) crunch cdl_current_thread [wp]: lookup_cap "\s. P (cdl_current_thread s)" -(wp: alternative_wp select_wp hoare_drop_imps) + (wp: hoare_drop_imps) lemma throw_opt_wp_valid: "\P\ throw_opt err x \\r. P\" @@ -891,10 +881,10 @@ lemma corrupt_intents_no_pending: done crunch no_pending[wp]: corrupt_ipc_buffer no_pending - (wp: crunch_wps select_wp update_thread_no_pending corrupt_intents_no_pending) + (wp: crunch_wps update_thread_no_pending corrupt_intents_no_pending) crunch no_pending[wp]: mark_tcb_intent_error no_pending - (wp: crunch_wps select_wp update_thread_no_pending corrupt_intents_no_pending) + (wp: crunch_wps update_thread_no_pending corrupt_intents_no_pending) lemma detype_one_wp: "o - \* R> s @@ -955,11 +945,11 @@ lemma invoke_untyped_preempt: sep_map_set_conj sep_any_map_o obj_range \* Q) s\" apply (simp add: invoke_untyped_def) apply (wp unlessE_wp) - apply (simp add: reset_untyped_cap_def whenE_liftE | wp whenE_wp alternative_wp)+ + apply (simp add: reset_untyped_cap_def whenE_liftE | wp whenE_wp)+ apply (rule_tac P = "\a. cap = UntypedCap dev obj_range a" in hoare_gen_asmEx) apply (rule hoare_post_impErr[where E = E and F = E for E]) apply (rule mapME_x_inv_wp[where P = P and E = "\r. P" for P]) - apply (wp alternative_wp) + apply wp apply simp apply (wp hoare_vcg_ex_lift) apply (rule hoare_post_imp[OF _ set_cap_wp]) @@ -969,7 +959,7 @@ lemma invoke_untyped_preempt: apply sep_solve apply simp apply simp - apply (wp select_wp)+ + apply wp+ apply clarsimp apply (frule opt_cap_sep_imp) apply (clarsimp dest!: reset_cap_asid_untyped_cap_eqD) @@ -1004,14 +994,13 @@ lemma set_parent_cdl_parent: done crunch cdl_parent[wp]: reset_untyped_cap "\s. cdl_cdt s slot = Some parent" - (wp: assert_inv crunch_wps select_wp mapME_x_inv_wp alternative_wp -simp: crunch_simps detype_def) + (wp: assert_inv crunch_wps mapME_x_inv_wp + simp: crunch_simps detype_def) crunch cdl_parent[wp]: insert_cap_child, corrupt_ipc_buffer, corrupt_tcb_intent, update_thread, derive_cap, insert_cap_sibling "\s. 
cdl_cdt s slot = Some parent" - (wp: crunch_wps select_wp set_parent_cdl_parent simp: crunch_simps -corrupt_intents_def) + (wp: crunch_wps set_parent_cdl_parent simp: crunch_simps corrupt_intents_def) lemma transfer_caps_loop_cdl_parent: "\\s. cdl_cdt s slot = Some parent\ @@ -1019,7 +1008,7 @@ lemma transfer_caps_loop_cdl_parent: \\_ s. cdl_cdt s slot = Some parent\" apply (induct caps arbitrary: dest; clarsimp split del: if_split) apply (rule hoare_pre) - apply (wp alternative_wp crunch_wps | assumption + apply (wp crunch_wps | assumption | simp add: crunch_simps split del: if_split)+ done @@ -1053,7 +1042,7 @@ lemma set_cap_no_pending[wp]: \no_pending\ set_cap slot cap \\rv s. no_pending s\" apply (simp add: set_cap_def) apply (cases slot, simp) - apply (wp set_object_no_pending select_wp | wpc | simp add: no_pending_def)+ + apply (wp set_object_no_pending | wpc | simp add: no_pending_def)+ apply (drule_tac x = a in spec) apply (rule conjI) apply (clarsimp simp: tcb_pending_op_slot_def tcb_ipcbuffer_slot_def) @@ -1103,9 +1092,8 @@ lemma reset_untyped_cap_no_pending[wp]: "\no_pending \ reset_untyped_cap cref \\rv. no_pending\" apply (simp add: reset_untyped_cap_def) apply (wp whenE_wp) - apply (rule_tac P = "snd cref = tcb_pending_op_slot \ \ is_pending_cap cap" in hoare_gen_asmEx) - apply (wp mapME_x_inv_wp alternativeE_wp | simp)+ - apply (wp select_wp)+ + apply (rule_tac P = "snd cref = tcb_pending_op_slot \ \ is_pending_cap cap" in hoare_gen_asmEx) + apply (wp mapME_x_inv_wp | simp)+ apply (clarsimp simp: detype_no_pending) apply (cases cref, clarsimp simp: no_pending_def) done @@ -1158,9 +1146,9 @@ lemma reset_untyped_cap_not_pending_cap[wp]: apply (simp add: reset_untyped_cap_def) apply (wp whenE_wp) apply (rule_tac P = " \ is_pending_cap cap" in hoare_gen_asmEx) - apply (wp mapME_x_inv_wp alternativeE_wp set_cap_opt_cap)+ + apply (wp mapME_x_inv_wp set_cap_opt_cap)+ apply simp - apply (wp select_wp)+ + apply wp+ apply (clarsimp simp: detype_no_pending) apply (cases cref) apply (clarsimp simp: detype_def opt_cap_def slots_of_def object_slots_def @@ -1172,11 +1160,10 @@ lemma invoke_untyped_no_pending[wp]: invoke_untyped (Retype ref a b c d e) \\rv. no_pending\" apply (simp add: invoke_untyped_def create_cap_def) - apply (wpsimp wp: mapM_x_wp' set_cap_no_pending_asm_in_pre get_cap_wp select_wp - simp: update_available_range_def - )+ + apply (wpsimp wp: mapM_x_wp' set_cap_no_pending_asm_in_pre get_cap_wp + simp: update_available_range_def) apply (wp (once) hoare_drop_imps) - apply (wpsimp split_del: if_split)+ + apply (wpsimp split_del: if_split)+ apply (rule_tac Q' = "\r s. no_pending s \ ((\y. opt_cap ref s = Some y) \ \ is_pending_cap (the (opt_cap ref s)))" in hoare_post_imp_R) apply (wp reset_untyped_cap_no_pending) diff --git a/proof/capDL-api/TCB_DP.thy b/proof/capDL-api/TCB_DP.thy index d8f5d39461..23436c7afa 100644 --- a/proof/capDL-api/TCB_DP.thy +++ b/proof/capDL-api/TCB_DP.thy @@ -51,7 +51,7 @@ lemma restart_wp: restart tcb \\_. < (tcb,tcb_pending_op_slot) \c cap \* R > \" apply (clarsimp simp: restart_def) - apply (wp alternative_wp) + apply wp apply (wp set_cap_wp[sep_wand_wp])+ apply (clarsimp) apply (rule hoare_pre_cont) @@ -70,7 +70,7 @@ lemma invoke_tcb_write: invoke_tcb (WriteRegisters tcb x y z) \\_. 
< (tcb,tcb_pending_op_slot) \c cap \* R >\" apply (clarsimp simp: invoke_tcb_def) - apply (wp alternative_wp restart_wp | simp)+ + apply (wp restart_wp | simp)+ done lemma not_memory_cap_reset_asid: @@ -93,16 +93,15 @@ lemma tcb_update_thread_slot_wp: apply (clarsimp simp: tcb_update_thread_slot_def) apply (rule hoare_name_pre_state) apply (clarsimp) - apply (wp) - apply (wp alternative_wp) - apply (wp insert_cap_child_wp) - apply (wp insert_cap_sibling_wp get_cap_rv)+ + apply wp + apply (wp insert_cap_child_wp) + apply (wp insert_cap_sibling_wp get_cap_rv)+ apply (safe) apply (sep_solve) apply (drule not_memory_cap_reset_asid') apply (clarsimp simp: is_memory_cap_def split:cdl_cap.splits) apply (clarsimp) -done + done lemma tcb_empty_thread_slot_wp: "\<(target_tcb,slot) \c NullCap \* R>\ tcb_empty_thread_slot target_tcb slot \\_. <(target_tcb,slot) \c NullCap \* R>\ " apply (simp add:tcb_empty_thread_slot_def whenE_def | wp)+ @@ -330,7 +329,7 @@ lemma decode_tcb_invocation_wp[wp]: decode_tcb_invocation cap cap_ref caps (TcbConfigureIntent fault_ep cspace_root_data vspace_root_data buffer) \\_. P\, \\_. P\" apply (clarsimp simp: decode_tcb_invocation_def) - apply (wp alternative_wp) + apply wp apply (clarsimp) done @@ -356,7 +355,7 @@ lemma decode_invocation_tcb_rv': decode_tcb_invocation cap cap_ref caps (TcbConfigureIntent fault_ep cspace_root_data vspace_root_data buffer) \P\, -" apply (clarsimp simp: decode_tcb_invocation_def) - apply (wp alternativeE_R_wp) + apply wp apply (wp throw_on_none_rvR)+ apply (safe) apply (clarsimp simp: get_index_def) @@ -474,7 +473,7 @@ lemma tcb_update_vspace_root_inv: tcb_update_vspace_root a b c \\_ s. P (cdl_current_thread s)\" apply (clarsimp simp: tcb_update_vspace_root_def) - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (wp tcb_empty_thread_slot_wp_inv) apply auto @@ -486,7 +485,7 @@ lemma tcb_update_cspace_root_inv: tcb_update_cspace_root a b c \\_ s. P (cdl_current_thread s)\" apply (clarsimp simp: tcb_update_cspace_root_def) - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (wp tcb_empty_thread_slot_wp_inv) apply auto @@ -497,7 +496,7 @@ lemma tcb_update_ipc_buffer_inv: tcb_update_ipc_buffer a b c \\_ s. P (cdl_current_thread s)\" apply (clarsimp simp: tcb_update_ipc_buffer_def) - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (wp tcb_empty_thread_slot_wp_inv) apply auto @@ -516,7 +515,7 @@ lemma invoke_tcb_ThreadControl_cur_thread: \\_ s. 
P (cdl_current_thread s) \" including no_pre apply (simp add:invoke_tcb_def comp_def) - apply (wp alternative_wp whenE_wp + apply (wp whenE_wp tcb_empty_thread_slot_wp_inv [where R = "(target_tcb, tcb_vspace_slot) \c - \* (target_tcb,tcb_cspace_slot) \c - @@ -527,7 +526,7 @@ lemma invoke_tcb_ThreadControl_cur_thread: apply (clarsimp simp:conj_comms) apply (rule hoare_post_impErr[OF valid_validE,rotated],assumption) apply (fastforce split:option.splits) - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_erule_concl refl_imp sep_any_imp, assumption) @@ -553,7 +552,7 @@ lemma invoke_tcb_ThreadControl_cur_thread: apply (wp tcb_empty_thread_slot_wp_inv) apply clarsimp apply (sep_solve) - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_select 2) @@ -591,7 +590,7 @@ lemma invoke_tcb_ThreadControl_cur_thread: " in hoare_post_impErr[rotated -1]) apply assumption apply (wp whenE_wp |wpc|simp add:tcb_update_cspace_root_def)+ - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_schem) @@ -660,7 +659,7 @@ lemma decode_tcb_invocation_current_thread_inv[wp]: (TcbConfigureIntent fault_ep cspace_root_data vspace_root_data buffer_addr) \\_ s. P (cdl_current_thread s)\" apply (clarsimp simp: decode_tcb_invocation_def) - apply (wp alternative_wp) + apply wp apply (safe) done @@ -782,7 +781,7 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: \ invoke_tcb (ThreadControl target_tcb tcb_cap_slot faultep croot vroot ipc_buffer) \\_ s. P (cdl_current_domain s) \" apply (simp add:invoke_tcb_def comp_def) - apply (wp alternative_wp whenE_wp + apply (wp whenE_wp tcb_empty_thread_slot_wp_inv [where R = "(target_tcb, tcb_vspace_slot) \c - \* (target_tcb,tcb_cspace_slot) \c - @@ -793,7 +792,7 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: apply (clarsimp simp:conj_comms) apply (rule hoare_post_impErr[OF valid_validE,rotated],assumption) apply (fastforce split:option.splits) - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_schem) @@ -818,7 +817,7 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: apply (wp tcb_empty_thread_slot_wp_inv) apply clarsimp apply (sep_solve) - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_select 2) @@ -856,7 +855,7 @@ lemma invoke_tcb_ThreadControl_cdl_current_domain: " in hoare_post_impErr[rotated -1]) apply assumption apply (wp whenE_wp |wpc|simp add:tcb_update_cspace_root_def)+ - apply (wp hoare_drop_imps whenE_wp alternative_wp + apply (wp hoare_drop_imps whenE_wp | simp add: tcb_update_vspace_root_def tcb_update_thread_slot_def)+ apply (rule hoare_post_imp[OF _ insert_cap_child_wp]) apply (sep_select 2) @@ -1167,7 +1166,7 @@ lemma restart_cdl_current_domain: "\\s. 
<(ptr,tcb_pending_op_slot) \c cap \* \ > s \ \ is_pending_cap cap \ P (cdl_current_domain s)\ restart ptr \\r s. P (cdl_current_domain s)\" apply (simp add:restart_def) - apply (wp alternative_wp) + apply wp apply (simp add:cancel_ipc_def) apply (wpsimp wp: hoare_pre_cont[where f="revoke_cap_simple sl" for sl])+ apply (drule opt_cap_sep_imp) @@ -1180,7 +1179,7 @@ lemma restart_cdl_current_thread: "\\s. <(ptr,tcb_pending_op_slot) \c cap \* \ > s \ \ is_pending_cap cap \ P (cdl_current_thread s)\ restart ptr \\r s. P (cdl_current_thread s)\" apply (simp add:restart_def) - apply (wp alternative_wp) + apply wp apply (simp add:cancel_ipc_def) apply (wpsimp wp: hoare_pre_cont[where f="revoke_cap_simple sl" for sl])+ apply (drule opt_cap_sep_imp) @@ -1244,8 +1243,6 @@ lemma seL4_TCB_WriteRegisters_wp: apply (simp add: decode_invocation_def throw_opt_def get_tcb_intent_def decode_tcb_invocation_def) apply wp - apply (rule alternativeE_wp) - apply (wp+)[2] apply (clarsimp simp:conj_comms lookup_extra_caps_def mapME_def sequenceE_def) apply (rule returnOk_wp) @@ -1335,8 +1332,6 @@ lemma seL4_TCB_Resume_wp: apply (simp add: decode_invocation_def throw_opt_def get_tcb_intent_def decode_tcb_invocation_def) apply wp - apply (rule alternativeE_wp) - apply (wp+)[2] apply (clarsimp simp: lookup_extra_caps_def mapME_def sequenceE_def) apply (rule returnOk_wp) apply (rule lookup_cap_and_slot_rvu diff --git a/proof/crefine/ARM/ADT_C.thy b/proof/crefine/ARM/ADT_C.thy index dd69714fb6..b45a62fcd7 100644 --- a/proof/crefine/ARM/ADT_C.thy +++ b/proof/crefine/ARM/ADT_C.thy @@ -84,7 +84,7 @@ lemma setTCBContext_C_corres: apply clarsimp apply (frule getObject_eq [rotated -1], simp) apply (simp add: objBits_simps') - apply (simp add: NonDetMonad.bind_def split_def) + apply (simp add: Nondet_Monad.bind_def split_def) apply (rule bexI) prefer 2 apply assumption diff --git a/proof/crefine/ARM/Arch_C.thy b/proof/crefine/ARM/Arch_C.thy index 168052fa28..516e609e78 100644 --- a/proof/crefine/ARM/Arch_C.thy +++ b/proof/crefine/ARM/Arch_C.thy @@ -50,7 +50,7 @@ lemma performPageTableInvocationUnmap_ccorres: apply (ctac add: unmapPageTable_ccorres) apply csymbr apply (simp add: storePTE_def swp_def) - apply (ctac add: clearMemory_PT_setObject_PTE_ccorres[unfolded dc_def]) + apply (ctac add: clearMemory_PT_setObject_PTE_ccorres) apply wp apply (simp del: Collect_const) apply (vcg exspec=unmapPageTable_modifies) @@ -397,7 +397,9 @@ shows apply (rule ccorres_rhs_assoc2) apply (rule ccorres_abstract_cleanup) apply (rule ccorres_symb_exec_l) - apply (rule_tac P = "rva = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (rename_tac pcap) + apply (rule_tac P = "pcap = (capability.UntypedCap isdev frame pageBits idx)" + in ccorres_gen_asm) apply (simp add: hrs_htd_update del:fun_upd_apply) apply (rule ccorres_split_nothrow) @@ -532,10 +534,10 @@ shows pageBits_def split: if_split) apply (clarsimp simp: ARMSmallPageBits_def word_sle_def is_aligned_mask[symmetric] - ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + ghost_assertion_data_get_gs_clear_region) apply (subst ghost_assertion_size_logic_flex[unfolded o_def, rotated]) apply assumption - apply (simp add: ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + apply (simp add: ghost_assertion_data_get_gs_clear_region) apply (drule valid_global_refsD_with_objSize, clarsimp)+ apply (clarsimp simp: isCap_simps dest!: ccte_relation_ccap_relation) apply (cut_tac ptr=frame and bits=12 @@ -1133,8 +1135,7 @@ lemma 
createSafeMappingEntries_PDE_ccorres: vm_attribs_relation_def superSectionPDEOffsets_def pdeBits_def from_bool_mask_simp[unfolded mask_def, simplified] - ptr_range_to_list_def upto_enum_step_def - o_def upto_enum_word + ptr_range_to_list_def upto_enum_step_def upto_enum_word cong: if_cong) apply (frule(1) page_directory_at_rf_sr, clarsimp) apply (frule array_ptr_valid_array_assertionD[OF h_t_valid_clift]) @@ -1467,13 +1468,13 @@ lemma pdeCheckIfMapped_ccorres: (Call pdeCheckIfMapped_'proc)" apply (cinit lift: pde___ptr_to_struct_pde_C_') apply (rule ccorres_pre_getObject_pde) - apply (rule_tac P'="{s. \pde'. cslift s (pde_Ptr slot) = Some pde' \ cpde_relation rv pde'}" + apply (rule_tac P'="{s. \pde'. cslift s (pde_Ptr slot) = Some pde' \ cpde_relation pd pde'}" in ccorres_from_vcg_throws[where P="\s. True"]) apply simp_all apply clarsimp apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps' return_def) - apply (case_tac rv, simp_all add: cpde_relation_invalid isInvalidPDE_def + apply (case_tac pd, simp_all add: cpde_relation_invalid isInvalidPDE_def split: if_split) done @@ -1794,7 +1795,7 @@ lemma performPageInvocationMapPDE_ccorres: apply (clarsimp simp: pde_range_relation_def ptr_range_to_list_def) apply vcg apply simp - apply (wp valid_pde_slots_lift2) + apply (wpsimp wp: valid_pde_slots_lift2) apply clarsimp apply (clarsimp simp: pde_range_relation_def ptr_range_to_list_def) apply (rule order_less_le_trans) @@ -1889,8 +1890,6 @@ lemma setMRs_single: (* usually when we call setMR directly, we mean to only set a single message register which will fit in actual registers *) lemma setMR_as_setRegister_ccorres: - notes dc_simp[simp del] - shows "ccorres (\rv rv'. rv' = of_nat offset + 1) ret__unsigned_' (tcb_at' thread and K (TCB_H.msgRegisters ! offset = reg \ offset < length msgRegisters)) (UNIV \ \\reg = val\ @@ -1907,7 +1906,7 @@ lemma setMR_as_setRegister_ccorres: apply (ctac add: setRegister_ccorres) apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setRegister_modifies) apply (clarsimp simp: length_msgRegisters n_msgRegisters_def not_le conj_commute) @@ -1917,7 +1916,7 @@ lemma setMR_as_setRegister_ccorres: done lemma performPageGetAddress_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. 
ksCurThread s = thread) and ct_in_state' ((=) Restart)) @@ -1943,7 +1942,7 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) @@ -1966,7 +1965,7 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp @@ -1979,10 +1978,10 @@ lemma performPageGetAddress_ccorres: Kernel_C.msgInfoRegister_def Kernel_C.R1_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply (clarsimp simp: ThreadState_Running_def) apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply vcg @@ -2173,12 +2172,12 @@ where lemma resolve_ret_rel_None[simp]: "resolve_ret_rel None y = (valid_C y = scast false)" - by (clarsimp simp: resolve_ret_rel_def o_def to_option_def to_bool_def split: if_splits) + by (clarsimp simp: resolve_ret_rel_def to_option_def to_bool_def split: if_splits) lemma resolve_ret_rel_Some: "\valid_C y = scast true; frameSize_C y = framesize_from_H (fst x); snd x = frameBase_C y\ \ resolve_ret_rel (Some x) y" - by (clarsimp simp: resolve_ret_rel_def o_def to_option_def) + by (clarsimp simp: resolve_ret_rel_def to_option_def) lemma resolveVAddr_ccorres: "ccorres resolve_ret_rel ret__struct_resolve_ret_C_' @@ -2605,7 +2604,7 @@ lemma decodeARMFrameInvocation_ccorres: apply csymbr apply (simp add: ARM.pptrBase_def hd_conv_nth length_ineq_not_Nil) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[unfolded id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* Doesn't throw case *) apply (drule_tac s="Some y" in sym, @@ -2632,7 +2631,6 @@ lemma decodeARMFrameInvocation_ccorres: simp add: ARM.pptrBase_def ARM.pptrBase_def hd_conv_nth length_ineq_not_Nil, ccorres_rewrite) - apply (fold dc_def) apply (rule ccorres_return_Skip, clarsimp) apply (subgoal_tac "cap_get_tag cap = SCAST(32 signed \ 32) cap_frame_cap \ cap_get_tag cap = SCAST(32 signed \ 32) cap_small_frame_cap", diff --git a/proof/crefine/ARM/CSpace_All.thy b/proof/crefine/ARM/CSpace_All.thy index 29c643614e..ab54a3a670 100644 --- a/proof/crefine/ARM/CSpace_All.thy +++ b/proof/crefine/ARM/CSpace_All.thy @@ -25,9 +25,9 @@ abbreviation (* FIXME: move *) lemma ccorres_return_into_rel: - "ccorres (\rv rv'. r (f rv) rv') xf G G' hs a c + "ccorres (r \ f) xf G G' hs a c \ ccorres r xf G G' hs (a >>= (\rv. return (f rv))) c" - by (simp add: liftM_def[symmetric] o_def) + by (simp add: liftM_def[symmetric]) lemma lookupCap_ccorres': "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf diff --git a/proof/crefine/ARM/CSpace_C.thy b/proof/crefine/ARM/CSpace_C.thy index 7f5e139148..607f475755 100644 --- a/proof/crefine/ARM/CSpace_C.thy +++ b/proof/crefine/ARM/CSpace_C.thy @@ -770,7 +770,7 @@ lemma update_freeIndex': supply if_cong[cong] apply (cinit lift: cap_ptr_' v32_') apply (rule ccorres_pre_getCTE) - apply (rule_tac P="\s. 
ctes_of s srcSlot = Some rv \ (\i. cteCap rv = UntypedCap d p sz i)" + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. cteCap cte = UntypedCap d p sz i)" in ccorres_from_vcg[where P' = UNIV]) apply (rule allI) apply (rule conseqPre) @@ -892,7 +892,7 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (rule ccorres_Guard) apply (rule ccorres_call) - apply (rule update_freeIndex [unfolded dc_def]) + apply (rule update_freeIndex) apply simp apply simp apply simp @@ -918,14 +918,14 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply csymbr apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) apply clarsimp apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap) apply (frule(1) cte_wp_at_valid_objs_valid_cap') apply (clarsimp simp: untypedBits_defs) @@ -1031,19 +1031,17 @@ lemma cteInsert_ccorres: apply csymbr apply simp apply (rule ccorres_move_c_guard_cte) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres: ccorres_updateMDB_skip) - apply (wp static_imp_wp)+ - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg - apply (wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg apply (clarsimp simp:cmdb_node_relation_mdbNext) - apply (wp setUntypedCapAsFull_cte_at_wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply (vcg exspec=setUntypedCapAsFull_modifies) apply wp apply vcg @@ -1214,7 +1212,7 @@ lemma cteMove_ccorres: apply (intro conjI, simp+) apply (erule (2) is_aligned_3_prev) apply (erule (2) is_aligned_3_next) - apply (clarsimp simp: dc_def split del: if_split) + apply (clarsimp split del: if_split) apply (simp add: ccap_relation_NullCap_iff) apply (clarsimp simp: cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) done @@ -1956,7 +1954,6 @@ lemma postCapDeletion_ccorres: apply (rule ccorres_symb_exec_r) apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) - apply (fold dc_def) apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) apply (clarsimp simp: cap_irq_handler_cap_lift) apply (ctac(no_vcg) add: deletedIRQHandler_ccorres) @@ -1967,9 +1964,9 @@ lemma postCapDeletion_ccorres: apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_Cond_rhs) apply (wpc; clarsimp simp: isCap_simps) - apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres) apply (simp add: 
not_irq_or_arch_cap_case) - apply (rule ccorres_return_Skip[unfolded dc_def])+ + apply (rule ccorres_return_Skip) apply clarsimp apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) @@ -2018,7 +2015,7 @@ lemma emptySlot_ccorres: \ \*** proof for the 'else' branch (return () and SKIP) ***\ prefer 2 - apply (ctac add: ccorres_return_Skip[unfolded dc_def]) + apply (ctac add: ccorres_return_Skip) \ \*** proof for the 'then' branch ***\ @@ -2058,12 +2055,11 @@ lemma emptySlot_ccorres: apply csymbr apply (rule ccorres_move_c_guard_cte) \ \--- instruction y \ updateMDB slot (\a. nullMDBNode);\ - apply (ctac (no_vcg) - add: ccorres_updateMDB_const [unfolded const_def]) + apply (ctac (no_vcg) add: ccorres_updateMDB_const) \ \the post_cap_deletion case\ - apply (ctac(no_vcg) add: postCapDeletion_ccorres [unfolded dc_def]) + apply (ctac(no_vcg) add: postCapDeletion_ccorres) \ \Haskell pre/post for y \ updateMDB slot (\a. nullMDBNode);\ apply wp @@ -2135,8 +2131,8 @@ lemma capSwapForDelete_ccorres: \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ apply (simp add:when_def) apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) - apply (case_tac "slot1=slot2", simp+) - apply (rule ccorres_return_void_C [simplified dc_def]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) \ \***Main goal***\ \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ @@ -2145,7 +2141,7 @@ lemma capSwapForDelete_ccorres: apply (rule ccorres_pre_getCTE)+ apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ \ \***Main goal***\ - apply (ctac (no_vcg) add: cteSwap_ccorres [unfolded dc_def] ) + apply (ctac (no_vcg) add: cteSwap_ccorres) \ \C Hoare triple for \cap2 :== \\ apply vcg \ \C existential Hoare triple for \cap2 :== \\ diff --git a/proof/crefine/ARM/CSpace_RAB_C.thy b/proof/crefine/ARM/CSpace_RAB_C.thy index eda10c9a31..84412cb532 100644 --- a/proof/crefine/ARM/CSpace_RAB_C.thy +++ b/proof/crefine/ARM/CSpace_RAB_C.thy @@ -168,10 +168,8 @@ next apply (simp add: cap_get_tag_isCap split del: if_split) apply (thin_tac "ret__unsigned = X" for X) apply (rule ccorres_split_throws [where P = "?P"]) - apply (rule_tac G' = "\w_rightsMask. ({s. nodeCap_' s = nodeCap} - \ {s. unat (n_bits_' s) = guard'})" - in ccorres_abstract [where xf' = w_rightsMask_']) - apply (rule ceqv_refl) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. 
unat (n_bits_' s) = guard'}" + in ccorres_inst) apply (rule_tac r' = "?rvr" in ccorres_rel_imp [where xf' = rab_xf]) defer diff --git a/proof/crefine/ARM/Ctac_lemmas_C.thy b/proof/crefine/ARM/Ctac_lemmas_C.thy index 7389fdaca3..2b42ab3a0c 100644 --- a/proof/crefine/ARM/Ctac_lemmas_C.thy +++ b/proof/crefine/ARM/Ctac_lemmas_C.thy @@ -23,7 +23,7 @@ lemma c_guard_abs_cte: apply (simp add: typ_heap_simps') done -lemmas ccorres_move_c_guard_cte [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] lemma c_guard_abs_tcb: fixes p :: "tcb_C ptr" @@ -33,7 +33,7 @@ lemma c_guard_abs_tcb: apply simp done -lemmas ccorres_move_c_guard_tcb [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] lemma cte_array_relation_array_assertion: "gsCNodes s p = Some n \ cte_array_relation s cstate @@ -96,7 +96,7 @@ lemma array_assertion_abs_tcb_ctes_add': lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] @@ -119,7 +119,7 @@ lemma c_guard_abs_tcb_ctes': done lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_c_guard_tcb_ctes [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] lemma c_guard_abs_pte: "\s s'. 
(s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True diff --git a/proof/crefine/ARM/Delete_C.thy b/proof/crefine/ARM/Delete_C.thy index 62ba1a05c9..2d74008235 100644 --- a/proof/crefine/ARM/Delete_C.thy +++ b/proof/crefine/ARM/Delete_C.thy @@ -804,7 +804,7 @@ lemma finaliseSlot_ccorres: ccorres_seq_skip) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], - unfolded split_def, unfolded K_def], + unfolded split_def], (simp add: in_monad)+) apply (simp add: from_bool_0) apply simp @@ -826,7 +826,7 @@ lemma finaliseSlot_ccorres: apply (simp add: guard_is_UNIV_def) apply (simp add: conj_comms) apply (wp make_zombie_invs' updateCap_cte_wp_at_cases - updateCap_cap_to' hoare_vcg_disj_lift static_imp_wp)+ + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) @@ -855,7 +855,7 @@ lemma finaliseSlot_ccorres: apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (frule valid_global_refsD_with_objSize, clarsimp) apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] - apply (wp isFinalCapability_inv static_imp_wp | wp (once) isFinal[where x=slot'])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -950,26 +950,23 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_drop_cutMon_bindE) apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg) add: cteDelete_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) apply (rule ccorres_drop_cutMon_bindE) apply (ctac(no_vcg) add: preemptionPoint_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) - apply (rule hyps[unfolded K_def], - (fastforce simp: in_monad)+)[1] + apply (rule hyps; fastforce simp: in_monad) apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp preemptionPoint_invR) apply simp apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp cteDelete_invs' cteDelete_sch_act_simple) apply (rule ccorres_cond_false) diff --git a/proof/crefine/ARM/Detype_C.thy b/proof/crefine/ARM/Detype_C.thy index 38f0623b5e..97b3c51563 100644 --- a/proof/crefine/ARM/Detype_C.thy +++ b/proof/crefine/ARM/Detype_C.thy @@ -1434,7 +1434,7 @@ lemma deleteObjects_ccorres': apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: in_monad) apply (rule bexI [rotated]) - apply (rule iffD2 [OF in_monad(20)]) + apply (rule iffD2 [OF in_monad(21)]) apply (rule conjI [OF refl refl]) apply (clarsimp simp: simpler_modify_def) proof - diff --git a/proof/crefine/ARM/Fastpath_C.thy b/proof/crefine/ARM/Fastpath_C.thy index b0e9b7ce97..79948463ae 100644 --- a/proof/crefine/ARM/Fastpath_C.thy +++ b/proof/crefine/ARM/Fastpath_C.thy @@ -732,8 +732,7 @@ lemma switchToThread_fp_ccorres: apply (simp add: storeWordUser_def bind_assoc case_option_If2 split_def del: Collect_const) - apply (simp 
only: dmo_clearExMonitor_setCurThread_swap - dc_def[symmetric]) + apply (simp only: dmo_clearExMonitor_setCurThread_swap) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) @@ -985,10 +984,7 @@ lemma ccorres_call_hSkip: apply - apply (rule ccorres_call_hSkip') apply (erule ccorres_guard_imp) - apply simp - apply clarsimp - apply (simp_all add: ggl xfdc_def) - apply (clarsimp simp: igl) + apply (clarsimp simp: ggl igl xfdc_def)+ done lemma bind_case_sum_rethrow: @@ -1777,7 +1773,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_alternative2) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -1812,7 +1807,7 @@ proof - apply (rule ccorres_cond_true_seq) apply (rule ccorres_split_throws) apply (rule ccorres_call_hSkip) - apply (erule disjE; simp flip: dc_def; rule slowpath_ccorres) + apply (erule disjE; simp; rule slowpath_ccorres) apply simp apply simp apply (vcg exspec=slowpath_noreturn_spec) @@ -1827,7 +1822,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1864,7 +1858,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1887,7 +1880,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1945,29 +1937,25 @@ proof - apply (simp add: ctcb_relation_unat_tcbPriority_C word_less_nat_alt linorder_not_le) apply ceqv - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) - apply (simp add: if_1_0_0 ccap_relation_ep_helpers from_bool_0 word_le_not_less - del: Collect_const cong: call_ignore_cong) + apply (simp add: from_bool_eq_if from_bool_eq_if' from_bool_0 ccorres_IF_True del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp add: bindE_assoc del: Collect_const) apply (rule ccorres_Guard_Seq) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres) - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) + apply (simp add: from_bool_eq_if from_bool_eq_if' from_bool_0 ccorres_IF_True del: Collect_const) apply (clarsimp simp: to_bool_def) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0) + apply (clarsimp simp: from_bool_eq_if' word_le_not_less from_bool_0) apply (clarsimp simp: return_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) apply (rule_tac P=\ and P'="{s. 
ret__int_' s = 0}" in ccorres_from_vcg) apply (clarsimp simp: isHighestPrio_def' simpler_gets_def) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: from_bool_0) apply clarsimp apply vcg apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) @@ -1981,7 +1969,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (simp add: bindE_assoc from_bool_0 catch_throwError del: Collect_const) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2000,7 +1987,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2015,7 +2001,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2044,7 +2029,6 @@ proof - apply (rule ccorres_seq_cond_raise[THEN iffD2]) apply (rule_tac R=\ in ccorres_cond2', blast) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2100,7 +2084,7 @@ proof - ccorres_move_array_assertion_tcb_ctes ccorres_move_c_guard_tcb_ctes)+ apply csymbr - apply (simp add: cteInsert_def bind_assoc dc_def[symmetric] + apply (simp add: cteInsert_def bind_assoc del: Collect_const cong: call_ignore_cong) apply (rule ccorres_pre_getCTE2, rename_tac curThreadReplyCTE) apply (simp only: getThreadState_def) @@ -2223,7 +2207,6 @@ proof - apply csymbr apply csymbr apply (rule ccorres_call_hSkip) - apply (fold dc_def)[1] apply (rule fastpath_restore_ccorres) apply simp apply simp @@ -2503,7 +2486,7 @@ lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add[where tcb="\s. Ptr (tcb' s)" for tcb', simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)[where tcb="\s. 
Ptr (tcb' s)" for tcb', simplified]] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] @@ -2608,7 +2591,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_alternative2) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -2642,7 +2624,7 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_cond_true_seq) apply (rule ccorres_split_throws) apply (rule ccorres_call_hSkip) - apply (erule disjE; simp flip: dc_def; rule slowpath_ccorres) + apply (erule disjE; simp; rule slowpath_ccorres) apply simp apply simp apply (vcg exspec=slowpath_noreturn_spec) @@ -2657,7 +2639,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -2682,7 +2663,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_split_throws) apply simp - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2712,7 +2692,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp del: Collect_const not_None_eq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2746,7 +2725,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp cong: conj_cong) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2766,7 +2744,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp del: Collect_const not_None_eq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2788,7 +2765,6 @@ lemma fastpath_reply_recv_ccorres: apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2820,7 +2796,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_cond2'[where R=\], blast) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2835,7 +2810,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2866,7 +2840,6 @@ lemma fastpath_reply_recv_ccorres: apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2906,7 +2879,7 @@ lemma fastpath_reply_recv_ccorres: apply ceqv apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) - apply (rule fastpath_enqueue_ccorres[unfolded o_def,simplified]) + apply (rule fastpath_enqueue_ccorres[simplified]) apply simp apply ceqv apply (simp add: liftM_def del: Collect_const 
cong: call_ignore_cong) @@ -2995,7 +2968,6 @@ lemma fastpath_reply_recv_ccorres: apply csymbr apply csymbr apply (rule ccorres_call_hSkip) - apply (fold dc_def)[1] apply (rule fastpath_restore_ccorres) apply simp apply simp @@ -3020,7 +2992,7 @@ lemma fastpath_reply_recv_ccorres: apply (wp setCTE_cte_wp_at_other) apply (simp del: Collect_const) apply vcg - apply (simp add: o_def) + apply simp apply (wp | simp | wp (once) updateMDB_weak_cte_wp_at | wp (once) updateMDB_cte_wp_at_other)+ diff --git a/proof/crefine/ARM/Fastpath_Equiv.thy b/proof/crefine/ARM/Fastpath_Equiv.thy index 9de7239e4e..fcb1e1e8e7 100644 --- a/proof/crefine/ARM/Fastpath_Equiv.thy +++ b/proof/crefine/ARM/Fastpath_Equiv.thy @@ -963,7 +963,7 @@ lemma tcbSchedDequeue_rewrite_not_queued: apply (monadic_rewrite_l monadic_rewrite_if_l_False \wp threadGet_const\) apply (monadic_rewrite_symb_exec_l, rule monadic_rewrite_refl) apply wp+ - apply (clarsimp simp: o_def obj_at'_def) + apply clarsimp done lemma schedule_known_rewrite: @@ -1396,8 +1396,7 @@ lemma fastpath_callKernel_SysReplyRecv_corres: capFaultOnFailure_def rethrowFailure_injection injection_handler_catch bind_bindE_assoc getThreadCallerSlot_def bind_assoc - getSlotCap_def - case_bool_If o_def + getSlotCap_def case_bool_If isRight_def[where x="Inr v" for v] isRight_def[where x="Inl v" for v] cong: if_cong) @@ -1516,9 +1515,9 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply wp apply (rule monadic_rewrite_trans) apply (rule_tac rv=rab_ret - in monadic_rewrite_gets_known[where m="NonDetMonad.lift f" + in monadic_rewrite_gets_known[where m="Nondet_Monad.lift f" for f, folded bindE_def]) - apply (simp add: NonDetMonad.lift_def isRight_case_sum) + apply (simp add: Nondet_Monad.lift_def isRight_case_sum) apply monadic_rewrite_symb_exec_l apply (rename_tac ep_cap2) apply (rule_tac P="cteCap ep_cap2 = cteCap ep_cap" in monadic_rewrite_gen_asm) @@ -1560,8 +1559,8 @@ lemma fastpath_callKernel_SysReplyRecv_corres: setThreadState_no_sch_change setThreadState_obj_at_unchanged sts_st_tcb_at'_cases sts_bound_tcb_at' fastpathBestSwitchCandidate_lift[where f="setThreadState s t" for s t] - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift hoare_vcg_ex_lift | wps)+ apply (strengthen imp_consequent[where Q="tcb_at' t s" for t s]) @@ -1574,8 +1573,8 @@ lemma fastpath_callKernel_SysReplyRecv_corres: emptySlot_cnode_caps user_getreg_inv asUser_typ_ats asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift hoare_vcg_ex_lift fastpathBestSwitchCandidate_lift[where f="emptySlot a b" for a b] | simp del: comp_apply @@ -1586,8 +1585,8 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (clarsimp cong: conj_cong) apply ((wp user_getreg_inv asUser_typ_ats asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift hoare_vcg_ex_lift | clarsimp simp: obj_at'_weakenE[OF _ TrueI] | solves \ diff --git a/proof/crefine/ARM/Finalise_C.thy b/proof/crefine/ARM/Finalise_C.thy index 6e66d50ae7..b72f830bd7 100644 --- a/proof/crefine/ARM/Finalise_C.thy +++ 
b/proof/crefine/ARM/Finalise_C.thy @@ -200,8 +200,7 @@ proof (induct ts) apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. UNIV", simplified]) apply ceqv - apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def - dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) apply simp done @@ -210,7 +209,7 @@ next show ?case apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (simp del: Collect_const - add: dc_def[symmetric] mapM_x_Cons) + add: mapM_x_Cons) apply (rule ccorres_guard_imp2) apply (rule_tac xf'=thread_' in ccorres_abstract) apply ceqv @@ -278,10 +277,10 @@ lemma cancelAllIPC_ccorres: apply (cinit lift: epptr_') apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) apply (rule_tac xf'=ret__unsigned_' - and val="case rv of IdleEP \ scast EPState_Idle + and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" - and R="ko_at' rv epptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ep]) @@ -290,8 +289,8 @@ lemma cancelAllIPC_ccorres: apply (simp add: cendpoint_relation_def Let_def split: endpoint.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv epptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc apply (rename_tac list) apply (simp add: endpoint_state_defs @@ -324,7 +323,7 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -341,12 +340,10 @@ lemma cancelAllIPC_ccorres: apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: endpoint_state_defs - Collect_False Collect_True - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -374,7 +371,7 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -400,11 +397,6 @@ lemma cancelAllIPC_ccorres: apply clarsimp done -lemma empty_fail_getNotification: - "empty_fail (getNotification ep)" - unfolding getNotification_def - by (auto intro: empty_fail_getObject) - lemma cancelAllSignals_ccorres: "ccorres dc xfdc (invs') (UNIV \ {s. 
ntfnPtr_' s = Ptr ntfnptr}) [] @@ -412,10 +404,10 @@ lemma cancelAllSignals_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule_tac xf'=ret__unsigned_' - and val="case ntfnObj rv of IdleNtfn \ scast NtfnState_Idle + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" - and R="ko_at' rv ntfnptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ntfn]) @@ -424,18 +416,15 @@ lemma cancelAllSignals_ccorres: apply (simp add: cnotification_relation_def Let_def split: ntfn.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv ntfnptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric] Collect_True + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -443,8 +432,8 @@ lemma cancelAllSignals_ccorres: apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) - apply (rule_tac P="ko_at' rv ntfnptr and invs'" - in ccorres_from_vcg[where P'=UNIV]) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) @@ -461,7 +450,7 @@ lemma cancelAllSignals_ccorres: subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -654,8 +643,8 @@ lemma doUnbindNotification_ccorres: (Call doUnbindNotification_'proc)" apply (cinit' lift: ntfnPtr_' tcbptr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr" and P'=UNIV - in ccorres_split_nothrow_novcg) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: option_to_ptr_def option_to_0_def) @@ -674,7 +663,7 @@ lemma doUnbindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv", ((simp add: option_to_ctcb_ptr_def)+)[4]) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) subgoal by (simp add: carch_state_relation_def typ_heap_simps') subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: 
h_t_valid_clift_Some_iff) @@ -685,7 +674,7 @@ lemma doUnbindNotification_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -735,7 +724,7 @@ lemma doUnbindNotification_ccorres': apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -770,9 +759,9 @@ lemma unbindNotification_ccorres: apply simp apply wpc apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (rule ccorres_cond_true) - apply (ctac (no_vcg) add: doUnbindNotification_ccorres[unfolded dc_def, simplified]) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) apply (wp gbn_wp') apply vcg apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def @@ -789,13 +778,13 @@ lemma unbindMaybeNotification_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule ccorres_rhs_assoc2) - apply (rule_tac P="ntfnBoundTCB rv \ None \ - option_to_ctcb_ptr (ntfnBoundTCB rv) \ NULL" - in ccorres_gen_asm) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) apply (rule_tac xf'=boundTCB_' - and val="option_to_ctcb_ptr (ntfnBoundTCB rv)" - and R="ko_at' rv ntfnptr and valid_bound_tcb' (ntfnBoundTCB rv)" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1[OF cmap_relation_ntfn]) @@ -992,7 +981,6 @@ lemma invalidateASIDEntry_ccorres: apply (rule order_le_less_trans, rule word_and_le1) apply (simp add: mask_def) apply (rule ccorres_return_Skip) - apply (fold dc_def) apply (ctac add: invalidateASID_ccorres) apply wp apply (simp add: guard_is_UNIV_def) @@ -1025,8 +1013,7 @@ lemma deleteASIDPool_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) apply (rule ccorres_assert) - apply (clarsimp simp: liftM_def dc_def[symmetric] fun_upd_def[symmetric] - when_def + apply (clarsimp simp: liftM_def fun_upd_def[symmetric] when_def simp del: Collect_const) apply (rule ccorres_Guard)+ apply (rule ccorres_pre_gets_armKSASIDTable_ksArchState) @@ -1173,14 +1160,12 @@ lemma deleteASID_ccorres: apply ceqv apply csymbr apply wpc - apply (simp add: ccorres_cond_iffs dc_def[symmetric] - Collect_False + apply (simp add: ccorres_cond_iffs Collect_False del: Collect_const cong: call_ignore_cong) apply (rule ccorres_cond_false) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] when_def - Collect_True liftM_def + apply (simp add: when_def Collect_True liftM_def cong: conj_cong call_ignore_cong del: Collect_const) apply (rule ccorres_pre_getObject_asidpool) @@ -1361,7 +1346,7 @@ lemma unmapPageTable_ccorres: apply (ctac(no_vcg) add: pageTableMapped_ccorres) apply wpc apply (simp add: option_to_ptr_def option_to_0_def ccorres_cond_iffs) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (simp 
add: option_to_ptr_def option_to_0_def ccorres_cond_iffs) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -1371,7 +1356,6 @@ lemma unmapPageTable_ccorres: apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def pde_lift_pde_invalid) - apply (fold dc_def) apply csymbr apply (ctac add: cleanByVA_PoU_ccorres) apply (ctac(no_vcg) add:flushTable_ccorres) @@ -1417,7 +1401,7 @@ method return_NullCap_pair_ccorres = (rule allI, rule conseqPre, vcg), (clarsimp simp: return_def ccap_relation_NullCap_iff)\ lemma Arch_finaliseCap_ccorres: - notes dc_simp[simp del] Collect_const[simp del] + notes Collect_const[simp del] shows "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) @@ -1787,12 +1771,6 @@ lemma Arch_finaliseCap_ccorres: apply (frule cap_get_tag_isCap_unfolded_H_cap, simp) done -lemma ccte_relation_ccap_relation: - "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" - by (clarsimp simp: ccte_relation_def ccap_relation_def - cte_to_H_def map_option_Some_eq2 - c_valid_cte_def) - lemma isFinalCapability_ccorres: "ccorres ((=) \ from_bool) ret__unsigned_long_' (cte_wp_at' ((=) cte) slot and invs') @@ -1889,7 +1867,7 @@ lemma cteDeleteOne_ccorres: erule_tac t="ret__unsigned = scast cap_null_cap" and s="cteCap cte = NullCap" in ssubst) - apply (clarsimp simp only: when_def unless_def dc_def[symmetric]) + apply (clarsimp simp only: when_def unless_def) apply (rule ccorres_cond2[where R=\]) apply (clarsimp simp: Collect_const_mem) apply (rule ccorres_rhs_assoc)+ @@ -1900,12 +1878,12 @@ lemma cteDeleteOne_ccorres: apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" in ccorres_guard_imp2[where A'=UNIV]) - apply (simp add: split_def dc_def[symmetric] + apply (simp add: split_def del: Collect_const) apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) apply (rule ccorres_assert) - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (ctac add: emptySlot_ccorres) apply (simp add: pred_conj_def finaliseCapTrue_standin_simple_def) @@ -1941,7 +1919,7 @@ lemma deletingIRQHandler_ccorres: (UNIV \ {s. irq_opt_relation (Some irq) (irq_' s)}) [] (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) - apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def dc_def[symmetric] + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def cong: call_ignore_cong ) apply (rule_tac r'="\rv rv'. 
rv' = Ptr rv" and xf'="slot_'" in ccorres_split_nothrow) apply (rule ccorres_Guard_intStateIRQNode_array_Ptr) @@ -2182,18 +2160,18 @@ lemma finaliseCap_ccorres: apply (rule ccorres_fail) apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ceqv_refl) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) diff --git a/proof/crefine/ARM/Interrupt_C.thy b/proof/crefine/ARM/Interrupt_C.thy index 4a2fe6d3f4..c5b5d8d1e0 100644 --- a/proof/crefine/ARM/Interrupt_C.thy +++ b/proof/crefine/ARM/Interrupt_C.thy @@ -74,11 +74,11 @@ proof - apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"]) apply (rule ccorres_call) - apply (rule cteInsert_ccorres[simplified dc_def]) + apply (rule cteInsert_ccorres) apply (simp add: pred_conj_def)+ - apply (strengthen ntfn_badge_derived_enough_strg[unfolded o_def] + apply (strengthen ntfn_badge_derived_enough_strg invs_mdb_strengthen' valid_objs_invs'_strg) - apply (wp cteDeleteOne_other_cap[unfolded o_def])[1] + apply (wp cteDeleteOne_other_cap[unfolded o_def]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) @@ -108,7 +108,7 @@ lemma invokeIRQHandler_ClearIRQHandler_ccorres: apply simp apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) apply (rule ccorres_symb_exec_r) - apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def]) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) diff --git a/proof/crefine/ARM/Invoke_C.thy b/proof/crefine/ARM/Invoke_C.thy index 9dc5f384b9..73061fed5e 100644 --- a/proof/crefine/ARM/Invoke_C.thy +++ b/proof/crefine/ARM/Invoke_C.thy @@ -65,10 +65,10 @@ lemma setDomain_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_return_Skip) apply (simp add: when_def) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply simp apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) @@ -76,13 +76,16 @@ lemma setDomain_ccorres: apply simp apply wp apply (rule_tac Q="\_. all_invs_but_sch_extra and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s)" in hoare_strengthen_post) + and (\s. 
curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp threadSet_all_invs_but_sch_extra) - apply (clarsimp simp:valid_pspace_valid_objs' st_tcb_at_def[symmetric] - sch_act_simple_def st_tcb_at'_def o_def weak_sch_act_wf_def split:if_splits) + apply (clarsimp simp: valid_pspace_valid_objs' st_tcb_at_def[symmetric] + sch_act_simple_def st_tcb_at'_def weak_sch_act_wf_def + split: if_splits) apply (simp add: guard_is_UNIV_def) apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" in hoare_strengthen_post) + and (\s. curThread = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" + in hoare_strengthen_post) apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_not_queued tcbSchedDequeue_not_in_queue hoare_vcg_imp_lift hoare_vcg_all_lift) apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) @@ -388,7 +391,7 @@ lemma invokeCNodeRotate_ccorres: apply clarsimp apply (simp add: return_def) apply wp - apply (simp add: guard_is_UNIV_def dc_def xfdc_def) + apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp) apply (clarsimp simp:cte_wp_at_ctes_of) @@ -618,9 +621,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const cong: call_ignore_cong) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc | csymbr)+ - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] - if_1_0_0 dc_def[symmetric] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) apply (simp add:if_P del: Collect_const) @@ -703,8 +704,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: Collect_const[symmetric] del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] - if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: list_case_helper injection_handler_returnOk @@ -731,8 +731,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError whenE_def - dc_def[symmetric]) + apply (simp add: injection_handler_throwError whenE_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -808,8 +807,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: whenE_def injection_handler_returnOk - invocationCatch_def injection_handler_throwError - dc_def[symmetric]) + invocationCatch_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -888,7 +886,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: flip: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: if_not_P del: Collect_const) @@ -907,8 +905,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply 
(simp add: cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric] numeral_eqs) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1007,13 +1004,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_returnOk bindE_assoc - injection_bindE[OF refl refl] split_def - dc_def[symmetric]) + injection_bindE[OF refl refl] split_def) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc)+ apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) - apply (simp add: ccorres_invocationCatch_Inr performInvocation_def - dc_def[symmetric] bindE_assoc) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (ctac(no_vcg) add: invokeCNodeSaveCaller_ccorres) apply (rule ccorres_alternative2) @@ -1022,7 +1017,7 @@ lemma decodeCNodeInvocation_ccorres: apply (wp sts_valid_pspace_hangers)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_split_throws) apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg @@ -1052,8 +1047,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: unlessE_def whenE_def injection_handler_throwError - dc_def[symmetric] from_bool_0) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: unlessE_def whenE_def injection_handler_returnOk @@ -1097,12 +1091,10 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: throwError_def return_def exception_defs syscall_error_rel_def syscall_error_to_H_cases) apply clarsimp - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const) apply csymbr apply (simp add: interpret_excaps_test_null excaps_map_def - if_1_0_0 dc_def[symmetric] del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def) @@ -1162,8 +1154,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const) apply csymbr apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1225,8 +1216,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1234,8 +1224,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule 
ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1249,7 +1238,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply wp apply (vcg exspec=invokeCNodeRotate_modifies) - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) apply (simp add: Collect_const_mem) @@ -1313,16 +1302,16 @@ lemma decodeCNodeInvocation_ccorres: apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp @@ -1337,7 +1326,7 @@ lemma decodeCNodeInvocation_ccorres: apply vcg apply simp apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R - hoare_vcg_all_lift_R lsfco_cte_at' static_imp_wp + hoare_vcg_all_lift_R lsfco_cte_at' hoare_weak_lift_imp | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen cong: conj_cong | wp (once) hoare_drop_imps)+ @@ -1556,7 +1545,7 @@ lemma clearMemory_untyped_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply csymbr - apply (ctac add: cleanCacheRange_RAM_ccorres[unfolded dc_def]) + apply (ctac add: cleanCacheRange_RAM_ccorres) apply wp apply (simp add: guard_is_UNIV_def unat_of_nat word_bits_def capAligned_def word_of_nat_less) @@ -1761,8 +1750,7 @@ lemma resetUntypedCap_ccorres: apply (rule ccorres_Guard_Seq[where S=UNIV])? 
apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow) - apply (rule_tac idx="capFreeIndex (cteCap cte)" - in deleteObjects_ccorres[where p=slot, unfolded o_def]) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) apply ceqv apply clarsimp apply (simp only: ccorres_seq_cond_raise) @@ -2648,7 +2636,6 @@ lemma Arch_isFrameType_spec: apply (auto simp: object_type_from_H_def ) done - lemma decodeUntypedInvocation_ccorres_helper: notes TripleSuc[simp] untypedBits_defs[simp] notes valid_untyped_inv_wcap'.simps[simp del] tl_drop_1[simp] @@ -2826,8 +2813,8 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (ctac add: ccorres_injection_handler_csum1 [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def]) apply (simp add: injection_liftE[OF refl]) - apply (simp add: liftE_liftM o_def split_def withoutFailure_def - hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (simp add: liftE_liftM o_def split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) apply (rule ccorres_nohs) apply (rule ccorres_getSlotCap_cte_at) apply (rule ccorres_move_c_guard_cte) @@ -2939,8 +2926,8 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (simp add: ccorres_cond_iffs returnOk_def) apply (rule ccorres_return_Skip') apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ - apply (simp add: ccorres_cond_iffs inl_rrel_inl_rrel) - apply (rule ccorres_return_C_errorE_inl_rrel, simp+)[1] + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_C_errorE_inl_rrel; simp) apply wp apply (simp add: all_ex_eq_helper) apply (vcg exspec=ensureEmptySlot_modifies) @@ -3035,8 +3022,7 @@ lemma decodeUntypedInvocation_ccorres_helper: performInvocation_def liftE_bindE bind_assoc) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_trim_returnE, (simp (no_asm))+) - apply (simp (no_asm) add: o_def dc_def[symmetric] bindE_assoc - id_def[symmetric] bind_bindE_assoc) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) apply (rule ccorres_seq_skip'[THEN iffD1]) apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) apply (rule ccorres_alternative2) @@ -3081,7 +3067,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule conseqPre,vcg,clarsimp) apply vcg apply (rule ccorres_guard_imp[where Q =\ and Q' = UNIV,rotated], assumption+) - apply (simp add: o_def) + apply simp apply simp apply (rule checkFreeIndex_wp) apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap @@ -3146,7 +3132,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule validE_R_validE) apply (wp injection_wp_E[OF refl]) apply clarsimp - apply (simp add: ccHoarePost_def xfdc_def) + apply (simp add: ccHoarePost_def) apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) apply (rule_tac V=UNIV in HoarePartial.reannotateWhileNoGuard) apply (vcg exspec=ensureEmptySlot_modifies) @@ -3273,7 +3259,7 @@ shows apply (rule ccorres_guard_imp2) apply (rule monadic_rewrite_ccorres_assemble) apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer - in decodeUntypedInvocation_ccorres_helper[unfolded K_def]) + in decodeUntypedInvocation_ccorres_helper) apply assumption apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_bind_head) diff --git a/proof/crefine/ARM/IpcCancel_C.thy b/proof/crefine/ARM/IpcCancel_C.thy index 69c8b8dbd7..5a7a7e1216 100644 --- a/proof/crefine/ARM/IpcCancel_C.thy +++ b/proof/crefine/ARM/IpcCancel_C.thy @@ -912,7 +912,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 
2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1033,7 +1032,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -1273,7 +1272,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1542,7 +1540,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -1674,7 +1672,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1779,7 +1776,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -2129,11 +2126,6 @@ lemma getCurDomain_maxDom_ccorres_dom_': rf_sr_ksCurDomain) done -lemma rf_sr_cscheduler_action_relation: - "(s, s') \ rf_sr - \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" - by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - lemma threadGet_get_obj_at'_has_domain: "\ tcb_at' t \ threadGet tcbDomain t \\rv. obj_at' (\tcb. rv = tcbDomain tcb) t\" by (wp threadGet_obj_at') (simp add: obj_at'_def) @@ -2150,7 +2142,6 @@ lemma possibleSwitchTo_ccorres: (Call possibleSwitchTo_'proc)" supply if_split [split del] supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] @@ -2175,7 +2166,7 @@ lemma possibleSwitchTo_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule_tac R="\s. sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" in ccorres_cond) - apply (fastforce dest!: rf_sr_cscheduler_action_relation pred_tcb_at' tcb_at_not_NULL + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL simp: cscheduler_action_relation_def weak_sch_act_wf_def split: scheduler_action.splits) apply (ctac add: rescheduleRequired_ccorres) @@ -2505,7 +2496,7 @@ lemma cmap_relation_ep: by (simp add: Let_def) (* FIXME: MOVE *) -lemma ccorres_pre_getEndpoint [corres_pre]: +lemma ccorres_pre_getEndpoint [ccorres_pre]: assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" shows "ccorres r xf (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) @@ -2646,8 +2637,8 @@ lemma cpspace_relation_ep_update_an_ep: and pal: "pspace_aligned' s" "pspace_distinct' s" and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using cp koat pal rel unfolding cmap_relation_def apply - apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) @@ -2669,8 +2660,8 @@ lemma cpspace_relation_ep_update_ep: and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using invs apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) apply clarsimp+ @@ -2682,15 +2673,15 @@ lemma cpspace_relation_ep_update_ep': fixes ep :: "endpoint" and ep' :: "endpoint" and epptr :: "word32" and s :: "kernel_state" defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" - defines "s' \ s\ksPSpace := ksPSpace s(epptr \ KOEndpoint ep')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" assumes koat: "ko_at' ep epptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and srs: "sym_refs (state_refs_of' s')" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" proof - from koat have koat': "ko_at' ep' epptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -2763,7 +2754,7 @@ lemma cancelIPC_ccorres_helper: apply (rule allI) apply (rule conseqPre) apply vcg - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ep_blocked_in_queueD) apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) apply (elim conjE) @@ -2781,7 +2772,7 @@ lemma cancelIPC_ccorres_helper: apply assumption+ apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) - apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split simp del: comp_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) apply (frule null_ep_queue [simplified comp_def] null_ep_queue) apply (intro impI conjI allI) \ \empty case\ @@ -2922,7 +2913,6 @@ lemma cancelIPC_ccorres1: apply wpc \ \BlockedOnReceive\ apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs cong: call_ignore_cong) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -2951,7 +2941,6 @@ lemma cancelIPC_ccorres1: apply (simp add: "StrictC'_thread_state_defs" ccorres_cond_iffs Collect_False Collect_True word_sle_def cong: call_ignore_cong del: Collect_const) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr 
apply csymbr @@ -2991,14 +2980,12 @@ lemma cancelIPC_ccorres1: apply (rule ccorres_Cond_rhs) apply (simp add: nullPointer_def when_def) apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) - apply (simp only: dc_def[symmetric]) apply (rule ccorres_symb_exec_r) apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) apply (wp | simp)+ - apply (simp add: when_def nullPointer_def dc_def[symmetric]) apply (rule ccorres_return_Skip) apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs) @@ -3011,7 +2998,8 @@ lemma cancelIPC_ccorres1: apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def Kernel_C.tcbReply_def tcbCNodeEntries_def) \ \BlockedOnNotification\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong) apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg)) apply clarsimp @@ -3020,10 +3008,12 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Running, Inactive, and Idle\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip)+ \ \BlockedOnSend\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong) \ \clag\ apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -3049,7 +3039,8 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Restart\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip) \ \Post wp proofs\ apply vcg diff --git a/proof/crefine/ARM/Ipc_C.thy b/proof/crefine/ARM/Ipc_C.thy index 8a21a54e38..4ec3ea85ef 100644 --- a/proof/crefine/ARM/Ipc_C.thy +++ b/proof/crefine/ARM/Ipc_C.thy @@ -1428,61 +1428,62 @@ proof - apply ceqv apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) - apply (rule ccorres_rel_imp[where r = "\rv rv'. True", simplified]) - apply (rule_tac F="\_. obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_H.syscallMessage = msg) - sender and valid_pspace' - and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" - in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) - apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters - option_to_0_def liftM_def[symmetric] - split: option.split_asm) - apply (rule ccorres_guard_imp2) - apply (rule_tac t=sender and r="ARM_H.syscallMessage ! (n + unat n_msgRegisters)" - in ccorres_add_getRegister) - apply (ctac(no_vcg)) - apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" - in ccorres_cross_over_guard) - apply (rule ccorres_move_array_assertion_ipc_buffer - | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ - apply (simp add: storeWordUser_def) - apply (rule ccorres_pre_stateAssert) - apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) - apply (simp add: pred_conj_def) - apply (wp user_getreg_rv) - apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def - syscallMessage_ccorres msgRegisters_ccorres - unat_add_lem[THEN iffD1] unat_of_nat32 - word_bits_def word_size_def) - apply (simp only:field_simps imp_ex imp_conjL) - apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def - pointerInUserData_h_t_valid - atcbContextGet_def - projectKOs objBits_simps word_less_nat_alt - unat_add_lem[THEN iffD1] unat_of_nat) - apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def - MessageID_Syscall_def - msg_align_bits valid_ipc_buffer_ptr'_def) - apply (erule aligned_add_aligned) - apply (rule aligned_add_aligned[where n=2]) - apply (simp add: is_aligned_def) - apply (rule is_aligned_mult_triv2 [where n=2, simplified]) - apply (simp)+ - apply (simp add: n_msgRegisters_def) - apply (vcg exspec=getRegister_modifies) - apply simp - apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) - apply (rule hoare_pre) - apply (wp hoare_case_option_wp | wpc)+ - apply clarsimp - apply (simp add: n_msgRegisters_def word_bits_def) - apply (simp add: n_msgRegisters_def) + apply (rule ccorres_rel_imp) + apply (rule_tac F="\_. obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_H.syscallMessage = msg) + sender and valid_pspace' + and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" + in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) + apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters + option_to_0_def liftM_def[symmetric] + split: option.split_asm) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="ARM_H.syscallMessage ! (n + unat n_msgRegisters)" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" + in ccorres_cross_over_guard) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ + apply (simp add: storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) + apply (simp add: pred_conj_def) + apply (wp user_getreg_rv) + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + syscallMessage_ccorres msgRegisters_ccorres + unat_add_lem[THEN iffD1] unat_of_nat32 + word_bits_def word_size_def) + apply (simp only:field_simps imp_ex imp_conjL) + apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def + pointerInUserData_h_t_valid + atcbContextGet_def + projectKOs objBits_simps word_less_nat_alt + unat_add_lem[THEN iffD1] unat_of_nat) + apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def + MessageID_Syscall_def + msg_align_bits valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n=2]) + apply (simp add: is_aligned_def) + apply (rule is_aligned_mult_triv2 [where n=2, simplified]) + apply (simp)+ + apply (simp add: n_msgRegisters_def) + apply (vcg exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) + apply (rule hoare_pre) + apply (wp hoare_case_option_wp | wpc)+ + apply clarsimp + apply (simp add: n_msgRegisters_def word_bits_def) + apply (simp add: n_msgRegisters_def) + apply simp apply (frule (1) option_to_0_imp) apply (subst drop_zip) apply (subst drop_n) apply (clarsimp simp: n_msgRegisters_def numeral_eqs mapM_cong[OF msg_aux, simplified numeral_eqs]) apply (subst mapM_x_return_gen[where w2="()"]) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp) apply (rule hoare_impI) apply (rule mapM_x_wp_inv) @@ -1866,7 +1867,7 @@ lemma doFaultTransfer_ccorres [corres]: apply ceqv apply csymbr apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply wp apply (simp add: badgeRegister_def ARM.badgeRegister_def Kernel_C.badgeRegister_def "StrictC'_register_defs") @@ -1905,7 +1906,7 @@ lemma unifyFailure_ccorres: assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" shows "ccorres ((\_. dc) \ r) xf P P' hs (unifyFailure a) c" using corr_ac - apply (simp add: unifyFailure_def rethrowFailure_def const_def o_def + apply (simp add: unifyFailure_def rethrowFailure_def const_def handleE'_def throwError_def) apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def split: xstate.splits sum.splits) @@ -2890,10 +2891,11 @@ lemma ccorres_sequenceE_while': Basic (\s. i_'_update (\_. 
i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -2949,9 +2951,10 @@ proof - del: Collect_const) apply csymbr apply (rename_tac "lngth") - apply (simp add: mi_from_H_def mapME_def del: Collect_const cong: bind_apply_cong) + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="length rv = unat word2" in ccorres_gen_asm) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) apply csymbr apply (rule ccorres_rhs_assoc2) apply (rule ccorres_add_returnOk2, @@ -2961,7 +2964,7 @@ proof - and Q="UNIV" and F="\n s. valid_pspace' s \ tcb_at' thread s \ (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ - (\m < length rv. user_word_at (rv ! m) + (\m < length xs. user_word_at (xs ! m) (x2 + (of_nat m + (msgMaxLength + 2)) * 4) s)" in ccorres_sequenceE_while') apply (simp add: split_def) @@ -2971,7 +2974,7 @@ proof - apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) apply (ctac add: capFaultOnFailure_ccorres [OF lookupSlotForThread_ccorres']) - apply (rule_tac P="is_aligned rva 4" in ccorres_gen_asm) + apply (rule_tac P="is_aligned rv 4" in ccorres_gen_asm) apply (simp add: ccorres_cond_iffs liftE_bindE) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) apply (rule_tac P'="UNIV \ {s. excaps_map ys @@ -2992,7 +2995,7 @@ proof - apply (clarsimp simp: ccorres_cond_iffs) apply (rule_tac P= \ and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ - rv' = (rv ! length ys)}" + rv' = (xs ! length ys)}" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def) @@ -3033,7 +3036,7 @@ proof - apply ceqv apply (simp del: Collect_const) apply (rule_tac P'="{s. snd rv'=?curr s}" - and P="\s. length rva = length rv \ (\x \ set rva. snd x \ 0)" + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def @@ -3128,7 +3131,7 @@ proof - apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' canGrant_' badge_' endpoint_' cong: call_ignore_cong) - apply (clarsimp cong: call_ignore_cong simp del: dc_simp) + apply (clarsimp cong: call_ignore_cong) apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" and r'="\c c'. interpret_excaps c' = excaps_map c" @@ -3173,7 +3176,7 @@ proof - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def msgLengthBits_def word_bw_assocs) - apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] static_imp_wp + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp | simp)+ apply (simp add: Collect_const_mem) apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def @@ -3227,7 +3230,6 @@ lemma replyFromKernel_error_ccorres [corres]: apply ((rule ccorres_Guard_Seq)+)? 
apply csymbr apply (rule ccorres_abstract_cleanup) - apply (fold dc_def)[1] apply (rule setMessageInfo_ccorres) apply wp apply (simp add: Collect_const_mem) @@ -3296,12 +3298,10 @@ lemma doIPCTransfer_ccorres [corres]: apply simp_all[3] apply ceqv apply csymbr - apply (fold dc_def)[1] apply ctac apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero) - apply (fold dc_def)[1] apply ctac apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender @@ -3358,7 +3358,7 @@ proof - apply (rule ccorres_rhs_assoc2) apply (simp add: MessageID_Exception_def) apply ccorres_rewrite - apply (subst bind_return_unit) + apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_zipWithM_x_while) apply clarsimp @@ -3407,7 +3407,7 @@ proof - n_msgRegisters_def of_nat_less_iff) apply ccorres_rewrite - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (wp mapM_wp') apply clarsimp+ apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def @@ -3561,7 +3561,6 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (subst aligned_add_aligned, assumption) apply (rule is_aligned_mult_triv2[where n=2, simplified]) apply (simp add: msg_align_bits) - apply (simp add: of_nat_unat[simplified comp_def]) apply (simp only: n_msgRegisters_def) apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def word_unat.Rep_inverse[of "scast _ :: 'a word"] @@ -3599,8 +3598,8 @@ lemma copyMRsFaultReply_ccorres_syscall: apply simp apply (subst option.split[symmetric,where P=id, simplified]) apply (rule valid_drop_case) - apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified K_def] - lookupIPCBuffer_not_Some_0[simplified K_def]) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) apply (simp add: length_syscallMessage length_msgRegisters n_syscallMessage_def @@ -3612,7 +3611,7 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (rule ccorres_guard_imp) apply (rule ccorres_symb_exec_l) apply (case_tac rva; clarsimp) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (rule ccorres_return_Skip)+ apply (wp mapM_x_wp_inv user_getreg_inv' | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ apply (cases "4 < len") @@ -3703,7 +3702,7 @@ lemma handleFaultReply_ccorres [corres]: apply (unfold K_def, rule ccorres_gen_asm) apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) apply (cinit lift: sender_' receiver_' simp: whileAnno_def) - apply (clarsimp simp del: dc_simp) + apply clarsimp apply (ctac(c_lines 2) add: getMessageInfo_ccorres') apply (rename_tac tag tag') apply csymbr @@ -3749,7 +3748,7 @@ lemma handleFaultReply_ccorres [corres]: split del: if_split) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) - apply (fold bind_assoc id_def) + apply (fold bind_assoc) apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) apply (ctac add: ccorres_return_C) apply wp @@ -3844,7 +3843,7 @@ lemma cteDeleteOne_tcbFault: apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' cancelAllSignals_tcbFault unbindNotification_tcbFault isFinalCapability_inv unbindMaybeNotification_tcbFault - static_imp_wp + hoare_weak_lift_imp | wpc 
| simp add: Let_def)+ apply (clarsimp split: if_split) done @@ -3964,7 +3963,6 @@ proof - apply csymbr apply wpc apply (clarsimp simp: ccorres_cond_iffs split del: if_split) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg)) apply (rule ccorres_symb_exec_r) @@ -3988,7 +3986,6 @@ proof - fault_to_fault_tag_nonzero split del: if_split) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def)[1] apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) @@ -4018,10 +4015,9 @@ proof - apply (simp only: K_bind_def) apply (ctac add: possibleSwitchTo_ccorres) apply (wp sts_running_valid_queues setThreadState_st_tcb | simp)+ - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres_valid_queues'_simple) apply wp - apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' static_imp_wp + apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' hoare_weak_lift_imp threadSet_valid_objs' threadSet_weak_sch_act_wf | simp add: valid_tcb_state'_def)+)[1] apply (clarsimp simp: guard_is_UNIV_def ThreadState_Restart_def @@ -4090,8 +4086,7 @@ lemma setupCallerCap_ccorres [corres]: apply (frule_tac p=sender in is_aligned_tcb_ptr_to_ctcb_ptr) apply (cinit lift: sender_' receiver_' canGrant_') apply (clarsimp simp: word_sle_def - tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] - , fold dc_def)[1] + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]]) apply ccorres_remove_UNIV_guard apply (ctac(no_vcg)) apply (rule ccorres_move_array_assertion_tcb_ctes) @@ -4112,7 +4107,7 @@ lemma setupCallerCap_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg)) apply (rule ccorres_assert) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply csymbr apply (ctac add: cteInsert_ccorres) apply simp @@ -4167,7 +4162,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -4188,7 +4183,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -4541,7 +4536,7 @@ lemma sendIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -4557,12 +4552,12 @@ lemma sendIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac 
"ko_at' (SendEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -4738,12 +4733,9 @@ lemma sendIPC_ccorres [corres]: apply (clarsimp simp: disj_imp[symmetric] split del: if_split) apply (wpc ; clarsimp) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setupCallerCap_ccorres) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres) - apply (fold dc_def)[1] apply (rule ccorres_return_Skip) apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not possibleSwitchTo_sch_act_not sts_st_tcb' @@ -4940,7 +4932,7 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -4956,12 +4948,12 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5066,7 +5058,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -5087,7 +5079,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5236,7 +5228,7 @@ lemma completeSignal_ccorres: apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) apply ceqv - apply (fold dc_def, ctac(no_vcg)) + apply (ctac(no_vcg)) apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp) @@ -5350,7 +5342,7 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule ccorres_cond[where R=\]) apply (simp add: Collect_const_mem) - apply (ctac add: completeSignal_ccorres[unfolded dc_def]) + apply (ctac add: completeSignal_ccorres) apply (rule_tac xf'=ret__unsigned_' and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv @@ -5380,20 +5372,18 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule 
receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp apply (rename_tac list NOo) - apply (rule_tac ep="RecvEP list" - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (rename_tac list) apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \IdleEP case\ apply (rule ccorres_cond_true) apply csymbr @@ -5405,18 +5395,16 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp - apply (rule_tac ep=IdleEP - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \SendEP case\ apply (thin_tac "isBlockinga = from_bool P" for P) apply (rule ccorres_cond_false) @@ -5494,8 +5482,6 @@ lemma receiveIPC_ccorres [corres]: split: Structures_H.thread_state.splits) apply ceqv - apply (fold dc_def) - supply dc_simp[simp del] apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) apply wpc (* blocking ipc call *) @@ -5574,12 +5560,12 @@ lemma receiveIPC_ccorres [corres]: apply (clarsimp simp:st_tcb_at_refs_of_rev') apply (erule_tac x=x and P="\x. 
st_tcb_at' P x s" for P in ballE) apply (drule_tac t=x in valid_queues_not_runnable'_not_ksQ) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (subgoal_tac "sch_act_not x s") prefer 2 apply (frule invs_sch_act_wf') apply (clarsimp simp:sch_act_wf_def) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs isBlockedOnSend_def split: list.split | rule conjI)+ @@ -5607,11 +5593,10 @@ lemma sendSignal_dequeue_ccorres_helper: IF head_C \ntfn_queue = Ptr 0 THEN CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) FI)" - apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ntfn_blocked_in_queueD) apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) apply (elim conjE) @@ -5631,7 +5616,7 @@ lemma sendSignal_dequeue_ccorres_helper: apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5805,7 +5790,7 @@ lemma sendSignal_ccorres [corres]: apply wpc apply (simp add: option_to_ctcb_ptr_def split del: if_split) apply (rule ccorres_cond_false) - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (rule ccorres_cond_true) apply (rule getThreadState_ccorres_foo) apply (rule ccorres_Guard_Seq) @@ -5820,7 +5805,7 @@ lemma sendSignal_ccorres [corres]: apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: setRegister_ccorres) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: possibleSwitchTo_ccorres) apply (wp sts_running_valid_queues sts_st_tcb_at'_cases | simp add: option_to_ctcb_ptr_def split del: if_split)+ apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" @@ -5828,7 +5813,7 @@ lemma sendSignal_ccorres [corres]: apply auto[1] apply wp apply simp - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def ARM_H.badgeRegister_def Kernel_C.badgeRegister_def ARM.badgeRegister_def Kernel_C.R0_def @@ -5884,7 +5869,7 @@ lemma sendSignal_ccorres [corres]: apply ceqv apply (simp only: K_bind_def) apply (ctac (no_vcg)) - apply (simp, fold dc_def) + apply simp apply (ctac (no_vcg)) apply (ctac add: possibleSwitchTo_ccorres) apply (simp) @@ -5963,16 +5948,17 @@ lemma cpspace_relation_ntfn_update_ntfn': fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" and ntfnptr :: "word32" and s :: "kernel_state" defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" - defines "s' \ s\ksPSpace := ksPSpace s(ntfnptr \ KONotification ntfn')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" assumes koat: "ko_at' ntfn ntfnptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" and srs: "sym_refs (state_refs_of' s')" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr - (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" proof - from koat have koat': "ko_at' ntfn' ntfnptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -6034,7 +6020,7 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ntfn) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -6050,12 +6036,12 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (simp add: cnotification_relation_def Let_def) apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)) ntfnptr (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -6192,11 +6178,10 @@ lemma receiveSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp) apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule 
receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6207,7 +6192,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \ActiveNtfn case\ apply (rename_tac badge) apply (rule ccorres_cond_false) @@ -6263,8 +6248,7 @@ lemma receiveSignal_ccorres [corres]: apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule_tac ntfn="ntfn" - in receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6276,7 +6260,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def NtfnState_Waiting_def NtfnState_Idle_def) apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/ARM/IsolatedThreadAction.thy b/proof/crefine/ARM/IsolatedThreadAction.thy index 1ee5ead207..a4326bc4da 100644 --- a/proof/crefine/ARM/IsolatedThreadAction.thy +++ b/proof/crefine/ARM/IsolatedThreadAction.thy @@ -443,7 +443,7 @@ lemma modify_isolatable: liftM_def bind_assoc) apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) - apply (simp add: simpler_modify_def o_def) + apply (simp add: simpler_modify_def) apply (subst swap) apply (simp add: obj_at_partial_overwrite_If) apply (simp add: ksPSpace_update_partial_id o_def) @@ -915,7 +915,7 @@ lemma setThreadState_no_sch_change: "\\s. P (ksSchedulerAction s) \ (runnable' st \ t \ ksCurThread s)\ setThreadState st t \\rv s. P (ksSchedulerAction s)\" - (is "NonDetMonadVCG.valid ?P ?f ?Q") + (is "Nondet_VCG.valid ?P ?f ?Q") apply (simp add: setThreadState_def setSchedulerAction_def) apply (wp hoare_pre_cont[where f=rescheduleRequired]) apply (rule_tac Q="\_. 
?P and st_tcb_at' ((=) st) t" in hoare_post_imp) @@ -1094,8 +1094,7 @@ lemma setCTE_isolatable: apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) apply (simp add: select_f_returns select_f_asserts split: if_split) apply (intro conjI impI) - apply (clarsimp simp: simpler_modify_def fun_eq_iff - partial_overwrite_fun_upd2 o_def + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 intro!: kernel_state.fold_congs[OF refl refl]) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) diff --git a/proof/crefine/ARM/Machine_C.thy b/proof/crefine/ARM/Machine_C.thy index c6ea7f984f..edd276229a 100644 --- a/proof/crefine/ARM/Machine_C.thy +++ b/proof/crefine/ARM/Machine_C.thy @@ -413,13 +413,13 @@ lemma cleanCacheRange_PoC_ccorres: apply (clarsimp simp: cleanCacheRange_PoC_def word_sle_def whileAnno_def) apply (ccorres_remove_UNIV_guard) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_cleanByVA) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: cleanByVA_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) @@ -460,8 +460,8 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanInvalByVA_modifies) apply (rule ceqv_refl) - apply (ctac (no_vcg) add: dsb_ccorres[simplified dc_def]) - apply (wp | clarsimp simp: guard_is_UNIVI o_def)+ + apply (ctac (no_vcg) add: dsb_ccorres) + apply (wp | clarsimp simp: guard_is_UNIVI)+ apply (frule(1) ghost_assertion_size_logic) apply (clarsimp simp: o_def) done @@ -484,7 +484,7 @@ lemma cleanCacheRange_RAM_ccorres: in ccorres_cross_over_guard) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) - apply (ctac (no_vcg) add: cleanL2Range_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: cleanL2Range_ccorres) apply wp+ apply clarsimp apply (auto dest: ghost_assertion_size_logic simp: o_def) @@ -505,13 +505,13 @@ lemma cleanCacheRange_PoU_ccorres: apply (rule ccorres_basic_srnoop2, simp) apply (simp add: cleanCacheRange_PoU_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_cleanByVA_PoU) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) @@ -544,14 +544,14 @@ lemma invalidateCacheRange_RAM_ccorres: apply (rule ccorres_cond[where R=\]) apply (clarsimp simp: lineStart_def cacheLineBits_def) apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply ceqv apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) apply (clarsimp simp: lineStart_def cacheLineBits_def) apply csymbr apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) - apply (rule ccorres_return_Skip[unfolded dc_def]) 
+ apply (rule ccorres_return_Skip) apply ceqv apply (rule_tac P="\s. unat (w2 - w1) \ gsMaxObjectSize s" in ccorres_cross_over_guard) @@ -574,7 +574,7 @@ lemma invalidateCacheRange_RAM_ccorres: apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=invalidateByVA_modifies) apply ceqv - apply (ctac add: dsb_ccorres[unfolded dc_def]) + apply (ctac add: dsb_ccorres) apply wp apply (simp add: guard_is_UNIV_def) apply wp @@ -602,13 +602,13 @@ lemma invalidateCacheRange_I_ccorres: apply (ccorres_remove_UNIV_guard) apply (simp add: invalidateCacheRange_I_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_invalidateByVA_I) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: invalidateByVA_I_ccorres[unfolded dc_def]) + apply (ctac add: invalidateByVA_I_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) @@ -628,13 +628,13 @@ lemma branchFlushRange_ccorres: apply (ccorres_remove_UNIV_guard) apply (simp add: branchFlushRange_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_branchFlush) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: branchFlush_ccorres[unfolded dc_def]) + apply (ctac add: branchFlush_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 5" in sym, simp add: cache_range_lineIndex_helper) diff --git a/proof/crefine/ARM/PSpace_C.thy b/proof/crefine/ARM/PSpace_C.thy index b08ffdecc2..3cb5beee00 100644 --- a/proof/crefine/ARM/PSpace_C.thy +++ b/proof/crefine/ARM/PSpace_C.thy @@ -49,7 +49,7 @@ lemma setObject_ccorres_helper: fixes ko :: "'a :: pspace_storable" assumes valid: "\\ (ko' :: 'a). \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} - c {s. (\\ksPSpace := ksPSpace \ (p \ injectKO ko)\, s) \ rf_sr}" + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" shows "\ \ko :: 'a. updateObject ko = updateObject_default ko; \ko :: 'a. (1 :: word32) < 2 ^ objBits ko \ \ ccorres dc xfdc P P' hs (setObject p ko) c" diff --git a/proof/crefine/ARM/Recycle_C.thy b/proof/crefine/ARM/Recycle_C.thy index 44cae716b7..b1e5e98d21 100644 --- a/proof/crefine/ARM/Recycle_C.thy +++ b/proof/crefine/ARM/Recycle_C.thy @@ -230,7 +230,7 @@ lemma mapM_x_store_memset_ccorres_assist: "\ko :: 'a. (1 :: word32) < 2 ^ objBits ko" assumes restr: "set slots \ S" assumes worker: "\ptr s s' (ko :: 'a). 
\ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ - \ (s \ ksPSpace := ksPSpace s (ptr \ injectKO val)\, + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" @@ -304,8 +304,8 @@ lemma invalidateTLBByASID_ccorres: apply (simp add: case_option_If2 del: Collect_const) apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) apply (clarsimp simp: pde_stored_asid_def to_bool_def split: if_split) - apply (rule ccorres_return_void_C[unfolded dc_def]) - apply (simp add: dc_def[symmetric]) + apply (rule ccorres_return_void_C) + apply simp apply csymbr apply (ctac add: invalidateTranslationASID_ccorres) apply vcg @@ -484,8 +484,8 @@ lemma cpspace_relation_ep_update_ep2: (cslift t) ep_Ptr (cendpoint_relation (cslift t)); cendpoint_relation (cslift t') ep' endpoint; (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ - \ cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(ep_Ptr epptr \ endpoint)) + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) ep_Ptr (cendpoint_relation (cslift t'))" apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, @@ -583,8 +583,8 @@ lemma cancelBadgedSends_ccorres: cong: list.case_cong Structures_H.endpoint.case_cong call_ignore_cong del: Collect_const) apply (rule ccorres_pre_getEndpoint) - apply (rule_tac R="ko_at' rv ptr" and xf'="ret__unsigned_'" - and val="case rv of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle | SendEP q \ scast EPState_Send" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg @@ -594,22 +594,22 @@ lemma cancelBadgedSends_ccorres: split: Structures_H.endpoint.split_asm) apply ceqv apply wpc - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) apply (simp add: Collect_True Collect_False endpoint_state_defs - ccorres_cond_iffs dc_def[symmetric] + ccorres_cond_iffs del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (drule_tac s = rv in sym, simp only:) - apply (rule_tac P="ko_at' rv ptr and invs'" in ccorres_cross_over_guard) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) - apply (rule_tac P="ko_at' rv ptr" + apply (rule_tac P="ko_at' ep ptr" in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -684,7 +684,7 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: tcb_queue_relation'_def EPState_Send_def mask_def) subgoal by (auto split: if_split) subgoal by simp - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) apply (fastforce simp: valid_ep'_def pred_tcb_at' 
split: list.splits) @@ -694,7 +694,7 @@ lemma cancelBadgedSends_ccorres: apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) apply (rule ccorres_init_tmp_lift2, ceqv) apply (rule ccorres_guard_imp2) - apply (simp add: bind_assoc dc_def[symmetric] + apply (simp add: bind_assoc del: Collect_const) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -719,9 +719,9 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: rf_sr_def) apply simp apply ceqv - apply (rule_tac P="ret__unsigned=blockingIPCBadge rva" in ccorres_gen_asm2) + apply (rule_tac P="ret__unsigned=blockingIPCBadge rv" in ccorres_gen_asm2) apply (rule ccorres_if_bind, rule ccorres_if_lhs) - apply (simp add: bind_assoc dc_def[symmetric]) + apply (simp add: bind_assoc) apply (rule ccorres_rhs_assoc)+ apply (ctac add: setThreadState_ccorres) apply (ctac add: tcbSchedEnqueue_ccorres) @@ -772,8 +772,8 @@ lemma cancelBadgedSends_ccorres: apply (drule_tac x=p in spec) subgoal by fastforce apply (rule conjI) - apply (erule cready_queues_relation_not_queue_ptrs, - auto dest: null_ep_schedD[unfolded o_def] simp: o_def)[1] + apply (erule cready_queues_relation_not_queue_ptrs; + fastforce dest: null_ep_schedD[unfolded o_def] simp: o_def) apply (simp add: carch_state_relation_def cmachine_state_relation_def h_t_valid_clift_Some_iff) @@ -791,9 +791,9 @@ lemma cancelBadgedSends_ccorres: apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases sts_sch_act sts_valid_queues setThreadState_oa_queued) apply (vcg exspec=setThreadState_cslift_spec) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_symb_exec_r2) - apply (drule_tac x="x @ [a]" in spec, simp add: dc_def[symmetric]) + apply (drule_tac x="x @ [a]" in spec, simp) apply vcg apply (vcg spec=modifies) apply (thin_tac "\x. 
P x" for P) diff --git a/proof/crefine/ARM/Refine_C.thy b/proof/crefine/ARM/Refine_C.thy index d80b220e85..022e74912b 100644 --- a/proof/crefine/ARM/Refine_C.thy +++ b/proof/crefine/ARM/Refine_C.thy @@ -479,7 +479,7 @@ lemma ccorres_corres_u_xf: apply (drule (1) bspec) apply (clarsimp simp: exec_C_def no_fail_def) apply (drule_tac x = a in spec) - apply (clarsimp simp:gets_def NonDetMonad.bind_def get_def return_def) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) apply (rule conjI) apply clarsimp apply (erule_tac x=0 in allE) @@ -633,9 +633,9 @@ lemma callKernel_withFastpath_corres_C: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_r)+ apply (rule ccorres_Cond_rhs) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) apply vcg apply (rule conseqPre, vcg, clarsimp) @@ -663,7 +663,7 @@ lemma threadSet_all_invs_triv': apply (simp add: tcb_cte_cases_def) apply (simp add: exst_same_def) apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched - threadSet_invs_trivial threadSet_ct_running' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp thread_set_ct_in_state | simp add: tcb_cap_cases_def tcb_arch_ref_def | rule threadSet_ct_in_state' @@ -713,12 +713,12 @@ lemma entry_corres_C: apply (rule setTCBContext_C_corres, rule ccontext_rel_to_C, simp) apply simp apply (rule corres_split) - apply (rule corres_cases[where R=fp], simp_all add: dc_def[symmetric])[1] - apply (rule callKernel_withFastpath_corres_C, simp) - apply (rule callKernel_corres_C[unfolded dc_def], simp) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) + apply (rule callKernel_corres_C) apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. 
t' = tcb_ptr_to_ctcb_ptr t"]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (rule getContext_corres[unfolded o_def], simp) + apply (rule getContext_corres, simp) apply (wp threadSet_all_invs_triv' callKernel_cur)+ apply (clarsimp simp: all_invs'_def invs'_def cur_tcb'_def valid_state'_def) apply simp @@ -820,7 +820,7 @@ lemma user_memory_update_corres_C: prefer 2 apply (clarsimp simp add: doMachineOp_def user_memory_update_def simpler_modify_def simpler_gets_def select_f_def - NonDetMonad.bind_def return_def) + Nondet_Monad.bind_def return_def) apply (thin_tac P for P)+ apply (case_tac a, clarsimp) apply (case_tac ksMachineStatea, clarsimp) @@ -847,7 +847,7 @@ lemma device_update_corres_C: apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) apply (rule ballI) apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) - apply (clarsimp simp: doMachineOp_def device_memory_update_def NonDetMonad.bind_def in_monad + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad gets_def get_def return_def simpler_modify_def select_f_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) @@ -915,7 +915,7 @@ lemma do_user_op_corres_C: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) apply (drule(1) device_mem_C_relation[symmetric]) - apply (simp add: comp_def) + apply simp apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: cstate_relation_def rf_sr_def Let_def cmachine_state_relation_def) @@ -935,7 +935,7 @@ lemma do_user_op_corres_C: apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (intro conjI allI ballI impI) apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] apply (clarsimp simp: ex_abs_def restrict_map_def diff --git a/proof/crefine/ARM/Retype_C.thy b/proof/crefine/ARM/Retype_C.thy index 811848b7db..baaeaa0efb 100644 --- a/proof/crefine/ARM/Retype_C.thy +++ b/proof/crefine/ARM/Retype_C.thy @@ -816,7 +816,7 @@ lemma ptr_add_to_new_cap_addrs: shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. k < n} = Ptr ` set (new_cap_addrs n ptr ko)" unfolding new_cap_addrs_def - apply (simp add: comp_def image_image shiftl_t2n size_of_m field_simps) + apply (simp add: image_image shiftl_t2n size_of_m field_simps) apply (clarsimp simp: atLeastLessThan_def lessThan_def) done @@ -2391,6 +2391,9 @@ lemma ccorres_fail: apply (simp add: fail_def) done +(* always unfold StrictC'_mode_object_defs together with api_object_defs *) +lemmas api_object_defs = api_object_defs StrictC'_mode_object_defs + lemma object_type_from_H_toAPIType_simps: "(object_type_from_H tp = scast seL4_UntypedObject) = (toAPIType tp = Some ArchTypes_H.apiobject_type.Untyped)" "(object_type_from_H tp = scast seL4_TCBObject) = (toAPIType tp = Some ArchTypes_H.apiobject_type.TCBObject)" @@ -3638,17 +3641,17 @@ lemma copyGlobalMappings_ccorres: apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply csymbr apply (rule ccorres_rel_imp) - apply (rule_tac F="\_ s. rv = armKSGlobalPD (ksArchState s) - \ is_aligned rv pdBits \ valid_pde_mappings' s + apply (rule_tac F="\_ s. 
globalPD = armKSGlobalPD (ksArchState s) + \ is_aligned globalPD pdBits \ valid_pde_mappings' s \ page_directory_at' pd s \ page_directory_at' (armKSGlobalPD (ksArchState s)) s" - and i="0xE00" - in ccorres_mapM_x_while') + and i="0xE00" + in ccorres_mapM_x_while') apply (clarsimp simp del: Collect_const) apply (rule ccorres_guard_imp2) apply (rule ccorres_pre_getObject_pde) apply (simp add: storePDE_def del: Collect_const) - apply (rule_tac P="\s. ko_at' rva (armKSGlobalPD (ksArchState s) + apply (rule_tac P="\s. ko_at' rv (armKSGlobalPD (ksArchState s) + ((0xE00 + of_nat n) << 2)) s \ page_directory_at' pd s \ valid_pde_mappings' s \ page_directory_at' (armKSGlobalPD (ksArchState s)) s" @@ -3663,7 +3666,7 @@ lemma copyGlobalMappings_ccorres: apply (rule cmap_relationE1[OF rf_sr_cpde_relation], assumption, erule_tac ko=ko' in ko_at_projectKO_opt) apply (rule cmap_relationE1[OF rf_sr_cpde_relation], - assumption, erule_tac ko=rva in ko_at_projectKO_opt) + assumption, erule_tac ko=rv in ko_at_projectKO_opt) apply (clarsimp simp: typ_heap_simps') apply (drule(1) page_directory_at_rf_sr)+ apply clarsimp @@ -3859,12 +3862,10 @@ lemma ccorres_placeNewObject_endpoint: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -4776,7 +4777,7 @@ lemma gsCNodes_update_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -4947,7 +4948,7 @@ proof - apply (simp add: obj_at'_real_def) apply (wp placeNewObject_ko_wp_at') apply vcg - apply (clarsimp simp: dc_def) + apply clarsimp apply vcg apply (clarsimp simp: CPSR_def) apply (rule conseqPre, vcg, clarsimp) @@ -6775,7 +6776,7 @@ shows "ccorres dc xfdc apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_add_return) - apply (simp only: dc_def[symmetric] hrs_htd_update) + apply (simp only: hrs_htd_update) apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? apply (rule ccorres_split_nothrow, rule_tac S="{ptr .. 
ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" @@ -6946,9 +6947,9 @@ shows "ccorres dc xfdc including no_pre apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' - insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at static_imp_wp) + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) apply (wp insertNewCap_cte_wp_at_other) - apply (wp hoare_vcg_all_lift static_imp_wp insertNewCap_cte_at) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) apply (clarsimp simp:conj_comms | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_distinct')+ @@ -6982,7 +6983,7 @@ shows "ccorres dc xfdc hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to - createObject_no_inter[where sz = sz] hoare_vcg_all_lift static_imp_wp)+ + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) @@ -7129,9 +7130,9 @@ shows "ccorres dc xfdc apply (frule(1) range_cover_gsMaxObjectSize, fastforce, assumption) apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) ghost_assertion_size_logic)+ - apply (simp add: o_def) - apply (case_tac newType,simp_all add:object_type_from_H_def Kernel_C_defs - nAPIObjects_def APIType_capBits_def o_def split:apiobject_type.splits)[1] + apply (case_tac newType, + simp_all add: object_type_from_H_def Kernel_C_defs nAPIObjects_def APIType_capBits_def o_def + split: apiobject_type.splits)[1] subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) subgoal by (clarsimp simp: objBits_simps', unat_arith) apply (fold_subgoals (prefix))[3] diff --git a/proof/crefine/ARM/SR_lemmas_C.thy b/proof/crefine/ARM/SR_lemmas_C.thy index 0460479902..f37cc9137c 100644 --- a/proof/crefine/ARM/SR_lemmas_C.thy +++ b/proof/crefine/ARM/SR_lemmas_C.thy @@ -309,15 +309,15 @@ lemma tcb_cte_cases_proj_eq [simp]: by (auto split: if_split_asm) lemma map_to_ctes_upd_tcb': - "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; - ps_clear p tcbBlockSizeBits s |] -==> map_to_ctes (ksPSpace s(p |-> KOTCB tcb)) = - (%x. if EX getF setF. + "\ ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; + ps_clear p tcbBlockSizeBits s \ + \ map_to_ctes ((ksPSpace s)(p \ KOTCB tcb)) = + (\x. if EX getF setF. tcb_cte_cases (x - p) = Some (getF, setF) & - getF tcb ~= getF tcb' - then case tcb_cte_cases (x - p) of - Some (getF, setF) => Some (getF tcb) - else ctes_of s x)" + getF tcb \ getF tcb' + then case tcb_cte_cases (x - p) of + Some (getF, setF) \ Some (getF tcb) + else ctes_of s x)" apply (erule (1) map_to_ctes_upd_tcb) apply (simp add: field_simps ps_clear_def3 mask_def objBits_defs) done @@ -431,18 +431,19 @@ qed lemma fst_setCTE: assumes ct: "cte_at' dest s" and rl: "\s'. 
\ ((), s') \ fst (setCTE dest cte s); - (s' = s \ ksPSpace := ksPSpace s' \); - (ctes_of s' = ctes_of s(dest \ cte)); - (map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')); - (map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')); - (map_to_pdes (ksPSpace s) = map_to_pdes (ksPSpace s')); - (map_to_ptes (ksPSpace s) = map_to_ptes (ksPSpace s')); - (map_to_asidpools (ksPSpace s) = map_to_asidpools (ksPSpace s')); - (map_to_user_data (ksPSpace s) = map_to_user_data (ksPSpace s')); - (map_to_user_data_device (ksPSpace s) = map_to_user_data_device (ksPSpace s')); - (map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s) - = map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s')); - \T p. typ_at' T p s = typ_at' T p s'\ \ P" + s' = s \ ksPSpace := ksPSpace s' \; + ctes_of s' = (ctes_of s)(dest \ cte); + map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s'); + map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s'); + map_to_pdes (ksPSpace s) = map_to_pdes (ksPSpace s'); + map_to_ptes (ksPSpace s) = map_to_ptes (ksPSpace s'); + map_to_asidpools (ksPSpace s) = map_to_asidpools (ksPSpace s'); + map_to_user_data (ksPSpace s) = map_to_user_data (ksPSpace s'); + map_to_user_data_device (ksPSpace s) = map_to_user_data_device (ksPSpace s'); + map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s) + = map_option tcb_no_ctes_proj \ map_to_tcbs (ksPSpace s'); + \T p. typ_at' T p s = typ_at' T p s'\ + \ P" shows "P" proof - from fst_setCTE0 [where cte = cte, OF ct] @@ -458,7 +459,7 @@ proof - by clarsimp note thms = this - have ceq: "ctes_of s' = ctes_of s(dest \ cte)" + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp show ?thesis @@ -636,7 +637,6 @@ proof (rule cor_map_relI [OF map_option_eq_dom_eq]) hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om apply - - apply (simp add: o_def) apply (drule fun_cong [where x = x]) apply simp done @@ -1407,7 +1407,7 @@ lemma ntfnQueue_tail_mask_4 [simp]: lemma map_to_ctes_upd_tcb_no_ctes: "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x \ - \ map_to_ctes (ksPSpace s(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" apply (erule obj_atE') apply (simp add: projectKOs objBits_simps) apply (subst map_to_ctes_upd_tcb') @@ -1421,14 +1421,14 @@ lemma map_to_ctes_upd_tcb_no_ctes: lemma update_ntfn_map_tos: fixes P :: "Structures_H.notification \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KONotification ko)) = map_to_eps (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1436,14 +1436,14 @@ lemma update_ntfn_map_tos: lemma update_ep_map_tos: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" using at by 
(auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1451,13 +1451,13 @@ lemma update_ep_map_tos: lemma update_tcb_map_tos: fixes P :: "tcb \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" - and "map_to_ntfns (ksPSpace s(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1465,14 +1465,14 @@ lemma update_tcb_map_tos: lemma update_asidpool_map_tos: fixes P :: "asidpool \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI @@ -1481,26 +1481,26 @@ lemma update_asidpool_map_tos: arch_kernel_object.split_asm) lemma update_asidpool_map_to_asidpools: - "map_to_asidpools (ksPSpace s(p \ KOArch (KOASIDPool ap))) + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = (map_to_asidpools 
(ksPSpace s))(p \ ap)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_to_ptes: - "map_to_ptes (ksPSpace s(p \ KOArch (KOPTE pte))) + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) = (map_to_ptes (ksPSpace s))(p \ pte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_tos: fixes P :: "pte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1508,21 +1508,21 @@ lemma update_pte_map_tos: auto simp: projectKO_opts_defs) lemma update_pde_map_to_pdes: - "map_to_pdes (ksPSpace s(p \ KOArch (KOPDE pde))) + "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPDE pde))) = (map_to_pdes (ksPSpace s))(p \ pde)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pde_map_tos: fixes P :: "pde \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPDE 
pde)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split diff --git a/proof/crefine/ARM/Schedule_C.thy b/proof/crefine/ARM/Schedule_C.thy index 1ebcdad08c..b7c80e2f54 100644 --- a/proof/crefine/ARM/Schedule_C.thy +++ b/proof/crefine/ARM/Schedule_C.thy @@ -58,7 +58,6 @@ lemma Arch_switchToThread_ccorres: apply (ctac (no_vcg) add: setVMRoot_ccorres) apply (simp (no_asm) del: Collect_const) apply (rule_tac A'=UNIV in ccorres_guard_imp2) - apply (fold dc_def)[1] apply (ctac add: clearExMonitor_ccorres) apply clarsimp apply wp @@ -167,14 +166,14 @@ lemma ceqv_remove_tail_Guard_Skip: done lemma switchToThread_ccorres': - "ccorres (\_ _. True) xfdc + "ccorres dc xfdc (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) hs (switchToThread t) (Call switchToThread_'proc)" apply (rule ccorres_guard_imp2) - apply (ctac (no_vcg) add: switchToThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: switchToThread_ccorres) apply auto done @@ -266,14 +265,14 @@ proof - apply (intro conjI impI) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) apply (fastforce dest: lookupBitmapPriority_obj_at' - simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + simp: pred_conj_def obj_at'_def st_tcb_at'_def) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (clarsimp simp: pred_conj_def obj_at'_def st_tcb_at'_def) apply (clarsimp simp: not_less le_maxDomain_eq_less_numDomains) apply (prop_tac "ksCurDomain s = 0") using unsigned_eq_0_iff apply force apply (cut_tac s=s in lookupBitmapPriority_obj_at'; simp?) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (clarsimp simp: pred_conj_def obj_at'_def st_tcb_at'_def) done qed @@ -349,7 +348,6 @@ lemma isHighestPrio_ccorres: (isHighestPrio d p) (Call isHighestPrio_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -391,7 +389,6 @@ lemma isHighestPrio_ccorres: lemma schedule_ccorres: "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -405,7 +402,7 @@ lemma schedule_ccorres: apply (rule ccorres_cond_false_seq) apply simp apply (rule_tac P=\ and P'="{s. 
ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) - apply (clarsimp simp: dc_def return_def split: prod.splits) + apply (clarsimp simp: return_def split: prod.splits) apply (rule conseqPre, vcg, clarsimp) (* toplevel case: action is choose new thread *) apply (rule ccorres_cond_true_seq) @@ -422,7 +419,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_true_seq) (* isolate haskell part before setting thread action *) apply (simp add: scheduleChooseNewThread_def) @@ -450,7 +447,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_false_seq) apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) @@ -470,7 +467,7 @@ lemma schedule_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) - apply (clarsimp simp: scheduleSwitchThreadFastfail_def dc_simp) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) apply (rule ccorres_cond_seq2[THEN iffD1]) apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" and R="\s. it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV @@ -507,18 +504,17 @@ lemma schedule_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres, clarsimp) - apply (clarsimp simp: to_bool_def) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_return) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: to_bool_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def - NonDetMonad.bind_def + Nondet_Monad.bind_def split: prod.split) apply ceqv apply (clarsimp simp: to_bool_def) @@ -611,13 +607,12 @@ lemma schedule_ccorres: apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') apply (fastforce dest: invs_sch_act_wf') - apply (wp | clarsimp simp: dc_def)+ + apply wpsimp+ apply (vcg exspec=tcbSchedEnqueue_modifies) apply wp apply vcg - apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs' - dc_def)+ + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs') apply (frule invs_sch_act_wf') apply (frule tcb_at_invs') apply (rule conjI) @@ -635,7 +630,7 @@ lemma schedule_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -688,7 +683,7 @@ lemma timerTick_ccorres: apply (ctac add: get_tsType_ccorres2 [where f="\s. 
ksCurThread_' (globals s)"]) apply (rule ccorres_split_nothrow_novcg) apply wpc - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip)+ (* thread_state.Running *) apply simp apply (rule ccorres_cond_true) @@ -710,17 +705,17 @@ lemma timerTick_ccorres: apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) apply (clarsimp simp: cur_tcb'_def) apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) - apply (ctac add: threadSet_timeSlice_ccorres[unfolded dc_def]) + apply (ctac add: threadSet_timeSlice_ccorres) apply (rule ccorres_rhs_assoc)+ apply (ctac) apply simp apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear threadSet_valid_queues threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ apply (vcg exspec=tcbSchedDequeue_modifies) - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip)+ apply ceqv apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) apply (rule ccorres_when[where R=\]) @@ -732,7 +727,6 @@ lemma timerTick_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply ceqv - apply (fold dc_def) apply (rule ccorres_pre_getDomainTime) apply (rename_tac rva rv'a rvb) apply (rule_tac P'="{s. ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp) @@ -740,13 +734,13 @@ lemma timerTick_ccorres: apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_true) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply clarsimp apply assumption apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/ARM/SyscallArgs_C.thy b/proof/crefine/ARM/SyscallArgs_C.thy index 7f645270cd..ac977cc863 100644 --- a/proof/crefine/ARM/SyscallArgs_C.thy +++ b/proof/crefine/ARM/SyscallArgs_C.thy @@ -47,7 +47,7 @@ lemma replyOnRestart_invs'[wp]: "\invs'\ replyOnRestart thread reply isCall \\rv. invs'\" including no_pre apply (simp add: replyOnRestart_def) - apply (wp setThreadState_nonqueued_state_update rfk_invs' static_imp_wp) + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) apply (rule hoare_vcg_all_lift) apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_vcg_all_lift rfk_ksQ) apply (rule hoare_strengthen_post, rule gts_sp') @@ -289,7 +289,7 @@ lemma ccorres_invocationCatch_Inr: if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () else liftE (replyOnRestart thread reply isCall) odE od) c" - apply (simp add: invocationCatch_def liftE_bindE o_xo_injector) + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) apply (subst ccorres_liftM_simp[symmetric]) apply (simp add: liftM_def bind_assoc bindE_def) apply (rule_tac f="\f. 
ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) @@ -631,7 +631,7 @@ lemma getMRs_tcbContext: apply (wp|wpc)+ apply (rule_tac P="n < length x" in hoare_gen_asm) apply (clarsimp simp: nth_append) - apply (wp mapM_wp' static_imp_wp)+ + apply (wp mapM_wp' hoare_weak_lift_imp)+ apply simp apply (rule asUser_cur_obj_at') apply (simp add: getRegister_def msgRegisters_unfold) @@ -755,11 +755,13 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_move_c_guard_tcb_ctes)+ apply (ctac (no_vcg)) + apply (rename_tac bufferCap bufferCap') apply csymbr - apply (rule_tac b="isArchObjectCap rva \ isPageCap (capCap rva)" in ccorres_case_bools') + apply (rule_tac b="isArchObjectCap bufferCap \ isPageCap (capCap bufferCap)" + in ccorres_case_bools') apply simp apply (rule ccorres_symb_exec_r) - apply (rule_tac b="capVPSize (capCap rva) \ ARMSmallPage" in ccorres_case_bools') + apply (rule_tac b="capVPSize (capCap bufferCap) \ ARMSmallPage" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -767,7 +769,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap bufferCap" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -821,7 +823,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap bufferCap" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -1049,7 +1051,7 @@ lemma getMRs_rel: getMRs thread buffer mi \\args. getMRs_rel args buffer\" apply (simp add: getMRs_rel_def) apply (rule hoare_pre) - apply (rule_tac x=mi in hoare_vcg_exI) + apply (rule_tac x=mi in hoare_exI) apply wp apply (rule_tac Q="\rv s. 
thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) apply (wp det_result det_wp_getMRs) diff --git a/proof/crefine/ARM/Syscall_C.thy b/proof/crefine/ARM/Syscall_C.thy index 4b7e65a54c..cdd7f8c900 100644 --- a/proof/crefine/ARM/Syscall_C.thy +++ b/proof/crefine/ARM/Syscall_C.thy @@ -264,22 +264,22 @@ lemma decodeInvocation_ccorres: apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) apply assumption apply (simp+)[3] apply (rule ccorres_Cond_rhs) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeDomainInvocation_ccorres[unfolded o_def], + erule decodeDomainInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeCNodeInvocation_ccorres[unfolded o_def], + erule decodeCNodeInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply simp @@ -695,7 +695,7 @@ lemma handleFault_ccorres: apply (rule ccorres_return_Skip') apply clarsimp apply (rule ccorres_cond_univ) - apply (ctac (no_vcg) add: handleDoubleFault_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) apply (simp add: sendFaultIPC_def) apply wp apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] @@ -880,8 +880,7 @@ lemma handleInvocation_ccorres: apply (rule_tac Q="\rv'. invs' and tcb_at' rv" and E="\ft. invs' and tcb_at' rv" in hoare_post_impErr) - apply (wp hoare_split_bind_case_sumE - alternative_wp hoare_drop_imps + apply (wp hoare_split_bind_case_sumE hoare_drop_imps setThreadState_nonqueued_state_update ct_in_state'_set setThreadState_st_tcb hoare_vcg_all_lift sts_ksQ' @@ -1046,7 +1045,7 @@ lemma handleReply_ccorres: apply (rule ccorres_cond_true) apply simp apply (rule ccorres_return_void_catchbrk) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply (vcg exspec=doReplyTransfer_modifies) apply (rule ccorres_fail)+ apply (wpc, simp_all) @@ -1064,7 +1063,6 @@ lemma handleReply_ccorres: apply (csymbr, csymbr, csymbr) apply simp apply (rule ccorres_assert2) - apply (fold dc_def) apply (rule ccorres_add_return2) apply (ctac (no_vcg)) apply (rule ccorres_return_void_catchbrk) @@ -1186,7 +1184,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1204,8 +1202,8 @@ lemma handleRecv_ccorres: apply (simp add: liftE_bind) apply (ctac) - apply (rule_tac P="\s. ksCurThread s = rv" in ccorres_cross_over_guard) - apply (ctac add: receiveIPC_ccorres[unfolded dc_def]) + apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) apply (wp deleteCallerCap_ksQ_ct' hoare_vcg_all_lift) apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) @@ -1253,7 +1251,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1270,7 +1268,7 @@ lemma handleRecv_ccorres: apply (clarsimp simp: rf_sr_upd_safe) apply (simp add: liftE_bind) - apply (ctac add: receiveSignal_ccorres[unfolded dc_def]) + apply (ctac add: receiveSignal_ccorres) apply clarsimp apply (vcg exspec=handleFault_modifies) apply (rule ccorres_cond_true_seq) @@ -1283,7 +1281,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) apply (rule ccorres_add_return2) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_break_return[where P=\ and P'=UNIV]) apply simp+ apply wp @@ -1304,7 +1302,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_symb_exec_r) apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_upd_safe) @@ -1317,9 +1315,9 @@ lemma handleRecv_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=handleFault_modifies) @@ -1531,11 +1529,11 @@ lemma handleInterrupt_ccorres: apply (subst doMachineOp_bind) apply (rule maskInterrupt_empty_fail) apply (rule ackInterrupt_empty_fail) - apply (ctac add: maskInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: maskInterrupt_ccorres) apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) - apply (ctac add: ackInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=ackInterrupt_modifies) @@ -1554,7 +1552,7 @@ lemma handleInterrupt_ccorres: apply (rule getIRQSlot_ccorres3) apply (rule ccorres_getSlotCap_cte_at) apply (rule_tac P="cte_at' rv" in ccorres_cross_over_guard) - supply ccorres_move_array_assertion_tcb_ctes [corres_pre del] + supply ccorres_move_array_assertion_tcb_ctes [ccorres_pre del] apply ctac apply csymbr apply csymbr @@ -1573,7 +1571,7 @@ lemma handleInterrupt_ccorres: apply (ctac (no_vcg) add: sendSignal_ccorres) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp+ apply (simp del: Collect_const) apply (rule ccorres_cond_true_seq) @@ -1582,7 +1580,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres 
[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp apply (rule_tac P=\ and P'="{s. ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) apply (clarsimp simp: isCap_simps simp del: Collect_const) @@ -1594,7 +1592,7 @@ lemma handleInterrupt_ccorres: rule ccorres_cond_false_seq, simp, rule ccorres_cond_false_seq, simp, ctac (no_vcg) add: maskInterrupt_ccorres, - ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def], + ctac (no_vcg) add: ackInterrupt_ccorres, wp, simp)+) apply (wp getSlotCap_wp) apply simp @@ -1603,7 +1601,6 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_move_const_guards)+ apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: timerTick_ccorres) apply (ctac (no_vcg) add: resetTimer_ccorres) @@ -1615,7 +1612,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) apply (ctac add: ccorres_handleReserveIRQ) - apply (ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) apply wp apply vcg apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up) diff --git a/proof/crefine/ARM/TcbAcc_C.thy b/proof/crefine/ARM/TcbAcc_C.thy index 4fd5f9ddda..c9e9e58920 100644 --- a/proof/crefine/ARM/TcbAcc_C.thy +++ b/proof/crefine/ARM/TcbAcc_C.thy @@ -110,7 +110,7 @@ lemma threadSet_corres_lemma: assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" and mod: "modifies_heap_spec f" and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ - \ (\\ksPSpace := ksPSpace \(thread \ KOTCB (g ko))\, + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" @@ -139,7 +139,7 @@ lemma threadSet_corres_lemma: lemma threadSet_ccorres_lemma4: - "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := ksPSpace s(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; \s s' tcb tcb'. \ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ diff --git a/proof/crefine/ARM/TcbQueue_C.thy b/proof/crefine/ARM/TcbQueue_C.thy index ccc1d1bfe7..e14fa45ae9 100644 --- a/proof/crefine/ARM/TcbQueue_C.thy +++ b/proof/crefine/ARM/TcbQueue_C.thy @@ -970,8 +970,8 @@ lemma cpspace_relation_ntfn_update_ntfn: and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" using koat invs cp rel apply - apply (subst map_comp_update) @@ -1059,7 +1059,7 @@ lemma rf_sr_tcb_update_no_queue: (\x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes heap_to_user_data_def) @@ -1108,7 +1108,7 @@ lemma rf_sr_tcb_update_not_in_queue: \ live' (KOTCB tcb); invs' s; (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes diff --git a/proof/crefine/ARM/Tcb_C.thy b/proof/crefine/ARM/Tcb_C.thy index 8437004c8b..8a515d6123 100644 --- a/proof/crefine/ARM/Tcb_C.thy +++ b/proof/crefine/ARM/Tcb_C.thy @@ -72,8 +72,8 @@ begin lemma getObject_state: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbState_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -131,8 +131,8 @@ lemma getObject_state: lemma threadGet_state: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_state [where st=st]) apply (rule exI) @@ -142,8 +142,8 @@ lemma threadGet_state: lemma asUser_state: "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (asUser t' f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -240,8 +240,8 @@ lemma asUser_state: lemma doMachineOp_state: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. 
st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -274,7 +274,7 @@ lemma getMRs_rel_state: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s \ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -374,8 +374,8 @@ lemma setPriority_ccorres: apply (rule ccorres_pre_getCurThread) apply (rule_tac R = "\s. rv = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) apply (rule ccorres_return_Skip') apply (wp isRunnable_wp) apply (wpsimp wp: hoare_drop_imps threadSet_valid_queues threadSet_valid_objs' @@ -399,7 +399,7 @@ lemma setPriority_ccorres: apply (frule (1) valid_objs'_maxDomain[where t=t]) apply (frule (1) valid_objs'_maxPriority[where t=t]) apply simp -done + done lemma setMCPriority_ccorres: "ccorres dc xfdc @@ -551,7 +551,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply csymbr apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def - locateSlot_conv o_def + locateSlot_conv del: Collect_const) apply (simp add: liftE_bindE del: Collect_const) apply (ctac(no_vcg) add: cteDelete_ccorres) @@ -597,7 +597,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -606,7 +606,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wp (once)) apply (clarsimp simp: guard_is_UNIV_def) - apply (wpsimp wp: when_def static_imp_wp) + apply (wpsimp wp: when_def hoare_weak_lift_imp) apply (strengthen sch_act_wf_weak, wp) apply clarsimp apply wp @@ -620,7 +620,7 @@ lemma invokeTCB_ThreadControl_ccorres: tcb_at' target s \ ksCurDomain s \ maxDomain \ valid_queues' s \ fst (the priority) \ maxPriority)"]) apply (strengthen sch_act_wf_weak) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (clarsimp split: if_splits) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr @@ -635,7 +635,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -645,7 +645,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply (simp add: when_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen 
sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbBuffer_def size_of_def cte_level_bits_def @@ -662,7 +662,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply(rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -671,7 +671,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+) apply wp apply (clarsimp simp: guard_is_UNIV_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (simp add: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def) @@ -688,7 +688,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -698,7 +698,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply wpsimp - apply (wp static_imp_wp, strengthen sch_act_wf_weak, wp ) + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) apply wp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (simp cong: conj_cong) @@ -736,7 +736,7 @@ lemma invokeTCB_ThreadControl_ccorres: simp add: o_def) apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" in hoare_gen_asm) - apply (wp threadSet_ipcbuffer_trivial static_imp_wp + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp | simp | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf invs_queues invs_valid_queues' | wp hoare_drop_imps)+ @@ -778,7 +778,6 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) @@ -800,8 +799,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: Collect_True assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) @@ -809,13 +807,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (fastforce simp: guard_is_UNIV_def Kernel_C.tcbVTable_def tcbVTableSlot_def cte_level_bits_def size_of_def) apply csymbr apply (simp add: 
Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) @@ -839,7 +837,6 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ @@ -865,8 +862,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: Collect_True assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) @@ -874,14 +870,14 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem Kernel_C.tcbCTable_def tcbCTableSlot_def cte_level_bits_def size_of_def option_to_0_def) apply csymbr apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) @@ -897,13 +893,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def cte_level_bits_def size_of_def word_sle_def option_to_0_def cintr_def Collect_const_mem) apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: inQ_def) apply (subst is_aligned_neg_mask_eq) @@ -930,7 +926,7 @@ lemma setupReplyMaster_ccorres: apply (cinit lift: thread_') apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply ctac - apply (simp del: Collect_const add: dc_def[symmetric]) + apply (simp del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) apply (rule_tac F="\rv'. (rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" @@ -1143,10 +1139,10 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (simp add: word_bits_def frame_gp_registers_convs n_gpRegisters_def) apply simp apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) apply (ctac add: postModifyRegisters_ccorres[simplified]) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rvd = ksCurThread s" - in ccorres_when) + apply (rule_tac R="\s. 
thread = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp apply (ctac (no_vcg) add: rescheduleRequired_ccorres) @@ -1211,8 +1207,8 @@ lemma invokeTCB_WriteRegisters_ccorres_helper: lemma doMachineOp_context: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -1221,8 +1217,8 @@ lemma doMachineOp_context: lemma getObject_context: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbContext_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -1281,8 +1277,8 @@ lemma getObject_context: lemma threadGet_context: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; t \ ksCurThread s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_context [where st=st]) apply (rule exI) @@ -1294,8 +1290,8 @@ done lemma asUser_context: "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; t \ ksCurThread s\ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (asUser (ksCurThread s) f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -1366,7 +1362,7 @@ lemma getMRs_rel_context: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s ; t \ ksCurThread s\ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -1443,7 +1439,7 @@ lemma threadSet_same: by (wpsimp wp: setObject_tcb_strongest getObject_tcb_wp) fastforce lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple @@ -1547,15 +1543,14 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_when[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) - apply (rule_tac xf'="\_. 0" in ccorres_call) - apply (rule restart_ccorres) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp apply simp - apply (simp add: xfdc_def) apply simp apply (rule ceqv_refl) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rv = ksCurThread s" - in ccorres_when) + apply (rule_tac R="\s. self = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp apply (ctac (no_vcg) add: rescheduleRequired_ccorres) @@ -1727,6 +1722,7 @@ shows apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) apply (ctac add: setRegister_ccorres) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc2) @@ -1787,15 +1783,15 @@ shows apply (rule bind_apply_cong[OF _ refl]) apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" in fun_cong [OF mapM_x_split_append]) - apply (rule_tac P="rva \ Some 0" in ccorres_gen_asm) - apply (subgoal_tac "(ipcBuffer = NULL) = (rva = None)") + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") prefer 2 apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.split_asm) apply (simp add: bind_assoc del: Collect_const) apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) (min (unat n) - (case rva of None \ unat n_msgRegisters + (case destIPCBuffer of None \ unat n_msgRegisters | _ \ unat n_frameRegisters))" in ccorres_split_nothrow_novcg) apply (rule ccorres_Cond_rhs) @@ -1803,7 +1799,7 @@ shows rule_tac F="\m s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) (genericTake n (ARM_H.frameRegisters @ ARM_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="unat n_msgRegisters" in ccorres_mapM_x_while') @@ -1913,11 +1909,10 @@ shows apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) apply (simp add: drop_zip del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp del: Collect_const) apply (rule_tac F="\m s. obj_at' (\tcb. 
map ((atcbContextGet o tcbArch) tcb) (genericTake n (ARM_H.frameRegisters @ ARM_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s \ valid_pspace' s" + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="0" in ccorres_mapM_x_while') apply (clarsimp simp: less_diff_conv drop_zip) apply (rule ccorres_guard_imp2) @@ -1988,11 +1983,11 @@ shows apply (simp add: min_less_iff_disj less_imp_diff_less) apply (simp add: drop_zip n_gpRegisters_def) apply (elim disjE impCE) - apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) apply (rule ccorres_return_Skip') - apply (simp add: linorder_not_less word_le_nat_alt - drop_zip mapM_x_Nil n_frameRegisters_def - min.absorb1 n_msgRegisters_def) + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') apply simp apply ceqv @@ -2024,15 +2019,15 @@ shows apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size word_less_nat_alt split: if_split_asm dest!: word_unat.Rep_inverse') - apply simp - apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift static_imp_wp + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp tcb_in_cur_domain'_lift) apply (simp add: n_frameRegisters_def n_msgRegisters_def guard_is_UNIV_def) apply simp apply (rule mapM_x_wp') apply (rule hoare_pre) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem @@ -2041,7 +2036,7 @@ shows msgMaxLength_def msgLengthBits_def word_less_nat_alt unat_of_nat) apply (wp (once) hoare_drop_imps) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply (vcg exspec=setRegister_modifies) apply simp @@ -2061,12 +2056,12 @@ shows apply (simp cong: rev_conj_cong) apply wp apply (wp asUser_inv mapM_wp' getRegister_inv - asUser_get_registers[simplified] static_imp_wp)+ + asUser_get_registers[simplified] hoare_weak_lift_imp)+ apply (rule hoare_strengthen_post, rule asUser_get_registers) apply (clarsimp simp: obj_at'_def genericTake_def frame_gp_registers_convs) apply arith - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply (simp add: performTransfer_def) @@ -2156,7 +2151,8 @@ lemma decodeReadRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. 
capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2167,13 +2163,13 @@ lemma decodeReadRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2267,7 +2263,8 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2278,13 +2275,13 @@ lemma decodeWriteRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2292,7 +2289,7 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: performInvocation_def) apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres [where args=args and someNum="unat (args ! 1)"]) - apply (simp add: dc_def[symmetric] o_def) + apply simp apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) apply (rule ccorres_return_C_errorE, simp+)[1] apply wp[1] @@ -2313,7 +2310,7 @@ lemma decodeWriteRegisters_ccorres: numeral_eqs simp del: unsigned_numeral) apply (frule arg_cong[where f="\x. 
unat (of_nat x :: word32)"], - simp(no_asm_use) only: word_unat.Rep_inverse o_def, + simp(no_asm_use) only: word_unat.Rep_inverse, simp) apply (rule conjI) apply clarsimp @@ -2570,7 +2567,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (simp add: case_Null_If del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_if_lhs) @@ -2591,7 +2588,7 @@ lemma slotCapLongRunningDelete_ccorres: apply vcg apply (simp del: Collect_const) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of return_def) @@ -3150,7 +3147,6 @@ lemma decodeSetMCPriority_ccorres: >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetMCPriority_'proc)" supply Collect_const[simp del] - supply dc_simp[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3218,8 +3214,7 @@ lemma decodeSetMCPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3284,7 +3279,7 @@ lemma decodeSetPriority_ccorres: (decodeSetPriority args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetPriority_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3352,8 +3347,7 @@ lemma decodeSetPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3431,7 +3425,7 @@ lemma decodeSetSchedParams_ccorres: (decodeSetSchedParams args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetSchedParams_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3498,8 +3492,7 @@ lemma decodeSetSchedParams_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) 
@@ -3722,7 +3715,7 @@ lemma bindNotification_ccorres: (Call bindNotification_'proc)" apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr and tcb_at' tcb" and P'=UNIV + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) @@ -3742,7 +3735,7 @@ lemma bindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv") + apply (case_tac "ntfnObj ntfn") apply (auto simp: option_to_ctcb_ptr_def obj_at'_def objBits_simps projectKOs bindNTFN_alignment_junk)[4] apply (simp add: carch_state_relation_def typ_heap_simps') @@ -3754,7 +3747,7 @@ lemma bindNotification_ccorres: apply ceqv apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) - apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3[unfolded dc_def]) + apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule (1) rf_sr_tcb_update_no_queue2, @@ -3820,7 +3813,7 @@ lemma decodeUnbindNotification_ccorres: apply (rule ccorres_Guard_Seq) apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getBoundNotification) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. ntfn \ Some 0" in ccorres_cross_over_guard) apply (simp add: bindE_bind_linearise) apply wpc apply (simp add: bindE_bind_linearise[symmetric] @@ -4212,7 +4205,7 @@ lemma decodeSetSpace_ccorres: apply (simp add: Collect_False del: Collect_const) apply csymbr apply csymbr - apply (simp add: cnode_cap_case_if cap_get_tag_isCap dc_def[symmetric] + apply (simp add: cnode_cap_case_if cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_throwError @@ -4345,7 +4338,7 @@ lemma decodeSetSpace_ccorres: done lemma invokeTCB_SetTLSBase_ccorres: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs') @@ -4356,7 +4349,7 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (cinit lift: thread_' tls_base_') apply (simp add: liftE_def bind_assoc del: Collect_const) - apply (ctac add: setRegister_ccorres[simplified dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_pre_getCurThread) apply (rename_tac cur_thr) apply (rule ccorres_split_nothrow_novcg_dc) diff --git a/proof/crefine/ARM/VSpace_C.thy b/proof/crefine/ARM/VSpace_C.thy index 5f379ddcd1..725ae72eea 100644 --- a/proof/crefine/ARM/VSpace_C.thy +++ b/proof/crefine/ARM/VSpace_C.thy @@ -171,7 +171,7 @@ lemma loadHWASID_ccorres: apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_gets]) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_findPDForASIDAssert]) apply (rename_tac pd) - apply (rule_tac P="\s. pd_at_asid' pd asid s \ rv = armKSASIDMap (ksArchState s) + apply (rule_tac P="\s. 
pd_at_asid' pd asid s \ asidMap = armKSASIDMap (ksArchState s) \ pd \ ran (option_map snd o armKSASIDMap (ksArchState s) |` (- {asid})) \ option_map snd (armKSASIDMap (ksArchState s) asid) \ {None, Some pd} @@ -746,7 +746,7 @@ lemma lookupPTSlot_ccorres: apply csymbr apply csymbr apply (rule ccorres_abstract_cleanup) - apply (rule_tac P="(ret__unsigned = scast pde_pde_coarse) = (isPageTablePDE rv)" + apply (rule_tac P="(ret__unsigned = scast pde_pde_coarse) = (isPageTablePDE pde)" in ccorres_gen_asm2) apply (rule ccorres_cond2'[where R=\]) apply (clarsimp simp: Collect_const_mem) @@ -761,9 +761,10 @@ lemma lookupPTSlot_ccorres: apply (simp add: checkPTAt_def bind_liftE_distrib liftE_bindE returnOk_liftE[symmetric]) apply (rule ccorres_stateAssert) - apply (rule_tac P="page_table_at' (ptrFromPAddr (pdeTable rv)) - and ko_at' rv (lookup_pd_slot pd vptr) - and K (isPageTablePDE rv)" and P'=UNIV in ccorres_from_vcg_throws) + apply (rule_tac P="page_table_at' (ptrFromPAddr (pdeTable pde)) + and ko_at' pde (lookup_pd_slot pd vptr) and K (isPageTablePDE pde)" + and P'=UNIV + in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def Collect_const_mem lookup_pd_slot_def word_sle_def) @@ -913,7 +914,7 @@ lemma findPDForASID_ccorres: apply (rule_tac P=\ and P' =UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def bind_def NonDetMonad.lift_def) + apply (clarsimp simp: throwError_def return_def bindE_def bind_def Nondet_Monad.lift_def) apply (clarsimp simp: EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def) apply (simp add: lookup_fault_lift_invalid_root) @@ -1018,7 +1019,7 @@ lemma flushSpace_ccorres: apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws2) apply (clarsimp simp: Collect_const_mem pde_stored_asid_def) apply (simp add: if_split_eq1 to_bool_def) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply csymbr apply (clarsimp simp: pde_stored_asid_def) apply (case_tac "to_bool (stored_asid_valid_CL (pde_pde_invalid_lift stored_hw_asid___struct_pde_C))") @@ -1030,7 +1031,7 @@ lemma flushSpace_ccorres: apply clarsimp apply clarsimp apply (rule ccorres_call, - rule invalidateTranslationASID_ccorres [simplified dc_def xfdc_def], + rule invalidateTranslationASID_ccorres, simp+)[1] apply vcg apply wp+ @@ -1168,15 +1169,15 @@ lemma findFreeHWASID_ccorres: apply (rule_tac xf=hw_asid_offset_' and i=0 and xf_update=hw_asid_offset_'_update and r'=dc and xf'=xfdc and Q=UNIV - and F="\n s. rv = armKSHWASIDTable (ksArchState s) - \ nextASID = armKSNextASID (ksArchState s) - \ valid_arch_state' s" + and F="\n s. hwASIDTable = armKSHWASIDTable (ksArchState s) + \ nextASID = armKSNextASID (ksArchState s) + \ valid_arch_state' s" in ccorres_sequenceE_while_gen') apply (rule ccorres_from_vcg_might_throw) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: rf_sr_armKSNextASID) apply (subst down_cast_same [symmetric], - simp add: is_down_def target_size_def source_size_def word_size)+ + simp add: is_down_def target_size_def source_size_def word_size)+ apply (simp add: ucast_ucast_mask ucast_ucast_add ucast_and_mask ucast_of_nat_small asidInvalid_def @@ -1214,7 +1215,7 @@ lemma findFreeHWASID_ccorres: apply ceqv apply (rule ccorres_assert) apply (rule_tac A="\s. 
nextASID = armKSNextASID (ksArchState s) - \ rv = armKSHWASIDTable (ksArchState s) + \ hwASIDTable = armKSHWASIDTable (ksArchState s) \ valid_arch_state' s \ valid_pde_mappings' s" in ccorres_guard_imp2[where A'=UNIV]) apply (simp add: split_def) @@ -1325,7 +1326,6 @@ lemma armv_contextSwitch_ccorres: apply (cinit lift: cap_pd_' asid_') apply simp apply (ctac(no_vcg) add: getHWASID_ccorres) - apply (fold dc_def) apply (ctac (no_vcg)add: armv_contextSwitch_HWASID_ccorres) apply wp apply clarsimp @@ -1357,11 +1357,11 @@ lemma setVMRoot_ccorres: apply (simp add: cap_case_isPageDirectoryCap cong: if_cong) apply (rule ccorres_cond_true_seq) apply (rule ccorres_rhs_assoc) - apply (simp add: throwError_def catch_def dc_def[symmetric]) + apply (simp add: throwError_def catch_def) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armKSGlobalPD) apply csymbr - apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) @@ -1381,11 +1381,11 @@ lemma setVMRoot_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armKSGlobalPD) apply csymbr - apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (simp add: cap_case_isPageDirectoryCap) @@ -1410,28 +1410,28 @@ lemma setVMRoot_ccorres: apply (simp add: whenE_def throwError_def checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) - apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armKSGlobalPD) apply csymbr apply (rule ccorres_add_return2) apply (ctac(no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (simp add: whenE_def returnOk_def) - apply (ctac (no_vcg) add: armv_contextSwitch_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: armv_contextSwitch_ccorres) apply (simp add: checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc)+ - apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_armKSGlobalPD_ksArchState) apply (rule ccorres_h_t_valid_armKSGlobalPD) apply csymbr apply (rule ccorres_add_return2) apply (ctac(no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply simp @@ -1477,9 +1477,9 @@ lemma setVMRootForFlush_ccorres: del: Collect_const) apply (rule ccorres_if_lhs) apply (rule_tac P="(capPDIsMapped_CL (cap_page_directory_cap_lift threadRoot) = 0) - = (capPDMappedASID (capCap rva) = None) + = (capPDMappedASID (capCap rv) = None) \ capPDBasePtr_CL (cap_page_directory_cap_lift threadRoot) - = capPDBasePtr (capCap rva)" in ccorres_gen_asm2) + = capPDBasePtr (capCap rv)" in ccorres_gen_asm2) apply (rule ccorres_rhs_assoc | csymbr | simp add: Collect_True del: Collect_const)+ apply (rule 
ccorres_split_throws) apply (rule ccorres_return_C, simp+) @@ -1597,7 +1597,6 @@ lemma doFlush_ccorres: empty_fail_invalidateCacheRange_I empty_fail_branchFlushRange empty_fail_isb doMachineOp_bind) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def) apply (ctac (no_vcg) add: cleanCacheRange_PoU_ccorres) apply (ctac (no_vcg) add: dsb_ccorres) apply (ctac (no_vcg) add: invalidateCacheRange_I_ccorres) @@ -1606,13 +1605,13 @@ lemma doFlush_ccorres: apply wp+ apply simp apply (clarsimp simp: Collect_const_mem) - apply (auto simp: flushtype_relation_def o_def - Kernel_C.ARMPageClean_Data_def Kernel_C.ARMPDClean_Data_def - Kernel_C.ARMPageInvalidate_Data_def Kernel_C.ARMPDInvalidate_Data_def - Kernel_C.ARMPageCleanInvalidate_Data_def Kernel_C.ARMPDCleanInvalidate_Data_def - Kernel_C.ARMPageUnify_Instruction_def Kernel_C.ARMPDUnify_Instruction_def - dest: ghost_assertion_size_logic[rotated] - split: ARM_H.flush_type.splits) + apply (auto simp: flushtype_relation_def + Kernel_C.ARMPageClean_Data_def Kernel_C.ARMPDClean_Data_def + Kernel_C.ARMPageInvalidate_Data_def Kernel_C.ARMPDInvalidate_Data_def + Kernel_C.ARMPageCleanInvalidate_Data_def Kernel_C.ARMPDCleanInvalidate_Data_def + Kernel_C.ARMPageUnify_Instruction_def Kernel_C.ARMPDUnify_Instruction_def + dest: ghost_assertion_size_logic[rotated] + split: ARM_H.flush_type.splits) done end @@ -1654,7 +1653,7 @@ lemma performPageFlush_ccorres: apply (rule ccorres_return_Skip) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply wpsimp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: order_less_imp_le) @@ -1683,12 +1682,12 @@ lemma setRegister_ccorres: (asUser thread (setRegister reg val)) (Call setRegister_'proc)" apply (cinit' lift: thread_' reg_' w_') - apply (simp add: asUser_def dc_def[symmetric] split_def split del: if_split) + apply (simp add: asUser_def split_def) apply (rule ccorres_pre_threadGet) apply (rule ccorres_Guard) apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) - apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = rv" - in threadSet_ccorres_lemma2 [unfolded dc_def]) + apply (rule_tac P="\tcb. 
(atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) apply vcg apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def simpler_modify_def typ_heap_simps) @@ -1837,7 +1836,6 @@ lemma flushPage_ccorres: apply (rule ccorres_cond2[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) apply (rule ccorres_pre_getCurThread) - apply (fold dc_def) apply (ctac add: setVMRoot_ccorres) apply (rule ccorres_return_Skip) apply (wp | simp add: cur_tcb'_def[symmetric])+ @@ -2094,8 +2092,7 @@ lemma unmapPage_ccorres: (unmapPage sz asid vptr pptr) (Call unmapPage_'proc)" apply (rule ccorres_gen_asm) apply (cinit lift: page_size_' asid_' vptr_' pptr_') - apply (simp add: ignoreFailure_liftM ptr_add_assertion_positive - Collect_True + apply (simp add: ignoreFailure_liftM ptr_add_assertion_positive Collect_True del: Collect_const) apply ccorres_remove_UNIV_guard apply csymbr @@ -2107,16 +2104,16 @@ lemma unmapPage_ccorres: apply (rule ccorres_splitE_novcg[where r'=dc and xf'=xfdc]) \ \ARMSmallPage\ apply (rule ccorres_Cond_rhs) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric]) + apply (simp add: gen_framesize_to_H_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac pt_slot pt_slot') - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_splitE_novcg) - apply (simp only: inl_rrel_inl_rrel) + apply simp apply (rule checkMappingPPtr_pte_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') @@ -2125,7 +2122,7 @@ lemma unmapPage_ccorres: pte_pte_small_lift_def pte_pte_invalid_def split: if_split_asm pte.split_asm) apply (rule ceqv_refl) - apply (simp add: liftE_liftM Collect_const[symmetric] dc_def[symmetric] + apply (simp add: liftE_liftM Collect_const[symmetric] del: Collect_const) apply (rule ccorres_handlers_weaken2) apply csymbr @@ -2133,8 +2130,7 @@ lemma unmapPage_ccorres: apply (rule storePTE_Basic_ccorres) apply (simp add: cpte_relation_def Let_def) apply csymbr - apply simp - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply wp apply (simp add: guard_is_UNIV_def) apply wp @@ -2148,18 +2144,17 @@ lemma unmapPage_ccorres: apply (vcg exspec=lookupPTSlot_modifies) \ \ARMLargePage\ apply (rule ccorres_Cond_rhs) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] + apply (simp add: gen_framesize_to_H_def largePagePTEOffsets_def pteBits_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac ptSlot lookupPTSlot_ret) - apply (simp add: Collect_False dc_def[symmetric] del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, - rule checkMappingPPtr_pte_ccorres) + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pte_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') subgoal by (simp add: cpte_relation_def Let_def pte_lift_def @@ -2167,7 +2162,7 @@ lemma unmapPage_ccorres: pte_pte_large_lift_def pte_pte_invalid_def split: if_split_asm pte.split_asm) apply (rule ceqv_refl) - apply (simp add: liftE_liftM dc_def[symmetric] + apply (simp add: liftE_liftM mapM_discarded whileAnno_def ARMLargePageBits_def ARMSmallPageBits_def Collect_False word_sle_def del: Collect_const) @@ -2198,7 +2193,7 
@@ lemma unmapPage_ccorres: apply csymbr apply (rule ccorres_move_c_guard_pte ccorres_move_array_assertion_pte_16)+ apply (rule ccorres_add_return2, - ctac(no_vcg) add: cleanCacheRange_PoU_ccorres[unfolded dc_def]) + ctac(no_vcg) add: cleanCacheRange_PoU_ccorres) apply (rule ccorres_move_array_assertion_pte_16, rule ccorres_return_Skip') apply wp apply (rule_tac P="is_aligned ptSlot 6" in hoare_gen_asm) @@ -2240,32 +2235,29 @@ lemma unmapPage_ccorres: apply (rule ccorres_Cond_rhs) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] - liftE_liftM + apply (simp add: gen_framesize_to_H_def liftE_liftM del: Collect_const) apply (simp split: if_split, rule conjI[rotated], rule impI, rule ccorres_empty, rule impI) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, - rule checkMappingPPtr_pde_ccorres) + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pde_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') subgoal by (simp add: pde_pde_section_lift_def cpde_relation_def pde_lift_def Let_def pde_tag_defs isSectionPDE_def split: pde.split_asm if_split_asm) apply (rule ceqv_refl) - apply (simp add: Collect_False dc_def[symmetric] - del: Collect_const) - apply (rule ccorres_handlers_weaken2, simp) + apply (simp add: Collect_False del: Collect_const) + apply (rule ccorres_handlers_weaken2) apply csymbr apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def pde_lift_pde_invalid) apply csymbr - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp @@ -2280,15 +2272,13 @@ lemma unmapPage_ccorres: apply (case_tac "pd = pde_Ptr (lookup_pd_slot pdPtr vptr)") prefer 2 apply (simp, rule ccorres_empty) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] - liftE_liftM mapM_discarded whileAnno_def - superSectionPDEOffsets_def pdeBits_def - del: Collect_const) + apply (simp add: gen_framesize_to_H_def liftE_liftM mapM_discarded whileAnno_def + superSectionPDEOffsets_def pdeBits_def + del: Collect_const) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, - rule checkMappingPPtr_pde_ccorres) + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pde_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') subgoal by (simp add: cpde_relation_def Let_def pde_lift_def @@ -2332,7 +2322,7 @@ lemma unmapPage_ccorres: apply csymbr apply (rule ccorres_move_c_guard_pde ccorres_move_array_assertion_pde_16)+ apply (rule ccorres_add_return2) - apply (ctac(no_vcg) add: cleanCacheRange_PoU_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: cleanCacheRange_PoU_ccorres) apply (rule ccorres_move_array_assertion_pde_16, rule ccorres_return_Skip') apply wp apply (rule_tac P="is_aligned pdPtr pdBits" in hoare_gen_asm) @@ -2369,14 +2359,14 @@ lemma unmapPage_ccorres: apply (rule ccorres_empty[where P=\]) apply ceqv apply (simp add: liftE_liftM) - apply (ctac add: flushPage_ccorres[unfolded dc_def]) + apply (ctac add: flushPage_ccorres) apply ((wp lookupPTSlot_inv mapM_storePTE_invs[unfolded swp_def] mapM_storePDE_invs[unfolded swp_def] | wpc 
| simp)+)[1] apply (simp add: guard_is_UNIV_def) apply (simp add: throwError_def) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply (simp add: lookup_pd_slot_def Let_def) apply (wp hoare_vcg_const_imp_lift_R) @@ -2891,13 +2881,13 @@ lemma performASIDPoolInvocation_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule_tac ccorres_split_nothrow [where r'=dc and xf'=xfdc]) apply (simp add: updateCap_def) - apply (rule_tac A="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPDCap rv \ asid \ mask asid_bits)" + apply (rule_tac A="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPDCap oldcap \ asid \ mask asid_bits)" and A'=UNIV in ccorres_guard_imp2) apply (rule ccorres_pre_getCTE) - apply (rule_tac P="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPDCap rv \ asid \ mask asid_bits) - and cte_wp_at' ((=) rva) ctSlot" + apply (rule_tac P="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPDCap oldcap \ asid \ mask asid_bits) + and cte_wp_at' ((=) rv) ctSlot" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -2965,7 +2955,7 @@ lemma performASIDPoolInvocation_ccorres: apply (wp getASID_wp) apply simp apply wp - apply (simp add: o_def inv_def) + apply (simp add: inv_def) apply (wp getASID_wp) apply simp apply (rule empty_fail_getObject) @@ -3020,14 +3010,14 @@ lemma flushTable_ccorres: apply (rule_tac R=\ in ccorres_cond2) apply (clarsimp simp: from_bool_0 Collect_const_mem) apply (rule ccorres_pre_getCurThread) - apply (ctac (no_vcg) add: setVMRoot_ccorres [unfolded dc_def]) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (wp static_imp_wp) + apply (ctac (no_vcg) add: setVMRoot_ccorres) + apply (rule ccorres_return_Skip) + apply (wp hoare_weak_lift_imp) apply clarsimp apply (rule_tac Q="\_ s. 
invs' s \ cur_tcb' s" in hoare_post_imp) apply (simp add: invs'_invs_no_cicd cur_tcb'_def) apply (wp mapM_x_wp_inv getPTE_wp | wpc)+ - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply wp apply clarsimp apply (strengthen invs_valid_pde_mappings') diff --git a/proof/crefine/ARM_HYP/ADT_C.thy b/proof/crefine/ARM_HYP/ADT_C.thy index 1fed61071a..9e90927251 100644 --- a/proof/crefine/ARM_HYP/ADT_C.thy +++ b/proof/crefine/ARM_HYP/ADT_C.thy @@ -93,7 +93,7 @@ lemma setTCBContext_C_corres: apply clarsimp apply (frule getObject_eq [rotated -1], simp) apply (simp add: objBits_simps') - apply (simp add: NonDetMonad.bind_def split_def) + apply (simp add: Nondet_Monad.bind_def split_def) apply (rule bexI) prefer 2 apply assumption diff --git a/proof/crefine/ARM_HYP/ArchMove_C.thy b/proof/crefine/ARM_HYP/ArchMove_C.thy index 59302d72e2..530b503ebb 100644 --- a/proof/crefine/ARM_HYP/ArchMove_C.thy +++ b/proof/crefine/ARM_HYP/ArchMove_C.thy @@ -486,7 +486,7 @@ lemma ps_clear_entire_slotI: by (fastforce simp: ps_clear_def) lemma ps_clear_ksPSpace_upd_same[simp]: - "ps_clear p n (s\ksPSpace := ksPSpace s(p \ v)\) = ps_clear p n s" + "ps_clear p n (s\ksPSpace := (ksPSpace s)(p \ v)\) = ps_clear p n s" by (fastforce simp: ps_clear_def) lemma getObject_vcpu_prop: diff --git a/proof/crefine/ARM_HYP/Arch_C.thy b/proof/crefine/ARM_HYP/Arch_C.thy index 90c36f2058..f651380021 100644 --- a/proof/crefine/ARM_HYP/Arch_C.thy +++ b/proof/crefine/ARM_HYP/Arch_C.thy @@ -71,8 +71,7 @@ lemma performPageTableInvocationUnmap_ccorres: apply (ctac add: unmapPageTable_ccorres) apply csymbr apply (simp add: storePTE_def' swp_def) - apply (ctac add: clearMemory_PT_setObject_PTE_ccorres[simplified objBits_InvalidPTE, - unfolded dc_def, simplified]) + apply (ctac add: clearMemory_PT_setObject_PTE_ccorres[unfolded objBits_InvalidPTE, simplified]) apply wp apply (simp del: Collect_const) apply (vcg exspec=unmapPageTable_modifies) @@ -439,7 +438,9 @@ shows apply (rule ccorres_rhs_assoc2) apply (rule ccorres_abstract_cleanup) apply (rule ccorres_symb_exec_l) - apply (rule_tac P = "rva = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (rename_tac pcap) + apply (rule_tac P = "pcap = (capability.UntypedCap isdev frame pageBits idx)" + in ccorres_gen_asm) apply (simp add: hrs_htd_update del:fun_upd_apply) apply (rule ccorres_split_nothrow) @@ -575,10 +576,10 @@ shows pageBits_def split: if_split) apply (clarsimp simp: ARMSmallPageBits_def word_sle_def is_aligned_mask[symmetric] - ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + ghost_assertion_data_get_gs_clear_region) apply (subst ghost_assertion_size_logic_flex[unfolded o_def, rotated]) apply assumption - apply (simp add: ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + apply (simp add: ghost_assertion_data_get_gs_clear_region) apply (drule valid_global_refsD_with_objSize, clarsimp)+ apply (clarsimp simp: isCap_simps dest!: ccte_relation_ccap_relation) apply (cut_tac ptr=frame and bits=12 @@ -1153,8 +1154,7 @@ lemma createSafeMappingEntries_PDE_ccorres: apply (clarsimp simp: vmsz_aligned'_def gen_framesize_to_H_def vm_page_size_defs vm_attribs_relation_def from_bool_mask_simp[unfolded mask_def, simplified] - ptr_range_to_list_def upto_enum_step_def - o_def upto_enum_word + ptr_range_to_list_def upto_enum_step_def upto_enum_word cong: if_cong) apply (frule(1) page_directory_at_rf_sr, clarsimp) apply (frule array_ptr_valid_array_assertionD[OF h_t_valid_clift]) @@ -1499,13 +1499,13 @@ lemma 
pdeCheckIfMapped_ccorres: (Call pdeCheckIfMapped_'proc)" apply (cinit lift: pde___ptr_to_struct_pde_C_') apply (rule ccorres_pre_getObject_pde) - apply (rule_tac P'="{s. \pde'. cslift s (pde_Ptr slot) = Some pde' \ cpde_relation rv pde'}" + apply (rule_tac P'="{s. \pde'. cslift s (pde_Ptr slot) = Some pde' \ cpde_relation pd pde'}" in ccorres_from_vcg_throws[where P="\s. True"]) apply simp_all apply clarsimp apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps' return_def) - apply (case_tac rv, simp_all add: cpde_relation_invalid isInvalidPDE_def + apply (case_tac pd, simp_all add: cpde_relation_invalid isInvalidPDE_def split: if_split) done @@ -2158,7 +2158,7 @@ lemma performPageInvocationMapPDE_ccorres: done lemma performPageGetAddress_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart)) @@ -2184,7 +2184,7 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) @@ -2207,7 +2207,7 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp @@ -2220,10 +2220,10 @@ lemma performPageGetAddress_ccorres: Kernel_C.msgInfoRegister_def Kernel_C.R1_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply (clarsimp simp: ThreadState_Running_def) apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply vcg @@ -2381,9 +2381,9 @@ lemma setVMRootForFlush_ccorres2: del: Collect_const) apply (rule ccorres_if_lhs) apply (rule_tac P="(capPDIsMapped_CL (cap_page_directory_cap_lift threadRoot) = 0) - = (capPDMappedASID (capCap rva) = None) + = (capPDMappedASID (capCap rv) = None) \ capPDBasePtr_CL (cap_page_directory_cap_lift threadRoot) - = capPDBasePtr (capCap rva)" in ccorres_gen_asm2) + = capPDBasePtr (capCap rv)" in ccorres_gen_asm2) apply (rule ccorres_rhs_assoc | csymbr | simp add: Collect_True del: Collect_const)+ apply (rule ccorres_split_throws) apply (rule ccorres_return_C, simp+) @@ -2439,12 +2439,12 @@ where lemma resolve_ret_rel_None[simp]: "resolve_ret_rel None y = (valid_C y = scast false)" - by (clarsimp simp: resolve_ret_rel_def o_def to_option_def to_bool_def split: if_splits) + by (clarsimp simp: resolve_ret_rel_def to_option_def to_bool_def split: if_splits) lemma resolve_ret_rel_Some: "\valid_C y = scast true; frameSize_C y = framesize_from_H (fst x); snd x = frameBase_C y\ \ resolve_ret_rel (Some x) y" - by (clarsimp simp: resolve_ret_rel_def o_def to_option_def) + by (clarsimp simp: resolve_ret_rel_def to_option_def) lemma pte_get_tag_exhaust: "pte_get_tag pte = 0 \ pte_get_tag pte = 1 \ pte_get_tag pte = 2 \ pte_get_tag pte = 3" @@ -2970,7 +2970,7 @@ lemma decodeARMFrameInvocation_ccorres: apply csymbr apply (simp add: 
ARM_HYP.pptrBase_def ARM_HYP.pptrBase_def hd_conv_nth length_ineq_not_Nil) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[unfolded id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* Doesn't throw case *) apply (drule_tac s="Some y" in sym, @@ -2996,7 +2996,6 @@ lemma decodeARMFrameInvocation_ccorres: simp add: ARM_HYP.pptrBase_def ARM_HYP.pptrBase_def hd_conv_nth length_ineq_not_Nil, ccorres_rewrite) - apply (fold dc_def) apply (rule ccorres_return_Skip, clarsimp) apply clarsimp apply (subgoal_tac "cap_get_tag cap = SCAST(32 signed \ 32) cap_frame_cap @@ -4326,7 +4325,7 @@ lemma vcpuRegSavedWhenDisabled_spec[simp]: by (simp add: vcpuRegSavedWhenDisabled_def split: vcpureg.splits) lemma writeVCPUReg_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres dc xfdc (vcpu_at' vcpuptr and no_0_obj') @@ -4372,7 +4371,7 @@ lemma writeVCPUReg_ccorres: done lemma readVCPUReg_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((=)) ret__unsigned_long_' (vcpu_at' vcpuptr and no_0_obj') @@ -4426,7 +4425,7 @@ lemma readVCPUReg_ccorres: lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres *) - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart) @@ -4460,7 +4459,7 @@ lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) \ \now if we are part of a call\ @@ -4487,7 +4486,7 @@ lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp @@ -4496,13 +4495,13 @@ lemma invokeVCPUReadReg_ccorres: (* styled after invokeTCB_ReadRegisters_ccorres apply clarsimp apply (vcg) apply wpsimp - apply (clarsimp simp: dc_def msgInfoRegister_def ARM_HYP.msgInfoRegister_def Kernel_C.msgInfoRegister_def Kernel_C.R1_def) + apply (clarsimp simp: msgInfoRegister_def ARM_HYP.msgInfoRegister_def Kernel_C.msgInfoRegister_def Kernel_C.R1_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply (clarsimp simp: ThreadState_Running_def) apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_all_lift hoare_vcg_imp_lift) @@ -4532,7 +4531,7 @@ lemma liftE_invokeVCPUWriteReg_empty_return: by (clarsimp simp: liftE_bindE bind_assoc) lemma invokeVCPUWriteReg_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and vcpu_at' vcpuptr) @@ -4548,7 +4547,7 @@ lemma invokeVCPUWriteReg_ccorres: apply (ctac (no_vcg) add: 
writeVCPUReg_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) by (wpsimp simp: invs_no_0_obj')+ lemma decodeVCPUWriteReg_ccorres: @@ -4627,7 +4626,7 @@ lemma liftE_invokeVCPUInjectIRQ_empty_return: by (clarsimp simp: liftE_bindE bind_assoc) lemma invokeVCPUInjectIRQ_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and vcpu_at' vcpuptr and K (idx < 64)) @@ -4655,7 +4654,7 @@ lemma invokeVCPUInjectIRQ_ccorres: apply clarsimp apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_lr_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) - apply (rule allI, rule conseqPre, vcg, clarsimp simp: dc_def return_def) + apply (rule allI, rule conseqPre, vcg, clarsimp simp: return_def) apply (rule wp_post_taut) apply (simp only:) apply (clarsimp simp: bind_assoc) @@ -4663,7 +4662,7 @@ lemma invokeVCPUInjectIRQ_ccorres: apply (rule ccorres_move_c_guard_vcpu) apply (ctac (no_vcg) add: vgicUpdateLR_ccorres) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) - apply (rule allI, rule conseqPre, vcg, clarsimp simp: dc_def return_def) + apply (rule allI, rule conseqPre, vcg, clarsimp simp: return_def) apply wpsimp+ apply (clarsimp simp: unat_of_nat_eq word_of_nat_less) done @@ -4748,7 +4747,7 @@ lemma decodeVCPUInjectIRQ_ccorres: liftE_liftM[symmetric] liftE_bindE_assoc) (* symbolically execute the gets on LHS *) - apply (rule_tac ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState[simplified comp_def], + apply (rule_tac ccorres_pre_gets_armKSGICVCPUNumListRegs_ksArchState, rename_tac nregs) (* unfortunately directly looking at \gic_vcpu_num_list_regs means we need to abstract the IF condition*) @@ -4836,7 +4835,7 @@ lemma decodeVCPUInjectIRQ_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply wp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=invokeVCPUInjectIRQ_modifies) apply (wpsimp wp: sts_invs_minor' ct_in_state'_set)+ apply (vcg exspec=setThreadState_modifies) @@ -4973,7 +4972,7 @@ lemma decodeVCPUReadReg_ccorres: done lemma invokeVCPUSetTCB_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' tptr and vcpu_at' vcpuptr) @@ -4985,10 +4984,10 @@ lemma invokeVCPUSetTCB_ccorres: apply clarsimp apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: associateVCPUTCB_ccorres) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) by (wpsimp simp: invs_no_0_obj')+ lemma liftE_associateVCPUTCB_empty_return: @@ -5081,7 +5080,7 @@ lemma decodeVCPUSetTCB_ccorres: done lemma invokeVCPUAckVPPI_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (K (K \) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and vcpu_at' vcpuptr) @@ -5098,7 +5097,7 @@ lemma invokeVCPUAckVPPI_ccorres: where v=False, simplified from_bool_vals]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + 
apply (clarsimp simp: return_def) apply wpsimp+ apply (case_tac vppi, simp add: fromEnum_def enum_vppievent_irq flip: word_unat.Rep_inject) done @@ -5178,7 +5177,7 @@ proof - apply (simp add: throwError_bind invocationCatch_def whenE_def injection_handler_throwError) apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types cong: StateSpace.state.fold_congs globals.fold_congs) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (solves \simp add: syscall_error_to_H_cases\) apply (clarsimp simp: irqVPPIEventIndex_not_invalid; ccorres_rewrite) diff --git a/proof/crefine/ARM_HYP/CSpace_All.thy b/proof/crefine/ARM_HYP/CSpace_All.thy index 29c643614e..ab54a3a670 100644 --- a/proof/crefine/ARM_HYP/CSpace_All.thy +++ b/proof/crefine/ARM_HYP/CSpace_All.thy @@ -25,9 +25,9 @@ abbreviation (* FIXME: move *) lemma ccorres_return_into_rel: - "ccorres (\rv rv'. r (f rv) rv') xf G G' hs a c + "ccorres (r \ f) xf G G' hs a c \ ccorres r xf G G' hs (a >>= (\rv. return (f rv))) c" - by (simp add: liftM_def[symmetric] o_def) + by (simp add: liftM_def[symmetric]) lemma lookupCap_ccorres': "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf diff --git a/proof/crefine/ARM_HYP/CSpace_C.thy b/proof/crefine/ARM_HYP/CSpace_C.thy index d526eec4c0..7e5f0ed9ec 100644 --- a/proof/crefine/ARM_HYP/CSpace_C.thy +++ b/proof/crefine/ARM_HYP/CSpace_C.thy @@ -796,7 +796,7 @@ lemma update_freeIndex': show ?thesis apply (cinit lift: cap_ptr_' v32_') apply (rule ccorres_pre_getCTE) - apply (rule_tac P="\s. ctes_of s srcSlot = Some rv \ (\i. cteCap rv = UntypedCap d p sz i)" + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. cteCap cte = UntypedCap d p sz i)" in ccorres_from_vcg[where P' = UNIV]) apply (rule allI) apply (rule conseqPre) @@ -919,7 +919,7 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (rule ccorres_Guard) apply (rule ccorres_call) - apply (rule update_freeIndex [unfolded dc_def]) + apply (rule update_freeIndex) apply simp apply simp apply simp @@ -945,14 +945,14 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply csymbr apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) apply clarsimp apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap) apply (frule(1) cte_wp_at_valid_objs_valid_cap') apply (clarsimp simp: untypedBits_defs) @@ -1071,19 +1071,17 @@ lemma cteInsert_ccorres: apply csymbr apply simp apply (rule ccorres_move_c_guard_cte) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres: ccorres_updateMDB_skip) - apply (wp static_imp_wp)+ - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg - apply (wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem 
dc_def split del: if_split) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg apply (clarsimp simp:cmdb_node_relation_mdbNext) - apply (wp setUntypedCapAsFull_cte_at_wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply (vcg exspec=setUntypedCapAsFull_modifies) apply wp apply vcg @@ -1254,7 +1252,7 @@ lemma cteMove_ccorres: apply (intro conjI, simp+) apply (erule (2) is_aligned_3_prev) apply (erule (2) is_aligned_3_next) - apply (clarsimp simp: dc_def split del: if_split) + apply (clarsimp split del: if_split) apply (simp add: ccap_relation_NullCap_iff) apply (clarsimp simp: cmdbnode_relation_def mdb_node_to_H_def nullMDBNode_def) done @@ -1398,7 +1396,6 @@ lemma cteMove_ccorres_verbose: \ \***--------------------------***\ \ \***C generalised precondition***\ \ \***--------------------------***\ - apply (unfold dc_def) apply (clarsimp simp: ccap_relation_NullCap_iff split del: if_split) \ \cmdbnode_relation nullMDBNode va\ apply (simp add: cmdbnode_relation_def) @@ -2377,7 +2374,6 @@ lemma postCapDeletion_ccorres: apply (rule ccorres_symb_exec_r) apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) - apply (fold dc_def) apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) apply (clarsimp simp: cap_irq_handler_cap_lift) apply (ctac(no_vcg) add: deletedIRQHandler_ccorres) @@ -2388,9 +2384,9 @@ lemma postCapDeletion_ccorres: apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_Cond_rhs) apply (wpc; clarsimp simp: isCap_simps) - apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres) apply (simp add: not_irq_or_arch_cap_case) - apply (rule ccorres_return_Skip[unfolded dc_def])+ + apply (rule ccorres_return_Skip) apply clarsimp apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) @@ -2439,7 +2435,7 @@ lemma emptySlot_ccorres: \ \*** proof for the 'else' branch (return () and SKIP) ***\ prefer 2 - apply (ctac add: ccorres_return_Skip[unfolded dc_def]) + apply (ctac add: ccorres_return_Skip) \ \*** proof for the 'then' branch ***\ @@ -2484,7 +2480,7 @@ lemma emptySlot_ccorres: \ \the post_cap_deletion case\ - apply (ctac(no_vcg) add: postCapDeletion_ccorres [unfolded dc_def]) + apply (ctac(no_vcg) add: postCapDeletion_ccorres) \ \Haskell pre/post for y \ updateMDB slot (\a. 
nullMDBNode);\ apply wp @@ -2556,8 +2552,8 @@ lemma capSwapForDelete_ccorres: \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ apply (simp add:when_def) apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) - apply (case_tac "slot1=slot2", simp+) - apply (rule ccorres_return_void_C [simplified dc_def]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) \ \***Main goal***\ \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ @@ -2566,7 +2562,7 @@ lemma capSwapForDelete_ccorres: apply (rule ccorres_pre_getCTE)+ apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ \ \***Main goal***\ - apply (ctac (no_vcg) add: cteSwap_ccorres [unfolded dc_def] ) + apply (ctac (no_vcg) add: cteSwap_ccorres) \ \C Hoare triple for \cap2 :== \\ apply vcg \ \C existential Hoare triple for \cap2 :== \\ diff --git a/proof/crefine/ARM_HYP/CSpace_RAB_C.thy b/proof/crefine/ARM_HYP/CSpace_RAB_C.thy index 94c4a36093..4659236034 100644 --- a/proof/crefine/ARM_HYP/CSpace_RAB_C.thy +++ b/proof/crefine/ARM_HYP/CSpace_RAB_C.thy @@ -54,7 +54,7 @@ lemma ccorres_remove_bind_returnOk_noguard: apply clarsimp apply (drule not_snd_bindE_I1) apply (erule (4) ccorresE[OF ac]) - apply (clarsimp simp add: bindE_def returnOk_def NonDetMonad.lift_def bind_def return_def + apply (clarsimp simp add: bindE_def returnOk_def Nondet_Monad.lift_def bind_def return_def split_def) apply (rule bexI [rotated], assumption) apply (simp add: throwError_def return_def unif_rrel_def @@ -205,10 +205,8 @@ next apply (simp add: cap_get_tag_isCap split del: if_split) apply (thin_tac "ret__unsigned = X" for X) apply (rule ccorres_split_throws [where P = "?P"]) - apply (rule_tac G' = "\w_rightsMask. ({s. nodeCap_' s = nodeCap} - \ {s. unat (n_bits_' s) = guard'})" - in ccorres_abstract [where xf' = w_rightsMask_']) - apply (rule ceqv_refl) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. 
unat (n_bits_' s) = guard'}" + in ccorres_inst) apply (rule_tac r' = "?rvr" in ccorres_rel_imp [where xf' = rab_xf]) defer diff --git a/proof/crefine/ARM_HYP/Ctac_lemmas_C.thy b/proof/crefine/ARM_HYP/Ctac_lemmas_C.thy index dcf5c72655..e099909f4e 100644 --- a/proof/crefine/ARM_HYP/Ctac_lemmas_C.thy +++ b/proof/crefine/ARM_HYP/Ctac_lemmas_C.thy @@ -23,7 +23,7 @@ lemma c_guard_abs_cte: apply (simp add: typ_heap_simps') done -lemmas ccorres_move_c_guard_cte [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] lemma c_guard_abs_tcb: fixes p :: "tcb_C ptr" @@ -33,7 +33,7 @@ lemma c_guard_abs_tcb: apply simp done -lemmas ccorres_move_c_guard_tcb [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] lemma cte_array_relation_array_assertion: "gsCNodes s p = Some n \ cte_array_relation s cstate @@ -96,7 +96,7 @@ lemma array_assertion_abs_tcb_ctes_add': lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] @@ -119,7 +119,7 @@ lemma c_guard_abs_tcb_ctes': done lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_c_guard_tcb_ctes [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] lemma c_guard_abs_pte: "\s s'. 
(s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True diff --git a/proof/crefine/ARM_HYP/Delete_C.thy b/proof/crefine/ARM_HYP/Delete_C.thy index 8576732936..e51026d0f5 100644 --- a/proof/crefine/ARM_HYP/Delete_C.thy +++ b/proof/crefine/ARM_HYP/Delete_C.thy @@ -845,7 +845,7 @@ lemma finaliseSlot_ccorres: ccorres_seq_skip) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], - unfolded split_def, unfolded K_def], + unfolded split_def], (simp add: in_monad)+) apply (simp add: from_bool_0) apply simp @@ -867,7 +867,7 @@ lemma finaliseSlot_ccorres: apply (simp add: guard_is_UNIV_def) apply (simp add: conj_comms) apply (wp make_zombie_invs' updateCap_cte_wp_at_cases - updateCap_cap_to' hoare_vcg_disj_lift static_imp_wp)+ + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) @@ -896,7 +896,7 @@ lemma finaliseSlot_ccorres: apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (frule valid_global_refsD_with_objSize, clarsimp) apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] - apply (wp isFinalCapability_inv static_imp_wp | wp (once) isFinal[where x=slot'])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -991,26 +991,23 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_drop_cutMon_bindE) apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg) add: cteDelete_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) apply (rule ccorres_drop_cutMon_bindE) apply (ctac(no_vcg) add: preemptionPoint_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) - apply (rule hyps[unfolded K_def], - (fastforce simp: in_monad)+)[1] + apply (rule hyps; fastforce simp: in_monad) apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp preemptionPoint_invR) apply simp apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp cteDelete_invs' cteDelete_sch_act_simple) apply (rule ccorres_cond_false) diff --git a/proof/crefine/ARM_HYP/Detype_C.thy b/proof/crefine/ARM_HYP/Detype_C.thy index 015982cacc..697df03bf8 100644 --- a/proof/crefine/ARM_HYP/Detype_C.thy +++ b/proof/crefine/ARM_HYP/Detype_C.thy @@ -1541,7 +1541,7 @@ lemma deleteObjects_ccorres': apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: in_monad) apply (rule bexI [rotated]) - apply (rule iffD2 [OF in_monad(20)]) + apply (rule iffD2 [OF in_monad(21)]) apply (rule conjI [OF refl refl]) apply (clarsimp simp: simpler_modify_def) proof - diff --git a/proof/crefine/ARM_HYP/Fastpath_C.thy b/proof/crefine/ARM_HYP/Fastpath_C.thy index e29ea7a080..b125d3047e 100644 --- a/proof/crefine/ARM_HYP/Fastpath_C.thy +++ b/proof/crefine/ARM_HYP/Fastpath_C.thy @@ -769,7 +769,7 @@ lemma switchToThread_fp_ccorres: ceqv, rename_tac "hw_asid_ret") apply (ctac(no_vcg) add: 
armv_contextSwitch_HWASID_ccorres) apply (simp add: storeWordUser_def bind_assoc case_option_If2 split_def del: Collect_const) - apply (simp only: dmo_clearExMonitor_setCurThread_swap dc_def[symmetric]) + apply (simp only: dmo_clearExMonitor_setCurThread_swap) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) @@ -1029,10 +1029,7 @@ lemma ccorres_call_hSkip: apply - apply (rule ccorres_call_hSkip') apply (erule ccorres_guard_imp) - apply simp - apply clarsimp - apply (simp_all add: ggl xfdc_def) - apply (clarsimp simp: igl) + apply (clarsimp simp: ggl igl xfdc_def)+ done lemma bind_case_sum_rethrow: @@ -1821,7 +1818,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_alternative2) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -1856,7 +1852,7 @@ proof - apply (rule ccorres_cond_true_seq) apply (rule ccorres_split_throws) apply (rule ccorres_call_hSkip) - apply (erule disjE; simp flip: dc_def; rule slowpath_ccorres) + apply (erule disjE; simp; rule slowpath_ccorres) apply simp apply simp apply (vcg exspec=slowpath_noreturn_spec) @@ -1871,7 +1867,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1908,7 +1903,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1931,7 +1925,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -1989,29 +1982,25 @@ proof - apply (simp add: ctcb_relation_unat_tcbPriority_C word_less_nat_alt linorder_not_le) apply ceqv - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) - apply (simp add: if_1_0_0 ccap_relation_ep_helpers from_bool_0 word_le_not_less - del: Collect_const cong: call_ignore_cong) + apply (simp add: from_bool_eq_if from_bool_eq_if' from_bool_0 ccorres_IF_True del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp add: bindE_assoc del: Collect_const) apply (rule ccorres_Guard_Seq) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres) - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) + apply (simp add: from_bool_eq_if from_bool_eq_if' from_bool_0 ccorres_IF_True del: Collect_const) apply (clarsimp simp: to_bool_def) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0) + apply (clarsimp simp: from_bool_eq_if' word_le_not_less from_bool_0) apply (clarsimp simp: return_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) - apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) apply (rule_tac P=\ and P'="{s. 
ret__int_' s = 0}" in ccorres_from_vcg) apply (clarsimp simp: isHighestPrio_def' simpler_gets_def) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: from_bool_0) apply clarsimp apply vcg apply (simp add: Collect_const_mem from_bool_eq_if from_bool_eq_if' from_bool_0 if_1_0_0 ccorres_IF_True del: Collect_const) @@ -2025,7 +2014,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (simp add: bindE_assoc from_bool_0 catch_throwError del: Collect_const) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2044,7 +2032,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2059,7 +2046,6 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2088,7 +2074,6 @@ proof - apply (rule ccorres_seq_cond_raise[THEN iffD2]) apply (rule_tac R=\ in ccorres_cond2', blast) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2144,7 +2129,7 @@ proof - ccorres_move_array_assertion_tcb_ctes ccorres_move_c_guard_tcb_ctes)+ apply csymbr - apply (simp add: cteInsert_def bind_assoc dc_def[symmetric] + apply (simp add: cteInsert_def bind_assoc del: Collect_const cong: call_ignore_cong) apply (rule ccorres_pre_getCTE2, rename_tac curThreadReplyCTE) apply (simp only: getThreadState_def) @@ -2267,7 +2252,6 @@ proof - apply csymbr apply csymbr apply (rule ccorres_call_hSkip) - apply (fold dc_def)[1] apply (rule fastpath_restore_ccorres) apply simp apply simp @@ -2294,7 +2278,7 @@ proof - apply (wp updateMDB_weak_cte_wp_at) apply simp apply (vcg exspec=mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged_modifies) - apply (simp add: o_def) + apply simp apply (wp | simp | wp (once) updateMDB_weak_cte_wp_at | wp (once) updateMDB_cte_wp_at_other)+ @@ -2545,7 +2529,7 @@ lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add[where tcb="\s. Ptr (tcb' s)" for tcb', simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)[where tcb="\s. 
Ptr (tcb' s)" for tcb', simplified]] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] @@ -2650,7 +2634,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_alternative2) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -2684,7 +2667,7 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_cond_true_seq) apply (rule ccorres_split_throws) apply (rule ccorres_call_hSkip) - apply (erule disjE; simp flip: dc_def; rule slowpath_ccorres) + apply (erule disjE; simp; rule slowpath_ccorres) apply simp apply simp apply (vcg exspec=slowpath_noreturn_spec) @@ -2699,7 +2682,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres) apply simp @@ -2724,7 +2706,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (rule ccorres_split_throws) apply simp - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2754,7 +2735,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp del: Collect_const not_None_eq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2770,7 +2750,7 @@ lemma fastpath_reply_recv_ccorres: and val="tcb_ptr_to_ctcb_ptr curThread" in ccorres_abstract_known) apply (rule Seq_weak_ceqv, rule Basic_ceqv) - apply (rule rewrite_xfI, clarsimp simp only: o_def) + apply (rule rewrite_xfI) apply (rule refl) apply csymbr apply (rule ccorres_move_c_guard_cte) @@ -2788,7 +2768,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp cong: conj_cong) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2808,7 +2787,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp del: Collect_const not_None_eq) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2830,7 +2808,6 @@ lemma fastpath_reply_recv_ccorres: apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2862,7 +2839,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_cond2'[where R=\], blast) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2877,7 +2853,6 @@ lemma fastpath_reply_recv_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (simp add: pde_stored_asid_def asid_map_pd_to_hwasids_def) apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2908,7 +2883,6 @@ lemma fastpath_reply_recv_ccorres: apply simp apply (rule ccorres_split_throws) - apply (fold dc_def)[1] apply (rule ccorres_call_hSkip) apply (rule slowpath_ccorres, simp+) apply (vcg exspec=slowpath_noreturn_spec) @@ -2948,7 +2922,7 @@ lemma fastpath_reply_recv_ccorres: 
apply ceqv apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac xf'=xfdc and r'=dc in ccorres_split_nothrow) - apply (rule fastpath_enqueue_ccorres[unfolded o_def,simplified]) + apply (rule fastpath_enqueue_ccorres[simplified]) apply simp apply ceqv apply (simp add: liftM_def del: Collect_const cong: call_ignore_cong) @@ -3037,7 +3011,6 @@ lemma fastpath_reply_recv_ccorres: apply csymbr apply csymbr apply (rule ccorres_call_hSkip) - apply (fold dc_def)[1] apply (rule fastpath_restore_ccorres) apply simp apply simp @@ -3062,7 +3035,7 @@ lemma fastpath_reply_recv_ccorres: apply (wp setCTE_cte_wp_at_other) apply (simp del: Collect_const) apply vcg - apply (simp add: o_def) + apply simp apply (wp | simp | wp (once) updateMDB_weak_cte_wp_at | wp (once) updateMDB_cte_wp_at_other)+ diff --git a/proof/crefine/ARM_HYP/Fastpath_Equiv.thy b/proof/crefine/ARM_HYP/Fastpath_Equiv.thy index 1aacccb1a4..9a015a7f92 100644 --- a/proof/crefine/ARM_HYP/Fastpath_Equiv.thy +++ b/proof/crefine/ARM_HYP/Fastpath_Equiv.thy @@ -966,7 +966,7 @@ lemma tcbSchedDequeue_rewrite_not_queued: apply (monadic_rewrite_l monadic_rewrite_if_l_False \wp threadGet_const\) apply (monadic_rewrite_symb_exec_l, rule monadic_rewrite_refl) apply wp+ - apply (clarsimp simp: o_def obj_at'_def) + apply clarsimp done lemma schedule_known_rewrite: @@ -1399,8 +1399,7 @@ lemma fastpath_callKernel_SysReplyRecv_corres: capFaultOnFailure_def rethrowFailure_injection injection_handler_catch bind_bindE_assoc getThreadCallerSlot_def bind_assoc - getSlotCap_def - case_bool_If o_def + getSlotCap_def case_bool_If isRight_def[where x="Inr v" for v] isRight_def[where x="Inl v" for v] cong: if_cong) @@ -1519,9 +1518,9 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply wp apply (rule monadic_rewrite_trans) apply (rule_tac rv=rab_ret - in monadic_rewrite_gets_known[where m="NonDetMonad.lift f" + in monadic_rewrite_gets_known[where m="Nondet_Monad.lift f" for f, folded bindE_def]) - apply (simp add: NonDetMonad.lift_def isRight_case_sum) + apply (simp add: Nondet_Monad.lift_def isRight_case_sum) apply monadic_rewrite_symb_exec_l apply (rename_tac ep_cap2) apply (rule_tac P="cteCap ep_cap2 = cteCap ep_cap" in monadic_rewrite_gen_asm) @@ -1563,8 +1562,8 @@ lemma fastpath_callKernel_SysReplyRecv_corres: setThreadState_no_sch_change setThreadState_obj_at_unchanged sts_st_tcb_at'_cases sts_bound_tcb_at' fastpathBestSwitchCandidate_lift[where f="setThreadState s t" for s t] - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift hoare_vcg_ex_lift | wps)+ apply (strengthen imp_consequent[where Q="tcb_at' t s" for t s]) @@ -1577,8 +1576,8 @@ lemma fastpath_callKernel_SysReplyRecv_corres: emptySlot_cnode_caps user_getreg_inv asUser_typ_ats asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift + hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift hoare_vcg_ex_lift fastpathBestSwitchCandidate_lift[where f="emptySlot a b" for a b] | simp del: comp_apply @@ -1589,8 +1588,8 @@ lemma fastpath_callKernel_SysReplyRecv_corres: apply (clarsimp cong: conj_cong) apply ((wp user_getreg_inv asUser_typ_ats asUser_obj_at_not_queued asUser_obj_at' mapM_x_wp' - static_imp_wp hoare_vcg_all_lift hoare_vcg_imp_lift - static_imp_wp cnode_caps_gsCNodes_lift + hoare_weak_lift_imp 
hoare_vcg_all_lift hoare_vcg_imp_lift + hoare_weak_lift_imp cnode_caps_gsCNodes_lift hoare_vcg_ex_lift | clarsimp simp: obj_at'_weakenE[OF _ TrueI] | solves \ diff --git a/proof/crefine/ARM_HYP/Finalise_C.thy b/proof/crefine/ARM_HYP/Finalise_C.thy index 5be106d4f6..31b8baa885 100644 --- a/proof/crefine/ARM_HYP/Finalise_C.thy +++ b/proof/crefine/ARM_HYP/Finalise_C.thy @@ -200,8 +200,7 @@ proof (induct ts) apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. UNIV", simplified]) apply ceqv - apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def - dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) apply simp done @@ -210,7 +209,7 @@ next show ?case apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (simp del: Collect_const - add: dc_def[symmetric] mapM_x_Cons) + add: mapM_x_Cons) apply (rule ccorres_guard_imp2) apply (rule_tac xf'=thread_' in ccorres_abstract) apply ceqv @@ -278,10 +277,10 @@ lemma cancelAllIPC_ccorres: apply (cinit lift: epptr_') apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) apply (rule_tac xf'=ret__unsigned_' - and val="case rv of IdleEP \ scast EPState_Idle + and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" - and R="ko_at' rv epptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ep]) @@ -290,8 +289,8 @@ lemma cancelAllIPC_ccorres: apply (simp add: cendpoint_relation_def Let_def split: endpoint.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv epptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc apply (rename_tac list) apply (simp add: endpoint_state_defs @@ -324,7 +323,7 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -341,12 +340,10 @@ lemma cancelAllIPC_ccorres: apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: endpoint_state_defs - Collect_False Collect_True - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -374,7 +371,7 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -400,11 +397,6 @@ lemma cancelAllIPC_ccorres: apply clarsimp done -lemma empty_fail_getNotification: - "empty_fail (getNotification ep)" - unfolding getNotification_def - by (auto intro: empty_fail_getObject) - lemma cancelAllSignals_ccorres: "ccorres dc xfdc (invs') (UNIV \ {s. 
ntfnPtr_' s = Ptr ntfnptr}) [] @@ -412,10 +404,10 @@ lemma cancelAllSignals_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule_tac xf'=ret__unsigned_' - and val="case ntfnObj rv of IdleNtfn \ scast NtfnState_Idle + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" - and R="ko_at' rv ntfnptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ntfn]) @@ -424,18 +416,15 @@ lemma cancelAllSignals_ccorres: apply (simp add: cnotification_relation_def Let_def split: ntfn.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv ntfnptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric] Collect_True + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -443,8 +432,8 @@ lemma cancelAllSignals_ccorres: apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) - apply (rule_tac P="ko_at' rv ntfnptr and invs'" - in ccorres_from_vcg[where P'=UNIV]) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) @@ -461,7 +450,7 @@ lemma cancelAllSignals_ccorres: subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -688,8 +677,8 @@ lemma doUnbindNotification_ccorres: (Call doUnbindNotification_'proc)" apply (cinit' lift: ntfnPtr_' tcbptr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr" and P'=UNIV - in ccorres_split_nothrow_novcg) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: option_to_ptr_def option_to_0_def) @@ -708,7 +697,7 @@ lemma doUnbindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv", ((simp add: option_to_ctcb_ptr_def)+)[4]) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) subgoal by (simp add: carch_state_relation_def typ_heap_simps') subgoal by (simp add: cmachine_state_relation_def) subgoal by (simp add: 
h_t_valid_clift_Some_iff) @@ -719,7 +708,7 @@ lemma doUnbindNotification_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -769,7 +758,7 @@ lemma doUnbindNotification_ccorres': apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -804,9 +793,9 @@ lemma unbindNotification_ccorres: apply simp apply wpc apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (rule ccorres_cond_true) - apply (ctac (no_vcg) add: doUnbindNotification_ccorres[unfolded dc_def, simplified]) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) apply (wp gbn_wp') apply vcg apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def @@ -823,13 +812,13 @@ lemma unbindMaybeNotification_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule ccorres_rhs_assoc2) - apply (rule_tac P="ntfnBoundTCB rv \ None \ - option_to_ctcb_ptr (ntfnBoundTCB rv) \ NULL" - in ccorres_gen_asm) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) apply (rule_tac xf'=boundTCB_' - and val="option_to_ctcb_ptr (ntfnBoundTCB rv)" - and R="ko_at' rv ntfnptr and valid_bound_tcb' (ntfnBoundTCB rv)" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1[OF cmap_relation_ntfn]) @@ -1026,7 +1015,6 @@ lemma invalidateASIDEntry_ccorres: apply (rule order_le_less_trans, rule word_and_le1) apply (simp add: mask_def) apply (rule ccorres_return_Skip) - apply (fold dc_def) apply (ctac add: invalidateASID_ccorres) apply wp apply (simp add: guard_is_UNIV_def) @@ -1059,8 +1047,7 @@ lemma deleteASIDPool_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) apply (rule ccorres_assert) - apply (clarsimp simp: liftM_def dc_def[symmetric] fun_upd_def[symmetric] - when_def + apply (clarsimp simp: liftM_def fun_upd_def[symmetric] when_def simp del: Collect_const) apply (rule ccorres_Guard)+ apply (rule ccorres_pre_gets_armKSASIDTable_ksArchState) @@ -1207,14 +1194,12 @@ lemma deleteASID_ccorres: apply ceqv apply csymbr apply wpc - apply (simp add: ccorres_cond_iffs dc_def[symmetric] - Collect_False + apply (simp add: ccorres_cond_iffs Collect_False del: Collect_const cong: call_ignore_cong) apply (rule ccorres_cond_false) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] when_def - Collect_True liftM_def + apply (simp add: when_def Collect_True liftM_def cong: conj_cong call_ignore_cong del: Collect_const) apply (rule ccorres_pre_getObject_asidpool) @@ -1310,7 +1295,7 @@ lemma deleteASID_ccorres: lemma setObject_ccorres_lemma: fixes val :: "'a :: pspace_storable" shows - "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := ksPSpace s (ptr \ injectKO val) \, s') \ rf_sr},{}; + "\ \s. \ \ (Q s) c {s'. 
(s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val) \, s') \ rf_sr},{}; \s s' val (val' :: 'a). \ ko_at' val' ptr s; (s, s') \ rf_sr \ \ s' \ Q s; \val :: 'a. updateObject val = updateObject_default val; @@ -1434,7 +1419,7 @@ lemma unmapPageTable_ccorres: apply (ctac(no_vcg) add: pageTableMapped_ccorres) apply wpc apply (simp add: option_to_ptr_def option_to_0_def ccorres_cond_iffs) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (simp add: option_to_ptr_def option_to_0_def ccorres_cond_iffs) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -1444,7 +1429,6 @@ lemma unmapPageTable_ccorres: apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def pde_lift_pde_invalid) - apply (fold dc_def) apply csymbr apply (ctac add: cleanByVA_PoU_ccorres) apply (ctac(no_vcg) add:flushTable_ccorres) @@ -1481,12 +1465,6 @@ lemma no_0_pd_at'[elim!]: apply (drule spec[where x=0], clarsimp) done -lemma ccte_relation_ccap_relation: - "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" - by (clarsimp simp: ccte_relation_def ccap_relation_def - cte_to_H_def map_option_Some_eq2 - c_valid_cte_def) - lemma isFinalCapability_ccorres: "ccorres ((=) \ from_bool) ret__unsigned_long_' (cte_wp_at' ((=) cte) slot and invs') @@ -1583,7 +1561,7 @@ lemma cteDeleteOne_ccorres: erule_tac t="ret__unsigned = scast cap_null_cap" and s="cteCap cte = NullCap" in ssubst) - apply (clarsimp simp only: when_def unless_def dc_def[symmetric]) + apply (clarsimp simp only: when_def unless_def) apply (rule ccorres_cond2[where R=\]) apply (clarsimp simp: Collect_const_mem) apply (rule ccorres_rhs_assoc)+ @@ -1594,12 +1572,11 @@ lemma cteDeleteOne_ccorres: apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" in ccorres_guard_imp2[where A'=UNIV]) - apply (simp add: split_def dc_def[symmetric] - del: Collect_const) + apply (simp add: split_def del: Collect_const) apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) apply (rule ccorres_assert) - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (ctac add: emptySlot_ccorres) apply (simp add: pred_conj_def finaliseCapTrue_standin_simple_def) @@ -1636,7 +1613,7 @@ lemma deletingIRQHandler_ccorres: (UNIV \ {s. irq_opt_relation (Some irq) (irq_' s)}) [] (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) - apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def dc_def[symmetric] + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def cong: call_ignore_cong ) apply (rule_tac r'="\rv rv'. 
rv' = Ptr rv" and xf'="slot_'" in ccorres_split_nothrow) @@ -1730,7 +1707,7 @@ lemma option_to_ctcb_ptr_not_0: done lemma update_tcb_map_to_tcb: - "map_to_tcbs (ksPSpace s(p \ KOTCB tcb)) + "map_to_tcbs ((ksPSpace s)(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) @@ -1770,7 +1747,7 @@ lemma sched_queue_relation_shift: lemma cendpoint_relation_udpate_arch: "\ cslift x p = Some tcb ; cendpoint_relation (cslift x) v v' \ - \ cendpoint_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cendpoint_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def split: endpoint.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1781,7 +1758,7 @@ lemma cendpoint_relation_udpate_arch: lemma cnotification_relation_udpate_arch: "\ cslift x p = Some tcb ; cnotification_relation (cslift x) v v' \ - \ cnotification_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cnotification_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cnotification_relation_def Let_def tcb_queue_relation'_def split: notification.splits ntfn.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1822,7 +1799,7 @@ lemma archThreadSet_tcbVCPU_Basic_ccorres: (* MOVE *) lemma update_vcpu_map_to_vcpu: - "map_to_vcpus (ksPSpace s(p \ KOArch (KOVCPU vcpu))) + "map_to_vcpus ((ksPSpace s)(p \ KOArch (KOVCPU vcpu))) = (map_to_vcpus (ksPSpace s))(p \ vcpu)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) @@ -1919,7 +1896,6 @@ lemma vcpuInvalidateActive_ccorres: "ccorres dc xfdc invs' UNIV hs vcpuInvalidateActive (Call vcpu_invalidate_active_'proc)" - supply dc_simp[simp del] apply cinit apply (rule ccorres_pre_getCurVCPU) apply (subst modify_armHSCurVCPU_when_split) @@ -1993,7 +1969,6 @@ lemma dissociateVCPUTCB_ccorres: (UNIV \ {s. tcb_' s = tcb_ptr_to_ctcb_ptr tptr } \ {s. vcpu_' s = vcpu_Ptr vcpuptr }) hs (dissociateVCPUTCB vcpuptr tptr) (Call dissociateVCPUTCB_'proc)" - supply dc_simp[simp del] apply (cinit lift: tcb_' vcpu_') apply (rule ccorres_pre_archThreadGet, rename_tac tcbVCPU) apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) @@ -2100,7 +2075,6 @@ lemma associateVCPUTCB_ccorres: (UNIV \ {s. tcb_' s = tcb_ptr_to_ctcb_ptr tptr } \ {s. vcpu_' s = vcpu_Ptr vcpuptr }) hs (associateVCPUTCB vcpuptr tptr) (Call associateVCPUTCB_'proc)" - supply dc_simp[simp del] apply (cinit lift: tcb_' vcpu_') apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_pre_archThreadGet, rename_tac tcbVCPU) @@ -2197,7 +2171,6 @@ lemma vcpuFinalise_ccorres: "ccorres dc xfdc (invs' and vcpu_at' vcpuptr) ({s. vcpu_' s = Ptr vcpuptr}) [] (vcpuFinalise vcpuptr) (Call vcpu_finalise_'proc)" - supply dc_simp[simp del] apply (cinit lift: vcpu_') apply (rule ccorres_move_c_guard_vcpu) apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) @@ -2250,7 +2223,7 @@ method return_NullCap_pair_ccorres = (rule allI, rule conseqPre, vcg), (clarsimp simp: return_def ccap_relation_NullCap_iff)\ lemma Arch_finaliseCap_ccorres: - notes dc_simp[simp del] Collect_const[simp del] + notes Collect_const[simp del] shows "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) @@ -2645,7 +2618,6 @@ lemma prepareThreadDelete_ccorres: (invs' and tcb_at' thread) (UNIV \ {s. 
thread_' s = tcb_ptr_to_ctcb_ptr thread}) hs (prepareThreadDelete thread) (Call Arch_prepareThreadDelete_'proc)" - supply dc_simp[simp del] apply (cinit lift: thread_', rename_tac cthread) apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_pre_archThreadGet, rename_tac vcpuopt) @@ -2821,18 +2793,18 @@ lemma finaliseCap_ccorres: apply (rule ccorres_fail) apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ceqv_refl) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) @@ -2895,7 +2867,6 @@ lemma Arch_checkIRQ_ccorres: length_ineq_not_Nil hd_conv_nth cast_simps del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: throwError_bind) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg apply (rule conseqPre, vcg) diff --git a/proof/crefine/ARM_HYP/Interrupt_C.thy b/proof/crefine/ARM_HYP/Interrupt_C.thy index c6f6354364..d02130cd52 100644 --- a/proof/crefine/ARM_HYP/Interrupt_C.thy +++ b/proof/crefine/ARM_HYP/Interrupt_C.thy @@ -75,12 +75,12 @@ proof - apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"]) apply (rule ccorres_call) - apply (rule cteInsert_ccorres[simplified dc_def]) + apply (rule cteInsert_ccorres) apply simp apply simp apply simp apply (simp add: pred_conj_def) - apply (strengthen ntfn_badge_derived_enough_strg[unfolded o_def] + apply (strengthen ntfn_badge_derived_enough_strg invs_mdb_strengthen' valid_objs_invs'_strg) apply (wp cteDeleteOne_other_cap[unfolded o_def])[1] apply vcg @@ -112,7 +112,7 @@ lemma invokeIRQHandler_ClearIRQHandler_ccorres: apply simp apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) apply (rule ccorres_symb_exec_r) - apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def]) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) diff --git a/proof/crefine/ARM_HYP/Invoke_C.thy b/proof/crefine/ARM_HYP/Invoke_C.thy index 14ee6b3576..e9b38cf4ee 100644 --- a/proof/crefine/ARM_HYP/Invoke_C.thy +++ b/proof/crefine/ARM_HYP/Invoke_C.thy @@ -65,10 +65,10 @@ lemma setDomain_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_return_Skip) apply (simp add: when_def) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply simp apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) @@ -76,13 +76,16 @@ lemma setDomain_ccorres: apply simp apply wp apply (rule_tac Q="\_. 
all_invs_but_sch_extra and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s)" in hoare_strengthen_post) + and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp threadSet_all_invs_but_sch_extra) - apply (clarsimp simp:valid_pspace_valid_objs' st_tcb_at_def[symmetric] - sch_act_simple_def st_tcb_at'_def o_def weak_sch_act_wf_def split:if_splits) + apply (clarsimp simp: valid_pspace_valid_objs' st_tcb_at_def[symmetric] + sch_act_simple_def st_tcb_at'_def weak_sch_act_wf_def + split: if_splits) apply (simp add: guard_is_UNIV_def) apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" in hoare_strengthen_post) + and (\s. curThread = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" + in hoare_strengthen_post) apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_not_queued tcbSchedDequeue_not_in_queue hoare_vcg_imp_lift hoare_vcg_all_lift) apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) @@ -388,7 +391,7 @@ lemma invokeCNodeRotate_ccorres: apply clarsimp apply (simp add: return_def) apply wp - apply (simp add: guard_is_UNIV_def dc_def xfdc_def) + apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp) apply (clarsimp simp:cte_wp_at_ctes_of) @@ -637,8 +640,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const cong: call_ignore_cong) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc | csymbr)+ - apply (simp add: invocationCatch_use_injection_handler[symmetric, unfolded o_def] - dc_def[symmetric] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) apply (simp add:if_P del: Collect_const) @@ -720,8 +722,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: Collect_const[symmetric] del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] - if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: list_case_helper injection_handler_returnOk @@ -748,8 +749,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError whenE_def - dc_def[symmetric]) + apply (simp add: injection_handler_throwError whenE_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -825,8 +825,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: whenE_def injection_handler_returnOk - invocationCatch_def injection_handler_throwError - dc_def[symmetric]) + invocationCatch_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -905,7 +904,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: flip: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply 
(simp add: if_not_P del: Collect_const) @@ -924,8 +923,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric] numeral_eqs) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1024,13 +1022,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_returnOk bindE_assoc - injection_bindE[OF refl refl] split_def - dc_def[symmetric]) + injection_bindE[OF refl refl] split_def) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc)+ apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) - apply (simp add: ccorres_invocationCatch_Inr performInvocation_def - dc_def[symmetric] bindE_assoc) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (ctac(no_vcg) add: invokeCNodeSaveCaller_ccorres) apply (rule ccorres_alternative2) @@ -1039,7 +1035,7 @@ lemma decodeCNodeInvocation_ccorres: apply (wp sts_valid_pspace_hangers)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_split_throws) apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg @@ -1069,8 +1065,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: unlessE_def whenE_def injection_handler_throwError - dc_def[symmetric] from_bool_0) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: unlessE_def whenE_def injection_handler_returnOk @@ -1114,12 +1109,10 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: throwError_def return_def exception_defs syscall_error_rel_def syscall_error_to_H_cases) apply clarsimp - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const) apply csymbr apply (simp add: interpret_excaps_test_null excaps_map_def - if_1_0_0 dc_def[symmetric] del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def) @@ -1179,8 +1172,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const) apply csymbr apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1242,8 +1234,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1251,8 +1242,7 @@ lemma 
decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1266,7 +1256,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply wp apply (vcg exspec=invokeCNodeRotate_modifies) - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) apply (simp add: Collect_const_mem) @@ -1330,16 +1320,16 @@ lemma decodeCNodeInvocation_ccorres: apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp @@ -1354,7 +1344,7 @@ lemma decodeCNodeInvocation_ccorres: apply vcg apply simp apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R - hoare_vcg_all_lift_R lsfco_cte_at' static_imp_wp + hoare_vcg_all_lift_R lsfco_cte_at' hoare_weak_lift_imp | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen cong: conj_cong | wp (once) hoare_drop_imps)+ @@ -1473,7 +1463,7 @@ lemma seL4_MessageInfo_lift_def2: lemma globals_update_id: "globals_update (t_hrs_'_update (hrs_htd_update id)) x = x" - by (simp add:id_def hrs_htd_update_def) + by (simp add: hrs_htd_update_def) lemma getObjectSize_spec: "\s. \\\s. \t \ of_nat (length (enum::object_type list) - 1)\ Call getObjectSize_'proc @@ -1530,7 +1520,7 @@ shows "\ctes_of (s::kernel_state) (ptr_val p) = Some cte; is_aligned ptr bits; bits < word_bits; {ptr..ptr + 2 ^ bits - 1} \ {ptr_val p..ptr_val p + mask cteSizeBits} = {}; ((clift hp) :: (cte_C ptr \ cte_C)) p = Some to\ \ (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: (cte_C ptr \ cte_C)) p = Some to" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def restrict_map_def split:if_splits) + apply (clarsimp simp:lift_t_def lift_typ_heap_def restrict_map_def split:if_splits) apply (intro conjI impI) apply (case_tac hp) apply (clarsimp simp:typ_clear_region_def hrs_htd_update_def) @@ -1715,7 +1705,7 @@ lemma clearMemory_untyped_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply csymbr - apply (ctac add: cleanCacheRange_RAM_ccorres[unfolded dc_def]) + apply (ctac add: cleanCacheRange_RAM_ccorres) apply wp apply (simp add: guard_is_UNIV_def unat_of_nat word_bits_def capAligned_def word_of_nat_less) @@ -1920,8 +1910,7 @@ lemma resetUntypedCap_ccorres: apply (rule ccorres_Guard_Seq[where S=UNIV])? 
apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow) - apply (rule_tac idx="capFreeIndex (cteCap cte)" - in deleteObjects_ccorres[where p=slot, unfolded o_def]) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) apply ceqv apply clarsimp apply (simp only: ccorres_seq_cond_raise) @@ -2852,7 +2841,6 @@ lemma Arch_isFrameType_spec: apply (auto simp: object_type_from_H_def ) done - lemma decodeUntypedInvocation_ccorres_helper: notes TripleSuc[simp] untypedBits_defs[simp] notes valid_untyped_inv_wcap'.simps[simp del] tl_drop_1[simp] @@ -3031,8 +3019,8 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (ctac add: ccorres_injection_handler_csum1 [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def]) apply (simp add: injection_liftE[OF refl]) - apply (simp add: liftE_liftM o_def split_def withoutFailure_def - hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (simp add: liftE_liftM split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) apply (rule ccorres_nohs) apply (rule ccorres_getSlotCap_cte_at) apply (rule ccorres_move_c_guard_cte) @@ -3144,8 +3132,8 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (simp add: ccorres_cond_iffs returnOk_def) apply (rule ccorres_return_Skip') apply (rule ccorres_Guard_Seq ccorres_rhs_assoc)+ - apply (simp add: ccorres_cond_iffs inl_rrel_inl_rrel) - apply (rule ccorres_return_C_errorE_inl_rrel, simp+)[1] + apply (simp add: ccorres_cond_iffs) + apply (rule ccorres_return_C_errorE_inl_rrel; simp) apply wp apply (simp add: all_ex_eq_helper) apply (vcg exspec=ensureEmptySlot_modifies) @@ -3242,8 +3230,7 @@ lemma decodeUntypedInvocation_ccorres_helper: performInvocation_def liftE_bindE bind_assoc) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_trim_returnE, (simp (no_asm))+) - apply (simp (no_asm) add: o_def dc_def[symmetric] bindE_assoc - id_def[symmetric] bind_bindE_assoc) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) apply (rule ccorres_seq_skip'[THEN iffD1]) apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) apply (rule ccorres_alternative2) @@ -3291,7 +3278,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule conseqPre,vcg,clarsimp) apply vcg apply (rule ccorres_guard_imp[where Q =\ and Q' = UNIV,rotated], assumption+) - apply (simp add: o_def) + apply simp apply simp apply (rule checkFreeIndex_wp) apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap @@ -3356,7 +3343,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule validE_R_validE) apply (wp injection_wp_E[OF refl]) apply clarsimp - apply (simp add: ccHoarePost_def xfdc_def) + apply (simp add: ccHoarePost_def) apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) apply (rule_tac V=UNIV in HoarePartial.reannotateWhileNoGuard) apply (vcg exspec=ensureEmptySlot_modifies) @@ -3484,7 +3471,7 @@ shows apply (rule ccorres_guard_imp2) apply (rule monadic_rewrite_ccorres_assemble) apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer - in decodeUntypedInvocation_ccorres_helper[unfolded K_def]) + in decodeUntypedInvocation_ccorres_helper) apply assumption apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_bind_head) diff --git a/proof/crefine/ARM_HYP/IpcCancel_C.thy b/proof/crefine/ARM_HYP/IpcCancel_C.thy index c709b39058..ba7082881e 100644 --- a/proof/crefine/ARM_HYP/IpcCancel_C.thy +++ b/proof/crefine/ARM_HYP/IpcCancel_C.thy @@ -1043,7 +1043,6 @@ proof - apply (rule 
ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1166,7 +1165,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -1406,7 +1405,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1679,7 +1677,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -1811,7 +1809,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1919,7 +1916,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -2309,11 +2306,6 @@ lemma getCurDomain_maxDom_ccorres_dom_': rf_sr_ksCurDomain) done -lemma rf_sr_cscheduler_action_relation: - "(s, s') \ rf_sr - \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" - by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - lemma threadGet_get_obj_at'_has_domain: "\ tcb_at' t \ threadGet tcbDomain t \\rv. obj_at' (\tcb. rv = tcbDomain tcb) t\" by (wp threadGet_obj_at') (simp add: obj_at'_def) @@ -2330,7 +2322,6 @@ lemma possibleSwitchTo_ccorres: (Call possibleSwitchTo_'proc)" supply if_split [split del] if_cong[cong] supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] if_1_0_0[simp] @@ -2355,7 +2346,7 @@ lemma possibleSwitchTo_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule_tac R="\s. sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" in ccorres_cond) - apply (fastforce dest!: rf_sr_cscheduler_action_relation pred_tcb_at' tcb_at_not_NULL + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL simp: cscheduler_action_relation_def weak_sch_act_wf_def split: scheduler_action.splits) apply (ctac add: rescheduleRequired_ccorres) @@ -2685,7 +2676,7 @@ lemma cmap_relation_ep: by (simp add: Let_def) (* FIXME: MOVE *) -lemma ccorres_pre_getEndpoint [corres_pre]: +lemma ccorres_pre_getEndpoint [ccorres_pre]: assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" shows "ccorres r xf (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) @@ -2826,8 +2817,8 @@ lemma cpspace_relation_ep_update_an_ep: and pal: "pspace_aligned' s" "pspace_distinct' s" and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using cp koat pal rel unfolding cmap_relation_def apply - apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) @@ -2849,8 +2840,8 @@ lemma cpspace_relation_ep_update_ep: and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using invs apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) apply clarsimp+ @@ -2862,15 +2853,15 @@ lemma cpspace_relation_ep_update_ep': fixes ep :: "endpoint" and ep' :: "endpoint" and epptr :: "word32" and s :: "kernel_state" defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" - defines "s' \ s\ksPSpace := ksPSpace s(epptr \ KOEndpoint ep')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" assumes koat: "ko_at' ep epptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and srs: "sym_refs (state_refs_of' s')" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" proof - from koat have koat': "ko_at' ep' epptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -2944,7 +2935,7 @@ lemma cancelIPC_ccorres_helper: apply (rule allI) apply (rule conseqPre) apply vcg - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ep_blocked_in_queueD) apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) apply (elim conjE) @@ -2962,7 +2953,7 @@ lemma cancelIPC_ccorres_helper: apply assumption+ apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) - apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split simp del: comp_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) apply (frule null_ep_queue [simplified comp_def] null_ep_queue) apply (intro impI conjI allI) \ \empty case\ @@ -3103,7 +3094,6 @@ lemma cancelIPC_ccorres1: apply wpc \ \BlockedOnReceive\ apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs cong: call_ignore_cong) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -3132,7 +3122,6 @@ lemma cancelIPC_ccorres1: apply (simp add: "StrictC'_thread_state_defs" ccorres_cond_iffs Collect_False Collect_True word_sle_def cong: call_ignore_cong del: Collect_const) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr 
apply csymbr @@ -3172,14 +3161,12 @@ lemma cancelIPC_ccorres1: apply (rule ccorres_Cond_rhs) apply (simp add: nullPointer_def when_def) apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) - apply (simp only: dc_def[symmetric]) apply (rule ccorres_symb_exec_r) apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) apply (wp | simp)+ - apply (simp add: when_def nullPointer_def dc_def[symmetric]) apply (rule ccorres_return_Skip) apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs) @@ -3192,7 +3179,8 @@ lemma cancelIPC_ccorres1: apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def Kernel_C.tcbReply_def tcbCNodeEntries_def) \ \BlockedOnNotification\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong) apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg)) apply clarsimp @@ -3201,10 +3189,12 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Running, Inactive, and Idle\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip)+ \ \BlockedOnSend\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong) \ \clag\ apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -3230,7 +3220,8 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Restart\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip) \ \Post wp proofs\ apply vcg diff --git a/proof/crefine/ARM_HYP/Ipc_C.thy b/proof/crefine/ARM_HYP/Ipc_C.thy index a69c20338d..8e8a9c706c 100644 --- a/proof/crefine/ARM_HYP/Ipc_C.thy +++ b/proof/crefine/ARM_HYP/Ipc_C.thy @@ -799,7 +799,7 @@ begin (* FIXME: move *) lemma ccorres_merge_return: - "ccorres (\a c. r (f a) c) xf P P' hs H C \ + "ccorres (r \ f) xf P P' hs H C \ ccorres r xf P P' hs (do x \ H; return (f x) od) C" by (rule ccorres_return_into_rel) @@ -1665,53 +1665,54 @@ proof - apply ceqv apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) - apply (rule ccorres_rel_imp[where r = "\rv rv'. True", simplified]) - apply (rule_tac F="\_. obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_HYP_H.syscallMessage = msg) - sender and valid_pspace' - and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" - in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) - apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters - option_to_0_def liftM_def[symmetric] - split: option.split_asm) - apply (rule ccorres_guard_imp2) - apply (rule_tac t=sender and r="ARM_HYP_H.syscallMessage ! (n + unat n_msgRegisters)" - in ccorres_add_getRegister) - apply (ctac(no_vcg)) - apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" - in ccorres_cross_over_guard) - apply (rule ccorres_move_array_assertion_ipc_buffer - | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ - apply (simp add: storeWordUser_def) - apply (rule ccorres_pre_stateAssert) - apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) - apply (simp add: pred_conj_def) - apply (wp user_getreg_rv) - apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def - syscallMessage_ccorres msgRegisters_ccorres - unat_add_lem[THEN iffD1] unat_of_nat32 - word_bits_def word_size_def) - apply (simp only:field_simps imp_ex imp_conjL) - apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def - pointerInUserData_h_t_valid - atcbContextGet_def - projectKOs objBits_simps word_less_nat_alt - unat_add_lem[THEN iffD1] unat_of_nat) - apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def - MessageID_Syscall_def - msg_align_bits valid_ipc_buffer_ptr'_def) - apply (erule aligned_add_aligned) - apply (rule aligned_add_aligned[where n=2]) - apply (simp add: is_aligned_def) - apply (rule is_aligned_mult_triv2 [where n=2, simplified]) - apply (simp add: wb_gt_2)+ - apply (simp add: n_msgRegisters_def) - apply (vcg exspec=getRegister_modifies) - apply simp - apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) - apply (rule hoare_pre) - apply (wp hoare_case_option_wp | wpc)+ - apply clarsimp - apply (simp add: n_msgRegisters_def word_bits_def) + apply (rule ccorres_rel_imp) + apply (rule_tac F="\_. obj_at' (\tcb. map ((atcbContext o tcbArch) tcb) ARM_HYP_H.syscallMessage = msg) + sender and valid_pspace' + and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" + in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) + apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters + option_to_0_def liftM_def[symmetric] + split: option.split_asm) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="ARM_HYP_H.syscallMessage ! (n + unat n_msgRegisters)" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" + in ccorres_cross_over_guard) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ + apply (simp add: storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) + apply (simp add: pred_conj_def) + apply (wp user_getreg_rv) + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + syscallMessage_ccorres msgRegisters_ccorres + unat_add_lem[THEN iffD1] unat_of_nat32 + word_bits_def word_size_def) + apply (simp only:field_simps imp_ex imp_conjL) + apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def + pointerInUserData_h_t_valid + atcbContextGet_def + projectKOs objBits_simps word_less_nat_alt + unat_add_lem[THEN iffD1] unat_of_nat) + apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def + MessageID_Syscall_def + msg_align_bits valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n=2]) + apply (simp add: is_aligned_def) + apply (rule is_aligned_mult_triv2 [where n=2, simplified]) + apply (simp add: wb_gt_2)+ + apply (simp add: n_msgRegisters_def) + apply (vcg exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) + apply (rule hoare_pre) + apply (wp hoare_case_option_wp | wpc)+ + apply clarsimp + apply (simp add: n_msgRegisters_def word_bits_def) + apply simp apply (simp add: n_msgRegisters_def) apply (frule (1) option_to_0_imp) apply (subst drop_zip) @@ -1719,7 +1720,7 @@ proof - apply (clarsimp simp: n_msgRegisters_def numeral_eqs mapM_cong[OF msg_aux, simplified numeral_eqs]) apply (subst mapM_x_return_gen[where w2="()"]) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp) apply (rule hoare_impI) apply (rule mapM_x_wp_inv) @@ -1809,7 +1810,7 @@ proof - apply (simp add: zip_upt_Cons guard_is_UNIVI seL4_VMFault_FSR_def split: list.split_asm) apply (simp split: list.split) apply (wp setMR_tcbFault_obj_at asUser_inv[OF getRestartPC_inv] - hoare_case_option_wp static_imp_wp + hoare_case_option_wp hoare_weak_lift_imp | simp add: option_to_ptr_def guard_is_UNIVI seL4_VMFault_PrefetchFault_def seL4_VMFault_Addr_def @@ -2282,7 +2283,7 @@ lemma doFaultTransfer_ccorres [corres]: apply ceqv apply csymbr apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply wp apply (simp add: badgeRegister_def ARM_HYP.badgeRegister_def "StrictC'_register_defs") @@ -2320,7 +2321,7 @@ lemma unifyFailure_ccorres: assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" shows "ccorres ((\_. dc) \ r) xf P P' hs (unifyFailure a) c" using corr_ac - apply (simp add: unifyFailure_def rethrowFailure_def const_def o_def + apply (simp add: unifyFailure_def rethrowFailure_def const_def handleE'_def throwError_def) apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def split: xstate.splits sum.splits) @@ -3357,10 +3358,11 @@ lemma ccorres_sequenceE_while': Basic (\s. i_'_update (\_. 
i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3414,9 +3416,10 @@ proof - apply (rule ccorres_symb_exec_r) apply csymbr apply (rename_tac "lngth") - apply (simp add: mi_from_H_def mapME_def del: Collect_const cong: bind_apply_cong) + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="length rv = unat word2" in ccorres_gen_asm) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) apply csymbr apply (rule ccorres_rhs_assoc2) apply (rule ccorres_add_returnOk2, @@ -3426,7 +3429,7 @@ proof - and Q="UNIV" and F="\n s. valid_pspace' s \ tcb_at' thread s \ (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ - (\m < length rv. user_word_at (rv ! m) + (\m < length xs. user_word_at (xs ! m) (x2 + (of_nat m + (msgMaxLength + 2)) * 4) s)" in ccorres_sequenceE_while') apply (simp add: split_def) @@ -3436,7 +3439,7 @@ proof - apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) apply (ctac add: capFaultOnFailure_ccorres [OF lookupSlotForThread_ccorres']) - apply (rule_tac P="is_aligned rva 4" in ccorres_gen_asm) + apply (rule_tac P="is_aligned rv 4" in ccorres_gen_asm) apply (simp add: ccorres_cond_iffs liftE_bindE) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) apply (rule_tac P'="UNIV \ {s. excaps_map ys @@ -3457,7 +3460,7 @@ proof - apply (clarsimp simp: ccorres_cond_iffs) apply (rule_tac P= \ and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ - rv' = (rv ! length ys)}" + rv' = (xs ! length ys)}" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def) @@ -3498,8 +3501,7 @@ proof - apply ceqv apply (simp del: Collect_const) apply (rule_tac P'="{s. snd rv'=?curr s}" - and P="\s. length rva = length rv - \ (\x \ set rva. snd x \ 0)" + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def @@ -3594,7 +3596,7 @@ proof - apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' canGrant_' badge_' endpoint_' cong: call_ignore_cong) - apply (clarsimp cong: call_ignore_cong simp del: dc_simp) + apply (clarsimp cong: call_ignore_cong) apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" and r'="\c c'. interpret_excaps c' = excaps_map c" @@ -3639,7 +3641,7 @@ proof - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def msgLengthBits_def word_bw_assocs) - apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] static_imp_wp + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp | simp)+ apply (simp add: Collect_const_mem) apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def @@ -3703,7 +3705,6 @@ lemma replyFromKernel_error_ccorres [corres]: apply ((rule ccorres_Guard_Seq)+)? 
apply csymbr apply (rule ccorres_abstract_cleanup) - apply (fold dc_def)[1] apply (rule setMessageInfo_ccorres) apply wp apply (simp add: Collect_const_mem) @@ -3772,12 +3773,10 @@ lemma doIPCTransfer_ccorres [corres]: apply simp_all[3] apply ceqv apply csymbr - apply (fold dc_def)[1] apply ctac apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero) - apply (fold dc_def)[1] apply ctac apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender @@ -3810,7 +3809,7 @@ lemma Arch_getSanitiseRegisterInfo_ccorres: apply (cinit' lift: thread_' simp: getSanitiseRegisterInfo_def2) apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_pre_archThreadGet) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. v \ Some 0" in ccorres_cross_over_guard) apply (rule ccorres_return_C, simp+) apply (clarsimp simp: typ_heap_simps ctcb_relation_def carch_tcb_relation_def) apply (rule conjI) @@ -3851,7 +3850,7 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply (rule ccorres_rhs_assoc2) apply (simp add: MessageID_Exception_def) apply ccorres_rewrite - apply (subst bind_return_unit) + apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_zipWithM_x_while) apply clarsimp @@ -3904,7 +3903,7 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) n_msgRegisters_def of_nat_less_iff) apply ccorres_rewrite - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (wp mapM_wp') apply clarsimp+ apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def @@ -4059,7 +4058,6 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply (subst aligned_add_aligned, assumption) apply (rule is_aligned_mult_triv2[where n=2, simplified]) apply (simp add: msg_align_bits) - apply (simp add: of_nat_unat[simplified comp_def]) apply (simp only: n_msgRegisters_def) apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def word_unat.Rep_inverse[of "scast _ :: 'a word"] @@ -4098,8 +4096,8 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply simp apply (subst option.split[symmetric,where P=id, simplified]) apply (rule valid_drop_case) - apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified K_def] - lookupIPCBuffer_not_Some_0[simplified K_def]) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) apply (simp add: length_syscallMessage length_msgRegisters n_syscallMessage_def @@ -4111,7 +4109,7 @@ apply (ctac(no_vcg) add: Arch_getSanitiseRegisterInfo_ccorres) apply (rule ccorres_guard_imp) apply (rule ccorres_symb_exec_l) apply (case_tac rva ; clarsimp) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (rule ccorres_return_Skip)+ apply (wp mapM_x_wp_inv user_getreg_inv' | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ apply (cases "4 < len") @@ -4232,7 +4230,7 @@ lemma handleFaultReply_ccorres [corres]: apply (unfold K_def, rule ccorres_gen_asm) apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) apply (cinit lift: sender_' receiver_' simp: whileAnno_def) - apply (clarsimp simp del: dc_simp) + apply clarsimp apply (ctac(c_lines 2) add: getMessageInfo_ccorres') apply (rename_tac tag tag') apply csymbr @@ -4278,7 +4276,7 
@@ lemma handleFaultReply_ccorres [corres]: split del: if_split) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) - apply (fold bind_assoc id_def) + apply (fold bind_assoc) apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) apply (ctac add: ccorres_return_C) apply wp @@ -4373,7 +4371,7 @@ lemma cteDeleteOne_tcbFault: apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' cancelAllSignals_tcbFault unbindNotification_tcbFault isFinalCapability_inv unbindMaybeNotification_tcbFault - static_imp_wp + hoare_weak_lift_imp | wpc | simp add: Let_def)+ apply (clarsimp split: if_split) done @@ -4493,7 +4491,6 @@ proof - apply csymbr apply wpc apply (clarsimp simp: ccorres_cond_iffs) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg)) apply (rule ccorres_symb_exec_r) @@ -4517,7 +4514,6 @@ proof - fault_to_fault_tag_nonzero split del: if_split) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def)[1] apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) @@ -4547,10 +4543,9 @@ proof - apply (simp only: K_bind_def) apply (ctac add: possibleSwitchTo_ccorres) apply (wp sts_running_valid_queues setThreadState_st_tcb | simp)+ - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres_valid_queues'_simple) apply wp - apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' static_imp_wp + apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' hoare_weak_lift_imp threadSet_valid_objs' threadSet_weak_sch_act_wf | simp add: valid_tcb_state'_def)+)[1] apply (clarsimp simp: guard_is_UNIV_def ThreadState_Restart_def @@ -4619,8 +4614,7 @@ lemma setupCallerCap_ccorres [corres]: apply (frule_tac p=sender in is_aligned_tcb_ptr_to_ctcb_ptr) apply (cinit lift: sender_' receiver_' canGrant_') apply (clarsimp simp: word_sle_def - tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] - , fold dc_def)[1] + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]]) apply ccorres_remove_UNIV_guard apply (ctac(no_vcg)) apply (rule ccorres_move_array_assertion_tcb_ctes) @@ -4641,7 +4635,7 @@ lemma setupCallerCap_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg)) apply (rule ccorres_assert) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply csymbr apply (ctac add: cteInsert_ccorres) apply simp @@ -4695,7 +4689,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -4716,7 +4710,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5068,7 +5062,7 @@ lemma sendIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp 
split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -5084,12 +5078,12 @@ lemma sendIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (SendEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5265,12 +5259,9 @@ lemma sendIPC_ccorres [corres]: apply (clarsimp simp: disj_imp[symmetric] split del: if_split) apply (wpc ; clarsimp) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setupCallerCap_ccorres) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres) - apply (fold dc_def)[1] apply (rule ccorres_return_Skip) apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not possibleSwitchTo_sch_act_not sts_st_tcb' @@ -5466,7 +5457,7 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -5482,12 +5473,12 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5592,7 +5583,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -5613,7 +5604,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5761,7 +5752,7 @@ lemma completeSignal_ccorres: apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) apply ceqv - apply (fold dc_def, ctac(no_vcg)) + apply (ctac(no_vcg)) apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) apply (rule allI, 
rule conseqPre, vcg) apply (clarsimp) @@ -5875,7 +5866,7 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule ccorres_cond[where R=\]) apply (simp add: Collect_const_mem) - apply (ctac add: completeSignal_ccorres[unfolded dc_def]) + apply (ctac add: completeSignal_ccorres) apply (rule_tac xf'=ret__unsigned_' and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv @@ -5905,20 +5896,18 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp apply (rename_tac list NOo) - apply (rule_tac ep="RecvEP list" - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (rename_tac list) apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \IdleEP case\ apply (rule ccorres_cond_true) apply csymbr @@ -5930,18 +5919,16 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp - apply (rule_tac ep=IdleEP - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \SendEP case\ apply (thin_tac "isBlockinga = from_bool P" for P) apply (rule ccorres_cond_false) @@ -6019,8 +6006,6 @@ lemma receiveIPC_ccorres [corres]: split: Structures_H.thread_state.splits) apply ceqv - apply (fold dc_def) - supply dc_simp[simp del] apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) apply wpc (* blocking ipc call *) @@ -6099,12 +6084,12 @@ lemma receiveIPC_ccorres [corres]: apply (clarsimp simp:st_tcb_at_refs_of_rev') apply (erule_tac x=x and P="\x. 
st_tcb_at' P x s" for P in ballE) apply (drule_tac t=x in valid_queues_not_runnable'_not_ksQ) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (subgoal_tac "sch_act_not x s") prefer 2 apply (frule invs_sch_act_wf') apply (clarsimp simp:sch_act_wf_def) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs isBlockedOnSend_def split: list.split | rule conjI)+ @@ -6132,11 +6117,10 @@ lemma sendSignal_dequeue_ccorres_helper: IF head_C \ntfn_queue = Ptr 0 THEN CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) FI)" - apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ntfn_blocked_in_queueD) apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) apply (elim conjE) @@ -6156,7 +6140,7 @@ lemma sendSignal_dequeue_ccorres_helper: apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -6333,7 +6317,7 @@ lemma sendSignal_ccorres [corres]: apply wpc apply (simp add: option_to_ctcb_ptr_def split del: if_split) apply (rule ccorres_cond_false) - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (rule ccorres_cond_true) apply (rule getThreadState_ccorres_foo) apply (rule ccorres_Guard_Seq) @@ -6348,7 +6332,7 @@ lemma sendSignal_ccorres [corres]: apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: setRegister_ccorres) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: possibleSwitchTo_ccorres) apply (wp sts_running_valid_queues sts_st_tcb_at'_cases | simp add: option_to_ctcb_ptr_def split del: if_split)+ apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" @@ -6356,7 +6340,7 @@ lemma sendSignal_ccorres [corres]: apply auto[1] apply wp apply simp - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def ARM_HYP_H.badgeRegister_def Kernel_C.badgeRegister_def ARM_HYP.badgeRegister_def Kernel_C.R0_def @@ -6412,7 +6396,7 @@ lemma sendSignal_ccorres [corres]: apply ceqv apply (simp only: K_bind_def) apply (ctac (no_vcg)) - apply (simp, fold dc_def) + apply simp apply (ctac (no_vcg)) apply (ctac add: possibleSwitchTo_ccorres) apply (simp) @@ -6491,16 +6475,17 @@ lemma cpspace_relation_ntfn_update_ntfn': fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" and ntfnptr :: "word32" and s :: "kernel_state" defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" - defines "s' \ s\ksPSpace := ksPSpace s(ntfnptr \ KONotification ntfn')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" assumes koat: "ko_at' ntfn ntfnptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" and srs: "sym_refs (state_refs_of' s')" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr - (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" proof - from koat have koat': "ko_at' ntfn' ntfnptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -6563,7 +6548,7 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ntfn) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -6579,12 +6564,12 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (simp add: cnotification_relation_def Let_def) apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)) ntfnptr (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -6721,11 +6706,10 @@ lemma receiveSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp) apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule 
receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6736,7 +6720,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \ActiveNtfn case\ apply (rename_tac badge) apply (rule ccorres_cond_false) @@ -6792,8 +6776,7 @@ lemma receiveSignal_ccorres [corres]: apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule_tac ntfn="ntfn" - in receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6805,7 +6788,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def NtfnState_Waiting_def NtfnState_Idle_def) apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/ARM_HYP/IsolatedThreadAction.thy b/proof/crefine/ARM_HYP/IsolatedThreadAction.thy index e9438a8c41..fdf7abf47c 100644 --- a/proof/crefine/ARM_HYP/IsolatedThreadAction.thy +++ b/proof/crefine/ARM_HYP/IsolatedThreadAction.thy @@ -412,7 +412,7 @@ lemma modify_isolatable: liftM_def bind_assoc) apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) - apply (simp add: simpler_modify_def o_def) + apply (simp add: simpler_modify_def) apply (subst swap) apply (simp add: obj_at_partial_overwrite_If) apply (simp add: ksPSpace_update_partial_id o_def) @@ -646,7 +646,7 @@ lemma setVCPU_isolatable: apply (subst setObject_assert_modify; simp add: projectKOs objBits_simps archObjSize_def vcpuBits_def vcpu_bits_def pageBits_def)+ apply (clarsimp simp: select_f_asserts assert_def obj_at_partial_overwrite_id2 split: if_splits) - apply (clarsimp simp: select_f_def simpler_modify_def bind_def o_def) + apply (clarsimp simp: select_f_def simpler_modify_def bind_def) apply (case_tac s) apply simp apply (rule ext) @@ -1203,7 +1203,7 @@ lemma setThreadState_no_sch_change: "\\s. P (ksSchedulerAction s) \ (runnable' st \ t \ ksCurThread s)\ setThreadState st t \\rv s. P (ksSchedulerAction s)\" - (is "NonDetMonadVCG.valid ?P ?f ?Q") + (is "Nondet_VCG.valid ?P ?f ?Q") apply (simp add: setThreadState_def setSchedulerAction_def) apply (wp hoare_pre_cont[where f=rescheduleRequired]) apply (rule_tac Q="\_. 
?P and st_tcb_at' ((=) st) t" in hoare_post_imp) @@ -1382,8 +1382,7 @@ lemma setCTE_isolatable: apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) apply (simp add: select_f_returns select_f_asserts split: if_split) apply (intro conjI impI) - apply (clarsimp simp: simpler_modify_def fun_eq_iff - partial_overwrite_fun_upd2 o_def + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 intro!: kernel_state.fold_congs[OF refl refl]) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) diff --git a/proof/crefine/ARM_HYP/Machine_C.thy b/proof/crefine/ARM_HYP/Machine_C.thy index 191ca72711..3fe2164561 100644 --- a/proof/crefine/ARM_HYP/Machine_C.thy +++ b/proof/crefine/ARM_HYP/Machine_C.thy @@ -560,13 +560,13 @@ lemma cleanCacheRange_PoC_ccorres: apply (cinit' lift: start_' end_' pstart_') apply (clarsimp simp: cleanCacheRange_PoC_def word_sle_def whileAnno_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_cleanByVA) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: cleanByVA_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) @@ -606,8 +606,8 @@ lemma cleanInvalidateCacheRange_RAM_ccorres: apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=cleanInvalByVA_modifies) apply (rule ceqv_refl) - apply (ctac (no_vcg) add: dsb_ccorres[simplified dc_def]) - apply (wp | clarsimp simp: guard_is_UNIVI o_def)+ + apply (ctac (no_vcg) add: dsb_ccorres) + apply (wp | clarsimp simp: guard_is_UNIVI)+ apply (frule(1) ghost_assertion_size_logic) apply (clarsimp simp: o_def) done @@ -630,7 +630,7 @@ lemma cleanCacheRange_RAM_ccorres: in ccorres_cross_over_guard) apply (rule ccorres_Guard_Seq) apply (rule ccorres_basic_srnoop2, simp) - apply (ctac (no_vcg) add: cleanL2Range_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: cleanL2Range_ccorres) apply wp+ apply clarsimp apply (auto dest: ghost_assertion_size_logic simp: o_def) @@ -650,13 +650,13 @@ lemma cleanCacheRange_PoU_ccorres: apply (rule ccorres_basic_srnoop2, simp) apply (simp add: cleanCacheRange_PoU_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_cleanByVA_PoU) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) @@ -688,14 +688,14 @@ lemma invalidateCacheRange_RAM_ccorres: apply (rule ccorres_cond[where R=\]) apply (clarsimp simp: lineStart_def cacheLineBits_def) apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply ceqv apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_cond[where R=\]) apply (clarsimp simp: lineStart_def cacheLineBits_def) apply csymbr apply (rule ccorres_call[OF cleanCacheRange_RAM_ccorres, where xf'=xfdc], (clarsimp)+) - apply (rule 
ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply ceqv apply (rule_tac P="\s. unat (w2 - w1) \ gsMaxObjectSize s" in ccorres_cross_over_guard) @@ -718,7 +718,7 @@ lemma invalidateCacheRange_RAM_ccorres: apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) apply (vcg exspec=invalidateByVA_modifies) apply ceqv - apply (ctac add: dsb_ccorres[unfolded dc_def]) + apply (ctac add: dsb_ccorres) apply wp apply (simp add: guard_is_UNIV_def) apply wp @@ -758,13 +758,13 @@ lemma branchFlushRange_ccorres: apply (clarsimp simp: word_sle_def whileAnno_def) apply (simp add: branchFlushRange_def) apply csymbr - apply (rule cacheRangeOp_ccorres[simplified dc_def]) + apply (rule cacheRangeOp_ccorres) apply (rule empty_fail_branchFlush) apply clarsimp apply (cinitlift index_') apply (rule ccorres_guard_imp2) apply csymbr - apply (ctac add: branchFlush_ccorres[unfolded dc_def]) + apply (ctac add: branchFlush_ccorres) apply (clarsimp simp: lineStart_def cacheLineBits_def shiftr_shiftl1 mask_out_sub_mask) apply (drule_tac s="w1 && mask 6" in sym, simp add: cache_range_lineIndex_helper) diff --git a/proof/crefine/ARM_HYP/PSpace_C.thy b/proof/crefine/ARM_HYP/PSpace_C.thy index 50d4aed6cd..97b7236e95 100644 --- a/proof/crefine/ARM_HYP/PSpace_C.thy +++ b/proof/crefine/ARM_HYP/PSpace_C.thy @@ -47,7 +47,7 @@ lemma setObject_ccorres_helper: fixes ko :: "'a :: pspace_storable" assumes valid: "\\ (ko' :: 'a). \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} - c {s. (\\ksPSpace := ksPSpace \ (p \ injectKO ko)\, s) \ rf_sr}" + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" shows "\ \ko :: 'a. updateObject ko = updateObject_default ko; \ko :: 'a. (1 :: word32) < 2 ^ objBits ko \ \ ccorres dc xfdc P P' hs (setObject p ko) c" diff --git a/proof/crefine/ARM_HYP/Recycle_C.thy b/proof/crefine/ARM_HYP/Recycle_C.thy index d4e2bda75b..ed2183467f 100644 --- a/proof/crefine/ARM_HYP/Recycle_C.thy +++ b/proof/crefine/ARM_HYP/Recycle_C.thy @@ -366,7 +366,7 @@ lemma clearMemory_PageCap_ccorres: subgoal by (simp add: pageBits_def ko_at_projectKO_opt[OF user_data_at_ko]) subgoal by simp apply csymbr - apply (ctac add: cleanCacheRange_RAM_ccorres[unfolded dc_def]) + apply (ctac add: cleanCacheRange_RAM_ccorres) apply wp apply (simp add: guard_is_UNIV_def unat_of_nat word_bits_def capAligned_def word_of_nat_less) @@ -456,7 +456,7 @@ lemma mapM_x_store_memset_ccorres_assist: "\ko :: 'a. (1 :: word32) < 2 ^ objBits ko" assumes restr: "set slots \ S" assumes worker: "\ptr s s' (ko :: 'a). 
\ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ - \ (s \ ksPSpace := ksPSpace s (ptr \ injectKO val)\, + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" @@ -530,8 +530,8 @@ lemma invalidateTLBByASID_ccorres: apply (simp add: case_option_If2 del: Collect_const) apply (rule ccorres_if_cond_throws2[where Q=\ and Q'=\]) apply (clarsimp simp: pde_stored_asid_def to_bool_def split: if_split) - apply (rule ccorres_return_void_C[unfolded dc_def]) - apply (simp add: dc_def[symmetric]) + apply (rule ccorres_return_void_C) + apply simp apply csymbr apply (ctac add: invalidateTranslationASID_ccorres) apply vcg @@ -812,8 +812,8 @@ lemma cpspace_relation_ep_update_ep2: (cslift t) ep_Ptr (cendpoint_relation (cslift t)); cendpoint_relation (cslift t') ep' endpoint; (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ - \ cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(ep_Ptr epptr \ endpoint)) + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) ep_Ptr (cendpoint_relation (cslift t'))" apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, @@ -922,8 +922,8 @@ lemma cancelBadgedSends_ccorres: cong: list.case_cong Structures_H.endpoint.case_cong call_ignore_cong del: Collect_const) apply (rule ccorres_pre_getEndpoint) - apply (rule_tac R="ko_at' rv ptr" and xf'="ret__unsigned_'" - and val="case rv of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle | SendEP q \ scast EPState_Send" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg @@ -933,22 +933,22 @@ lemma cancelBadgedSends_ccorres: split: Structures_H.endpoint.split_asm) apply ceqv apply wpc - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) apply (simp add: Collect_True Collect_False endpoint_state_defs - ccorres_cond_iffs dc_def[symmetric] + ccorres_cond_iffs del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (drule_tac s = rv in sym, simp only:) - apply (rule_tac P="ko_at' rv ptr and invs'" in ccorres_cross_over_guard) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) - apply (rule_tac P="ko_at' rv ptr" + apply (rule_tac P="ko_at' ep ptr" in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1023,7 +1023,7 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: tcb_queue_relation'_def EPState_Send_def mask_def) subgoal by (auto split: if_split) subgoal by simp - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) apply (fastforce simp: valid_ep'_def pred_tcb_at' 
split: list.splits) @@ -1033,7 +1033,7 @@ lemma cancelBadgedSends_ccorres: apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) apply (rule ccorres_init_tmp_lift2, ceqv) apply (rule ccorres_guard_imp2) - apply (simp add: bind_assoc dc_def[symmetric] + apply (simp add: bind_assoc del: Collect_const) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -1058,9 +1058,9 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: rf_sr_def) apply simp apply ceqv - apply (rule_tac P="ret__unsigned=blockingIPCBadge rva" in ccorres_gen_asm2) + apply (rule_tac P="ret__unsigned=blockingIPCBadge rv" in ccorres_gen_asm2) apply (rule ccorres_if_bind, rule ccorres_if_lhs) - apply (simp add: bind_assoc dc_def[symmetric]) + apply (simp add: bind_assoc) apply (rule ccorres_rhs_assoc)+ apply (ctac add: setThreadState_ccorres) apply (ctac add: tcbSchedEnqueue_ccorres) @@ -1112,8 +1112,8 @@ lemma cancelBadgedSends_ccorres: apply (drule_tac x=p in spec) subgoal by fastforce apply (rule conjI) - apply (erule cready_queues_relation_not_queue_ptrs, - auto dest: null_ep_schedD[unfolded o_def] simp: o_def)[1] + apply (erule cready_queues_relation_not_queue_ptrs; + fastforce dest: null_ep_schedD[unfolded o_def] simp: o_def) apply (simp add: carch_state_relation_def cmachine_state_relation_def h_t_valid_clift_Some_iff) @@ -1131,9 +1131,9 @@ lemma cancelBadgedSends_ccorres: apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases sts_sch_act sts_valid_queues setThreadState_oa_queued) apply (vcg exspec=setThreadState_cslift_spec) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_symb_exec_r2) - apply (drule_tac x="x @ [a]" in spec, simp add: dc_def[symmetric]) + apply (drule_tac x="x @ [a]" in spec, simp) apply vcg apply (vcg spec=modifies) apply (thin_tac "\x. 
P x" for P) diff --git a/proof/crefine/ARM_HYP/Refine_C.thy b/proof/crefine/ARM_HYP/Refine_C.thy index e4baa8129f..7343ad1485 100644 --- a/proof/crefine/ARM_HYP/Refine_C.thy +++ b/proof/crefine/ARM_HYP/Refine_C.thy @@ -472,7 +472,7 @@ lemma ccorres_corres_u_xf: apply (drule (1) bspec) apply (clarsimp simp: exec_C_def no_fail_def) apply (drule_tac x = a in spec) - apply (clarsimp simp:gets_def NonDetMonad.bind_def get_def return_def) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) apply (rule conjI) apply clarsimp apply (erule_tac x=0 in allE) @@ -525,7 +525,7 @@ lemma handleVCPUFault_ccorres: apply (ctac (no_vcg) add: schedule_ccorres) apply (rule ccorres_stateAssert_after) apply (rule ccorres_guard_imp) - apply (ctac (no_vcg) add: activateThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: activateThread_ccorres) apply (clarsimp, assumption) apply assumption apply (wp schedule_sch_act_wf schedule_invs'|strengthen invs_queues invs_valid_objs')+ @@ -642,9 +642,9 @@ lemma callKernel_withFastpath_corres_C: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_r)+ apply (rule ccorres_Cond_rhs) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) apply vcg apply (rule conseqPre, vcg, clarsimp) @@ -677,7 +677,7 @@ lemma threadSet_all_invs_triv': apply (simp add: tcb_cte_cases_def) apply (simp add: exst_same_def) apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched - threadSet_invs_trivial threadSet_ct_running' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp thread_set_ct_in_state | simp add: tcb_cap_cases_def | rule threadSet_ct_in_state' @@ -729,9 +729,9 @@ lemma entry_corres_C: apply (simp add: ccontext_rel_to_C) apply simp apply (rule corres_split) - apply (rule corres_cases[where R=fp], simp_all add: dc_def[symmetric])[1] - apply (rule callKernel_withFastpath_corres_C, simp) - apply (rule callKernel_corres_C[unfolded dc_def], simp) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) + apply (rule callKernel_corres_C) apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. 
t' = tcb_ptr_to_ctcb_ptr t"]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) apply (rule getContext_corres, simp) @@ -836,7 +836,7 @@ lemma user_memory_update_corres_C: prefer 2 apply (clarsimp simp add: doMachineOp_def user_memory_update_def simpler_modify_def simpler_gets_def select_f_def - NonDetMonad.bind_def return_def) + Nondet_Monad.bind_def return_def) apply (thin_tac P for P)+ apply (case_tac a, clarsimp) apply (case_tac ksMachineState, clarsimp) @@ -863,7 +863,7 @@ lemma device_update_corres_C: apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) apply (rule ballI) apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) - apply (clarsimp simp: doMachineOp_def device_memory_update_def NonDetMonad.bind_def in_monad + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad gets_def get_def return_def simpler_modify_def select_f_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) @@ -931,7 +931,7 @@ lemma do_user_op_corres_C: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) apply (drule(1) device_mem_C_relation[symmetric]) - apply (simp add: comp_def) + apply simp apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: cstate_relation_def rf_sr_def Let_def cmachine_state_relation_def) @@ -951,7 +951,7 @@ lemma do_user_op_corres_C: apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (intro conjI allI ballI impI) apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] apply (clarsimp simp: ex_abs_def restrict_map_def diff --git a/proof/crefine/ARM_HYP/Retype_C.thy b/proof/crefine/ARM_HYP/Retype_C.thy index 483c128777..767dcac4dc 100644 --- a/proof/crefine/ARM_HYP/Retype_C.thy +++ b/proof/crefine/ARM_HYP/Retype_C.thy @@ -1070,7 +1070,7 @@ lemma ptr_add_to_new_cap_addrs: shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. 
k < n} = Ptr ` set (new_cap_addrs n ptr ko)" unfolding new_cap_addrs_def - apply (simp add: comp_def image_image shiftl_t2n size_of_m field_simps) + apply (simp add: image_image shiftl_t2n size_of_m field_simps) apply (clarsimp simp: atLeastLessThan_def lessThan_def) done @@ -2332,6 +2332,9 @@ definition | ARM_HYP_H.PageDirectoryObject \ scast seL4_ARM_PageDirectoryObject | ARM_HYP_H.VCPUObject \ scast seL4_ARM_VCPUObject" +(* always unfold StrictC'_mode_object_defs together with api_object_defs *) +lemmas api_object_defs = api_object_defs StrictC'_mode_object_defs + lemmas nAPIObjects_def = seL4_NonArchObjectTypeCount_def lemma nAPIOBjects_object_type_from_H: @@ -4446,12 +4449,10 @@ lemma ccorres_placeNewObject_endpoint: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -5312,7 +5313,7 @@ lemma monadic_rewrite_setObject_vcpu_as_init: supply fun_upd_apply[simp del] apply simp apply (rule monadic_rewrite_gen_asm) - apply wp_pre + apply monadic_rewrite_pre apply (simp add: vcpuWriteReg_def vgicUpdate_def bind_assoc) apply (clarsimp simp: vcpuUpdate_def bind_assoc) (* explicitly state the vcpu we are setting for each setObject *) @@ -5364,7 +5365,7 @@ lemma ptr_retyp_fromzeroVCPU: assumes cover: "range_cover p vcpu_bits vcpu_bits 1" assumes al: "is_aligned p vcpu_bits" assumes sr: "(\, \') \ rf_sr" - shows "(\\ksPSpace := ksPSpace \(p \ ko_vcpu)\, + shows "(\\ksPSpace := (ksPSpace \)(p \ ko_vcpu)\, globals_update (t_hrs_'_update (hrs_htd_update (ptr_retyp (vcpu_Ptr p)))) \') \ rf_sr" (is "(\\ksPSpace := ?ks\, globals_update ?gs' \') \ rf_sr") @@ -5435,8 +5436,8 @@ proof - have map_vcpus: "cmap_relation (map_to_vcpus (ksPSpace \)) (cslift \') vcpu_Ptr cvcpu_relation - \ cmap_relation (map_to_vcpus (ksPSpace \)(p \ vcpu0)) - (cslift \'(vcpu_Ptr p \ ?zeros)) vcpu_Ptr cvcpu_relation" + \ cmap_relation ((map_to_vcpus (ksPSpace \))(p \ vcpu0)) + ((cslift \')(vcpu_Ptr p \ ?zeros)) vcpu_Ptr cvcpu_relation" apply (erule cmap_vcpus) apply (simp add: vcpu0_def from_bytes_def) apply (simp add: typ_info_simps vcpu_C_tag_def) @@ -5539,13 +5540,13 @@ proof - by (simp add: objBitsKO_def archObjSize_def vcpu_bits_def' vcpuBits_def') have rl_vcpu: - "(projectKO_opt \\<^sub>m (ksPSpace \(p \ KOArch (KOVCPU vcpu0))) :: word32 \ vcpu option) + "(projectKO_opt \\<^sub>m ((ksPSpace \)(p \ KOArch (KOVCPU vcpu0))) :: word32 \ vcpu option) = (projectKO_opt \\<^sub>m ksPSpace \)(p \ vcpu0)" by (rule ext) (clarsimp simp: projectKOs map_comp_def vcpu0_def split: if_split) have ctes: - "map_to_ctes (ksPSpace \(p \ KOArch (KOVCPU vcpu0))) = ctes_of \" + "map_to_ctes ((ksPSpace \)(p \ KOArch (KOVCPU vcpu0))) = ctes_of \" using pal pdst al pno apply (clarsimp simp: fun_upd_def) apply (frule (2) 
pspace_no_overlap_base') @@ -5662,7 +5663,6 @@ proof - done show ?thesis - supply dc_simp[simp del] apply (cinit' lift: vcpu_' simp: makeObject_vcpu) apply clarsimp apply (rule monadic_rewrite_ccorres_assemble[OF _ monadic_rewrite_setObject_vcpu_as_init]) @@ -5689,7 +5689,6 @@ lemma placeNewObject_vcpu_ccorres: hs (placeNewObject regionBase (makeObject :: vcpu) 0) (global_htd_update (\_. (ptr_retyp (vcpu_Ptr regionBase)));; CALL vcpu_init(vcpu_Ptr regionBase))" - supply dc_simp[simp del] apply (rule ccorres_guard_imp) apply (rule monadic_rewrite_ccorres_assemble[OF _ monadic_rewrite_placeNewObject_vcpu_decompose[where vcpupre=fromzeroVCPU]]) @@ -5964,7 +5963,7 @@ lemma gsCNodes_update_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -6134,7 +6133,7 @@ proof - apply (simp add: obj_at'_real_def) apply (wp placeNewObject_ko_wp_at') apply vcg - apply (clarsimp simp: dc_def) + apply clarsimp apply vcg apply (clarsimp simp: CPSR_def) apply (rule conseqPre, vcg, clarsimp) @@ -6323,7 +6322,7 @@ lemma ccorres_guard_impR: lemma typ_clear_region_dom: "dom (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: 'b :: mem_type typ_heap) \ dom ((clift hp) :: 'b :: mem_type typ_heap)" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def) + apply (clarsimp simp:lift_t_def lift_typ_heap_def comp_def) apply (clarsimp simp:lift_state_def) apply (case_tac hp) apply (clarsimp simp:) @@ -8141,7 +8140,7 @@ shows "ccorres dc xfdc apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_add_return) - apply (simp only: dc_def[symmetric] hrs_htd_update) + apply (simp only: hrs_htd_update) apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? apply (rule ccorres_split_nothrow, rule_tac S="{ptr .. 
ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" @@ -8308,9 +8307,9 @@ shows "ccorres dc xfdc including no_pre apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' - insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at static_imp_wp) + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) apply (wp insertNewCap_cte_wp_at_other) - apply (wp hoare_vcg_all_lift static_imp_wp insertNewCap_cte_at) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) apply (clarsimp simp:conj_comms | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_distinct')+ @@ -8344,7 +8343,7 @@ shows "ccorres dc xfdc hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to - createObject_no_inter[where sz = sz] hoare_vcg_all_lift static_imp_wp)+ + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) @@ -8491,9 +8490,9 @@ shows "ccorres dc xfdc apply (frule(1) range_cover_gsMaxObjectSize, fastforce, assumption) apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) ghost_assertion_size_logic)+ - apply (simp add: o_def) - apply (case_tac newType,simp_all add:object_type_from_H_def Kernel_C_defs - nAPIObjects_def APIType_capBits_def o_def split:apiobject_type.splits)[1] + apply (case_tac newType, + simp_all add: object_type_from_H_def Kernel_C_defs nAPIObjects_def APIType_capBits_def o_def + split: apiobject_type.splits)[1] subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) subgoal by (clarsimp simp:objBits_simps',unat_arith) apply (fold_subgoals (prefix))[3] diff --git a/proof/crefine/ARM_HYP/SR_lemmas_C.thy b/proof/crefine/ARM_HYP/SR_lemmas_C.thy index 58983be4ab..6a34b458be 100644 --- a/proof/crefine/ARM_HYP/SR_lemmas_C.thy +++ b/proof/crefine/ARM_HYP/SR_lemmas_C.thy @@ -324,21 +324,21 @@ lemma tcb_cte_cases_proj_eq [simp]: lemma map_to_ctes_upd_cte': "\ ksPSpace s p = Some (KOCTE cte'); is_aligned p cte_level_bits; ps_clear p cte_level_bits s \ - \ map_to_ctes (ksPSpace s(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" + \ map_to_ctes ((ksPSpace s)(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" apply (erule (1) map_to_ctes_upd_cte) apply (simp add: field_simps ps_clear_def3 cte_level_bits_def mask_def) done lemma map_to_ctes_upd_tcb': - "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; - ps_clear p tcbBlockSizeBits s |] -==> map_to_ctes (ksPSpace s(p |-> KOTCB tcb)) = - (%x. if EX getF setF. + "\ ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; + ps_clear p tcbBlockSizeBits s \ + \ map_to_ctes ((ksPSpace s)(p \ KOTCB tcb)) = + (\x. if EX getF setF. 
tcb_cte_cases (x - p) = Some (getF, setF) & - getF tcb ~= getF tcb' - then case tcb_cte_cases (x - p) of - Some (getF, setF) => Some (getF tcb) - else ctes_of s x)" + getF tcb \ getF tcb' + then case tcb_cte_cases (x - p) of + Some (getF, setF) \ Some (getF tcb) + else ctes_of s x)" apply (erule (1) map_to_ctes_upd_tcb) apply (simp add: field_simps ps_clear_def3 mask_def objBits_defs) done @@ -459,7 +459,7 @@ lemma fst_setCTE: assumes ct: "cte_at' dest s" and rl: "\s'. \ ((), s') \ fst (setCTE dest cte s); (s' = s \ ksPSpace := ksPSpace s' \); - (ctes_of s' = ctes_of s(dest \ cte)); + (ctes_of s' = (ctes_of s)(dest \ cte)); (map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')); (map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')); (map_to_pdes (ksPSpace s) = map_to_pdes (ksPSpace s')); @@ -486,7 +486,7 @@ proof - by clarsimp note thms = this - have ceq: "ctes_of s' = ctes_of s(dest \ cte)" + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp show ?thesis @@ -676,7 +676,6 @@ proof (rule cor_map_relI [OF map_option_eq_dom_eq]) hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om apply - - apply (simp add: o_def) apply (drule fun_cong [where x = x]) apply simp done @@ -1512,7 +1511,7 @@ lemma ntfnQueue_tail_mask_4 [simp]: lemma map_to_ctes_upd_tcb_no_ctes: "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x \ - \ map_to_ctes (ksPSpace s(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" apply (erule obj_atE') apply (simp add: projectKOs objBits_simps) apply (subst map_to_ctes_upd_tcb') @@ -1526,15 +1525,15 @@ lemma map_to_ctes_upd_tcb_no_ctes: lemma update_ntfn_map_tos: fixes P :: "Structures_H.notification \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KONotification ko)) = map_to_eps (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ KONotification ko)) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KONotification ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs 
projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1542,15 +1541,15 @@ lemma update_ntfn_map_tos: lemma update_ep_map_tos: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ KOEndpoint ko)) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1558,14 +1557,14 @@ lemma update_ep_map_tos: lemma update_tcb_map_tos: fixes P :: "tcb \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" - and "map_to_ntfns (ksPSpace s(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ KOTCB ko)) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOTCB ko)) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1573,15 +1572,15 @@ lemma update_tcb_map_tos: lemma update_asidpool_map_tos: 
fixes P :: "asidpool \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI @@ -1590,27 +1589,27 @@ lemma update_asidpool_map_tos: arch_kernel_object.split_asm) lemma update_asidpool_map_to_asidpools: - "map_to_asidpools (ksPSpace s(p \ KOArch (KOASIDPool ap))) + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = (map_to_asidpools (ksPSpace s))(p \ ap)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_to_ptes: - "map_to_ptes (ksPSpace s(p \ KOArch (KOPTE pte))) + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) = (map_to_ptes (ksPSpace s))(p \ pte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_tos: fixes P :: "pte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE 
pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1618,22 +1617,22 @@ lemma update_pte_map_tos: auto simp: projectKO_opts_defs) lemma update_pde_map_to_pdes: - "map_to_pdes (ksPSpace s(p \ KOArch (KOPDE pde))) + "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPDE pde))) = (map_to_pdes (ksPSpace s))(p \ pde)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pde_map_tos: fixes P :: "pde \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" - and "map_to_vcpus (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_vcpus (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" + and "map_to_vcpus ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_vcpus (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1643,15 +1642,15 @@ lemma update_pde_map_tos: lemma update_vcpu_map_tos: fixes P :: "vcpu \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_ptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_pdes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_eps (ksPSpace s)" - and 
"map_to_asidpools (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_ptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_pdes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOVCPU vcpu)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -2135,7 +2134,7 @@ lemma gs_set_assn_Delete_cstate_relation: lemma update_typ_at: assumes at: "obj_at' P p s" and tp: "\obj. P obj \ koTypeOf (injectKOS obj) = koTypeOf ko" - shows "typ_at' T p' (s \ksPSpace := ksPSpace s(p \ ko)\) = typ_at' T p' s" + shows "typ_at' T p' (s \ksPSpace := (ksPSpace s)(p \ ko)\) = typ_at' T p' s" using at by (auto elim!: obj_atE' simp: typ_at'_def ko_wp_at'_def dest!: tp[rule_format] @@ -2403,7 +2402,7 @@ lemma rf_sr_armKSGICVCPUNumListRegs: by (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def Let_def) lemma update_vcpu_map_to_vcpu: - "map_to_vcpus (ksPSpace s(p \ KOArch (KOVCPU vcpu))) + "map_to_vcpus ((ksPSpace s)(p \ KOArch (KOVCPU vcpu))) = (map_to_vcpus (ksPSpace s))(p \ vcpu)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) diff --git a/proof/crefine/ARM_HYP/Schedule_C.thy b/proof/crefine/ARM_HYP/Schedule_C.thy index 8248da601f..5e2a90cd7f 100644 --- a/proof/crefine/ARM_HYP/Schedule_C.thy +++ b/proof/crefine/ARM_HYP/Schedule_C.thy @@ -79,7 +79,6 @@ lemma Arch_switchToThread_ccorres: apply (ctac (no_vcg) add: setVMRoot_ccorres) apply (simp (no_asm) del: Collect_const) apply (rule_tac A'=UNIV in ccorres_guard_imp2) - apply (fold dc_def)[1] apply (ctac add: clearExMonitor_ccorres) apply wpsimp+ apply (vcg exspec=vcpu_switch_modifies) @@ -217,14 +216,14 @@ lemmas ccorres_remove_tail_Guard_Skip = ccorres_abstract[where xf'="\_. ()", OF ceqv_remove_tail_Guard_Skip] lemma switchToThread_ccorres': - "ccorres (\_ _. 
True) xfdc + "ccorres dc xfdc (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) hs (switchToThread t) (Call switchToThread_'proc)" apply (rule ccorres_guard_imp2) - apply (ctac (no_vcg) add: switchToThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: switchToThread_ccorres) apply auto done @@ -316,14 +315,14 @@ proof - apply (intro conjI impI) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) apply (fastforce dest: lookupBitmapPriority_obj_at' - simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + simp: pred_conj_def obj_at'_def st_tcb_at'_def) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (clarsimp simp: pred_conj_def obj_at'_def st_tcb_at'_def) apply (clarsimp simp: not_less le_maxDomain_eq_less_numDomains) apply (prop_tac "ksCurDomain s = 0") using unsigned_eq_0_iff apply force apply (cut_tac s=s in lookupBitmapPriority_obj_at'; simp?) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (clarsimp simp: pred_conj_def obj_at'_def st_tcb_at'_def) done qed @@ -404,7 +403,6 @@ lemma isHighestPrio_ccorres: (isHighestPrio d p) (Call isHighestPrio_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -445,7 +443,6 @@ lemma isHighestPrio_ccorres: lemma schedule_ccorres: "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -459,7 +456,7 @@ lemma schedule_ccorres: apply (rule ccorres_cond_false_seq) apply simp apply (rule_tac P=\ and P'="{s. ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) - apply (clarsimp simp: dc_def return_def split: prod.splits) + apply (clarsimp simp: return_def split: prod.splits) apply (rule conseqPre, vcg, clarsimp) (* toplevel case: action is choose new thread *) apply (rule ccorres_cond_true_seq) @@ -476,7 +473,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_true_seq) (* isolate haskell part before setting thread action *) apply (simp add: scheduleChooseNewThread_def) @@ -504,7 +501,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_false_seq) apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) @@ -524,7 +521,7 @@ lemma schedule_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) - apply (clarsimp simp: scheduleSwitchThreadFastfail_def dc_simp) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) apply (rule ccorres_cond_seq2[THEN iffD1]) apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" and R="\s. 
it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV @@ -561,18 +558,17 @@ lemma schedule_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres, clarsimp) - apply (clarsimp simp: to_bool_def) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_return) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: to_bool_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def - NonDetMonad.bind_def + Nondet_Monad.bind_def split: prod.split) apply ceqv apply (clarsimp simp: to_bool_def) @@ -665,13 +661,12 @@ lemma schedule_ccorres: apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') apply (fastforce dest: invs_sch_act_wf') - apply (wp | clarsimp simp: dc_def)+ + apply wpsimp+ apply (vcg exspec=tcbSchedEnqueue_modifies) apply wp apply vcg - apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs' - dc_def)+ + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs') apply (frule invs_sch_act_wf') apply (frule tcb_at_invs') apply (rule conjI) @@ -689,7 +684,7 @@ lemma schedule_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -742,7 +737,7 @@ lemma timerTick_ccorres: apply (ctac add: get_tsType_ccorres2 [where f="\s. 
ksCurThread_' (globals s)"]) apply (rule ccorres_split_nothrow_novcg) apply wpc - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip)+ (* thread_state.Running *) apply simp apply (rule ccorres_cond_true) @@ -764,17 +759,17 @@ lemma timerTick_ccorres: apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) apply (clarsimp simp: cur_tcb'_def) apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) - apply (ctac add: threadSet_timeSlice_ccorres[unfolded dc_def]) + apply (ctac add: threadSet_timeSlice_ccorres) apply (rule ccorres_rhs_assoc)+ apply (ctac) apply simp apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear threadSet_valid_queues threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ apply (vcg exspec=tcbSchedDequeue_modifies) - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip)+ apply ceqv apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) apply (rule ccorres_when[where R=\]) @@ -786,7 +781,6 @@ lemma timerTick_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply ceqv - apply (fold dc_def) apply (rule ccorres_pre_getDomainTime) apply (rename_tac rva rv'a rvb) apply (rule_tac P'="{s. ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp) @@ -794,13 +788,13 @@ lemma timerTick_ccorres: apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_true) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply clarsimp apply assumption apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/ARM_HYP/SyscallArgs_C.thy b/proof/crefine/ARM_HYP/SyscallArgs_C.thy index 678b824be7..101cce83fd 100644 --- a/proof/crefine/ARM_HYP/SyscallArgs_C.thy +++ b/proof/crefine/ARM_HYP/SyscallArgs_C.thy @@ -47,7 +47,7 @@ lemma replyOnRestart_invs'[wp]: "\invs'\ replyOnRestart thread reply isCall \\rv. invs'\" including no_pre apply (simp add: replyOnRestart_def) - apply (wp setThreadState_nonqueued_state_update rfk_invs' static_imp_wp) + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) apply (rule hoare_vcg_all_lift) apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_vcg_all_lift rfk_ksQ) apply (rule hoare_strengthen_post, rule gts_sp') @@ -289,7 +289,7 @@ lemma ccorres_invocationCatch_Inr: if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () else liftE (replyOnRestart thread reply isCall) odE od) c" - apply (simp add: invocationCatch_def liftE_bindE o_xo_injector) + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) apply (subst ccorres_liftM_simp[symmetric]) apply (simp add: liftM_def bind_assoc bindE_def) apply (rule_tac f="\f. 
ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) @@ -655,7 +655,7 @@ lemma getMRs_tcbContext: apply (wp|wpc)+ apply (rule_tac P="n < length x" in hoare_gen_asm) apply (clarsimp simp: nth_append) - apply (wp mapM_wp' static_imp_wp)+ + apply (wp mapM_wp' hoare_weak_lift_imp)+ apply simp apply (rule asUser_cur_obj_at') apply (simp add: getRegister_def msgRegisters_unfold) @@ -784,11 +784,13 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_move_c_guard_tcb_ctes)+ apply (ctac (no_vcg)) + apply (rename_tac bufferCap bufferCap') apply csymbr - apply (rule_tac b="isArchObjectCap rva \ isPageCap (capCap rva)" in ccorres_case_bools') + apply (rule_tac b="isArchObjectCap bufferCap \ isPageCap (capCap bufferCap)" + in ccorres_case_bools') apply simp apply (rule ccorres_symb_exec_r) - apply (rule_tac b="capVPSize (capCap rva) \ ARMSmallPage" in ccorres_case_bools') + apply (rule_tac b="capVPSize (capCap bufferCap) \ ARMSmallPage" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -796,7 +798,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap bufferCap" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -850,7 +852,7 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap bufferCap" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -1083,7 +1085,7 @@ lemma getMRs_rel: getMRs thread buffer mi \\args. getMRs_rel args buffer\" apply (simp add: getMRs_rel_def) apply (rule hoare_pre) - apply (rule_tac x=mi in hoare_vcg_exI) + apply (rule_tac x=mi in hoare_exI) apply wp apply (rule_tac Q="\rv s. 
thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) apply (wp det_result det_wp_getMRs) diff --git a/proof/crefine/ARM_HYP/Syscall_C.thy b/proof/crefine/ARM_HYP/Syscall_C.thy index 78d990399f..142feb4946 100644 --- a/proof/crefine/ARM_HYP/Syscall_C.thy +++ b/proof/crefine/ARM_HYP/Syscall_C.thy @@ -270,22 +270,22 @@ lemma decodeInvocation_ccorres: apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) apply assumption apply (simp+)[3] apply (rule ccorres_Cond_rhs) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeDomainInvocation_ccorres[unfolded o_def], + erule decodeDomainInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeCNodeInvocation_ccorres[unfolded o_def], + erule decodeCNodeInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply simp @@ -755,7 +755,7 @@ lemma handleFault_ccorres: apply (rule ccorres_return_Skip') apply clarsimp apply (rule ccorres_cond_univ) - apply (ctac (no_vcg) add: handleDoubleFault_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) apply (simp add: sendFaultIPC_def) apply wp apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] @@ -952,8 +952,7 @@ lemma handleInvocation_ccorres: apply (rule_tac Q="\rv'. invs' and tcb_at' rv" and E="\ft. invs' and tcb_at' rv" in hoare_post_impErr) - apply (wp hoare_split_bind_case_sumE - alternative_wp hoare_drop_imps + apply (wp hoare_split_bind_case_sumE hoare_drop_imps setThreadState_nonqueued_state_update ct_in_state'_set setThreadState_st_tcb hoare_vcg_all_lift sts_ksQ' @@ -1118,7 +1117,7 @@ lemma handleReply_ccorres: apply (rule ccorres_cond_true) apply simp apply (rule ccorres_return_void_catchbrk) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply (vcg exspec=doReplyTransfer_modifies) apply (rule ccorres_fail)+ apply (wpc, simp_all) @@ -1136,7 +1135,6 @@ lemma handleReply_ccorres: apply (csymbr, csymbr, csymbr) apply simp apply (rule ccorres_assert2) - apply (fold dc_def) apply (rule ccorres_add_return2) apply (ctac (no_vcg)) apply (rule ccorres_return_void_catchbrk) @@ -1298,7 +1296,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1316,8 +1314,8 @@ lemma handleRecv_ccorres: apply (simp add: liftE_bind) apply (ctac) - apply (rule_tac P="\s. ksCurThread s = rv" in ccorres_cross_over_guard) - apply (ctac add: receiveIPC_ccorres[unfolded dc_def]) + apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) apply (wp deleteCallerCap_ksQ_ct' hoare_vcg_all_lift) apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) @@ -1365,7 +1363,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1382,7 +1380,7 @@ lemma handleRecv_ccorres: apply (clarsimp simp: rf_sr_upd_safe) apply (simp add: liftE_bind) - apply (ctac add: receiveSignal_ccorres[unfolded dc_def]) + apply (ctac add: receiveSignal_ccorres) apply clarsimp apply (vcg exspec=handleFault_modifies) apply (rule ccorres_cond_true_seq) @@ -1395,7 +1393,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) apply (rule ccorres_add_return2) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_break_return[where P=\ and P'=UNIV]) apply simp+ apply wp @@ -1416,7 +1414,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_symb_exec_r) apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_upd_safe) @@ -1429,9 +1427,9 @@ lemma handleRecv_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=handleFault_modifies) @@ -1689,7 +1687,7 @@ lemma virq_virq_active_set_virqEOIIRQEN_spec': \ \ret__struct_virq_C = virq_C (ARRAY _. virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v32) \" apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) apply vcg - apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def o_def) + apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def) apply (case_tac virq) apply clarsimp apply (rule array_ext) @@ -1702,7 +1700,7 @@ lemma virq_virq_invalid_set_virqEOIIRQEN_spec': \ \ret__struct_virq_C = virq_C (ARRAY _. virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v32) \" apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) apply vcg - apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def o_def) + apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def) apply (case_tac virq) apply clarsimp apply (rule array_ext) @@ -1715,7 +1713,7 @@ lemma virq_virq_pending_set_virqEOIIRQEN_spec': \ \ret__struct_virq_C = virq_C (ARRAY _. 
virqSetEOIIRQEN (virq_to_H \<^bsup>s\<^esup>virq) \<^bsup>s\<^esup>v32) \" apply (hoare_rule HoarePartial.ProcNoRec1) (* force vcg to unfold non-recursive procedure *) apply vcg - apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def o_def) + apply (clarsimp simp: virq_to_H_def ARM_A.virqSetEOIIRQEN_def) apply (case_tac virq) apply clarsimp apply (rule array_ext) @@ -1794,8 +1792,8 @@ definition where "eisr_calc eisr0 eisr1 \ if eisr0 \ 0 then word_ctz eisr0 else word_ctz eisr1 + 32" -lemma ccorres_vgicMaintenance: - notes dc_simp[simp del] Collect_const[simp del] +lemma ccorres_vgicMaintenance: + notes Collect_const[simp del] notes scast_specific_plus32[simp] scast_specific_plus32_signed[simp] notes virq_virq_active_set_virqEOIIRQEN_spec = virq_virq_active_set_virqEOIIRQEN_spec' notes virq_virq_invalid_set_virqEOIIRQEN_spec = virq_virq_invalid_set_virqEOIIRQEN_spec' @@ -2067,7 +2065,7 @@ proof - apply wpsimp apply wpsimp apply wpsimp - apply (clarsimp simp: cur_vcpu_relation_def dc_def eisr_calc_def split: option.splits) + apply (clarsimp simp: cur_vcpu_relation_def eisr_calc_def split: option.splits) done qed @@ -2132,8 +2130,8 @@ lemma vcpuUpdate_vppi_masked_ccorres_armHSCurVCPU: apply (clarsimp dest!: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def split: option.splits) done -lemma ccorres_VPPIEvent: - notes dc_simp[simp del] Collect_const[simp del] +lemma ccorres_VPPIEvent: + notes Collect_const[simp del] notes scast_specific_plus32[simp] scast_specific_plus32_signed[simp] shows "ccorres dc xfdc @@ -2224,7 +2222,6 @@ lemma ccorres_handleReservedIRQ: (\p. ksCurThread s \ set (ksReadyQueues s p)))) (UNIV \ {s. irq_' s = ucast irq}) hs (handleReservedIRQ irq) (Call handleReservedIRQ_'proc)" - supply dc_simp[simp del] supply Collect_const[simp del] apply (cinit lift: irq_') apply (clarsimp simp: ucast_up_ucast is_up) @@ -2275,11 +2272,11 @@ lemma handleInterrupt_ccorres: apply (subst doMachineOp_bind) apply (rule maskInterrupt_empty_fail) apply (rule ackInterrupt_empty_fail) - apply (ctac add: maskInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: maskInterrupt_ccorres) apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) - apply (ctac add: ackInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=ackInterrupt_modifies) @@ -2298,7 +2295,7 @@ lemma handleInterrupt_ccorres: apply (rule getIRQSlot_ccorres3) apply (rule ccorres_getSlotCap_cte_at) apply (rule_tac P="cte_at' rv" in ccorres_cross_over_guard) - supply ccorres_move_array_assertion_tcb_ctes [corres_pre del] + supply ccorres_move_array_assertion_tcb_ctes [ccorres_pre del] apply ctac apply csymbr apply csymbr @@ -2317,7 +2314,7 @@ lemma handleInterrupt_ccorres: apply (ctac (no_vcg) add: sendSignal_ccorres) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp+ apply (simp del: Collect_const) apply (rule ccorres_cond_true_seq) @@ -2326,7 +2323,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp apply (rule_tac P=\ and P'="{s. 
ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) apply (clarsimp simp: isCap_simps simp del: Collect_const) @@ -2338,7 +2335,7 @@ lemma handleInterrupt_ccorres: rule ccorres_cond_false_seq, simp, rule ccorres_cond_false_seq, simp, ctac (no_vcg) add: maskInterrupt_ccorres, - ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def], + ctac (no_vcg) add: ackInterrupt_ccorres, wp, simp)+) apply (wp getSlotCap_wp) apply simp @@ -2347,7 +2344,6 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_move_const_guards)+ apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: timerTick_ccorres) apply (ctac (no_vcg) add: resetTimer_ccorres) @@ -2359,7 +2355,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) apply (ctac add: ccorres_handleReservedIRQ) - apply (ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) apply wp apply (vcg exspec=handleReservedIRQ_modifies) apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up) diff --git a/proof/crefine/ARM_HYP/TcbAcc_C.thy b/proof/crefine/ARM_HYP/TcbAcc_C.thy index 73f1b5c191..1ea50d2157 100644 --- a/proof/crefine/ARM_HYP/TcbAcc_C.thy +++ b/proof/crefine/ARM_HYP/TcbAcc_C.thy @@ -178,7 +178,7 @@ lemma threadSet_corres_lemma: assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" and mod: "modifies_heap_spec f" and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ - \ (\\ksPSpace := ksPSpace \(thread \ KOTCB (g ko))\, + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" @@ -207,7 +207,7 @@ lemma threadSet_corres_lemma: lemma threadSet_ccorres_lemma4: - "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := ksPSpace s(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; \s s' tcb tcb'. \ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ diff --git a/proof/crefine/ARM_HYP/TcbQueue_C.thy b/proof/crefine/ARM_HYP/TcbQueue_C.thy index 3fb6ca79a9..853d4615e5 100644 --- a/proof/crefine/ARM_HYP/TcbQueue_C.thy +++ b/proof/crefine/ARM_HYP/TcbQueue_C.thy @@ -1017,8 +1017,8 @@ lemma cpspace_relation_ntfn_update_ntfn: and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" using koat invs cp rel apply - apply (subst map_comp_update) @@ -1106,7 +1106,7 @@ lemma rf_sr_tcb_update_no_queue: (\x\ran tcb_cte_cases. (\(getF, setF). 
getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes heap_to_user_data_def) @@ -1155,7 +1155,7 @@ lemma rf_sr_tcb_update_not_in_queue: \ live' (KOTCB tcb); invs' s; (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes diff --git a/proof/crefine/ARM_HYP/Tcb_C.thy b/proof/crefine/ARM_HYP/Tcb_C.thy index 714bb2431b..b4b2fd819c 100644 --- a/proof/crefine/ARM_HYP/Tcb_C.thy +++ b/proof/crefine/ARM_HYP/Tcb_C.thy @@ -97,8 +97,8 @@ lemma getMRs_rel_sched: lemma getObject_state: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbState_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -156,8 +156,8 @@ lemma getObject_state: lemma threadGet_state: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_state [where st=st]) apply (rule exI) @@ -167,8 +167,8 @@ lemma threadGet_state: lemma asUser_state: "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (asUser t' f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -265,8 +265,8 @@ lemma asUser_state: lemma doMachineOp_state: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. 
st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -299,7 +299,7 @@ lemma getMRs_rel_state: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s \ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -412,8 +412,8 @@ lemma setPriority_ccorres: apply (rule ccorres_pre_getCurThread) apply (rule_tac R = "\s. rv = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) apply (rule ccorres_return_Skip') apply (wp isRunnable_wp) apply (wpsimp wp: hoare_drop_imps threadSet_valid_queues threadSet_valid_objs' @@ -437,7 +437,7 @@ lemma setPriority_ccorres: apply (frule (1) valid_objs'_maxDomain[where t=t]) apply (frule (1) valid_objs'_maxPriority[where t=t]) apply simp -done + done lemma setMCPriority_ccorres: "ccorres dc xfdc @@ -518,7 +518,7 @@ lemma cteInsert_cap_to'2: apply (simp add: cteInsert_def ex_nonz_cap_to'_def setUntypedCapAsFull_def) apply (rule hoare_vcg_ex_lift) apply (wp updateMDB_weak_cte_wp_at - updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp) + updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of) apply auto done @@ -612,7 +612,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply csymbr apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def - locateSlot_conv o_def + locateSlot_conv del: Collect_const) apply (simp add: liftE_bindE del: Collect_const) apply (ctac(no_vcg) add: cteDelete_ccorres) @@ -658,7 +658,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -667,7 +667,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wp (once)) apply (clarsimp simp: guard_is_UNIV_def) - apply (wpsimp wp: when_def static_imp_wp) + apply (wpsimp wp: when_def hoare_weak_lift_imp) apply (strengthen sch_act_wf_weak, wp) apply clarsimp apply wp @@ -681,7 +681,7 @@ lemma invokeTCB_ThreadControl_ccorres: tcb_at' target s \ ksCurDomain s \ maxDomain \ valid_queues' s \ fst (the priority) \ maxPriority)"]) apply (strengthen sch_act_wf_weak) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (clarsimp split: if_splits) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr @@ -696,7 +696,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: 
rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (trace_schematic_insts \rule ccorres_cond2[where R=\], simp add: Collect_const_mem\) @@ -706,7 +706,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply (simp add: when_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbBuffer_def size_of_def cte_level_bits_def @@ -723,7 +723,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply(rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -732,7 +732,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+) apply wp apply (clarsimp simp: guard_is_UNIV_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (simp add: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def) @@ -749,7 +749,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -759,7 +759,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply wpsimp - apply (wp static_imp_wp, strengthen sch_act_wf_weak, wp ) + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) apply wp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (simp cong: conj_cong) @@ -797,7 +797,7 @@ lemma invokeTCB_ThreadControl_ccorres: simp add: o_def) apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" in hoare_gen_asm) - apply (wp threadSet_ipcbuffer_trivial static_imp_wp + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp | simp | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf invs_queues invs_valid_queues' | wp hoare_drop_imps)+ @@ -839,11 +839,10 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -861,8 +860,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply 
csymbr - apply (simp add: Collect_True assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) @@ -870,13 +868,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (fastforce simp: guard_is_UNIV_def Kernel_C.tcbVTable_def tcbVTableSlot_def cte_level_bits_def size_of_def) apply csymbr apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) @@ -900,12 +898,11 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -926,8 +923,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: Collect_True assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) @@ -935,14 +931,14 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem Kernel_C.tcbCTable_def tcbCTableSlot_def cte_level_bits_def size_of_def option_to_0_def) apply csymbr apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def) apply simp apply (rule ccorres_split_throws, rule ccorres_return_C_errorE, simp+) @@ -958,13 +954,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def cte_level_bits_def size_of_def word_sle_def option_to_0_def cintr_def Collect_const_mem) apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: inQ_def) apply (subst is_aligned_neg_mask_eq) @@ -991,7 +987,7 @@ lemma setupReplyMaster_ccorres: apply (cinit lift: thread_') apply (rule 
ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply ctac - apply (simp del: Collect_const add: dc_def[symmetric]) + apply (simp del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) apply (rule_tac F="\rv'. (rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" @@ -1204,10 +1200,10 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (simp add: word_bits_def frame_gp_registers_convs n_gpRegisters_def) apply simp apply (rule ccorres_pre_getCurThread) + apply (rename_tac thread) apply (ctac add: postModifyRegisters_ccorres[simplified]) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rvd = ksCurThread s" - in ccorres_when) + apply (rule_tac R="\s. thread = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp apply (ctac (no_vcg) add: rescheduleRequired_ccorres) @@ -1274,8 +1270,8 @@ lemma invokeTCB_WriteRegisters_ccorres_helper: lemma doMachineOp_context: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -1284,8 +1280,8 @@ lemma doMachineOp_context: lemma getObject_context: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbContext_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -1344,8 +1340,8 @@ lemma getObject_context: lemma threadGet_context: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; t \ ksCurThread s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_context [where st=st]) apply (rule exI) @@ -1357,8 +1353,8 @@ done lemma asUser_context: "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; t \ ksCurThread s\ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (asUser (ksCurThread s) f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -1429,7 +1425,7 @@ lemma getMRs_rel_context: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s ; t \ ksCurThread s\ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -1514,7 +1510,7 @@ lemma asUser_setRegister_ko_at': done lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple @@ -1621,15 +1617,14 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_when[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) - apply (rule_tac xf'="\_. 0" in ccorres_call) - apply (rule restart_ccorres) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp apply simp - apply (simp add: xfdc_def) apply simp apply (rule ceqv_refl) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rv = ksCurThread s" - in ccorres_when) + apply (rule_tac R="\s. self = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp apply (ctac (no_vcg) add: rescheduleRequired_ccorres) @@ -1808,6 +1803,7 @@ shows apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) apply (ctac add: setRegister_ccorres) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc2) @@ -1868,15 +1864,15 @@ shows apply (rule bind_apply_cong[OF _ refl]) apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" in fun_cong [OF mapM_x_split_append]) - apply (rule_tac P="rva \ Some 0" in ccorres_gen_asm) - apply (subgoal_tac "(ipcBuffer = NULL) = (rva = None)") + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") prefer 2 apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.split_asm) apply (simp add: bind_assoc del: Collect_const) apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) (min (unat n) - (case rva of None \ unat n_msgRegisters + (case destIPCBuffer of None \ unat n_msgRegisters | _ \ unat n_frameRegisters))" in ccorres_split_nothrow_novcg) apply (rule ccorres_Cond_rhs) @@ -1884,7 +1880,7 @@ shows rule_tac F="\m s. obj_at' (\tcb. map ((atcbContextGet o tcbArch) tcb) (genericTake n (ARM_HYP_H.frameRegisters @ ARM_HYP_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="unat n_msgRegisters" in ccorres_mapM_x_while') @@ -1993,11 +1989,10 @@ shows apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) apply (simp add: drop_zip del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp del: Collect_const) apply (rule_tac F="\m s. obj_at' (\tcb. 
map ((atcbContextGet o tcbArch) tcb) (genericTake n (ARM_HYP_H.frameRegisters @ ARM_HYP_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s \ valid_pspace' s" + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="0" in ccorres_mapM_x_while') apply (clarsimp simp: less_diff_conv drop_zip) apply (rule ccorres_guard_imp2) @@ -2068,11 +2063,11 @@ shows apply (simp add: min_less_iff_disj less_imp_diff_less) apply (simp add: drop_zip n_gpRegisters_def) apply (elim disjE impCE) - apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) apply (rule ccorres_return_Skip') - apply (simp add: linorder_not_less word_le_nat_alt - drop_zip mapM_x_Nil n_frameRegisters_def - min.absorb1 n_msgRegisters_def) + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') apply simp apply ceqv @@ -2104,15 +2099,15 @@ shows apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size word_less_nat_alt split: if_split_asm dest!: word_unat.Rep_inverse') - apply simp - apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift static_imp_wp + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp tcb_in_cur_domain'_lift) apply (simp add: n_frameRegisters_def n_msgRegisters_def guard_is_UNIV_def) apply simp apply (rule mapM_x_wp') apply (rule hoare_pre) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem @@ -2121,7 +2116,7 @@ shows msgMaxLength_def msgLengthBits_def word_less_nat_alt unat_of_nat) apply (wp (once) hoare_drop_imps) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply (vcg exspec=setRegister_modifies) apply simp @@ -2141,12 +2136,12 @@ shows apply (simp cong: rev_conj_cong) apply wp apply (wp asUser_inv mapM_wp' getRegister_inv - asUser_get_registers[simplified] static_imp_wp)+ + asUser_get_registers[simplified] hoare_weak_lift_imp)+ apply (rule hoare_strengthen_post, rule asUser_get_registers) apply (clarsimp simp: obj_at'_def genericTake_def frame_gp_registers_convs) apply arith - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply (simp add: performTransfer_def) @@ -2227,7 +2222,8 @@ lemma decodeReadRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. 
capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2238,13 +2234,13 @@ lemma decodeReadRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2338,7 +2334,8 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2349,13 +2346,13 @@ lemma decodeWriteRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2363,7 +2360,7 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: performInvocation_def) apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres [where args=args and someNum="unat (args ! 1)"]) - apply (simp add: dc_def[symmetric] o_def) + apply simp apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) apply (rule ccorres_return_C_errorE, simp+)[1] apply wp[1] @@ -2384,7 +2381,7 @@ lemma decodeWriteRegisters_ccorres: numeral_eqs simp del: unsigned_numeral) apply (frule arg_cong[where f="\x. 
unat (of_nat x :: word32)"], - simp(no_asm_use) only: word_unat.Rep_inverse o_def, + simp(no_asm_use) only: word_unat.Rep_inverse, simp) apply (rule conjI) apply clarsimp @@ -2664,7 +2661,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (simp add: case_Null_If del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_if_lhs) @@ -2685,7 +2682,7 @@ lemma slotCapLongRunningDelete_ccorres: apply vcg apply (simp del: Collect_const) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of return_def) @@ -3244,7 +3241,6 @@ lemma decodeSetMCPriority_ccorres: >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetMCPriority_'proc)" supply Collect_const[simp del] - supply dc_simp[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3312,8 +3308,7 @@ lemma decodeSetMCPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3378,7 +3373,7 @@ lemma decodeSetPriority_ccorres: (decodeSetPriority args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetPriority_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3446,8 +3441,7 @@ lemma decodeSetPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3518,7 +3512,7 @@ lemma decodeSetSchedParams_ccorres: (decodeSetSchedParams args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetSchedParams_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3585,8 +3579,7 @@ lemma decodeSetSchedParams_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) 
@@ -3809,7 +3802,7 @@ lemma bindNotification_ccorres: (Call bindNotification_'proc)" apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr and tcb_at' tcb" and P'=UNIV + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) @@ -3829,7 +3822,7 @@ lemma bindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv") + apply (case_tac "ntfnObj ntfn") apply (auto simp: option_to_ctcb_ptr_def obj_at'_def objBits_simps projectKOs bindNTFN_alignment_junk)[4] apply (simp add: carch_state_relation_def typ_heap_simps') @@ -3841,7 +3834,7 @@ lemma bindNotification_ccorres: apply ceqv apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) - apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3[unfolded dc_def]) + apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule (1) rf_sr_tcb_update_no_queue2, @@ -3907,7 +3900,7 @@ lemma decodeUnbindNotification_ccorres: apply (rule ccorres_Guard_Seq) apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getBoundNotification) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. ntfn \ Some 0" in ccorres_cross_over_guard) apply (simp add: bindE_bind_linearise) apply wpc apply (simp add: bindE_bind_linearise[symmetric] @@ -4303,7 +4296,7 @@ lemma decodeSetSpace_ccorres: apply (simp add: Collect_False del: Collect_const) apply csymbr apply csymbr - apply (simp add: cnode_cap_case_if cap_get_tag_isCap dc_def[symmetric] + apply (simp add: cnode_cap_case_if cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_throwError @@ -4436,7 +4429,7 @@ lemma decodeSetSpace_ccorres: done lemma invokeTCB_SetTLSBase_ccorres: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs') @@ -4447,7 +4440,7 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (cinit lift: thread_' tls_base_') apply (simp add: liftE_def bind_assoc del: Collect_const) - apply (ctac add: setRegister_ccorres[simplified dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_pre_getCurThread) apply (rename_tac cur_thr) apply (rule ccorres_split_nothrow_novcg_dc) diff --git a/proof/crefine/ARM_HYP/VSpace_C.thy b/proof/crefine/ARM_HYP/VSpace_C.thy index 44e2d15f55..16f86c76cc 100644 --- a/proof/crefine/ARM_HYP/VSpace_C.thy +++ b/proof/crefine/ARM_HYP/VSpace_C.thy @@ -262,7 +262,7 @@ lemma loadHWASID_ccorres: apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_gets]) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_findPDForASIDAssert]) apply (rename_tac pd) - apply (rule_tac P="\s. pd_at_asid' pd asid s \ rv = armKSASIDMap (ksArchState s) + apply (rule_tac P="\s. 
pd_at_asid' pd asid s \ asidMap = armKSASIDMap (ksArchState s) \ pd \ ran (option_map snd o armKSASIDMap (ksArchState s) |` (- {asid})) \ option_map snd (armKSASIDMap (ksArchState s) asid) \ {None, Some pd} @@ -827,7 +827,7 @@ lemma lookupPTSlot_ccorres: apply csymbr apply csymbr apply (rule ccorres_abstract_cleanup) - apply (rule_tac P="(ret__unsigned = scast pde_pde_coarse) = (isPageTablePDE rv)" + apply (rule_tac P="(ret__unsigned = scast pde_pde_coarse) = (isPageTablePDE pde)" in ccorres_gen_asm2) apply (rule ccorres_cond2'[where R=\]) apply (clarsimp simp: Collect_const_mem) @@ -842,9 +842,10 @@ lemma lookupPTSlot_ccorres: apply (simp add: checkPTAt_def bind_liftE_distrib liftE_bindE returnOk_liftE[symmetric]) apply (rule ccorres_stateAssert) - apply (rule_tac P="page_table_at' (ptrFromPAddr (pdeTable rv)) - and ko_at' rv (lookup_pd_slot pd vptr) - and K (isPageTablePDE rv)" and P'=UNIV in ccorres_from_vcg_throws) + apply (rule_tac P="page_table_at' (ptrFromPAddr (pdeTable pde)) + and ko_at' pde (lookup_pd_slot pd vptr) and K (isPageTablePDE pde)" + and P'=UNIV + in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def Collect_const_mem lookup_pd_slot_def word_sle_def) @@ -988,7 +989,7 @@ lemma findPDForASID_ccorres: apply (rule_tac P=\ and P' =UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def bind_def NonDetMonad.lift_def) + apply (clarsimp simp: throwError_def return_def bindE_def bind_def Nondet_Monad.lift_def) apply (clarsimp simp: EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def) apply (simp add: lookup_fault_lift_invalid_root) @@ -1095,7 +1096,7 @@ lemma flushSpace_ccorres: apply (rule_tac Q=\ and Q'=\ in ccorres_if_cond_throws2) apply (clarsimp simp: Collect_const_mem pde_stored_asid_def) apply (simp add: if_split_eq1 to_bool_def) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply csymbr apply (clarsimp simp: pde_stored_asid_def) apply (case_tac "to_bool (stored_asid_valid_CL (pde_pde_invalid_lift stored_hw_asid___struct_pde_C))") @@ -1107,7 +1108,7 @@ lemma flushSpace_ccorres: apply clarsimp apply clarsimp apply (rule ccorres_call, - rule invalidateTranslationASID_ccorres [simplified dc_def xfdc_def], + rule invalidateTranslationASID_ccorres, simp+)[1] apply vcg apply wp+ @@ -1248,15 +1249,15 @@ lemma findFreeHWASID_ccorres: apply (rule_tac xf=hw_asid_offset_' and i=0 and xf_update=hw_asid_offset_'_update and r'=dc and xf'=xfdc and Q=UNIV - and F="\n s. rv = armKSHWASIDTable (ksArchState s) - \ nextASID = armKSNextASID (ksArchState s) - \ valid_arch_state' s" + and F="\n s. hwASIDTable = armKSHWASIDTable (ksArchState s) + \ nextASID = armKSNextASID (ksArchState s) + \ valid_arch_state' s" in ccorres_sequenceE_while_gen') apply (rule ccorres_from_vcg_might_throw) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: rf_sr_armKSNextASID) apply (subst down_cast_same [symmetric], - simp add: is_down_def target_size_def source_size_def word_size)+ + simp add: is_down_def target_size_def source_size_def word_size)+ apply (simp add: ucast_ucast_mask ucast_ucast_add ucast_and_mask ucast_of_nat_small asidInvalid_def @@ -1294,7 +1295,7 @@ lemma findFreeHWASID_ccorres: apply ceqv apply (rule ccorres_assert) apply (rule_tac A="\s. 
nextASID = armKSNextASID (ksArchState s) - \ rv = armKSHWASIDTable (ksArchState s) + \ hwASIDTable = armKSHWASIDTable (ksArchState s) \ valid_arch_state' s \ valid_pde_mappings' s" in ccorres_guard_imp2[where A'=UNIV]) apply (simp add: split_def) @@ -1405,7 +1406,6 @@ lemma armv_contextSwitch_ccorres: apply (cinit lift: cap_pd_' asid_') apply simp apply (ctac(no_vcg) add: getHWASID_ccorres) - apply (fold dc_def) apply (ctac (no_vcg)add: armv_contextSwitch_HWASID_ccorres) apply wp apply clarsimp @@ -1651,7 +1651,7 @@ lemma vcpu_write_reg_ccorres: \ \ \value = v \) hs (vcpuWriteReg vcpuptr reg v) (Call vcpu_write_reg_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit lift: vcpu_' reg_' value_') apply (rule ccorres_assert) apply clarsimp @@ -1666,12 +1666,13 @@ lemma vcpu_write_reg_ccorres: lemma vcpu_save_reg_ccorres: "ccorres dc xfdc (vcpu_at' vcpuptr) (UNIV \ \unat \reg = fromEnum r\ \ \ \vcpu = vcpu_Ptr vcpuptr \) hs (vcpuSaveReg vcpuptr r) (Call vcpu_save_reg_'proc)" - supply dc_simp[simp del] Collect_const[simp del] + supply Collect_const[simp del] apply (cinit lift: reg_' vcpu_') apply (rule ccorres_assert2) apply (rule ccorres_cond_false_seq, simp) apply (ctac add: vcpu_hw_read_reg_ccorres) - apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu, simp del: fun_upd_apply)+ + apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu)+ + apply (simp del: fun_upd_apply) apply (ctac add: vcpuUpdate_vcpuRegs_ccorres) apply wpsimp apply (vcg exspec=vcpu_hw_read_reg_modifies) @@ -1683,11 +1684,11 @@ lemma vcpu_restore_reg_ccorres: "ccorres dc xfdc (vcpu_at' vcpuptr) (UNIV \ \unat \reg = fromEnum r\ \ \ \vcpu = vcpu_Ptr vcpuptr \) hs (vcpuRestoreReg vcpuptr r) (Call vcpu_restore_reg_'proc)" - supply dc_simp[simp del] Collect_const[simp del] + supply Collect_const[simp del] apply (cinit lift: reg_' vcpu_') apply (rule ccorres_assert2) apply (rule ccorres_cond_false_seq, simp) - apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu, simp)+ + apply (rule ccorres_move_const_guard ccorres_move_c_guard_vcpu)+ apply (rule ccorres_pre_getObject_vcpu, rename_tac vcpu) apply (ctac add: vcpu_hw_write_reg_ccorres) apply (frule maxBound_is_bound') @@ -1733,7 +1734,6 @@ lemma vcpu_restore_reg_range_ccorres: apply (rule ccorres_grab_asm) apply (cinit lift: start_' end_' vcpu_' simp: whileAnno_def) apply csymbr - apply (clarsimp, fold dc_def) apply (rule ccorres_dc_from_rrel) (* supplying these as dest/intro/simp to proof tactics has no desired effect *) using maxBound_is_bound[of start, simplified fromEnum_maxBound_vcpureg_def] @@ -1770,7 +1770,6 @@ lemma vcpu_save_reg_range_ccorres: apply (rule ccorres_grab_asm) apply (cinit lift: start_' end_' vcpu_' simp: whileAnno_def) apply csymbr - apply (clarsimp, fold dc_def) apply (rule ccorres_dc_from_rrel) (* supplying these as dest/intro/simp to proof tactics has no desired effect *) using maxBound_is_bound[of start, simplified fromEnum_maxBound_vcpureg_def] @@ -1912,7 +1911,6 @@ lemma restore_virt_timer_ccorres: apply (rule ccorres_call) apply (rule_tac P="obj_at' (\vcpu'. 
vcpuVPPIMasked vcpu' vppievent_irq.VPPIEventIRQ_VTimer = vcpuVPPIMasked vcpu vppievent_irq.VPPIEventIRQ_VTimer) vcpuptr" in ccorres_cross_over_guard) - apply (fold dc_def) apply (rule maskInterrupt_ccorres, simp) apply simp apply simp @@ -1994,7 +1992,6 @@ lemma save_virt_timer_ccorres: apply (ctac (no_vcg) add: vcpu_write_reg_ccorres) apply (ctac (no_vcg) add: read_cntpct_ccorres) apply clarsimp - apply (fold dc_def) apply (rule vcpuUpdate_vTimer_pcount_ccorres) apply wpsimp+ apply (simp add: vcpureg_eq_use_types[where reg=VCPURegCNTV_CVALhigh, simplified, symmetric] @@ -2049,10 +2046,9 @@ lemma vcpu_disable_ccorres: apply (ctac (no_vcg) add: isb_ccorres) apply (ctac (no_vcg) add: setSCTLR_ccorres) apply (ctac (no_vcg) add: setHCR_ccorres) - apply (ctac (no_vcg) add: isb_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: isb_ccorres) apply (wpc; ccorres_rewrite) - apply (rule ccorres_return_Skip[simplified dc_def]) - apply (fold dc_def) + apply (rule ccorres_return_Skip) apply (rename_tac vcpu_ptr) apply (rule_tac P="the v \ 0" in ccorres_gen_asm) apply ccorres_rewrite @@ -2080,9 +2076,9 @@ lemma vcpu_enable_ccorres: apply (ctac (no_vcg) add: setHCR_ccorres) apply (ctac (no_vcg) add: isb_ccorres) apply (rule_tac P="ko_at' vcpu v" in ccorres_cross_over_guard) - apply (ctac pre: ccorres_move_c_guard_vcpu add: set_gic_vcpu_ctrl_hcr_ccorres[unfolded dc_def]) + apply (ctac pre: ccorres_move_c_guard_vcpu add: set_gic_vcpu_ctrl_hcr_ccorres) apply wpsimp+ - apply (fold dc_def, ctac (no_vcg) add: restore_virt_timer_ccorres) + apply (ctac (no_vcg) add: restore_virt_timer_ccorres) apply simp apply wpsimp apply (vcg exspec=set_gic_vcpu_ctrl_hcr_modifies) @@ -2126,7 +2122,7 @@ lemma ccorres_abstract_known: done lemma vcpu_restore_ccorres: - notes upt_Suc[simp del] dc_simp[simp del] Collect_const[simp del] + notes upt_Suc[simp del] Collect_const[simp del] shows "ccorres dc xfdc (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' and valid_arch_state' @@ -2164,7 +2160,7 @@ lemma vcpu_restore_ccorres: apply (rule_tac P="n \ 63" in ccorres_gen_asm) apply (rule ccorres_move_c_guard_vcpu) apply (ctac (no_vcg) add: set_gic_vcpu_ctrl_lr_ccorres) - apply (clarsimp simp: virq_to_H_def ko_at_vcpu_at'D dc_def upt_Suc) + apply (clarsimp simp: virq_to_H_def ko_at_vcpu_at'D upt_Suc) apply (rule conjI[rotated]) subgoal (* FIXME extract into separate lemma *) by (fastforce simp: word_less_nat_alt unat_of_nat_eq elim: order_less_le_trans) @@ -2182,7 +2178,7 @@ lemma vcpu_restore_ccorres: apply wpsimp apply (vcg exspec=vcpu_restore_reg_range_modifies) apply (wpsimp wp: crunch_wps) - apply (wpsimp simp: guard_is_UNIV_def dc_def upt_Suc ko_at_vcpu_at'D wp: mapM_x_wp_inv + apply (wpsimp simp: guard_is_UNIV_def upt_Suc ko_at_vcpu_at'D wp: mapM_x_wp_inv | rule UNIV_I | wp hoare_vcg_imp_lift hoare_vcg_all_lift hoare_vcg_disj_lift)+ apply (fastforce simp: fromEnum_def enum_vcpureg seL4_VCPUReg_SPSRfiq_def) @@ -2259,7 +2255,7 @@ lemma vgicUpdateLR_ccorres: done lemma vcpu_save_ccorres: - notes dc_simp[simp del] Collect_const[simp del] + notes Collect_const[simp del] shows "ccorres dc xfdc (pspace_aligned' and pspace_distinct' and valid_objs' and no_0_obj' and valid_arch_state' @@ -2296,7 +2292,7 @@ lemma vcpu_save_ccorres: apply (rule ccorres_move_c_guard_vcpu) apply clarsimp apply (ctac (no_vcg) add: vgicUpdate_APR_ccorres) - apply (ctac (no_vcg) add: ccorres_gets_armKSGICVCPUNumListRegs[simplified comp_def]) + apply (ctac (no_vcg) add: ccorres_gets_armKSGICVCPUNumListRegs) apply (rename_tac lr_num lr_num') 
apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) @@ -2325,7 +2321,7 @@ lemma vcpu_save_ccorres: apply ceqv apply (ctac (no_vcg) add: armv_vcpu_save_ccorres) apply (wpsimp simp: guard_is_UNIV_def wp: mapM_x_wp_inv)+ - apply (simp add: invs_no_cicd'_def valid_arch_state'_def max_armKSGICVCPUNumListRegs_def dc_def) + apply (simp add: invs_no_cicd'_def valid_arch_state'_def max_armKSGICVCPUNumListRegs_def) done lemma vcpu_switch_ccorres_None: @@ -2342,7 +2338,7 @@ lemma vcpu_switch_ccorres_None: apply wpc (* v = None & CurVCPU = None *) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) (* v = None & CurVCPU \ None *) apply ccorres_rewrite apply wpc @@ -2352,7 +2348,7 @@ lemma vcpu_switch_ccorres_None: apply (rule_tac R="\s. armHSCurVCPU (ksArchState s) = Some (ccurv, cactive)" in ccorres_cond) apply (clarsimp simp: cur_vcpu_relation_def dest!: rf_sr_ksArchState_armHSCurVCPU) apply (ctac add: vcpu_disable_ccorres) - apply (rule_tac v=x2 in armHSCurVCPU_update_active_ccorres[simplified dc_def]) + apply (rule_tac v=x2 in armHSCurVCPU_update_active_ccorres) apply simp apply simp apply wp @@ -2360,7 +2356,7 @@ lemma vcpu_switch_ccorres_None: apply assumption apply clarsimp apply (vcg exspec=vcpu_disable_modifies) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp, rule conjI) apply (fastforce dest: invs_cicd_arch_state' simp: valid_arch_state'_def vcpu_at_is_vcpu' ko_wp_at'_def split: option.splits) by (auto dest!: rf_sr_ksArchState_armHSCurVCPU simp: cur_vcpu_relation_def)+ @@ -2384,7 +2380,7 @@ lemma vcpu_switch_ccorres_Some: apply (rule ccorres_cond_false_seq) apply ccorres_rewrite apply (ctac add: vcpu_restore_ccorres) - apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres[unfolded dc_def]) + apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres) apply wp apply clarsimp apply (vcg exspec=vcpu_restore_modifies) @@ -2401,7 +2397,7 @@ lemma vcpu_switch_ccorres_Some: apply (rule ccorres_cond_true_seq) apply (ctac add: vcpu_save_ccorres) apply (ctac add: vcpu_restore_ccorres) - apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres[unfolded dc_def]) + apply (rule_tac curv="Some (v, True)" in armHSCurVCPU_update_ccorres) apply wp apply clarsimp apply (vcg exspec=vcpu_restore_modifies) @@ -2419,13 +2415,13 @@ lemma vcpu_switch_ccorres_Some: apply (rule ccorres_rhs_assoc) apply (ctac (no_vcg) add: isb_ccorres) apply (ctac (no_vcg) add: vcpu_enable_ccorres) - apply (rule_tac v="(v, cactive)" in armHSCurVCPU_update_active_ccorres[simplified dc_def]) + apply (rule_tac v="(v, cactive)" in armHSCurVCPU_update_active_ccorres) apply simp apply simp apply wp apply (wpsimp wp: hoare_vcg_conj_lift vcpuSave_invs_no_cicd' vcpuSave_typ_at') (* ccactive =true *) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) (* last goal *) apply simp apply (rule conjI @@ -2466,11 +2462,11 @@ lemma setVMRoot_ccorres: apply (simp add: cap_case_isPageDirectoryCap cong: if_cong) apply (rule ccorres_cond_true_seq) apply (rule ccorres_rhs_assoc) - apply (simp add: throwError_def catch_def dc_def[symmetric]) + apply (simp add: throwError_def catch_def) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armUSGlobalPD) apply csymbr - apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState) apply (rule ccorres_add_return2) apply 
(ctac (no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) @@ -2490,11 +2486,11 @@ lemma setVMRoot_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armUSGlobalPD) apply csymbr - apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (simp add: cap_case_isPageDirectoryCap) @@ -2518,31 +2514,31 @@ lemma setVMRoot_ccorres: apply (simp add: whenE_def throwError_def checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) - apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_armUSGlobalPD) apply csymbr apply (rule ccorres_add_return2) apply (ctac(no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (simp add: whenE_def returnOk_def) - apply (ctac (no_vcg) add: armv_contextSwitch_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: armv_contextSwitch_ccorres) apply (rename_tac tcb) apply simp apply clarsimp apply (simp add: checkPDNotInASIDMap_def checkPDASIDMapMembership_def) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc)+ - apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_armUSGlobalPD_ksArchState) apply (rule ccorres_h_t_valid_armUSGlobalPD) apply csymbr apply (rule ccorres_add_return2) apply (ctac(no_vcg) add: setCurrentPD_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply simp @@ -2588,9 +2584,9 @@ lemma setVMRootForFlush_ccorres: del: Collect_const) apply (rule ccorres_if_lhs) apply (rule_tac P="(capPDIsMapped_CL (cap_page_directory_cap_lift threadRoot) = 0) - = (capPDMappedASID (capCap rva) = None) + = (capPDMappedASID (capCap rv) = None) \ capPDBasePtr_CL (cap_page_directory_cap_lift threadRoot) - = capPDBasePtr (capCap rva)" in ccorres_gen_asm2) + = capPDBasePtr (capCap rv)" in ccorres_gen_asm2) apply (rule ccorres_rhs_assoc | csymbr | simp add: Collect_True del: Collect_const)+ apply (rule ccorres_split_throws) apply (rule ccorres_return_C, simp+) @@ -2701,7 +2697,7 @@ lemma doFlush_ccorres: apply (rule_tac xf'=invLabel___int_' in ccorres_abstract, ceqv, rename_tac invlabel) apply (rule_tac P="flushtype_relation t invlabel" in ccorres_gen_asm2) apply (simp only: dmo_flushtype_case Let_def) - apply (wpc ; simp add: dc_def[symmetric]) + apply (wpc ; simp) apply (rule ccorres_cond_true) apply (ctac (no_vcg) add: cleanCacheRange_RAM_ccorres) apply (rule ccorres_cond_false) @@ -2719,7 +2715,6 @@ lemma doFlush_ccorres: empty_fail_invalidateCacheRange_I empty_fail_branchFlushRange empty_fail_isb doMachineOp_bind empty_fail_cond) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def) apply (ctac (no_vcg) add: cleanCacheRange_PoU_ccorres) apply (ctac (no_vcg) add: dsb_ccorres) apply (ctac (no_vcg) add: invalidateCacheRange_I_ccorres) @@ -2777,7 +2772,7 @@ lemma performPageFlush_ccorres: apply (rule ccorres_return_Skip) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) 
apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply wpsimp apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: order_less_imp_le) @@ -2806,12 +2801,12 @@ lemma setRegister_ccorres: (asUser thread (setRegister reg val)) (Call setRegister_'proc)" apply (cinit' lift: thread_' reg_' w_') - apply (simp add: asUser_def dc_def[symmetric] split_def split del: if_split) + apply (simp add: asUser_def split_def) apply (rule ccorres_pre_threadGet) apply (rule ccorres_Guard) apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) - apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = rv" - in threadSet_ccorres_lemma2 [unfolded dc_def]) + apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) apply vcg apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def simpler_modify_def typ_heap_simps) @@ -2842,8 +2837,6 @@ lemma msgRegisters_ccorres: (* usually when we call setMR directly, we mean to only set a registers, which will fit in actual registers *) lemma setMR_as_setRegister_ccorres: - notes dc_simp[simp del] - shows "ccorres (\rv rv'. rv' = of_nat offset + 1) ret__unsigned_' (tcb_at' thread and K (TCB_H.msgRegisters ! offset = reg \ offset < length msgRegisters)) (UNIV \ \\reg = val\ @@ -2860,7 +2853,7 @@ lemma setMR_as_setRegister_ccorres: apply (ctac add: setRegister_ccorres) apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setRegister_modifies) apply (clarsimp simp: n_msgRegisters_def length_of_msgRegisters not_le conj_commute) @@ -3004,7 +2997,6 @@ lemma flushPage_ccorres: apply (rule ccorres_cond2[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) apply (rule ccorres_pre_getCurThread) - apply (fold dc_def) apply (ctac add: setVMRoot_ccorres) apply (rule ccorres_return_Skip) apply (wp | simp add: cur_tcb'_def[symmetric])+ @@ -3267,8 +3259,7 @@ lemma unmapPage_ccorres: (unmapPage sz asid vptr pptr) (Call unmapPage_'proc)" apply (rule ccorres_gen_asm) apply (cinit lift: page_size_' asid_' vptr_' pptr_') - apply (simp add: ignoreFailure_liftM ptr_add_assertion_positive - Collect_True + apply (simp add: ignoreFailure_liftM ptr_add_assertion_positive Collect_True del: Collect_const) apply ccorres_remove_UNIV_guard apply csymbr @@ -3280,12 +3271,12 @@ lemma unmapPage_ccorres: apply (rule ccorres_splitE_novcg[where r'=dc and xf'=xfdc]) \ \ARMSmallPage\ apply (rule ccorres_Cond_rhs) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric]) + apply (simp add: gen_framesize_to_H_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac pt_slot pt_slot') - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) @@ -3300,7 +3291,7 @@ lemma unmapPage_ccorres: split: if_split_asm pte.split_asm) apply (rule ceqv_refl) apply (simp add: unfold_checkMapping_return liftE_liftM - Collect_const[symmetric] dc_def[symmetric] + Collect_const[symmetric] del: Collect_const) apply (rule ccorres_handlers_weaken2) apply csymbr @@ -3308,8 +3299,7 @@ lemma unmapPage_ccorres: apply (rule storePTE_Basic_ccorres) apply (simp add: cpte_relation_def Let_def) apply csymbr - apply simp - apply (ctac add: 
cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply wp apply (simp add: guard_is_UNIV_def) apply wp @@ -3323,18 +3313,17 @@ lemma unmapPage_ccorres: apply (vcg exspec=lookupPTSlot_modifies) \ \ARMLargePage\ apply (rule ccorres_Cond_rhs) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric]) + apply (simp add: gen_framesize_to_H_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac ptSlot lookupPTSlot_ret) - apply (simp add: Collect_False dc_def[symmetric] del: Collect_const) + apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, - rule checkMappingPPtr_pte_ccorres) + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pte_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') subgoal by (simp add: cpte_relation_def Let_def pte_lift_def @@ -3342,7 +3331,7 @@ lemma unmapPage_ccorres: pte_pte_small_lift_def split: if_split_asm pte.split_asm) apply (rule ceqv_refl) - apply (simp add: liftE_liftM dc_def[symmetric] + apply (simp add: liftE_liftM mapM_discarded whileAnno_def ARMLargePageBits_def ARMSmallPageBits_def Collect_False unfold_checkMapping_return word_sle_def del: Collect_const) @@ -3376,7 +3365,7 @@ lemma unmapPage_ccorres: apply csymbr apply (rule ccorres_move_c_guard_pte ccorres_move_array_assertion_pte_16)+ apply (rule ccorres_add_return2, - ctac(no_vcg) add: cleanCacheRange_PoU_ccorres[unfolded dc_def]) + ctac(no_vcg) add: cleanCacheRange_PoU_ccorres) apply (rule ccorres_move_array_assertion_pte_16, rule ccorres_return_Skip') apply wp apply (rule_tac P="is_aligned ptSlot 7" in hoare_gen_asm) @@ -3419,15 +3408,14 @@ lemma unmapPage_ccorres: apply (rule ccorres_Cond_rhs) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] - liftE_liftM + apply (simp add: gen_framesize_to_H_def liftE_liftM del: Collect_const) apply (simp split: if_split, rule conjI[rotated], rule impI, rule ccorres_empty, rule impI) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) - apply (rule ccorres_splitE_novcg, simp only: inl_rrel_inl_rrel, + apply (rule ccorres_splitE_novcg, simp, rule checkMappingPPtr_pde_ccorres) apply (rule conseqPre, vcg) apply (clarsimp simp: typ_heap_simps') @@ -3435,16 +3423,16 @@ lemma unmapPage_ccorres: Let_def pde_tag_defs isSectionPDE_def split: pde.split_asm if_split_asm) apply (rule ceqv_refl) - apply (simp add: unfold_checkMapping_return Collect_False dc_def[symmetric] - del: Collect_const) - apply (rule ccorres_handlers_weaken2, simp) + apply (simp add: unfold_checkMapping_return Collect_False + del: Collect_const) + apply (rule ccorres_handlers_weaken2) apply csymbr apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def pde_lift_pde_invalid) apply csymbr - apply (ctac add: cleanByVA_PoU_ccorres[unfolded dc_def]) + apply (ctac add: cleanByVA_PoU_ccorres) apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp @@ -3459,8 +3447,7 @@ lemma unmapPage_ccorres: apply (case_tac "pd = pde_Ptr (lookup_pd_slot pdPtr vptr)") prefer 2 apply (simp, rule ccorres_empty) - apply (simp add: gen_framesize_to_H_def dc_def[symmetric] - liftE_liftM 
mapM_discarded whileAnno_def + apply (simp add: gen_framesize_to_H_def liftE_liftM mapM_discarded whileAnno_def del: Collect_const) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, @@ -3510,7 +3497,7 @@ lemma unmapPage_ccorres: apply csymbr apply (rule ccorres_move_c_guard_pde ccorres_move_array_assertion_pde_16)+ apply (rule ccorres_add_return2) - apply (ctac(no_vcg) add: cleanCacheRange_PoU_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: cleanCacheRange_PoU_ccorres) apply (rule ccorres_move_array_assertion_pde_16, rule ccorres_return_Skip') apply wp apply (rule_tac P="is_aligned pdPtr pdBits" in hoare_gen_asm) @@ -3548,14 +3535,14 @@ lemma unmapPage_ccorres: apply (rule ccorres_empty[where P=\]) apply ceqv apply (simp add: liftE_liftM) - apply (ctac add: flushPage_ccorres[unfolded dc_def]) + apply (ctac add: flushPage_ccorres) apply ((wp lookupPTSlot_inv mapM_storePTE_invs[unfolded swp_def] mapM_storePDE_invs[unfolded swp_def] | wpc | simp)+)[1] apply (simp add: guard_is_UNIV_def) apply (simp add: throwError_def) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply (simp add: lookup_pd_slot_def Let_def table_bits_defs) apply (wp hoare_vcg_const_imp_lift_R findPDForASID_valid_offset'[simplified table_bits_defs] @@ -4061,13 +4048,13 @@ lemma performASIDPoolInvocation_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule_tac ccorres_split_nothrow [where r'=dc and xf'=xfdc]) apply (simp add: updateCap_def) - apply (rule_tac A="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPDCap rv \ asid \ mask asid_bits)" + apply (rule_tac A="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPDCap oldcap \ asid \ mask asid_bits)" and A'=UNIV in ccorres_guard_imp2) apply (rule ccorres_pre_getCTE) - apply (rule_tac P="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPDCap rv \ asid \ mask asid_bits) - and cte_wp_at' ((=) rva) ctSlot" + apply (rule_tac P="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPDCap oldcap \ asid \ mask asid_bits) + and cte_wp_at' ((=) rv) ctSlot" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -4135,7 +4122,7 @@ lemma performASIDPoolInvocation_ccorres: apply (wp getASID_wp) apply simp apply wp - apply (simp add: o_def inv_def) + apply (simp add: inv_def) apply (wp getASID_wp) apply simp apply (rule empty_fail_getObject) @@ -4196,14 +4183,14 @@ lemma flushTable_ccorres: apply (rule_tac R=\ in ccorres_cond2) apply (clarsimp simp: from_bool_0 Collect_const_mem) apply (rule ccorres_pre_getCurThread) - apply (ctac (no_vcg) add: setVMRoot_ccorres [unfolded dc_def]) - apply (rule ccorres_return_Skip[unfolded dc_def]) - apply (wp static_imp_wp) + apply (ctac (no_vcg) add: setVMRoot_ccorres) + apply (rule ccorres_return_Skip) + apply (wp hoare_weak_lift_imp) apply clarsimp apply (rule_tac Q="\_ s. 
invs' s \ cur_tcb' s" in hoare_post_imp) apply (simp add: invs'_invs_no_cicd cur_tcb'_def) apply (wp mapM_x_wp_inv getPTE_wp | wpc)+ - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply wp apply clarsimp apply (strengthen invs_valid_pde_mappings') diff --git a/proof/crefine/Move_C.thy b/proof/crefine/Move_C.thy index 6d98067203..80b4b52fae 100644 --- a/proof/crefine/Move_C.thy +++ b/proof/crefine/Move_C.thy @@ -566,7 +566,7 @@ lemma map_to_ko_at_updI': \ (projectKO_opt \\<^sub>m (ksPSpace s)) x = Some y; valid_pspace' s; ko_at' y' x' s; objBitsKO (injectKO y') = objBitsKO y''; x \ x' \ \ - ko_at' y x (s\ksPSpace := ksPSpace s(x' \ y'')\)" + ko_at' y x (s\ksPSpace := (ksPSpace s)(x' \ y'')\)" by (fastforce simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd dest: map_to_ko_atI) @@ -677,7 +677,7 @@ lemma asUser_mapM_x: apply (rule bind_apply_cong [OF refl])+ apply (clarsimp simp: in_monad dest!: fst_stateAssertD) apply (drule use_valid, rule mapM_wp', rule asUser.typ_at_lifts_all', assumption) - apply (simp add: stateAssert_def get_def NonDetMonad.bind_def) + apply (simp add: stateAssert_def get_def Nondet_Monad.bind_def) done lemma asUser_threadGet_tcbFault_comm: @@ -877,7 +877,7 @@ lemma cteDeleteOne_sch_act_wf: apply (simp add: cteDeleteOne_def unless_when split_def) apply (simp add: finaliseCapTrue_standin_def Let_def) apply (wpsimp wp: isFinalCapability_inv cancelAllSignals_sch_act_wf - cancelAllIPC_sch_act_wf getCTE_wp' static_imp_wp weak_if_wp' + cancelAllIPC_sch_act_wf getCTE_wp' hoare_weak_lift_imp weak_if_wp' simp: Let_def) done @@ -899,7 +899,7 @@ lemmas setNotification_tcb = set_ntfn'.obj_at_tcb' lemma state_refs_of'_upd: "\ valid_pspace' s; ko_wp_at' (\ko. objBitsKO ko = objBitsKO ko') ptr s \ \ - state_refs_of' (s\ksPSpace := ksPSpace s(ptr \ ko')\) = + state_refs_of' (s\ksPSpace := (ksPSpace s)(ptr \ ko')\) = (state_refs_of' s)(ptr := refs_of' ko')" apply (rule ext) apply (clarsimp simp: ps_clear_upd valid_pspace'_def pspace_aligned'_def @@ -1372,7 +1372,7 @@ lemma asUser_obj_at': lemma update_ep_map_to_ctes: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + shows "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm) diff --git a/proof/crefine/RISCV64/ADT_C.thy b/proof/crefine/RISCV64/ADT_C.thy index cc33bc2da2..ad14a9ad6a 100644 --- a/proof/crefine/RISCV64/ADT_C.thy +++ b/proof/crefine/RISCV64/ADT_C.thy @@ -86,7 +86,7 @@ lemma setTCBContext_C_corres: apply clarsimp apply (frule getObject_eq [rotated -1], simp) apply (simp add: objBits_simps') - apply (simp add: NonDetMonad.bind_def split_def) + apply (simp add: Nondet_Monad.bind_def split_def) apply (rule bexI) prefer 2 apply assumption diff --git a/proof/crefine/RISCV64/ArchMove_C.thy b/proof/crefine/RISCV64/ArchMove_C.thy index ee98d8cf11..8c0e7ce8e6 100644 --- a/proof/crefine/RISCV64/ArchMove_C.thy +++ b/proof/crefine/RISCV64/ArchMove_C.thy @@ -339,8 +339,7 @@ lemma asid_shiftr_low_bits_less[simplified]: lemma getActiveIRQ_neq_Some0x3FF': "\\\ getActiveIRQ in_kernel \\rv s. 
rv \ Some 0x3FF\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma getActiveIRQ_neq_Some0x3FF: diff --git a/proof/crefine/RISCV64/Arch_C.thy b/proof/crefine/RISCV64/Arch_C.thy index d009641274..e657057cd0 100644 --- a/proof/crefine/RISCV64/Arch_C.thy +++ b/proof/crefine/RISCV64/Arch_C.thy @@ -73,7 +73,7 @@ using [[goals_limit=20]] apply (ctac add: unmapPageTable_ccorres) apply (simp add: storePTE_def' swp_def) apply clarsimp - apply(simp only: dc_def[symmetric] bit_simps_corres[symmetric]) + apply(simp only: bit_simps_corres[symmetric]) apply (ctac add: clearMemory_setObject_PTE_ccorres[simplified objBits_InvalidPTE_pte_bits]) apply wp apply (simp del: Collect_const) @@ -448,7 +448,7 @@ shows apply (rule ccorres_rhs_assoc2) apply (rule ccorres_abstract_cleanup) apply (rule ccorres_symb_exec_l) - apply (rule_tac P = "rva = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (rule_tac P = "rv = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) apply (simp add: hrs_htd_update del:fun_upd_apply) apply (rule ccorres_split_nothrow) @@ -671,7 +671,7 @@ lemma liftME_option_catch_bind: apply (rule ext) apply (clarsimp simp: return_def) apply (case_tac "m s", clarsimp) - apply (auto simp: split_def throwError_def return_def NonDetMonad.lift_def + apply (auto simp: split_def throwError_def return_def Nondet_Monad.lift_def split: prod.splits sum.splits) done @@ -967,7 +967,7 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (solves \clarsimp simp: asidInvalid_def isCap_simps ccap_relation_PageTableCap_IsMapped\) apply (simp add: throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply csymbr apply csymbr @@ -980,7 +980,7 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (fold not_None_def) (* avoid expanding capPTMappedAddress *) apply clarsimp apply (simp add: throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: lookupError_injection invocationCatch_use_injection_handler injection_bindE[OF refl refl] injection_handler_If bindE_assoc @@ -992,7 +992,7 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves\clarsimp simp: asidInvalid_def isCap_simps ccap_relation_PageTableCap_BasePtr\) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: bindE_assoc) apply (ctac pre: ccorres_liftE_Seq add: lookupPTSlot_ccorres) @@ -1022,7 +1022,7 @@ lemma decodeRISCVPageTableInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves clarsimp) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* checks are done, move on to doing the mapping *) apply (clarsimp simp: injection_handler_returnOk) @@ -1324,7 +1324,7 @@ lemma performPageInvocationMapPTE_ccorres: done lemma performPageGetAddress_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres 
((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart) @@ -1351,7 +1351,7 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) @@ -1374,7 +1374,7 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp @@ -1387,10 +1387,10 @@ lemma performPageGetAddress_ccorres: Kernel_C.msgInfoRegister_def Kernel_C.a1_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply (clarsimp simp: ThreadState_Running_def) apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply vcg @@ -1779,7 +1779,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (solves \clarsimp simp: asidInvalid_def isCap_simps ccap_relation_PageTableCap_IsMapped\) apply (simp add: throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply csymbr apply csymbr @@ -1796,7 +1796,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves\clarsimp simp: asidInvalid_def isCap_simps ccap_relation_PageTableCap_BasePtr\) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: bindE_assoc) (* check vaddr is valid *) @@ -1808,7 +1808,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves \clarsimp simp: pptrUserTop_def' p_assoc_help\) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* check vaddr alignment *) apply (clarsimp simp: checkVPAlignment_def unlessE_def injection_handler_If @@ -1819,7 +1819,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws2[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves \clarsimp simp: vmsz_aligned_def from_bool_0 is_aligned_mask\) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* lookup pt slot *) @@ -1831,7 +1831,6 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rename_tac ptSlot ptSlot_ret) apply wpfix apply (rule_tac P="unat (ptBitsLeft_C ptSlot_ret) < 64" in ccorres_gen_asm) - apply (fold dc_def id_def) apply (rule ccorres_if_lhs[rotated]) (* throwing a lookup fault, branch condition on C side is true *) apply (prop_tac "ptBitsLeft_C ptSlot_ret @@ -1844,7 +1843,7 @@ lemma decodeRISCVFrameInvocation_ccorres: 
lookup_fault_missing_capability_new_'proc *) apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def NonDetMonad.lift_def + apply (clarsimp simp: throwError_def return_def bindE_def Nondet_Monad.lift_def exception_defs lookup_fault_lift_invalid_root) apply (clarsimp simp: syscall_error_rel_def exception_defs syscall_error_to_H_def syscall_error_type_defs) @@ -1892,7 +1891,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws2[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves clarsimp) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* checks handled, perform frame map *) @@ -1959,7 +1958,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (clarsimp simp: isCap_simps not_None_def ccap_relation_FrameCap_MappedAddress ccap_relation_PageTableCap_MappedASID ccap_relation_FrameCap_MappedASID) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* ensure mapped address of frame matches *) apply csymbr @@ -1968,7 +1967,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves clarsimp) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* ensure lookupPTSlot returned a slot with a PTE *) @@ -1991,7 +1990,7 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (rule ccorres_if_cond_throws2[rotated -1, where Q=\ and Q'=\]) apply vcg apply (solves clarsimp) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* checks handled, perform frame remap *) @@ -2185,7 +2184,6 @@ lemma decodeRISCVFrameInvocation_ccorres: apply (prop_tac "(addrFromPPtr p >> 12) AND mask 44 = (addrFromPPtr p >> 12)") subgoal apply (frule cte_wp_at'_frame_at', fastforce) - apply (clarsimp simp: comp_def) apply (prop_tac "canonical_address p") apply (erule canonical_address_frame_at', fastforce) apply (prop_tac "p \ kernel_mappings") @@ -2852,7 +2850,7 @@ lemma decodeRISCVMMUInvocation_ccorres: (* Can't reach *) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (cases cp; simp add: isCap_simps) - apply (clarsimp simp: o_def) + apply clarsimp apply (rule conjI) (* PTCap *) apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule_tac t="cteCap cte" in sym) diff --git a/proof/crefine/RISCV64/CSpaceAcc_C.thy b/proof/crefine/RISCV64/CSpaceAcc_C.thy index 1bf605bbda..d177cad8d1 100644 --- a/proof/crefine/RISCV64/CSpaceAcc_C.thy +++ b/proof/crefine/RISCV64/CSpaceAcc_C.thy @@ -270,7 +270,7 @@ lemma array_assertion_abs_cnode_ctes: apply (metis array_assertion_shrink_right) done -lemmas ccorres_move_array_assertion_cnode_ctes [corres_pre] +lemmas ccorres_move_array_assertion_cnode_ctes [ccorres_pre] = ccorres_move_Guard_Seq [OF array_assertion_abs_cnode_ctes] ccorres_move_Guard [OF array_assertion_abs_cnode_ctes] diff --git a/proof/crefine/RISCV64/CSpace_All.thy b/proof/crefine/RISCV64/CSpace_All.thy index f7e6f8f286..5054835fd9 100644 --- a/proof/crefine/RISCV64/CSpace_All.thy +++ b/proof/crefine/RISCV64/CSpace_All.thy @@ -25,9 +25,9 @@ abbreviation (* FIXME: 
move *) lemma ccorres_return_into_rel: - "ccorres (\rv rv'. r (f rv) rv') xf G G' hs a c + "ccorres (r \ f) xf G G' hs a c \ ccorres r xf G G' hs (a >>= (\rv. return (f rv))) c" - by (simp add: liftM_def[symmetric] o_def) + by (simp add: liftM_def[symmetric]) lemma lookupCap_ccorres': "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf diff --git a/proof/crefine/RISCV64/CSpace_C.thy b/proof/crefine/RISCV64/CSpace_C.thy index 084a292f9d..a478e93861 100644 --- a/proof/crefine/RISCV64/CSpace_C.thy +++ b/proof/crefine/RISCV64/CSpace_C.thy @@ -767,7 +767,7 @@ lemma update_freeIndex': show ?thesis apply (cinit lift: cap_ptr_' v64_') apply (rule ccorres_pre_getCTE) - apply (rule_tac P="\s. ctes_of s srcSlot = Some rv \ (\i. cteCap rv = UntypedCap d p sz i)" + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. cteCap cte = UntypedCap d p sz i)" in ccorres_from_vcg[where P' = UNIV]) apply (rule allI) apply (rule conseqPre) @@ -889,7 +889,7 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (rule ccorres_Guard) apply (rule ccorres_call) - apply (rule update_freeIndex [unfolded dc_def]) + apply (rule update_freeIndex) apply simp apply simp apply simp @@ -915,14 +915,14 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply csymbr apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) apply clarsimp apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap) apply (frule(1) cte_wp_at_valid_objs_valid_cap') apply (clarsimp simp: untypedBits_defs) @@ -1038,19 +1038,17 @@ lemma cteInsert_ccorres: apply csymbr apply simp apply (rule ccorres_move_c_guard_cte) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres: ccorres_updateMDB_skip) - apply (wp static_imp_wp)+ - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg - apply (wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg apply (clarsimp simp:cmdb_node_relation_mdbNext) - apply (wp setUntypedCapAsFull_cte_at_wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply (vcg exspec=setUntypedCapAsFull_modifies) apply wp apply vcg @@ -2232,7 +2230,6 @@ lemma postCapDeletion_ccorres: apply (rule ccorres_symb_exec_r) apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) - apply (fold dc_def) apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) apply (clarsimp simp: cap_irq_handler_cap_lift) apply (ctac(no_vcg) add: 
deletedIRQHandler_ccorres) @@ -2243,9 +2240,9 @@ lemma postCapDeletion_ccorres: apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_Cond_rhs) apply (wpc; clarsimp simp: isCap_simps) - apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres) apply (simp add: not_irq_or_arch_cap_case) - apply (rule ccorres_return_Skip[unfolded dc_def])+ + apply (rule ccorres_return_Skip) apply clarsimp apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) @@ -2294,7 +2291,7 @@ lemma emptySlot_ccorres: \ \*** proof for the 'else' branch (return () and SKIP) ***\ prefer 2 - apply (ctac add: ccorres_return_Skip[unfolded dc_def]) + apply (ctac add: ccorres_return_Skip) \ \*** proof for the 'then' branch ***\ @@ -2339,7 +2336,7 @@ lemma emptySlot_ccorres: \ \the post_cap_deletion case\ - apply (ctac(no_vcg) add: postCapDeletion_ccorres [unfolded dc_def]) + apply (ctac(no_vcg) add: postCapDeletion_ccorres) \ \Haskell pre/post for y \ updateMDB slot (\a. nullMDBNode);\ apply wp @@ -2412,8 +2409,8 @@ lemma capSwapForDelete_ccorres: \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ apply (simp add:when_def) apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) - apply (case_tac "slot1=slot2", simp+) - apply (rule ccorres_return_void_C [simplified dc_def]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) \ \***Main goal***\ \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ @@ -2422,7 +2419,7 @@ lemma capSwapForDelete_ccorres: apply (rule ccorres_pre_getCTE)+ apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ \ \***Main goal***\ - apply (ctac (no_vcg) add: cteSwap_ccorres [unfolded dc_def] ) + apply (ctac (no_vcg) add: cteSwap_ccorres) \ \C Hoare triple for \cap2 :== \\ apply vcg \ \C existential Hoare triple for \cap2 :== \\ diff --git a/proof/crefine/RISCV64/CSpace_RAB_C.thy b/proof/crefine/RISCV64/CSpace_RAB_C.thy index 306d2a78fc..c4c5564905 100644 --- a/proof/crefine/RISCV64/CSpace_RAB_C.thy +++ b/proof/crefine/RISCV64/CSpace_RAB_C.thy @@ -54,7 +54,7 @@ lemma ccorres_remove_bind_returnOk_noguard: apply clarsimp apply (drule not_snd_bindE_I1) apply (erule (4) ccorresE[OF ac]) - apply (clarsimp simp add: bindE_def returnOk_def NonDetMonad.lift_def bind_def return_def + apply (clarsimp simp add: bindE_def returnOk_def Nondet_Monad.lift_def bind_def return_def split_def) apply (rule bexI [rotated], assumption) apply (simp add: throwError_def return_def unif_rrel_def @@ -208,10 +208,8 @@ next apply (simp add: cap_get_tag_isCap split del: if_split) apply (thin_tac "ret__unsigned_longlong = X" for X) apply (rule ccorres_split_throws [where P = "?P"]) - apply (rule_tac G' = "\w_rightsMask. ({s. nodeCap_' s = nodeCap} - \ {s. unat (n_bits_' s) = guard'})" - in ccorres_abstract [where xf' = w_rightsMask_']) - apply (rule ceqv_refl) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. 
unat (n_bits_' s) = guard'}" + in ccorres_inst) apply (rule_tac r' = "?rvr" in ccorres_rel_imp [where xf' = rab_xf]) defer diff --git a/proof/crefine/RISCV64/Ctac_lemmas_C.thy b/proof/crefine/RISCV64/Ctac_lemmas_C.thy index a1f8a57874..1cf990d2fb 100644 --- a/proof/crefine/RISCV64/Ctac_lemmas_C.thy +++ b/proof/crefine/RISCV64/Ctac_lemmas_C.thy @@ -23,7 +23,7 @@ lemma c_guard_abs_cte: apply (simp add: typ_heap_simps') done -lemmas ccorres_move_c_guard_cte [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] lemma c_guard_abs_tcb: fixes p :: "tcb_C ptr" @@ -33,7 +33,7 @@ lemma c_guard_abs_tcb: apply simp done -lemmas ccorres_move_c_guard_tcb [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] lemma cte_array_relation_array_assertion: "gsCNodes s p = Some n \ cte_array_relation s cstate @@ -95,7 +95,7 @@ lemma array_assertion_abs_tcb_ctes_add': lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] @@ -118,7 +118,7 @@ lemma c_guard_abs_tcb_ctes': done lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_c_guard_tcb_ctes [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] lemma c_guard_abs_pte: "\s s'. 
(s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True diff --git a/proof/crefine/RISCV64/Delete_C.thy b/proof/crefine/RISCV64/Delete_C.thy index 7d67c3050a..71b3ae44ae 100644 --- a/proof/crefine/RISCV64/Delete_C.thy +++ b/proof/crefine/RISCV64/Delete_C.thy @@ -856,7 +856,7 @@ lemma finaliseSlot_ccorres: ccorres_seq_skip) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], - unfolded split_def, unfolded K_def], + unfolded split_def], (simp add: in_monad)+) apply (simp add: from_bool_0) apply simp @@ -878,7 +878,7 @@ lemma finaliseSlot_ccorres: apply (simp add: guard_is_UNIV_def) apply (simp add: conj_comms) apply (wp make_zombie_invs' updateCap_cte_wp_at_cases - updateCap_cap_to' hoare_vcg_disj_lift static_imp_wp)+ + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) @@ -904,7 +904,7 @@ lemma finaliseSlot_ccorres: apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (frule valid_global_refsD_with_objSize, clarsimp) apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] - apply (wp isFinalCapability_inv static_imp_wp | wp (once) isFinal[where x=slot'])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -999,25 +999,22 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_drop_cutMon_bindE) apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg) add: cteDelete_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) apply (rule ccorres_drop_cutMon_bindE) apply (ctac(no_vcg) add: preemptionPoint_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) - apply (rule hyps[unfolded K_def], - (fastforce simp: in_monad)+)[1] + apply (rule hyps; fastforce simp: in_monad) apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp preemptionPoint_invR; clarsimp simp: updateTimeStamp_independent_def sch_act_simple_def)+ apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp cteDelete_invs' cteDelete_sch_act_simple) apply (rule ccorres_cond_false) diff --git a/proof/crefine/RISCV64/Detype_C.thy b/proof/crefine/RISCV64/Detype_C.thy index cdddae440c..94abcbc8de 100644 --- a/proof/crefine/RISCV64/Detype_C.thy +++ b/proof/crefine/RISCV64/Detype_C.thy @@ -1529,7 +1529,7 @@ lemma deleteObjects_ccorres': apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: in_monad) apply (rule bexI [rotated]) - apply (rule iffD2 [OF in_monad(20)]) + apply (rule iffD2 [OF in_monad(21)]) apply (rule conjI [OF refl refl]) apply (clarsimp simp: simpler_modify_def) proof - diff --git a/proof/crefine/RISCV64/Finalise_C.thy b/proof/crefine/RISCV64/Finalise_C.thy index 2cab35302d..13e803a8d8 100644 --- a/proof/crefine/RISCV64/Finalise_C.thy +++ b/proof/crefine/RISCV64/Finalise_C.thy @@ -240,8 +240,7 @@ proof (induct ts) apply (rule iffD1 [OF 
ccorres_expand_while_iff]) apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. UNIV", simplified]) apply ceqv - apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def - dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) apply simp done @@ -250,7 +249,7 @@ next show ?case apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (simp del: Collect_const - add: dc_def[symmetric] mapM_x_Cons) + add: mapM_x_Cons) apply (rule ccorres_guard_imp2) apply (rule_tac xf'=thread_' in ccorres_abstract) apply ceqv @@ -320,10 +319,10 @@ lemma cancelAllIPC_ccorres: sorry (* FIXME RT: cancelAllIPC_ccorres *) (* apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) apply (rule_tac xf'=ret__unsigned_longlong_' - and val="case rv of IdleEP \ scast EPState_Idle + and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" - and R="ko_at' rv epptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ep]) @@ -332,8 +331,8 @@ sorry (* FIXME RT: cancelAllIPC_ccorres *) (* apply (simp add: cendpoint_relation_def Let_def split: endpoint.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv epptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc apply (rename_tac list) apply (simp add: endpoint_state_defs @@ -367,7 +366,7 @@ sorry (* FIXME RT: cancelAllIPC_ccorres *) (* subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -384,12 +383,10 @@ sorry (* FIXME RT: cancelAllIPC_ccorres *) (* apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: endpoint_state_defs - Collect_False Collect_True - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -418,7 +415,7 @@ sorry (* FIXME RT: cancelAllIPC_ccorres *) (* subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -452,10 +449,10 @@ lemma cancelAllSignals_ccorres: sorry (* FIXME RT: cancelAllSignals_ccorres *) (* apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule_tac xf'=ret__unsigned_longlong_' - and val="case ntfnObj rv of IdleNtfn \ scast NtfnState_Idle + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" - and R="ko_at' rv ntfnptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply 
(erule cmap_relationE1 [OF cmap_relation_ntfn]) @@ -464,18 +461,15 @@ sorry (* FIXME RT: cancelAllSignals_ccorres *) (* apply (simp add: cnotification_relation_def Let_def split: ntfn.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv ntfnptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric] Collect_True + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -483,8 +477,8 @@ sorry (* FIXME RT: cancelAllSignals_ccorres *) (* apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) - apply (rule_tac P="ko_at' rv ntfnptr and invs'" - in ccorres_from_vcg[where P'=UNIV]) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) @@ -502,7 +496,7 @@ sorry (* FIXME RT: cancelAllSignals_ccorres *) (* subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -729,8 +723,8 @@ lemma doUnbindNotification_ccorres: (Call doUnbindNotification_'proc)" apply (cinit' lift: ntfnPtr_' tcbptr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and (\s. sym_refs (state_refs_of' s)) and ko_at' rv ntfnptr" and P'=UNIV - in ccorres_split_nothrow_novcg) + apply (rule_tac P="invs' and (\s. 
sym_refs (state_refs_of' s)) and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: option_to_ptr_def option_to_0_def) @@ -749,7 +743,7 @@ lemma doUnbindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv", ((simp add: option_to_ctcb_ptr_def)+)[4]) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) subgoal sorry (* FIXME RT: refill_buffer_relation *) subgoal by (simp add: carch_state_relation_def) subgoal by (simp add: cmachine_state_relation_def) @@ -761,7 +755,7 @@ lemma doUnbindNotification_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -812,7 +806,7 @@ lemma doUnbindNotification_ccorres': apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -846,9 +840,9 @@ lemma unbindNotification_ccorres: apply simp apply wpc apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (rule ccorres_cond_true) - apply (ctac (no_vcg) add: doUnbindNotification_ccorres[unfolded dc_def, simplified]) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) apply (wp gbn_wp') apply vcg apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def @@ -866,13 +860,13 @@ lemma unbindMaybeNotification_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule ccorres_rhs_assoc2) - apply (rule_tac P="ntfnBoundTCB rv \ None \ - option_to_ctcb_ptr (ntfnBoundTCB rv) \ NULL" - in ccorres_gen_asm) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) apply (rule_tac xf'=boundTCB_' - and val="option_to_ctcb_ptr (ntfnBoundTCB rv)" - and R="ko_at' rv ntfnptr and valid_bound_tcb' (ntfnBoundTCB rv)" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1[OF cmap_relation_ntfn]) @@ -1052,7 +1046,7 @@ lemma deleteASIDPool_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) apply (rule ccorres_assert) - apply (clarsimp simp: liftM_def dc_def[symmetric] when_def) + apply (clarsimp simp: liftM_def when_def) apply (rule ccorres_Guard)+ apply (rule ccorres_pre_gets_riscvKSASIDTable_ksArchState) apply (rule_tac R="\s. 
rv = riscvKSASIDTable (ksArchState s)" in ccorres_cond2) @@ -1109,7 +1103,7 @@ lemma deleteASID_ccorres: apply (rule ccorres_from_vcg[where P="\" and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (simp add: return_def) - apply (clarsimp simp: dc_def[symmetric] when_def liftM_def + apply (clarsimp simp: when_def liftM_def cong: conj_cong call_ignore_cong) apply (rename_tac asidTable ap) apply csymbr @@ -1183,7 +1177,7 @@ lemma deleteASID_ccorres: lemma setObject_ccorres_lemma: fixes val :: "'a :: pspace_storable" shows - "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := ksPSpace s (ptr \ injectKO val) \, s') \ rf_sr},{}; + "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val) \, s') \ rf_sr},{}; \s s' val'::'a. \ ko_at' val' ptr s; (s, s') \ rf_sr \ \ s' \ Q s; \val :: 'a. updateObject val = updateObject_default val; @@ -1379,7 +1373,6 @@ next apply (rule ccorres_checkPTAt) apply (rule ccorres_symb_exec_r2) apply (rule ccorres_symb_exec_r2) - apply (fold dc_def)[1] apply (rule Suc.hyps[unfolded whileAnno_def]) using level apply simp apply vcg @@ -1468,12 +1461,6 @@ lemma no_0_page_table_at'[elim!]: apply (drule spec[where x=0], clarsimp simp: bit_simps) done -lemma ccte_relation_ccap_relation: - "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" - by (clarsimp simp: ccte_relation_def ccap_relation_def - cte_to_H_def map_option_Some_eq2 - c_valid_cte_def) - lemma isFinalCapability_ccorres: "ccorres ((=) \ from_bool) ret__unsigned_long_' (cte_wp_at' ((=) cte) slot and invs') @@ -1570,7 +1557,7 @@ lemma cteDeleteOne_ccorres: erule_tac t="ret__unsigned_longlong = scast cap_null_cap" and s="cteCap cte = NullCap" in ssubst) - apply (clarsimp simp only: when_def unless_def dc_def[symmetric]) + apply (clarsimp simp only: when_def unless_def) apply (rule ccorres_cond2[where R=\]) apply (clarsimp simp: Collect_const_mem) apply (rule ccorres_rhs_assoc)+ @@ -1581,12 +1568,12 @@ lemma cteDeleteOne_ccorres: apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" in ccorres_guard_imp2[where A'=UNIV]) - apply (simp add: split_def dc_def[symmetric] + apply (simp add: split_def del: Collect_const) apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) apply (rule ccorres_assert) - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (ctac add: emptySlot_ccorres) apply (simp add: pred_conj_def finaliseCapTrue_standin_simple_def) @@ -1622,7 +1609,7 @@ lemma deletingIRQHandler_ccorres: ({s. irq_opt_relation (Some irq) (irq_' s)}) [] (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) - apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def dc_def[symmetric] + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def cong: call_ignore_cong ) apply (rule_tac r'="\rv rv'. 
rv' = Ptr rv" and xf'="slot_'" in ccorres_split_nothrow) @@ -1705,7 +1692,7 @@ lemma option_to_ctcb_ptr_not_0: done lemma update_tcb_map_to_tcb: - "map_to_tcbs (ksPSpace s(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" + "map_to_tcbs ((ksPSpace s)(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" by (rule ext, clarsimp simp: map_comp_def split: if_split) lemma ep_queue_relation_shift2: @@ -1735,7 +1722,7 @@ lemma sched_queue_relation_shift: lemma cendpoint_relation_udpate_arch: "\ cslift x p = Some tcb ; cendpoint_relation (cslift x) v v' \ - \ cendpoint_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cendpoint_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def split: endpoint.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1746,7 +1733,7 @@ lemma cendpoint_relation_udpate_arch: lemma cnotification_relation_udpate_arch: "\ cslift x p = Some tcb ; cnotification_relation (cslift x) v v' \ - \ cnotification_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cnotification_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cnotification_relation_def Let_def tcb_queue_relation'_def split: notification.splits ntfn.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1779,7 +1766,7 @@ lemma ccap_relation_capFMappedASID_CL_0: done lemma Arch_finaliseCap_ccorres: - notes dc_simp[simp del] Collect_const[simp del] if_weak_cong[cong] + notes Collect_const[simp del] if_weak_cong[cong] shows "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) @@ -1992,7 +1979,6 @@ lemma prepareThreadDelete_ccorres: (invs' and tcb_at' thread) (UNIV \ {s. 
thread_' s = tcb_ptr_to_ctcb_ptr thread}) hs (prepareThreadDelete thread) (Call Arch_prepareThreadDelete_'proc)" - supply dc_simp[simp del] apply (cinit lift: thread_', rename_tac cthread) apply (rule ccorres_return_Skip) apply fastforce @@ -2178,18 +2164,18 @@ sorry (* FIXME RT: finaliseCap_ccorres *) (* apply (rule ccorres_fail) apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ceqv_refl) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) diff --git a/proof/crefine/RISCV64/Interrupt_C.thy b/proof/crefine/RISCV64/Interrupt_C.thy index fb6288ba21..431268fa7e 100644 --- a/proof/crefine/RISCV64/Interrupt_C.thy +++ b/proof/crefine/RISCV64/Interrupt_C.thy @@ -75,7 +75,7 @@ proof - apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"]) apply (rule ccorres_call) - apply (rule cteInsert_ccorres[simplified dc_def]) + apply (rule cteInsert_ccorres) apply simp apply simp apply simp @@ -112,7 +112,7 @@ lemma invokeIRQHandler_ClearIRQHandler_ccorres: apply (simp add: ucast_up_ucast is_up) apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) apply (rule ccorres_symb_exec_r) - apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def]) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) @@ -346,7 +346,7 @@ lemma invokeIRQControl_ccorres: (performIRQControl (Invocations_H.irqcontrol_invocation.IssueIRQHandler irq slot parent)) (Call invokeIRQControl_'proc)" by (clarsimp simp: performIRQControl_def liftE_def bind_assoc - intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def K_def, simplified]) + intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def, simplified]) lemma isIRQActive_ccorres: "ccorres (\rv rv'. rv' = from_bool rv) ret__unsigned_long_' @@ -627,7 +627,7 @@ lemma Arch_decodeIRQControlInvocation_ccorres: apply (vcg exspec=getSyscallArg_modifies) apply ccorres_rewrite apply (auto split: invocation_label.split arch_invocation_label.split - intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def dc_def id_def] + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] simp: throwError_def invocationCatch_def syscall_error_to_H_cases invocation_eq_use_types)[1] apply clarsimp apply (clarsimp simp: interpret_excaps_test_null excaps_map_def diff --git a/proof/crefine/RISCV64/Invoke_C.thy b/proof/crefine/RISCV64/Invoke_C.thy index d7a29e6bfd..00d3b337a7 100644 --- a/proof/crefine/RISCV64/Invoke_C.thy +++ b/proof/crefine/RISCV64/Invoke_C.thy @@ -66,10 +66,10 @@ lemma setDomain_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_return_Skip) apply (simp add: when_def) - apply (rule_tac R="\s. 
rv = ksCurThread s" + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply simp apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) @@ -90,7 +90,7 @@ lemma setDomain_ccorres: apply (wpsimp wp: threadSet_tcbDomain_update_invs') apply fastforce apply (simp add: guard_is_UNIV_def) - apply (wp tcbSchedDequeue_not_in_queue hoare_vcg_all_lift) + apply (wp tcbSchedDequeue_not_in_queue hoare_vcg_all_lift) apply fastforce done @@ -404,7 +404,7 @@ lemma invokeCNodeRotate_ccorres: apply clarsimp apply (simp add: return_def) apply wp - apply (simp add: guard_is_UNIV_def dc_def xfdc_def) + apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp) apply (clarsimp simp:cte_wp_at_ctes_of) @@ -584,9 +584,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const cong: call_ignore_cong) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc | csymbr)+ - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] - if_1_0_0 dc_def[symmetric] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) apply (simp add:if_P del: Collect_const) @@ -669,8 +667,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: Collect_const[symmetric] del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] - if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: list_case_helper injection_handler_returnOk @@ -697,8 +694,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError whenE_def - dc_def[symmetric]) + apply (simp add: injection_handler_throwError whenE_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -776,8 +772,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: whenE_def injection_handler_returnOk - invocationCatch_def injection_handler_throwError - dc_def[symmetric]) + invocationCatch_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -859,7 +854,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: flip: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: if_not_P del: Collect_const) @@ -878,8 +873,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric] numeral_eqs) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) apply (rule 
syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1001,8 +995,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: unlessE_def whenE_def injection_handler_throwError - dc_def[symmetric] from_bool_0) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: unlessE_def whenE_def injection_handler_returnOk @@ -1047,12 +1040,10 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: throwError_def return_def exception_defs syscall_error_rel_def syscall_error_to_H_cases) apply clarsimp - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const) apply csymbr apply (simp add: interpret_excaps_test_null excaps_map_def - if_1_0_0 dc_def[symmetric] del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def) @@ -1112,8 +1103,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const) apply csymbr apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1175,8 +1165,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1184,8 +1173,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1200,7 +1188,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply wp apply (vcg exspec=invokeCNodeRotate_modifies) - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) apply (simp add: Collect_const_mem) @@ -1266,16 +1254,16 @@ lemma decodeCNodeInvocation_ccorres: apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp @@ -1290,7 +1278,7 @@ lemma decodeCNodeInvocation_ccorres: apply vcg apply simp apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R - hoare_vcg_all_lift_R 
lsfco_cte_at' static_imp_wp + hoare_vcg_all_lift_R lsfco_cte_at' hoare_weak_lift_imp | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen cong: conj_cong | wp (once) hoare_drop_imps)+ @@ -1409,7 +1397,7 @@ lemma seL4_MessageInfo_lift_def2: lemma globals_update_id: "globals_update (t_hrs_'_update (hrs_htd_update id)) x = x" - by (simp add:id_def hrs_htd_update_def) + by (simp add: hrs_htd_update_def) lemma getObjectSize_spec: "\s. \\\s. \t \ of_nat (length (enum::object_type list) - 1)\ Call getObjectSize_'proc @@ -1466,7 +1454,7 @@ shows "\ctes_of (s::kernel_state) (ptr_val p) = Some cte; is_aligned ptr bits; bits < word_bits; {ptr..ptr + 2 ^ bits - 1} \ {ptr_val p..ptr_val p + mask cteSizeBits} = {}; ((clift hp) :: (cte_C ptr \ cte_C)) p = Some to\ \ (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: (cte_C ptr \ cte_C)) p = Some to" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def restrict_map_def split:if_splits) + apply (clarsimp simp:lift_t_def lift_typ_heap_def restrict_map_def split:if_splits) apply (intro conjI impI) apply (case_tac hp) apply (clarsimp simp:typ_clear_region_def hrs_htd_update_def) @@ -1775,8 +1763,7 @@ lemma resetUntypedCap_ccorres: apply (rule ccorres_Guard_Seq[where S=UNIV])? apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow) - apply (rule_tac idx="capFreeIndex (cteCap cte)" - in deleteObjects_ccorres[where p=slot, unfolded o_def]) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) apply ceqv apply clarsimp apply (simp only: ccorres_seq_cond_raise) @@ -2937,8 +2924,8 @@ lemma decodeUntypedInvocation_ccorres_helper: [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def]) apply (simp add: injection_liftE[OF refl]) - apply (simp add: liftE_liftM o_def split_def withoutFailure_def - hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (simp add: liftE_liftM split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) apply (rule ccorres_nohs) apply (rule ccorres_getSlotCap_cte_at) apply (rule ccorres_move_c_guard_cte) @@ -3161,8 +3148,7 @@ lemma decodeUntypedInvocation_ccorres_helper: performInvocation_def liftE_bindE bind_assoc) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_trim_returnE, (simp (no_asm))+) - apply (simp (no_asm) add: o_def dc_def[symmetric] bindE_assoc - id_def[symmetric] bind_bindE_assoc) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) apply (rule ccorres_seq_skip'[THEN iffD1]) apply (rule ccorres_stateAssertE_fwd) apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) @@ -3213,7 +3199,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply vcg apply (rule ccorres_guard_imp [where Q =\ and Q' = UNIV,rotated],assumption+) - apply (simp add: o_def) + apply simp apply (simp add: liftE_validE) apply (rule checkFreeIndex_wp) apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap @@ -3282,7 +3268,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule validE_R_validE) apply (wp injection_wp_E[OF refl]) apply clarsimp - apply (simp add: ccHoarePost_def xfdc_def) + apply (simp add: ccHoarePost_def) apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) apply (rule_tac V=UNIV in HoarePartial.reannotateWhileNoGuard) @@ -3416,7 +3402,7 @@ shows apply (rule ccorres_guard_imp2) apply (rule monadic_rewrite_ccorres_assemble) apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer - in decodeUntypedInvocation_ccorres_helper[unfolded K_def]) + in 
decodeUntypedInvocation_ccorres_helper) apply assumption apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_bind_head) diff --git a/proof/crefine/RISCV64/IpcCancel_C.thy b/proof/crefine/RISCV64/IpcCancel_C.thy index 43da9d71a9..f3843b4148 100644 --- a/proof/crefine/RISCV64/IpcCancel_C.thy +++ b/proof/crefine/RISCV64/IpcCancel_C.thy @@ -222,7 +222,7 @@ lemma cancelSignal_ccorres_helper: apply (drule (2) ntfn_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) - apply (frule null_ep_queue [simplified Fun.comp_def]) + apply (frule null_ep_queue [simplified comp_def]) apply (intro impI conjI allI) \ \empty case\ apply clarsimp @@ -1049,7 +1049,7 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) + apply simp apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1171,7 +1171,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -1407,7 +1407,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1680,7 +1679,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -1809,7 +1808,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1918,7 +1916,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -2307,7 +2305,6 @@ lemma possibleSwitchTo_ccorres: (Call possibleSwitchTo_'proc)" supply if_split [split del] supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] @@ -2333,7 +2330,7 @@ sorry (* FIXME RT: possibleSwitchTo_ccorres *) (* apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule_tac R="\s. sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" in ccorres_cond) - apply (fastforce dest!: rf_sr_cscheduler_action_relation pred_tcb_at' tcb_at_not_NULL + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL simp: cscheduler_action_relation_def weak_sch_act_wf_def split: scheduler_action.splits) apply (ctac add: rescheduleRequired_ccorres) @@ -2606,7 +2603,7 @@ sorry (* FIXME RT: cancelSignal_ccorres *) (* | drule_tac x=thread in bspec)+ *) (* FIXME: MOVE *) -lemma ccorres_pre_getEndpoint [corres_pre]: +lemma ccorres_pre_getEndpoint [ccorres_pre]: assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" shows "ccorres r xf (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) @@ -2749,7 +2746,7 @@ lemma cpspace_relation_ep_update_an_ep: and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using cp koat psp rel unfolding cmap_relation_def apply - apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) @@ -2772,8 +2769,8 @@ lemma cpspace_relation_ep_update_ep: and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using invs sym_refs apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) apply clarsimp+ @@ -2785,15 +2782,15 @@ lemma cpspace_relation_ep_update_ep': fixes ep :: "endpoint" and ep' :: "endpoint" and epptr :: "machine_word" and s :: "kernel_state" defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" - defines "s' \ s\ksPSpace := ksPSpace s(epptr \ KOEndpoint ep')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" assumes koat: "ko_at' ep epptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and srs: "sym_refs (state_refs_of' s')" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" proof - from koat have koat': "ko_at' ep' epptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -2872,7 +2869,7 @@ lemma cancelIPC_ccorres_helper: apply (rule allI) apply (rule conseqPre) apply vcg - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (3) ep_blocked_in_queueD) apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) apply (elim conjE) @@ -2890,7 +2887,7 @@ lemma cancelIPC_ccorres_helper: apply assumption+ apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) - apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split simp del: comp_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) apply (frule null_ep_queue [simplified comp_def] null_ep_queue) apply (intro impI conjI allI) \ \empty case\ @@ -3064,7 +3061,6 @@ sorry (* FIXME RT: cancelIPC_ccorres1 *) (* apply wpc \ \BlockedOnReceive\ apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs cong: call_ignore_cong) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -3093,11 +3089,9 @@ sorry (* FIXME RT: cancelIPC_ccorres1 *) (* apply (simp add: "StrictC'_thread_state_defs" ccorres_cond_iffs Collect_False Collect_True word_sle_def cong: call_ignore_cong del: Collect_const) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr - apply 
(unfold comp_def)[1] apply csymbr apply (rule ccorres_move_c_guard_tcb)+ apply (rule ccorres_split_nothrow_novcg) @@ -3133,14 +3127,12 @@ sorry (* FIXME RT: cancelIPC_ccorres1 *) (* apply (rule ccorres_Cond_rhs) apply (simp add: nullPointer_def when_def) apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) - apply (simp only: dc_def[symmetric]) apply (rule ccorres_symb_exec_r) apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) apply (wp | simp)+ - apply (simp add: when_def nullPointer_def dc_def[symmetric]) apply (rule ccorres_return_Skip) apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs) @@ -3153,7 +3145,8 @@ sorry (* FIXME RT: cancelIPC_ccorres1 *) (* apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def Kernel_C.tcbReply_def tcbCNodeEntries_def) \ \BlockedOnNotification\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong) apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg)) apply clarsimp @@ -3162,10 +3155,12 @@ sorry (* FIXME RT: cancelIPC_ccorres1 *) (* apply (rule conseqPre, vcg) apply clarsimp \ \Running, Inactive, and Idle\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip)+ \ \BlockedOnSend\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong) \ \clag\ apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -3191,7 +3186,8 @@ sorry (* FIXME RT: cancelIPC_ccorres1 *) (* apply (rule conseqPre, vcg) apply clarsimp \ \Restart\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip) \ \Post wp proofs\ apply vcg diff --git a/proof/crefine/RISCV64/Ipc_C.thy b/proof/crefine/RISCV64/Ipc_C.thy index f730753abb..5733eefb4c 100644 --- a/proof/crefine/RISCV64/Ipc_C.thy +++ b/proof/crefine/RISCV64/Ipc_C.thy @@ -851,7 +851,7 @@ begin (* FIXME: move *) lemma ccorres_merge_return: - "ccorres (\a c. r (f a) c) xf P P' hs H C \ + "ccorres (r \ f) xf P P' hs H C \ ccorres r xf P P' hs (do x \ H; return (f x) od) C" by (rule ccorres_return_into_rel) @@ -1708,7 +1708,7 @@ proof - apply ceqv apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) - apply (rule ccorres_rel_imp[where r = "\rv rv'. True", simplified]) + apply (rule ccorres_rel_imp[where r = dc, simplified]) apply (rule_tac F="\_. obj_at' (\tcb. 
map ((user_regs o atcbContext o tcbArch) tcb) RISCV64_H.syscallMessage = msg) sender and valid_pspace' and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" @@ -1762,7 +1762,7 @@ proof - apply (clarsimp simp: n_msgRegisters_def numeral_eqs mapM_cong[OF msg_aux, simplified numeral_eqs]) apply (subst mapM_x_return_gen[where w2="()"]) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp) apply (rule hoare_impI) apply (wp mapM_x_wp_inv setMR_atcbContext_obj_at[simplified atcbContextGet_def, simplified] @@ -1851,7 +1851,7 @@ proof - split: list.split_asm) apply (simp split: list.split) apply (wp setMR_tcbFault_obj_at asUser_inv[OF getRestartPC_inv] - hoare_case_option_wp static_imp_wp + hoare_case_option_wp hoare_weak_lift_imp | simp add: option_to_ptr_def guard_is_UNIVI seL4_VMFault_PrefetchFault_def seL4_VMFault_Addr_def @@ -2169,7 +2169,7 @@ lemma doFaultTransfer_ccorres [corres]: apply ceqv apply csymbr apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply wp apply (simp add: badgeRegister_def RISCV64.badgeRegister_def RISCV64.capRegister_def Kernel_C.badgeRegister_def "StrictC'_register_defs") @@ -2207,7 +2207,7 @@ lemma unifyFailure_ccorres: assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" shows "ccorres ((\_. dc) \ r) xf P P' hs (unifyFailure a) c" using corr_ac - apply (simp add: unifyFailure_def rethrowFailure_def const_def o_def + apply (simp add: unifyFailure_def rethrowFailure_def const_def handleE'_def throwError_def) apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def split: xstate.splits sum.splits) @@ -3260,10 +3260,11 @@ lemma ccorres_sequenceE_while': Basic (\s. i_'_update (\_. i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3317,9 +3318,10 @@ proof - apply csymbr apply csymbr apply (rename_tac "lngth") - apply (simp add: mi_from_H_def mapME_def del: Collect_const cong: bind_apply_cong) + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="length rv = unat word2" in ccorres_gen_asm) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_add_returnOk2, rule ccorres_splitE_novcg) @@ -3328,7 +3330,7 @@ proof - and Q="UNIV" and F="\n s. valid_pspace' s \ tcb_at' thread s \ (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ - (\m < length rv. user_word_at (rv ! m) + (\m < length xs. user_word_at (xs ! 
m) (x2 + (of_nat m + (msgMaxLength + 2)) * 8) s)" in ccorres_sequenceE_while') apply (simp add: split_def) @@ -3338,7 +3340,7 @@ proof - apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) apply (ctac add: capFaultOnFailure_ccorres [OF lookupSlotForThread_ccorres']) - apply (rule_tac P="is_aligned rva 5" in ccorres_gen_asm) + apply (rule_tac P="is_aligned rv 5" in ccorres_gen_asm) apply (simp add: ccorres_cond_iffs liftE_bindE) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) apply (rule_tac P'="UNIV \ {s. excaps_map ys @@ -3359,7 +3361,7 @@ proof - apply (clarsimp simp: ccorres_cond_iffs) apply (rule_tac P= \ and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ - rv' = (rv ! length ys)}" + rv' = (xs ! length ys)}" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def) @@ -3400,9 +3402,8 @@ proof - apply ceqv apply (simp del: Collect_const) apply (rule_tac P'="{s. snd rv'=?curr s}" - and P="\s. length rva = length rv - \ (\x \ set rva. snd x \ 0)" - in ccorres_from_vcg_throws) + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" + in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def seL4_MsgExtraCapBits_def) @@ -3496,7 +3497,7 @@ proof - apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' canGrant_' badge___unsigned_long_' endpoint_' cong: call_ignore_cong) - apply (clarsimp cong: call_ignore_cong simp del: dc_simp) + apply (clarsimp cong: call_ignore_cong) apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" and r'="\c c'. interpret_excaps c' = excaps_map c" @@ -3541,7 +3542,7 @@ proof - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def msgLengthBits_def word_bw_assocs) - apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] static_imp_wp + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp | simp)+ apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def option_to_0_def option_to_ptr_def @@ -3605,7 +3606,6 @@ lemma replyFromKernel_error_ccorres [corres]: apply ((rule ccorres_Guard_Seq)+)? apply csymbr apply (rule ccorres_abstract_cleanup) - apply (fold dc_def)[1] apply (rule setMessageInfo_ccorres) apply wp apply (simp add: Collect_const_mem) @@ -3674,12 +3674,10 @@ lemma doIPCTransfer_ccorres [corres]: apply simp_all[3] apply ceqv apply csymbr - apply (fold dc_def)[1] apply ctac apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero) - apply (fold dc_def)[1] apply ctac apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) apply (rule_tac Q="\rv. 
valid_pspace' and cur_tcb' and tcb_at' sender @@ -3741,7 +3739,7 @@ proof - apply (rule ccorres_rhs_assoc2) apply (simp add: MessageID_Exception_def) apply ccorres_rewrite - apply (subst bind_return_unit) + apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_zipWithM_x_while) apply clarsimp @@ -3794,7 +3792,7 @@ proof - n_msgRegisters_def of_nat_less_iff) apply ccorres_rewrite - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (wp mapM_wp') apply clarsimp+ apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def @@ -3950,7 +3948,6 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (subst aligned_add_aligned, assumption) apply (rule is_aligned_mult_triv2[where n=3, simplified]) apply (simp add: msg_align_bits) - apply (simp add: of_nat_unat[simplified comp_def]) apply (simp only: n_msgRegisters_def) apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def word_unat.Rep_inverse[of "scast _ :: 'a word"] @@ -3989,8 +3986,8 @@ lemma copyMRsFaultReply_ccorres_syscall: apply simp apply (subst option.split[symmetric,where P=id, simplified]) apply (rule valid_drop_case) - apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified K_def] - lookupIPCBuffer_not_Some_0[simplified K_def]) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) apply (simp add: length_syscallMessage length_msgRegisters n_syscallMessage_def @@ -4002,7 +3999,7 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (rule ccorres_guard_imp) apply (rule ccorres_symb_exec_l) apply (case_tac rva ; clarsimp) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (rule ccorres_return_Skip)+ apply (wp mapM_x_wp_inv user_getreg_inv' | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ apply (cases "4 < len") @@ -4092,7 +4089,7 @@ lemma handleFaultReply_ccorres [corres]: apply (unfold K_def, rule ccorres_gen_asm) apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) apply (cinit lift: sender_' receiver_' simp: whileAnno_def) - apply (clarsimp simp del: dc_simp) + apply clarsimp apply (ctac(c_lines 2) add: getMessageInfo_ccorres') apply (rename_tac tag tag') apply csymbr @@ -4138,7 +4135,7 @@ lemma handleFaultReply_ccorres [corres]: split del: if_split) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) - apply (fold bind_assoc id_def) + apply (fold bind_assoc) apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) apply (ctac add: ccorres_return_C) apply wp @@ -4233,7 +4230,7 @@ lemma cteDeleteOne_tcbFault: apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' cancelAllSignals_tcbFault unbindNotification_tcbFault isFinalCapability_inv unbindMaybeNotification_tcbFault - static_imp_wp + hoare_weak_lift_imp | wpc | simp add: Let_def)+ sorry (* FIXME RT: cteDeleteOne_tcbFault. 
Now unused?*) (* apply (clarsimp split: if_split) @@ -4366,7 +4363,6 @@ proof - apply csymbr apply wpc apply (clarsimp simp: ccorres_cond_iffs split del: if_split) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg)) apply (rule ccorres_symb_exec_r) @@ -4390,7 +4386,6 @@ proof - fault_to_fault_tag_nonzero split del: if_split) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def)[1] apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) @@ -4420,10 +4415,9 @@ proof - apply (simp only: K_bind_def) apply (ctac add: possibleSwitchTo_ccorres) apply (wp sts_running_valid_queues setThreadState_st_tcb | simp)+ - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres_valid_queues'_simple) apply wp - apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' static_imp_wp + apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' hoare_weak_lift_imp threadSet_valid_objs' threadSet_weak_sch_act_wf | simp add: valid_tcb_state'_def)+)[1] apply (clarsimp simp: guard_is_UNIV_def ThreadState_Restart_def @@ -4497,7 +4491,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -4518,7 +4512,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -4881,7 +4875,7 @@ lemma sendIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -4897,12 +4891,12 @@ lemma sendIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (SendEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5129,12 +5123,9 @@ sorry (* FIXME RT: sendIPC_ccorres *) (* apply (clarsimp simp: disj_imp[symmetric] split del: if_split) apply (wpc ; clarsimp) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setupCallerCap_ccorres) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres) - apply (fold dc_def)[1] apply (rule ccorres_return_Skip) apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not possibleSwitchTo_sch_act_not sts_st_tcb' @@ -5338,7 +5329,7 @@ lemma receiveIPC_enqueue_ccorres_helper: apply 
(rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -5354,12 +5345,12 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5487,7 +5478,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -5508,7 +5499,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5677,7 +5668,7 @@ lemma completeSignal_ccorres: apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) apply ceqv - apply (fold dc_def, ctac(no_vcg)) + apply (ctac(no_vcg)) sorry (* FIXME RT: completeSignal_ccorres *) (* apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) @@ -5800,7 +5791,7 @@ sorry (* FIXME RT: receiveIPC_ccorres *) (* apply ceqv apply (rule ccorres_cond[where R=\]) apply (simp add: Collect_const_mem) - apply (ctac add: completeSignal_ccorres[unfolded dc_def]) + apply (ctac add: completeSignal_ccorres) apply (rule_tac xf'=ret__unsigned_longlong_' and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv @@ -5830,20 +5821,18 @@ sorry (* FIXME RT: receiveIPC_ccorres *) (* apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp apply (rename_tac list NOo) - apply (rule_tac ep="RecvEP list" - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (rename_tac list) apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \IdleEP case\ apply (rule ccorres_cond_true) apply csymbr @@ -5855,18 +5844,16 @@ sorry (* FIXME RT: receiveIPC_ccorres *) (* apply (rule ccorres_rhs_assoc2) apply (rule 
ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp - apply (rule_tac ep=IdleEP - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \SendEP case\ apply (thin_tac "isBlockinga = from_bool P" for P) apply (rule ccorres_cond_false) @@ -5944,8 +5931,6 @@ sorry (* FIXME RT: receiveIPC_ccorres *) (* split: Structures_H.thread_state.splits) apply ceqv - apply (fold dc_def) - supply dc_simp[simp del] apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) apply wpc (* blocking ipc call *) @@ -6025,12 +6010,12 @@ sorry (* FIXME RT: receiveIPC_ccorres *) (* apply (clarsimp simp:st_tcb_at_refs_of_rev') apply (erule_tac x=x and P="\x. st_tcb_at' P x s" for P in ballE) apply (drule_tac t=x in valid_queues_not_runnable'_not_ksQ) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (subgoal_tac "sch_act_not x s") prefer 2 apply (frule invs_sch_act_wf') apply (clarsimp simp:sch_act_wf_def) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs isBlockedOnSend_def split: list.split | rule conjI)+ @@ -6059,11 +6044,10 @@ lemma sendSignal_dequeue_ccorres_helper: IF head_C \ntfn_queue = Ptr 0 THEN CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) FI)" - apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (3) ntfn_blocked_in_queueD) apply (frule (1) ntfn_ko_at_valid_objs_valid_ntfn'[OF _ invs_valid_objs']) apply (elim conjE) @@ -6083,7 +6067,7 @@ lemma sendSignal_dequeue_ccorres_helper: apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -6268,7 +6252,7 @@ lemma sendSignal_ccorres [corres]: apply wpc apply (simp add: option_to_ctcb_ptr_def split del: if_split) apply (rule ccorres_cond_false) - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (rule ccorres_cond_true) apply (rule getThreadState_ccorres_foo) apply (rule ccorres_Guard_Seq) @@ -6284,7 +6268,7 @@ lemma sendSignal_ccorres [corres]: apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: setRegister_ccorres) sorry (* FIXME RT: sendSignal_ccorres *) (* - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: possibleSwitchTo_ccorres) apply (wp sts_running_valid_queues sts_st_tcb_at'_cases | simp add: option_to_ctcb_ptr_def split del: if_split)+ apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" @@ -6292,7 +6276,7 @@ sorry (* FIXME RT: sendSignal_ccorres *) (* apply auto[1] apply wp apply simp - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def RISCV64_H.badgeRegister_def C_register_defs RISCV64.badgeRegister_def RISCV64.capRegister_def @@ -6348,7 +6332,7 @@ sorry (* FIXME RT: sendSignal_ccorres *) (* apply ceqv apply (simp only: K_bind_def) apply (ctac (no_vcg)) - apply (simp, fold dc_def) + apply simp apply (ctac (no_vcg)) apply (ctac add: possibleSwitchTo_ccorres) apply (simp) @@ -6430,16 +6414,17 @@ lemma cpspace_relation_ntfn_update_ntfn': fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" and ntfnptr :: "machine_word" and s :: "kernel_state" defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" - defines "s' \ s\ksPSpace := ksPSpace s(ntfnptr \ KONotification ntfn')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" assumes koat: "ko_at' ntfn ntfnptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" and srs: "sym_refs (state_refs_of' s')" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr - (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" proof - from koat have koat': "ko_at' ntfn' ntfnptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -6499,7 +6484,7 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ntfn) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -6515,13 +6500,14 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (simp add: cnotification_relation_def Let_def) apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn) (ntfnSc ntfn)))\))") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn) (ntfnSc ntfn)))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence obj_at'_def projectKOs objBitsKO_def) subgoal sorry (* FIXME RT: sym_refs argument *) apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn) (ntfnSc ntfn)) ntfnptr (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn) (ntfnSc ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn) (ntfnSc ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -6688,12 +6674,11 @@ lemma receiveSignal_ccorres [corres]: apply 
(rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp) apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) sorry (* FIXME RT: receiveSignal_ccorres *) (* - apply (rule receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6704,7 +6689,7 @@ sorry (* FIXME RT: receiveSignal_ccorres *) (* apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \ActiveNtfn case\ apply (rename_tac badge) apply (rule ccorres_cond_false) @@ -6760,8 +6745,7 @@ sorry (* FIXME RT: receiveSignal_ccorres *) (* apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule_tac ntfn="ntfn" - in receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6773,7 +6757,7 @@ sorry (* FIXME RT: receiveSignal_ccorres *) (* apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def NtfnState_Waiting_def NtfnState_Idle_def) apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/RISCV64/IsolatedThreadAction.thy b/proof/crefine/RISCV64/IsolatedThreadAction.thy index 1ef53157ec..c54ef19f82 100644 --- a/proof/crefine/RISCV64/IsolatedThreadAction.thy +++ b/proof/crefine/RISCV64/IsolatedThreadAction.thy @@ -447,7 +447,7 @@ lemma modify_isolatable: liftM_def bind_assoc) apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) - apply (simp add: simpler_modify_def o_def) + apply (simp add: simpler_modify_def) apply (subst swap) apply (simp add: obj_at_partial_overwrite_If) apply (simp add: ksPSpace_update_partial_id o_def) @@ -1106,8 +1106,7 @@ lemma setCTE_isolatable: apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) apply (simp add: select_f_returns select_f_asserts split: if_split) apply (intro conjI impI) - apply (clarsimp simp: simpler_modify_def fun_eq_iff - partial_overwrite_fun_upd2 o_def + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 intro!: kernel_state.fold_congs[OF refl refl]) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) diff --git a/proof/crefine/RISCV64/PSpace_C.thy b/proof/crefine/RISCV64/PSpace_C.thy index d170cbca4d..cf276db376 100644 --- a/proof/crefine/RISCV64/PSpace_C.thy +++ b/proof/crefine/RISCV64/PSpace_C.thy @@ -39,7 +39,7 @@ lemma setObject_ccorres_helper: fixes ko :: "'a :: pspace_storable" assumes valid: "\\ (ko' :: 'a). \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} - c {s. (\\ksPSpace := ksPSpace \ (p \ injectKO ko)\, s) \ rf_sr}" + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" shows "\ \ko :: 'a. updateObject ko = updateObject_default ko; \ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko ; \(v :: 'a) (v' :: 'a). 
objBits v = objBits v'\ diff --git a/proof/crefine/RISCV64/Recycle_C.thy b/proof/crefine/RISCV64/Recycle_C.thy index 43d49388fc..0f9d0c49ae 100644 --- a/proof/crefine/RISCV64/Recycle_C.thy +++ b/proof/crefine/RISCV64/Recycle_C.thy @@ -414,7 +414,7 @@ lemma mapM_x_store_memset_ccorres_assist: assumes constsize: "\v :: 'a. objBits v = objBitsT (koType TYPE('a))" assumes restr: "set slots \ S" assumes worker: "\ptr s s' (ko :: 'a). \ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ - \ (s \ ksPSpace := ksPSpace s (ptr \ injectKO val)\, + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" @@ -695,8 +695,8 @@ lemma cpspace_relation_ep_update_ep2: (cslift t) ep_Ptr (cendpoint_relation (cslift t)); cendpoint_relation (cslift t') ep' endpoint; (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ - \ cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(ep_Ptr epptr \ endpoint)) + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) ep_Ptr (cendpoint_relation (cslift t'))" apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, @@ -806,8 +806,8 @@ lemma cancelBadgedSends_ccorres: del: Collect_const) sorry (* FIXME RT: cancelBadgedSends_ccorres *) (* apply (rule ccorres_pre_getEndpoint) - apply (rule_tac R="ko_at' rv ptr" and xf'="ret__unsigned_longlong_'" - and val="case rv of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_longlong_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle | SendEP q \ scast EPState_Send" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg @@ -817,22 +817,22 @@ sorry (* FIXME RT: cancelBadgedSends_ccorres *) (* split: Structures_H.endpoint.split_asm) apply ceqv apply wpc - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) apply (simp add: Collect_True Collect_False endpoint_state_defs - ccorres_cond_iffs dc_def[symmetric] + ccorres_cond_iffs del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (drule_tac s = rv in sym, simp only:) - apply (rule_tac P="ko_at' rv ptr and invs'" in ccorres_cross_over_guard) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) - apply (rule_tac P="ko_at' rv ptr" + apply (rule_tac P="ko_at' ep ptr" in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -908,7 +908,7 @@ sorry (* FIXME RT: cancelBadgedSends_ccorres *) (* subgoal by (simp add: mask_def canonical_bit_def) subgoal by (auto split: if_split) subgoal by simp - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) apply (fastforce simp: valid_ep'_def pred_tcb_at' split: 
list.splits) @@ -918,7 +918,7 @@ sorry (* FIXME RT: cancelBadgedSends_ccorres *) (* apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) apply (rule ccorres_init_tmp_lift2, ceqv) apply (rule ccorres_guard_imp2) - apply (simp add: bind_assoc dc_def[symmetric] + apply (simp add: bind_assoc del: Collect_const) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -943,9 +943,9 @@ sorry (* FIXME RT: cancelBadgedSends_ccorres *) (* subgoal by (simp add: rf_sr_def) apply simp apply ceqv - apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rva" in ccorres_gen_asm2) + apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rv" in ccorres_gen_asm2) apply (rule ccorres_if_bind, rule ccorres_if_lhs) - apply (simp add: bind_assoc dc_def[symmetric]) + apply (simp add: bind_assoc) apply (rule ccorres_rhs_assoc)+ apply (ctac add: setThreadState_ccorres) apply (ctac add: tcbSchedEnqueue_ccorres) @@ -1014,9 +1014,9 @@ sorry (* FIXME RT: cancelBadgedSends_ccorres *) (* apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases sts_sch_act sts_valid_queues setThreadState_oa_queued) apply (vcg exspec=setThreadState_cslift_spec) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_symb_exec_r2) - apply (drule_tac x="x @ [a]" in spec, simp add: dc_def[symmetric]) + apply (drule_tac x="x @ [a]" in spec, simp) apply vcg apply (vcg spec=modifies) apply (thin_tac "\x. P x" for P) diff --git a/proof/crefine/RISCV64/Refine_C.thy b/proof/crefine/RISCV64/Refine_C.thy index af8f07879f..ec3e162e09 100644 --- a/proof/crefine/RISCV64/Refine_C.thy +++ b/proof/crefine/RISCV64/Refine_C.thy @@ -456,7 +456,7 @@ lemma ccorres_corres_u_xf: apply (drule (1) bspec) apply (clarsimp simp: exec_C_def no_fail_def) apply (drule_tac x = a in spec) - apply (clarsimp simp:gets_def NonDetMonad.bind_def get_def return_def) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) apply (rule conjI) apply clarsimp apply (erule_tac x=0 in allE) @@ -608,9 +608,9 @@ lemma callKernel_withFastpath_corres_C: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_r)+ apply (rule ccorres_Cond_rhs) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) apply vcg apply (rule conseqPre, vcg, clarsimp) @@ -639,7 +639,7 @@ lemma threadSet_all_invs_triv': apply (simp add: tcb_cte_cases_def cteSizeBits_def) apply (simp add: exst_same_def) apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched - threadSet_invs_trivial threadSet_ct_running' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp thread_set_ct_in_state | simp add: tcb_cap_cases_def tcb_arch_ref_def | rule threadSet_ct_in_state' @@ -694,13 +694,13 @@ lemma entry_corres_C: apply simp apply (rule corres_split) (* FIXME: fastpath - apply (rule corres_cases[where R=fp], simp_all add: dc_def[symmetric])[1] - apply (rule callKernel_withFastpath_corres_C, simp) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) *) - apply (rule callKernel_corres_C[unfolded dc_def], simp) + apply (rule callKernel_corres_C) apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. 
t' = tcb_ptr_to_ctcb_ptr t"]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (rule getContext_corres[unfolded o_def], simp) + apply (rule getContext_corres, simp) apply (wp threadSet_all_invs_triv' callKernel_cur)+ apply (clarsimp simp: all_invs'_def invs'_def cur_tcb'_def valid_state'_def) apply simp @@ -802,7 +802,7 @@ lemma user_memory_update_corres_C: prefer 2 apply (clarsimp simp add: doMachineOp_def user_memory_update_def simpler_modify_def simpler_gets_def select_f_def - NonDetMonad.bind_def return_def) + Nondet_Monad.bind_def return_def) apply (thin_tac P for P)+ apply (case_tac a, clarsimp) apply (case_tac ksMachineState, clarsimp) @@ -829,7 +829,7 @@ lemma device_update_corres_C: apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) apply (rule ballI) apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) - apply (clarsimp simp: doMachineOp_def device_memory_update_def NonDetMonad.bind_def in_monad + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad gets_def get_def return_def simpler_modify_def select_f_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) @@ -897,7 +897,7 @@ lemma do_user_op_corres_C: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) apply (drule(1) device_mem_C_relation[symmetric]) - apply (simp add: comp_def) + apply simp apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: cstate_relation_def rf_sr_def Let_def cmachine_state_relation_def) @@ -917,7 +917,7 @@ lemma do_user_op_corres_C: apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (intro conjI allI ballI impI) apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] apply (clarsimp simp: ex_abs_def restrict_map_def diff --git a/proof/crefine/RISCV64/Retype_C.thy b/proof/crefine/RISCV64/Retype_C.thy index 8d579fca8d..1e09815965 100644 --- a/proof/crefine/RISCV64/Retype_C.thy +++ b/proof/crefine/RISCV64/Retype_C.thy @@ -1097,7 +1097,7 @@ lemma ptr_add_to_new_cap_addrs: shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. 
k < n} = Ptr ` set (new_cap_addrs n ptr ko)" unfolding new_cap_addrs_def - apply (simp add: comp_def image_image shiftl_t2n size_of_m field_simps) + apply (simp add: image_image shiftl_t2n size_of_m field_simps) apply (clarsimp simp: atLeastLessThan_def lessThan_def) done @@ -3110,8 +3110,7 @@ proof - apply (simp add: hrs_mem_def, subst rep0) apply (simp only: take_replicate, simp add: cte_C_size objBits_simps') apply (simp add: cte_C_size objBits_simps') - apply (simp add: fun_eq_iff o_def - split: if_split) + apply (simp add: fun_eq_iff split: if_split) apply (simp add: hrs_comm packed_heap_update_collapse typ_heap_simps) apply (subst clift_heap_update_same_td_name', simp_all, @@ -3784,7 +3783,7 @@ lemma mapM_x_storeWord_step: apply (subst if_not_P) apply (subst not_less) apply (erule is_aligned_no_overflow) - apply (simp add: mapM_x_map comp_def upto_enum_word del: upt.simps) + apply (simp add: mapM_x_map upto_enum_word del: upt.simps) apply (subst div_power_helper_64 [OF sz2, simplified]) apply assumption apply (simp add: word_bits_def unat_minus_one del: upt.simps) @@ -4359,12 +4358,10 @@ lemma ccorres_placeNewObject_endpoint: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -4397,12 +4394,10 @@ lemma ccorres_placeNewObject_notification: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ntfn [where ptr=regionBase and n="1" and sz="objBitsKO (KONotification makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps' - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps' + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -4462,11 +4457,10 @@ lemma ccorres_placeNewObject_captable: apply (clarsimp simp: split_def new_cap_addrs_def) apply (cut_tac createObjects_ccorres_cte [where ptr=regionBase and n="2 ^ unat userSize" and sz="unat userSize + objBitsKO (KOCTE makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ - apply (clarsimp simp: split_def objBitsKO_def - Fun.comp_def rf_sr_def split_def Let_def cteSizeBits_def - new_cap_addrs_def field_simps power_add 
ptr_retyps_gen_def - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ + apply (clarsimp simp: split_def objBitsKO_def rf_sr_def split_def Let_def cteSizeBits_def + new_cap_addrs_def field_simps power_add ptr_retyps_gen_def + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv range_cover_def) apply (clarsimp simp: objBitsKO_def objBits_simps' range_cover.aligned) apply (clarsimp simp: no_fail_def) @@ -4523,8 +4517,8 @@ lemma Arch_initContext_spec': apply (rule allI, rule conseqPre) apply (rule hoarep.Catch[rotated], vcg) apply (rule conseqPost[where A'="{}" and Q'=Q and Q=Q for Q, simplified]) - apply ((vcg , - clarsimp simp: hrs_mem_update_compose h_val_id packed_heap_update_collapse o_def + apply ((vcg, + clarsimp simp: hrs_mem_update_compose h_val_id packed_heap_update_collapse array_updates_rev_app))+ apply (auto simp: h_val_heap_same_hrs_mem_update_typ_disj[OF h_t_valid_c_guard_field _ tag_disj_via_td_name] export_tag_adjust_ti typ_uinfo_t_def array_updates_rev @@ -4571,7 +4565,6 @@ proof - apply (rule ptr_retyp_h_t_valid) apply simp apply (rule tcb_ptr_orth_cte_ptrs') - apply (simp add: o_def) apply (intro conjI allI impI) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def kernel_data_refs_domain_eq_rotate) @@ -4629,11 +4622,11 @@ lemma placeNewObject_pte: apply (clarsimp simp: split_def new_cap_addrs_def) apply (cut_tac s=\ in createObjects_ccorres_pte [where ptr=regionBase and sz=pageBits]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ apply (clarsimp simp: split_def objBitsKO_def archObjSize_def - Fun.comp_def rf_sr_def split_def Let_def ptr_retyps_gen_def - new_cap_addrs_def field_simps power_add - cong: globals.unfold_congs) + rf_sr_def split_def Let_def ptr_retyps_gen_def + new_cap_addrs_def field_simps power_add + cong: globals.unfold_congs) apply (simp add: Int_ac bit_simps) apply (clarsimp simp: word_bits_conv range_cover_def archObjSize_def bit_simps) apply (clarsimp simp: objBitsKO_def range_cover.aligned archObjSize_def bit_simps) @@ -5185,7 +5178,7 @@ lemma gsCNodes_update_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def split: option.splits if_splits) done @@ -5367,7 +5360,7 @@ proof - apply (simp add: obj_at'_real_def) apply (wp placeNewObject_ko_wp_at') apply (vcg exspec=Arch_initContext_modifies) - apply (clarsimp simp: dc_def) + apply clarsimp apply vcg apply (rule conseqPre, vcg, clarsimp) apply (clarsimp simp: createObject_hs_preconds_def @@ -5543,7 +5536,7 @@ lemma ccorres_guard_impR: lemma typ_clear_region_dom: "dom (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: 'b :: mem_type typ_heap) \ dom ((clift hp) :: 'b :: mem_type typ_heap)" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def) + apply (clarsimp simp:lift_t_def lift_typ_heap_def comp_def) apply (clarsimp simp:lift_state_def) apply (case_tac hp) apply (clarsimp simp:) @@ -7315,7 +7308,7 @@ shows "ccorres dc xfdc apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_add_return) - apply (simp only: 
dc_def[symmetric] hrs_htd_update) + apply (simp only: hrs_htd_update) apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? apply (rule ccorres_split_nothrow, rule_tac S="{ptr .. ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" @@ -7479,9 +7472,9 @@ sorry (* FIXME RT: createNewObjects_ccorres *) (* including no_pre apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' - insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at static_imp_wp) + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) apply (wp insertNewCap_cte_wp_at_other) - apply (wp hoare_vcg_all_lift static_imp_wp insertNewCap_cte_at) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) apply (clarsimp simp:conj_comms | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_distinct')+ @@ -7515,7 +7508,7 @@ sorry (* FIXME RT: createNewObjects_ccorres *) (* hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to - createObject_no_inter[where sz = sz] hoare_vcg_all_lift static_imp_wp)+ + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) @@ -7677,7 +7670,7 @@ sorry (* FIXME RT: createNewObjects_ccorres *) (* apply (simp add: o_def) apply (case_tac newType, simp_all add: object_type_from_H_def Kernel_C_defs - nAPIObjects_def APIType_capBits_def o_def split:apiobject_type.splits)[1] + nAPIObjects_def APIType_capBits_def split:apiobject_type.splits)[1] subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) subgoal by (clarsimp simp:objBits_simps', unat_arith) apply (fold_subgoals (prefix))[3] diff --git a/proof/crefine/RISCV64/SR_lemmas_C.thy b/proof/crefine/RISCV64/SR_lemmas_C.thy index 274e011894..cc77583b05 100644 --- a/proof/crefine/RISCV64/SR_lemmas_C.thy +++ b/proof/crefine/RISCV64/SR_lemmas_C.thy @@ -350,7 +350,7 @@ lemma tcb_cte_cases_proj_eq [simp]: (* NOTE: 5 = cte_level_bits *) lemma map_to_ctes_upd_cte': "\ ksPSpace s p = Some (KOCTE cte'); is_aligned p cte_level_bits; ps_clear p cte_level_bits s \ - \ map_to_ctes (ksPSpace s(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" + \ map_to_ctes ((ksPSpace s)(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" apply (erule (1) map_to_ctes_upd_cte) apply (simp add: field_simps ps_clear_def3 cte_level_bits_def mask_def) done @@ -358,7 +358,7 @@ lemma map_to_ctes_upd_cte': lemma map_to_ctes_upd_tcb': "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; ps_clear p tcbBlockSizeBits s |] -==> map_to_ctes (ksPSpace s(p |-> KOTCB tcb)) = +==> map_to_ctes ((ksPSpace s)(p |-> KOTCB tcb)) = (%x. if EX getF setF. tcb_cte_cases (x - p) = Some (getF, setF) & getF tcb ~= getF tcb' @@ -465,7 +465,7 @@ lemma fst_setCTE: assumes ct: "cte_at' dest s" and rl: "\s'. 
\ ((), s') \ fst (setCTE dest cte s); (s' = s \ ksPSpace := ksPSpace s' \); - (ctes_of s' = ctes_of s(dest \ cte)); + (ctes_of s' = (ctes_of s)(dest \ cte)); (map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')); (map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')); (map_to_scs (ksPSpace s) = map_to_scs (ksPSpace s')); @@ -492,7 +492,7 @@ proof - by clarsimp note thms = this - have ceq: "ctes_of s' = ctes_of s(dest \ cte)" + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp show ?thesis @@ -684,7 +684,6 @@ proof (rule cor_map_relI [OF map_option_eq_dom_eq]) hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om apply - - apply (simp add: o_def) apply (drule fun_cong [where x = x]) apply simp done @@ -1481,7 +1480,7 @@ lemma ctcb_relation_null_queue_ptrs: lemma map_to_ctes_upd_tcb_no_ctes: "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x \ - \ map_to_ctes (ksPSpace s(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" apply (erule obj_atE') apply (simp add: projectKOs objBits_simps) apply (subst map_to_ctes_upd_tcb') @@ -1495,15 +1494,15 @@ lemma map_to_ctes_upd_tcb_no_ctes: lemma update_ntfn_map_tos: fixes P :: "Structures_H.notification \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KONotification ko)) = map_to_eps (ksPSpace s)" - and "map_to_scs (ksPSpace s(p \ KONotification ko)) = map_to_scs (ksPSpace s)" - and "map_to_replies (ksPSpace s(p \ KONotification ko)) = map_to_replies (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_scs ((ksPSpace s)(p \ KONotification ko)) = map_to_scs (ksPSpace s)" + and "map_to_replies ((ksPSpace s)(p \ KONotification ko)) = map_to_replies (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1511,15 +1510,15 @@ lemma update_ntfn_map_tos: lemma update_ep_map_tos: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_scs (ksPSpace s(p \ KOEndpoint ko)) = map_to_scs (ksPSpace s)" - and "map_to_replies (ksPSpace s(p \ KOEndpoint ko)) = map_to_replies (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ 
KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_scs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_scs (ksPSpace s)" + and "map_to_replies ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_replies (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1527,14 +1526,14 @@ lemma update_ep_map_tos: lemma update_tcb_map_tos: fixes P :: "tcb \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" - and "map_to_ntfns (ksPSpace s(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_scs (ksPSpace s(p \ KOTCB ko)) = map_to_scs (ksPSpace s)" - and "map_to_replies (ksPSpace s(p \ KOTCB ko)) = map_to_replies (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_scs ((ksPSpace s)(p \ KOTCB ko)) = map_to_scs (ksPSpace s)" + and "map_to_replies ((ksPSpace s)(p \ KOTCB ko)) = map_to_replies (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1542,16 +1541,15 @@ lemma update_tcb_map_tos: lemma update_asidpool_map_tos: fixes P :: "asidpool \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" - and "map_to_scs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_scs (ksPSpace s)" - and "map_to_replies (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_replies (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOArch 
(KOASIDPool ap))) = map_to_ctes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" - + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_scs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_scs (ksPSpace s)" + and "map_to_replies ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_replies (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs @@ -1559,27 +1557,27 @@ lemma update_asidpool_map_tos: arch_kernel_object.split_asm) lemma update_asidpool_map_to_asidpools: - "map_to_asidpools (ksPSpace s(p \ KOArch (KOASIDPool ap))) + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = (map_to_asidpools (ksPSpace s))(p \ ap)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_to_ptes: - "map_to_ptes (ksPSpace s(p \ KOArch (KOPTE pte))) + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) = (map_to_ptes (ksPSpace s))(p \ pte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_tos: fixes P :: "pte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" - and "map_to_scs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_scs (ksPSpace s)" - and "map_to_replies (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_replies (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_scs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_scs (ksPSpace s)" + and "map_to_replies ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_replies (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and 
"map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split diff --git a/proof/crefine/RISCV64/Schedule_C.thy b/proof/crefine/RISCV64/Schedule_C.thy index a202b65db1..174c280acf 100644 --- a/proof/crefine/RISCV64/Schedule_C.thy +++ b/proof/crefine/RISCV64/Schedule_C.thy @@ -186,14 +186,14 @@ lemmas ccorres_remove_tail_Guard_Skip = ccorres_abstract[where xf'="\_. ()", OF ceqv_remove_tail_Guard_Skip] lemma switchToThread_ccorres': - "ccorres (\_ _. True) xfdc + "ccorres dc xfdc (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) hs (switchToThread t) (Call switchToThread_'proc)" apply (rule ccorres_guard_imp2) - apply (ctac (no_vcg) add: switchToThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: switchToThread_ccorres) apply auto done @@ -287,14 +287,14 @@ proof - apply (intro conjI impI) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) apply (fastforce dest: lookupBitmapPriority_obj_at' - simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + simp: pred_conj_def obj_at'_def st_tcb_at'_def) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (clarsimp simp: pred_conj_def obj_at'_def st_tcb_at'_def) apply (clarsimp simp: not_less le_maxDomain_eq_less_numDomains) apply (prop_tac "ksCurDomain s = 0") using unsigned_eq_0_iff apply force apply (cut_tac s=s in lookupBitmapPriority_obj_at'; simp?) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (clarsimp simp: pred_conj_def obj_at'_def st_tcb_at'_def) done qed @@ -375,7 +375,6 @@ lemma isHighestPrio_ccorres: (isHighestPrio d p) (Call isHighestPrio_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -416,7 +415,6 @@ lemma isHighestPrio_ccorres: lemma schedule_ccorres: "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -430,7 +428,7 @@ lemma schedule_ccorres: apply (rule ccorres_cond_false_seq) apply simp apply (rule_tac P=\ and P'="{s. 
ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) - apply (clarsimp simp: dc_def return_def split: prod.splits) + apply (clarsimp simp: return_def split: prod.splits) apply (rule conseqPre, vcg, clarsimp) (* toplevel case: action is choose new thread *) apply (rule ccorres_cond_true_seq) @@ -447,7 +445,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_true_seq) (* isolate haskell part before setting thread action *) apply (simp add: scheduleChooseNewThread_def) @@ -475,7 +473,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_false_seq) apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) @@ -495,7 +493,7 @@ lemma schedule_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) - apply (clarsimp simp: scheduleSwitchThreadFastfail_def dc_simp) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) apply (rule ccorres_cond_seq2[THEN iffD1]) apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" and R="\s. it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV @@ -532,18 +530,17 @@ lemma schedule_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres, clarsimp) - apply (clarsimp simp: to_bool_def) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_return) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: to_bool_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0}" in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def - NonDetMonad.bind_def + Nondet_Monad.bind_def split: prod.split) apply ceqv apply (clarsimp simp: to_bool_def) @@ -636,13 +633,12 @@ lemma schedule_ccorres: apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') apply (fastforce dest: invs_sch_act_wf') - apply (wp | clarsimp simp: dc_def)+ + apply wpsimp+ apply (vcg exspec=tcbSchedEnqueue_modifies) apply wp apply vcg - apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs' - dc_def)+ + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs') apply (frule invs_sch_act_wf') apply (frule tcb_at_invs') apply (rule conjI) @@ -660,7 +656,7 @@ lemma schedule_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -714,7 +710,7 @@ lemma timerTick_ccorres: apply (ctac add: get_tsType_ccorres2 [where f="\s. 
ksCurThread_' (globals s)"]) apply (rule ccorres_split_nothrow_novcg) apply wpc - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip)+ (* thread_state.Running *) apply simp apply (rule ccorres_cond_true) @@ -736,17 +732,17 @@ lemma timerTick_ccorres: apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) apply (clarsimp simp: cur_tcb'_def) apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) - apply (ctac add: threadSet_timeSlice_ccorres[unfolded dc_def]) + apply (ctac add: threadSet_timeSlice_ccorres) apply (rule ccorres_rhs_assoc)+ apply (ctac) apply simp apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear threadSet_valid_queues threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ apply (vcg exspec=tcbSchedDequeue_modifies) - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip)+ apply ceqv apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) apply (rule ccorres_when[where R=\]) @@ -758,7 +754,6 @@ lemma timerTick_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply ceqv - apply (fold dc_def) apply (rule ccorres_pre_getDomainTime) apply (rename_tac rva rv'a rvb) apply (rule_tac P'="{s. ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp) @@ -766,13 +761,13 @@ lemma timerTick_ccorres: apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_true) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply clarsimp apply assumption apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/RISCV64/SyscallArgs_C.thy b/proof/crefine/RISCV64/SyscallArgs_C.thy index 11bd4b2862..1c54300f62 100644 --- a/proof/crefine/RISCV64/SyscallArgs_C.thy +++ b/proof/crefine/RISCV64/SyscallArgs_C.thy @@ -48,7 +48,7 @@ lemma replyOnRestart_invs'[wp]: "replyOnRestart thread reply isCall \invs'\" including no_pre apply (simp add: replyOnRestart_def) - apply (wp setThreadState_nonqueued_state_update rfk_invs' static_imp_wp) + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) apply (rule hoare_strengthen_post, rule gts_sp') apply (clarsimp simp: pred_tcb_at') apply (auto elim!: pred_tcb'_weakenE st_tcb_ex_cap'' @@ -387,7 +387,7 @@ lemma ccorres_invocationCatch_Inr: if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () else liftE (replyOnRestart thread reply isCall) odE od) c" - apply (simp add: invocationCatch_def liftE_bindE o_xo_injector) + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) apply (subst ccorres_liftM_simp[symmetric]) apply (simp add: liftM_def bind_assoc bindE_def) apply (rule_tac f="\f. 
ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) @@ -748,7 +748,7 @@ lemma getMRs_tcbContext: apply (wp|wpc)+ apply (rule_tac P="n < length x" in hoare_gen_asm) apply (clarsimp simp: nth_append) - apply (wp mapM_wp' static_imp_wp)+ + apply (wp mapM_wp' hoare_weak_lift_imp)+ apply simp apply (rule asUser_cur_obj_at') apply (simp add: getRegister_def msgRegisters_unfold) @@ -874,12 +874,12 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_move_array_assertion_tcb_ctes) apply (ctac (no_vcg)) apply csymbr - apply (rule_tac b="isArchObjectCap rva \ isFrameCap (capCap rva)" in ccorres_case_bools') + apply (rule_tac b="isArchObjectCap rv \ isFrameCap (capCap rv)" in ccorres_case_bools') apply simp apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap rv" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -1078,7 +1078,7 @@ lemma getMRs_rel: getMRs thread buffer mi \\args. getMRs_rel args buffer\" apply (simp add: getMRs_rel_def) apply (rule hoare_pre) - apply (rule_tac x=mi in hoare_vcg_exI) + apply (rule_tac x=mi in hoare_exI) apply wp apply (rule_tac Q="\rv s. thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) apply (wp det_result det_wp_getMRs) diff --git a/proof/crefine/RISCV64/Syscall_C.thy b/proof/crefine/RISCV64/Syscall_C.thy index 1ad45e8dd5..f6235180a6 100644 --- a/proof/crefine/RISCV64/Syscall_C.thy +++ b/proof/crefine/RISCV64/Syscall_C.thy @@ -270,22 +270,22 @@ lemma decodeInvocation_ccorres: apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) apply assumption apply (simp+)[3] apply (rule ccorres_Cond_rhs) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeDomainInvocation_ccorres[unfolded o_def], + erule decodeDomainInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeCNodeInvocation_ccorres[unfolded o_def], + erule decodeCNodeInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply simp @@ -720,7 +720,7 @@ lemma handleFault_ccorres: apply (rule ccorres_return_Skip') apply clarsimp apply (rule ccorres_cond_univ) - apply (ctac (no_vcg) add: handleDoubleFault_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) apply (simp add: sendFaultIPC_def) apply wp apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] @@ -894,8 +894,7 @@ lemma handleInvocation_ccorres: apply (rule_tac Q="\rv'. invs' and tcb_at' rv" and E="\ft. 
invs' and tcb_at' rv" in hoare_post_impErr) - apply (wp hoare_split_bind_case_sumE - alternative_wp hoare_drop_imps + apply (wp hoare_split_bind_case_sumE hoare_drop_imps setThreadState_nonqueued_state_update ct_in_state'_set setThreadState_st_tcb hoare_vcg_all_lift sts_ksQ' @@ -1059,7 +1058,7 @@ lemma handleReply_ccorres: apply (rule ccorres_cond_true) apply simp apply (rule ccorres_return_void_catchbrk) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply (vcg exspec=doReplyTransfer_modifies) apply (rule ccorres_fail)+ apply (wpc, simp_all) @@ -1077,7 +1076,6 @@ lemma handleReply_ccorres: apply (csymbr, csymbr, csymbr) apply simp apply (rule ccorres_assert2) - apply (fold dc_def) apply (rule ccorres_add_return2) apply (ctac (no_vcg)) apply (rule ccorres_return_void_catchbrk) @@ -1238,7 +1236,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1256,8 +1254,8 @@ lemma handleRecv_ccorres: apply (simp add: liftE_bind) apply (ctac) - apply (rule_tac P="\s. ksCurThread s = rv" in ccorres_cross_over_guard) - apply (ctac add: receiveIPC_ccorres[unfolded dc_def]) + apply (rule_tac P="\s. ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) apply (wp deleteCallerCap_ksQ_ct' hoare_vcg_all_lift) apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) @@ -1305,7 +1303,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1322,7 +1320,7 @@ lemma handleRecv_ccorres: apply (clarsimp simp: rf_sr_upd_safe) apply (simp add: liftE_bind) - apply (ctac add: receiveSignal_ccorres[unfolded dc_def]) + apply (ctac add: receiveSignal_ccorres) apply clarsimp apply (vcg exspec=handleFault_modifies) apply (rule ccorres_cond_true_seq) @@ -1335,7 +1333,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) apply (rule ccorres_add_return2) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_break_return[where P=\ and P'=UNIV]) apply simp+ apply wp @@ -1356,7 +1354,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_symb_exec_r) apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_upd_safe) @@ -1369,9 +1367,9 @@ lemma handleRecv_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=handleFault_modifies) @@ -1588,7 +1586,6 @@ lemma ccorres_handleReservedIRQ: (\p. ksCurThread s \ set (ksReadyQueues s p)))) (UNIV \ {s. 
irq_' s = ucast irq}) hs (handleReservedIRQ irq) (Call handleReservedIRQ_'proc)" - supply dc_simp[simp del] apply (cinit lift: irq_') apply (rule ccorres_return_Skip) apply clarsimp @@ -1611,11 +1608,11 @@ lemma handleInterrupt_ccorres: apply (subst doMachineOp_bind) apply (rule maskInterrupt_empty_fail) apply (rule ackInterrupt_empty_fail) - apply (ctac add: maskInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: maskInterrupt_ccorres) apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) - apply (ctac add: ackInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=ackInterrupt_modifies) @@ -1654,7 +1651,7 @@ lemma handleInterrupt_ccorres: apply csymbr apply (ctac (no_vcg) add: sendSignal_ccorres) apply (simp add: maskIrqSignal_def) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp+ apply (simp del: Collect_const) apply (rule ccorres_cond_true_seq) @@ -1662,7 +1659,7 @@ lemma handleInterrupt_ccorres: apply csymbr+ apply (rule ccorres_cond_false_seq) apply (simp add: maskIrqSignal_def) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule_tac P=\ and P'="{s. ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) apply (clarsimp simp: isCap_simps simp del: Collect_const) apply (case_tac rva, simp_all del: Collect_const)[1] @@ -1672,7 +1669,7 @@ lemma handleInterrupt_ccorres: rule ccorres_guard_imp2, rule ccorres_cond_false_seq, simp, rule ccorres_cond_false_seq, simp, - ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def], + ctac (no_vcg) add: ackInterrupt_ccorres, clarsimp)+ apply (wpsimp wp: getSlotCap_wp simp: maskIrqSignal_def) apply simp @@ -1681,7 +1678,6 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_move_const_guards)+ apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: timerTick_ccorres) apply (ctac (no_vcg) add: resetTimer_ccorres) @@ -1693,7 +1689,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) apply (ctac add: ccorres_handleReservedIRQ) - apply (ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) apply wp apply (vcg exspec=handleReservedIRQ_modifies) apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up ) diff --git a/proof/crefine/RISCV64/TcbAcc_C.thy b/proof/crefine/RISCV64/TcbAcc_C.thy index 35e784cbf8..54b04557cf 100644 --- a/proof/crefine/RISCV64/TcbAcc_C.thy +++ b/proof/crefine/RISCV64/TcbAcc_C.thy @@ -173,7 +173,7 @@ lemma threadSet_corres_lemma: assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" and mod: "modifies_heap_spec f" and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ - \ (\\ksPSpace := ksPSpace \(thread \ KOTCB (g ko))\, + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" @@ -202,7 +202,7 @@ lemma threadSet_corres_lemma: lemma threadSet_ccorres_lemma4: - "\ \s tcb. \ \ (Q s tcb) c {s'. 
(s \ksPSpace := ksPSpace s(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; \s s' tcb tcb'. \ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ diff --git a/proof/crefine/RISCV64/TcbQueue_C.thy b/proof/crefine/RISCV64/TcbQueue_C.thy index 54122923aa..eb7b297eea 100644 --- a/proof/crefine/RISCV64/TcbQueue_C.thy +++ b/proof/crefine/RISCV64/TcbQueue_C.thy @@ -1095,8 +1095,8 @@ lemma cpspace_relation_ntfn_update_ntfn: and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" using koat invs sym_refs cp rel apply - apply (subst map_comp_update) @@ -1383,7 +1383,7 @@ lemma rf_sr_tcb_update_no_queue: (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes diff --git a/proof/crefine/RISCV64/Tcb_C.thy b/proof/crefine/RISCV64/Tcb_C.thy index d3430522a9..fecb1947de 100644 --- a/proof/crefine/RISCV64/Tcb_C.thy +++ b/proof/crefine/RISCV64/Tcb_C.thy @@ -98,8 +98,8 @@ lemma getMRs_rel_sched: lemma getObject_state: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbState_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -160,8 +160,8 @@ lemma getObject_state: lemma threadGet_state: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_state [where st=st]) apply (rule exI) @@ -171,8 +171,8 @@ lemma threadGet_state: lemma asUser_state: "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (asUser t' f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. 
st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -272,8 +272,8 @@ lemma asUser_state: lemma doMachineOp_state: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -306,7 +306,7 @@ lemma getMRs_rel_state: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s \ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -419,8 +419,8 @@ lemma setPriority_ccorres: apply (rule ccorres_pre_getCurThread) apply (rule_tac R = "\s. rv = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) apply (rule ccorres_return_Skip') apply (wp isRunnable_wp) apply (wpsimp wp: hoare_drop_imps threadSet_valid_queues threadSet_valid_objs' @@ -444,7 +444,7 @@ lemma setPriority_ccorres: apply (frule (1) valid_objs'_maxDomain[where t=t]) apply (frule (1) valid_objs'_maxPriority[where t=t]) apply simp -done + done lemma setMCPriority_ccorres: "ccorres dc xfdc @@ -525,7 +525,7 @@ lemma cteInsert_cap_to'2: apply (simp add: cteInsert_def ex_nonz_cap_to'_def setUntypedCapAsFull_def) apply (rule hoare_vcg_ex_lift) apply (wp updateMDB_weak_cte_wp_at - updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp) + updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of) apply auto done @@ -626,7 +626,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply csymbr apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def - locateSlot_conv o_def + locateSlot_conv del: Collect_const) apply (simp add: liftE_bindE del: Collect_const) apply (ctac(no_vcg) add: cteDelete_ccorres) @@ -672,7 +672,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -681,7 +681,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wp (once)) apply (clarsimp simp: guard_is_UNIV_def) - apply (wpsimp wp: when_def static_imp_wp) + apply (wpsimp wp: when_def hoare_weak_lift_imp) apply (strengthen sch_act_wf_weak, wp) apply clarsimp 
apply wp @@ -695,7 +695,7 @@ lemma invokeTCB_ThreadControl_ccorres: tcb_at' target s \ ksCurDomain s \ maxDomain \ valid_queues' s \ fst (the priority) \ maxPriority)"]) apply (strengthen sch_act_wf_weak) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (clarsimp split: if_splits) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr @@ -710,7 +710,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -720,7 +720,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply (simp add: when_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbBuffer_def size_of_def cte_level_bits_def @@ -739,7 +739,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -748,7 +748,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+) apply wp apply (clarsimp simp: guard_is_UNIV_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (simp add: guard_is_UNIV_def Collect_const_mem flip: canonical_bit_def) @@ -767,7 +767,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -777,7 +777,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply wpsimp - apply (wp static_imp_wp, strengthen sch_act_wf_weak, wp ) + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) apply wp apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (simp cong: conj_cong) @@ -814,7 +814,7 @@ lemma invokeTCB_ThreadControl_ccorres: simp add: o_def) apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" in hoare_gen_asm) - apply (wp threadSet_ipcbuffer_trivial static_imp_wp + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp | simp | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf invs_queues invs_valid_queues' | wp hoare_drop_imps)+ @@ -855,11 +855,10 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply 
(ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -878,8 +877,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: Collect_True assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) @@ -887,7 +885,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbVTable_def tcbVTableSlot_def Kernel_C.tcbVTable_def cte_level_bits_def size_of_def option_to_0_def objBits_defs mask_def) @@ -895,7 +893,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def Collect_const_mem canonical_address_bitfield_extract_tcb @@ -922,12 +920,11 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -949,8 +946,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: Collect_True assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) @@ -958,7 +954,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem Kernel_C.tcbCTable_def tcbCTableSlot_def cte_level_bits_def size_of_def option_to_0_def mask_def objBits_defs) @@ -966,7 +962,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def Collect_const_mem canonical_address_bitfield_extract_tcb @@ -985,13 +981,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: conj_comms) apply (wp 
hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def cte_level_bits_def size_of_def word_sle_def option_to_0_def cintr_def objBits_defs mask_def) apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: inQ_def) apply (subst is_aligned_neg_mask_eq) @@ -1019,7 +1015,7 @@ lemma setupReplyMaster_ccorres: apply (cinit lift: thread_') apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply ctac - apply (simp del: Collect_const add: dc_def[symmetric]) + apply (simp del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) apply (rule_tac F="\rv'. (rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" @@ -1156,7 +1152,7 @@ lemma postModifyRegisters_ccorres: apply (simp add: if_distrib[where f="asUser t" for t] asUser_return) apply (rule ccorres_add_return2) apply (rule ccorres_stateAssert) - apply (rule ccorres_return_Skip'[unfolded dc_def]) + apply (rule ccorres_return_Skip') by simp+ lemma invokeTCB_CopyRegisters_ccorres: @@ -1232,7 +1228,7 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (rule ccorres_pre_getCurThread) apply (ctac add: postModifyRegisters_ccorres) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rvd = ksCurThread s" + apply (rule_tac R="\s. rvc = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp @@ -1298,8 +1294,8 @@ lemma invokeTCB_WriteRegisters_ccorres_helper: lemma doMachineOp_context: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -1308,8 +1304,8 @@ lemma doMachineOp_context: lemma getObject_context: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbContext_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -1371,8 +1367,8 @@ lemma getObject_context: lemma threadGet_context: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; t \ ksCurThread s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_context [where st=st]) apply (rule exI) @@ -1384,8 +1380,8 @@ done lemma asUser_context: "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; t \ ksCurThread s\ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (asUser (ksCurThread s) f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -1458,7 +1454,7 @@ lemma getMRs_rel_context: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s ; t \ ksCurThread s\ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -1518,7 +1514,7 @@ lemma asUser_getMRs_rel: apply (erule getMRs_rel_context, simp) apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs) apply simp -done + done lemma asUser_sysargs_rel: @@ -1543,7 +1539,7 @@ lemma asUser_setRegister_ko_at': done lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: - notes static_imp_wp [wp] word_less_1[simp del] + notes hoare_weak_lift_imp [wp] word_less_1[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple @@ -1650,14 +1646,14 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_when[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) - apply (rule_tac xf'="\_. 0" in ccorres_call) - apply (rule restart_ccorres) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp apply simp - apply (simp add: xfdc_def) apply simp apply (rule ceqv_refl) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (rule_tac R="\s. 
self = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp @@ -1837,6 +1833,7 @@ shows apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) apply (ctac add: setRegister_ccorres) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc2) @@ -1896,15 +1893,15 @@ shows apply (rule bind_apply_cong[OF _ refl]) apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" in fun_cong [OF mapM_x_split_append]) - apply (rule_tac P="rva \ Some 0" in ccorres_gen_asm) - apply (subgoal_tac "(ipcBuffer = NULL) = (rva = None)") + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") prefer 2 apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.split_asm) apply (simp add: bind_assoc del: Collect_const) apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) (min (unat n) - (case rva of None \ unat n_msgRegisters + (case destIPCBuffer of None \ unat n_msgRegisters | _ \ unat n_frameRegisters))" in ccorres_split_nothrow_novcg) apply (rule ccorres_Cond_rhs) @@ -1912,7 +1909,7 @@ shows rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (RISCV64_H.frameRegisters @ RISCV64_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="unat n_msgRegisters" in ccorres_mapM_x_while') @@ -2021,11 +2018,10 @@ shows apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) apply (simp add: drop_zip del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp del: Collect_const) apply (rule_tac F="\m s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (RISCV64_H.frameRegisters @ RISCV64_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s \ valid_pspace' s" + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="0" in ccorres_mapM_x_while') apply (clarsimp simp: less_diff_conv drop_zip) apply (rule ccorres_guard_imp2) @@ -2098,11 +2094,11 @@ shows apply (simp add: min_less_iff_disj less_imp_diff_less) apply (simp add: drop_zip n_gpRegisters_def) apply (elim disjE impCE) - apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) apply (rule ccorres_return_Skip') - apply (simp add: linorder_not_less word_le_nat_alt - drop_zip mapM_x_Nil n_frameRegisters_def - min.absorb1 n_msgRegisters_def) + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') apply simp apply ceqv @@ -2134,15 +2130,15 @@ shows apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size word_less_nat_alt split: if_split_asm dest!: word_unat.Rep_inverse') - apply simp - apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift static_imp_wp + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp tcb_in_cur_domain'_lift) apply (simp add: n_frameRegisters_def n_msgRegisters_def guard_is_UNIV_def) apply simp apply (rule mapM_x_wp') apply (rule hoare_pre) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem @@ -2151,7 +2147,7 @@ shows msgMaxLength_def msgLengthBits_def word_less_nat_alt unat_of_nat) apply (wp (once) hoare_drop_imps) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply (vcg exspec=setRegister_modifies) apply simp @@ -2171,12 +2167,12 @@ shows apply (simp cong: rev_conj_cong) apply wp apply (wp asUser_inv mapM_wp' getRegister_inv - asUser_get_registers[simplified] static_imp_wp)+ + asUser_get_registers[simplified] hoare_weak_lift_imp)+ apply (rule hoare_strengthen_post, rule asUser_get_registers) apply (clarsimp simp: obj_at'_def genericTake_def frame_gp_registers_convs) apply arith - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply (simp add: performTransfer_def) @@ -2192,7 +2188,7 @@ shows apply (vcg exspec=suspend_modifies) apply vcg apply (rule conseqPre, vcg, clarsimp) - apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def dc_def + apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def split: if_split) done @@ -2257,7 +2253,8 @@ lemma decodeReadRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. 
capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2268,13 +2265,13 @@ lemma decodeReadRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2369,7 +2366,8 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2380,13 +2378,13 @@ lemma decodeWriteRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2394,7 +2392,7 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: performInvocation_def) apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres [where args=args and someNum="unat (args ! 1)"]) - apply (simp add: dc_def[symmetric] o_def) + apply simp apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) apply (rule ccorres_return_C_errorE, simp+)[1] apply wp[1] @@ -2414,7 +2412,7 @@ lemma decodeWriteRegisters_ccorres: WriteRegisters_resume_def word_sle_def word_sless_def numeral_eqs) apply (frule arg_cong[where f="\x. 
unat (of_nat x :: machine_word)"], - simp(no_asm_use) only: word_unat.Rep_inverse o_def, + simp(no_asm_use) only: word_unat.Rep_inverse, simp) apply (rule conjI) apply clarsimp @@ -2630,7 +2628,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (simp add: case_Null_If del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_if_lhs) @@ -2651,7 +2649,7 @@ lemma slotCapLongRunningDelete_ccorres: apply vcg apply (simp del: Collect_const) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of return_def) @@ -3213,7 +3211,6 @@ lemma decodeSetMCPriority_ccorres: >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetMCPriority_'proc)" supply Collect_const[simp del] - supply dc_simp[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3281,8 +3278,7 @@ lemma decodeSetMCPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3347,7 +3343,7 @@ lemma decodeSetPriority_ccorres: (decodeSetPriority args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetPriority_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3415,8 +3411,7 @@ lemma decodeSetPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3494,7 +3489,7 @@ lemma decodeSetSchedParams_ccorres: (decodeSetSchedParams args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetSchedParams_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3581,8 +3576,7 @@ lemma decodeSetSchedParams_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; 
simp) @@ -3814,7 +3808,7 @@ lemma bindNotification_ccorres: (Call bindNotification_'proc)" apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr and tcb_at' tcb" and P'=UNIV + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) @@ -3834,7 +3828,7 @@ lemma bindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv") + apply (case_tac "ntfnObj ntfn") apply ((clarsimp simp: option_to_ctcb_ptr_canonical[OF invs_pspace_canonical'] simp flip: canonical_bit_def)+)[3] apply (auto simp: option_to_ctcb_ptr_def objBits_simps' @@ -3848,7 +3842,7 @@ lemma bindNotification_ccorres: apply ceqv apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) - apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3[unfolded dc_def]) + apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule (1) rf_sr_tcb_update_no_queue2, @@ -3914,7 +3908,7 @@ lemma decodeUnbindNotification_ccorres: apply (rule ccorres_Guard_Seq) apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getBoundNotification) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. ntfn \ Some 0" in ccorres_cross_over_guard) apply (simp add: bindE_bind_linearise) apply wpc apply (simp add: bindE_bind_linearise[symmetric] @@ -4311,7 +4305,7 @@ lemma decodeSetSpace_ccorres: apply (simp add: Collect_False del: Collect_const) apply csymbr apply csymbr - apply (simp add: cnode_cap_case_if cap_get_tag_isCap dc_def[symmetric] + apply (simp add: cnode_cap_case_if cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_throwError @@ -4446,7 +4440,7 @@ lemma decodeSetSpace_ccorres: done lemma invokeTCB_SetTLSBase_ccorres: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. 
rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs') @@ -4457,7 +4451,7 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (cinit lift: thread_' tls_base_') apply (simp add: liftE_def bind_assoc del: Collect_const) - apply (ctac add: setRegister_ccorres[simplified dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_pre_getCurThread) apply (rename_tac cur_thr) apply (rule ccorres_split_nothrow_novcg_dc) diff --git a/proof/crefine/RISCV64/VSpace_C.thy b/proof/crefine/RISCV64/VSpace_C.thy index 51b2cd9eec..9fe539d4d9 100644 --- a/proof/crefine/RISCV64/VSpace_C.thy +++ b/proof/crefine/RISCV64/VSpace_C.thy @@ -254,7 +254,7 @@ lemma handleVMFault_ccorres: apply (rule corres_split[OF read_stval_ccorres[ac]]) apply terminates_trivial apply (drule sym, clarsimp) - apply (wpc; simp add: vm_fault_type_from_H_def vm_fault_defs_C bind_assoc) + apply (corres_cases; simp add: vm_fault_type_from_H_def vm_fault_defs_C bind_assoc) apply (rule returnVMFault_corres; clarsimp simp: exception_defs mask_twice lift_rv_def mask_def vmFaultTypeFSR_def)+ apply wpsimp+ @@ -719,7 +719,7 @@ lemma findVSpaceForASID_ccorres: apply clarsimp apply (rule_tac P="valid_arch_state' and _" and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def NonDetMonad.lift_def + apply (clarsimp simp: throwError_def return_def bindE_def Nondet_Monad.lift_def EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def lookup_fault_lift_invalid_root asid_wf_table_guard) apply (frule rf_sr_asidTable_None[where asid=asid, THEN iffD2], @@ -895,14 +895,13 @@ lemma setVMRoot_ccorres: apply (subst will_throw_and_catch) apply (simp split: capability.split arch_capability.split option.split) apply (fastforce simp: isCap_simps) - apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState[unfolded o_def]) + apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_h_t_valid_riscvKSGlobalPT) apply csymbr apply ccorres_rewrite apply (subst bind_return_unit) apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) - apply (simp flip: dc_def) apply (rule ccorres_return_void_C) apply (rule hoare_post_taut[where P=\]) apply (simp add: catch_def bindE_bind_linearise bind_assoc liftE_def) @@ -925,23 +924,23 @@ lemma setVMRoot_ccorres: in ccorres_gen_asm2) apply simp apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def throwError_def dc_def[symmetric], ccorres_rewrite) + apply (simp add: whenE_def throwError_def, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_riscvKSGlobalPT) apply csymbr - apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) apply (rule hoare_post_taut[where P=\]) - apply (simp add: whenE_def returnOk_def flip: dc_def) + apply (simp add: whenE_def returnOk_def) apply (csymbr) apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) - apply (rule ccorres_cond_true_seq, simp add: dc_def[symmetric], ccorres_rewrite) + apply (rule ccorres_cond_true_seq, simp, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_riscvKSGlobalPT) apply csymbr - apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setVSpaceRoot_ccorres) 
apply (rule ccorres_return_void_C) @@ -1004,12 +1003,12 @@ lemma setRegister_ccorres: (asUser thread (setRegister reg val)) (Call setRegister_'proc)" apply (cinit' lift: thread_' reg_' w_') - apply (simp add: asUser_def dc_def[symmetric] split_def split del: if_split) + apply (simp add: asUser_def split_def) apply (rule ccorres_pre_threadGet) apply (rule ccorres_Guard) apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) - apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = rv" - in threadSet_ccorres_lemma2 [unfolded dc_def]) + apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) apply vcg apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def simpler_modify_def typ_heap_simps) @@ -1040,8 +1039,6 @@ lemma msgRegisters_ccorres: (* usually when we call setMR directly, we mean to only set a registers, which will fit in actual registers *) lemma setMR_as_setRegister_ccorres: - notes dc_simp[simp del] - shows "ccorres (\rv rv'. rv' = of_nat offset + 1) ret__unsigned_' (tcb_at' thread and K (TCB_H.msgRegisters ! offset = reg \ offset < length msgRegisters)) (UNIV \ \\reg___unsigned_long = val\ @@ -1058,7 +1055,7 @@ lemma setMR_as_setRegister_ccorres: apply (ctac add: setRegister_ccorres) apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setRegister_modifies) apply (clarsimp simp: n_msgRegisters_def length_msgRegisters not_le conj_commute) @@ -1245,7 +1242,6 @@ lemma unmapPage_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: page_size_' asid___unsigned_long_' vptr_' pptr___unsigned_long_') apply (simp add: ignoreFailure_liftM) - apply (fold dc_def) apply (ctac add: findVSpaceForASID_ccorres) apply (rename_tac vspace find_ret) apply (rule ccorres_liftE_Seq) @@ -1255,9 +1251,9 @@ lemma unmapPage_ccorres: apply (simp (no_asm) add: split_def del: Collect_const) apply (rule ccorres_split_unless_throwError_cond[where Q=\ and Q'=\]) apply (clarsimp simp: of_nat_pageBitsForSize split: if_split) - apply (simp add: throwError_def flip: dc_def) + apply (simp add: throwError_def) apply (rule ccorres_return_void_C) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (subst bindE_assoc[symmetric]) @@ -1270,11 +1266,10 @@ lemma unmapPage_ccorres: split: if_split_asm pte.split_asm) apply (rule ceqv_refl) apply (simp add: unfold_checkMapping_return liftE_bindE - Collect_const[symmetric] dc_def[symmetric] del: Collect_const) apply csymbr apply (rule ccorres_split_nothrow_novcg) - apply (simp add: dc_def[symmetric] ptr_add_assertion_def split_def) + apply (simp add: ptr_add_assertion_def split_def) apply ccorres_rewrite apply (rule storePTE_Basic_ccorres) apply (simp add: cpte_relation_def Let_def) @@ -1293,7 +1288,7 @@ lemma unmapPage_ccorres: apply wpsimp apply (vcg exspec=lookupPTSlot_modifies) apply ccorres_rewrite - apply (simp add: throwError_def flip: dc_def) + apply (simp add: throwError_def) apply (rule ccorres_return_void_C) apply wp apply (vcg exspec=findVSpaceForASID_modifies) @@ -1373,7 +1368,7 @@ lemma performPageInvocationUnmap_ccorres: apply simp apply simp apply simp - apply (simp add: asidInvalid_def flip: dc_def) + apply (simp add: asidInvalid_def) apply (rule ccorres_return_Skip) apply ceqv apply (simp add: liftM_def) @@ 
-1655,7 +1650,7 @@ proof - show ?thesis apply (cinit lift: newLvl1pt_' simp: ptIndex_maxPTLevel_pptrBase ptTranslationBits_def) apply (rule ccorres_pre_gets_riscvKSGlobalPT_ksArchState, rename_tac globalPT) - apply (rule ccorres_rel_imp[where r=dc, OF _ dc_simp]) + apply (rule ccorres_rel_imp[where r=dc, simplified]) apply (clarsimp simp: whileAnno_def objBits_simps bit_simps RISCV64.pptrBase_def mask_def) apply (rule ccorres_h_t_valid_riscvKSGlobalPT) apply csymbr diff --git a/proof/crefine/X64/ADT_C.thy b/proof/crefine/X64/ADT_C.thy index a63a7a3757..652c5279ef 100644 --- a/proof/crefine/X64/ADT_C.thy +++ b/proof/crefine/X64/ADT_C.thy @@ -84,7 +84,7 @@ lemma setTCBContext_C_corres: apply clarsimp apply (frule getObject_eq [rotated -1], simp) apply (simp add: objBits_simps') - apply (simp add: NonDetMonad.bind_def split_def) + apply (simp add: Nondet_Monad.bind_def split_def) apply (rule bexI) prefer 2 apply assumption diff --git a/proof/crefine/X64/Arch_C.thy b/proof/crefine/X64/Arch_C.thy index 05333cd793..26f4689aad 100644 --- a/proof/crefine/X64/Arch_C.thy +++ b/proof/crefine/X64/Arch_C.thy @@ -75,7 +75,7 @@ lemma performPageTableInvocationUnmap_ccorres: apply csymbr apply (simp add: storePTE_def' swp_def) apply clarsimp - apply(simp only: dc_def[symmetric] bit_simps_corres[symmetric]) + apply (simp only: bit_simps_corres[symmetric]) apply (ctac add: clearMemory_setObject_PTE_ccorres) apply wp apply (simp del: Collect_const) @@ -217,7 +217,7 @@ lemma performPageDirectoryInvocationUnmap_ccorres: apply csymbr apply (simp add: storePDE_def' swp_def) apply clarsimp - apply(simp only: dc_def[symmetric] bit_simps_corres[symmetric]) + apply (simp only: bit_simps_corres[symmetric]) apply (ctac add: clearMemory_setObject_PDE_ccorres) apply wp apply (simp del: Collect_const) @@ -359,7 +359,7 @@ lemma performPDPTInvocationUnmap_ccorres: apply csymbr apply (simp add: storePDPTE_def' swp_def) apply clarsimp - apply(simp only: dc_def[symmetric] bit_simps_corres[symmetric]) + apply (simp only: bit_simps_corres[symmetric]) apply (ctac add: clearMemory_setObject_PDPTE_ccorres) apply wp apply (simp del: Collect_const) @@ -748,7 +748,9 @@ shows apply (rule ccorres_rhs_assoc2) apply (rule ccorres_abstract_cleanup) apply (rule ccorres_symb_exec_l) - apply (rule_tac P = "rva = (capability.UntypedCap isdev frame pageBits idx)" in ccorres_gen_asm) + apply (rename_tac pcap) + apply (rule_tac P = "pcap = (capability.UntypedCap isdev frame pageBits idx)" + in ccorres_gen_asm) apply (simp add: hrs_htd_update del:fun_upd_apply) apply (rule ccorres_split_nothrow) @@ -887,7 +889,7 @@ shows pageBits_def split: if_split) apply (clarsimp simp: X64SmallPageBits_def word_sle_def is_aligned_mask[symmetric] - ghost_assertion_data_get_gs_clear_region[unfolded o_def]) + ghost_assertion_data_get_gs_clear_region) apply (subst ghost_assertion_size_logic_flex[unfolded o_def, rotated]) apply assumption apply (simp add: ghost_assertion_data_get_gs_clear_region[unfolded o_def]) @@ -1121,10 +1123,10 @@ lemma decodeX64PageTableInvocation_ccorres: isPML4Cap (capCap (fst (extraCaps ! 
0)))" in ccorres_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: hd_conv_nth) apply csymbr @@ -1789,7 +1791,7 @@ lemma performPageInvocationMapPDPTE_ccorres: done lemma performPageGetAddress_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and (\s. ksCurThread s = thread) and ct_in_state' ((=) Restart)) @@ -1815,7 +1817,7 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) @@ -1838,7 +1840,7 @@ lemma performPageGetAddress_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp @@ -1851,10 +1853,10 @@ lemma performPageGetAddress_ccorres: Kernel_C.msgInfoRegister_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply (clarsimp simp: ThreadState_Running_def) apply (vcg exspec=lookupIPCBuffer_modifies) apply clarsimp apply vcg @@ -2218,7 +2220,7 @@ lemma decodeX86ModeMapPage_ccorres: (Inr (invocation.InvokePage (PageMap (ArchObjectCap cap) slot x pml4))) odE) (Call decodeX86ModeMapPage_'proc)" - supply if_cong[cong] tl_drop_1[simp] Collect_const[simp del] dc_simp[simp del] + supply if_cong[cong] tl_drop_1[simp] Collect_const[simp del] apply (simp add: K_def) apply (rule ccorres_gen_asm) apply (cinit' lift: label___unsigned_long_' page_size_' vroot_' cap_' paddr_' vm_rights_' vm_attr_' @@ -2249,7 +2251,7 @@ lemma decodeX86ModeMapPage_ccorres: apply (vcg exspec=createSafeMappingEntries_PDPTE_modifies) by (clarsimp simp: invs_valid_objs' tcb_at_invs' vmsz_aligned_addrFromPPtr' invs_queues valid_tcb_state'_def invs_sch_act_wf' ThreadState_Restart_def rf_sr_ksCurThread - arch_invocation_label_defs mask_def isCap_simps dc_def) + arch_invocation_label_defs mask_def isCap_simps) lemma valid_cap'_PageCap_kernel_mappings: "\pspace_in_kernel_mappings' s; isPageCap cap; valid_cap' (ArchObjectCap cap) s\ @@ -2559,7 +2561,7 @@ lemma decodeX64FrameInvocation_ccorres: apply (rule ccorres_Cond_rhs_Seq) apply (clarsimp simp: maptype_from_H_def throwError_bind invocationCatch_def split: vmmap_type.split_asm) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_to_H_cases) (* throw on mismatched vaddr *) apply simp @@ -2571,7 +2573,6 @@ lemma 
decodeX64FrameInvocation_ccorres: split: vmmap_type.split_asm) apply (clarsimp simp: X86_MappingNone_def X86_MappingVSpace_def) apply ccorres_rewrite - apply (fold dc_def id_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* frame cap not mapped, check mapping *) @@ -2590,7 +2591,7 @@ lemma decodeX64FrameInvocation_ccorres: apply csymbr apply (simp add: user_vtop_def X64.pptrUserTop_def hd_conv_nth length_ineq_not_Nil) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[unfolded id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) (* Doesn't throw case *) apply (drule_tac s="Some y" in sym, @@ -2626,7 +2627,6 @@ lemma decodeX64FrameInvocation_ccorres: apply (simp add: word_less_nat_alt user_vtop_def X64.pptrUserTop_def hd_conv_nth length_ineq_not_Nil) apply (ccorres_rewrite) - apply (fold dc_def) apply (rule ccorres_return_Skip) apply clarsimp apply (clarsimp simp: asidInvalid_def) @@ -2652,7 +2652,7 @@ lemma decodeX64FrameInvocation_ccorres: apply (clarsimp simp: cap_lift_pml4_cap cap_to_H_def get_capPtr_CL_def cap_pml4_cap_lift_def elim!: ccap_relationE split: if_split) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply csymbr apply (rule ccorres_symb_exec_r) @@ -2666,7 +2666,7 @@ lemma decodeX64FrameInvocation_ccorres: apply (clarsimp simp: framesize_from_to_H user_vtop_def X64.pptrUserTop_def) apply (simp add: injection_handler_throwError throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply csymbr apply csymbr @@ -3140,10 +3140,10 @@ lemma decodeX64PageDirectoryInvocation_ccorres: isPML4Cap (capCap (fst (extraCaps ! 0)))" in ccorres_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def from_bool_0 cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def from_bool_0 cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: hd_conv_nth) apply csymbr @@ -3509,7 +3509,7 @@ lemma decodeX64PDPTInvocation_ccorres: >>= invocationCatch thread isBlocking isCall InvokeArchObject) (Call decodeX64PDPTInvocation_'proc)" (is "_ \ _ \ ccorres _ _ ?pre ?cpre _ _ _") - supply Collect_const[simp del] if_cong[cong] dc_simp[simp del] + supply Collect_const[simp del] if_cong[cong] from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] ccorres_IF_True[simp] apply (clarsimp simp only: isCap_simps) apply (cinit' lift: label___unsigned_long_' length___unsigned_long_' cte_' current_extra_caps_' cap_' buffer_' @@ -3614,15 +3614,14 @@ lemma decodeX64PDPTInvocation_ccorres: apply clarsimp apply (rule ccorres_Cond_rhs_Seq) apply ccorres_rewrite - apply clarsimp apply (rule_tac P="isArchObjectCap (fst (extraCaps ! 0)) \ isPML4Cap (capCap (fst (extraCaps ! 
0)))" in ccorres_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (clarsimp simp: hd_conv_nth throwError_bind invocationCatch_def cong: if_cong) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: hd_conv_nth) apply csymbr @@ -3752,8 +3751,7 @@ lemma decodeX64PDPTInvocation_ccorres: simp: neq_Nil_conv valid_cap_simps' isCap_simps get_capMappedASID_CL_def cap_pml4_cap_lift cap_to_H_simps split: if_split_asm) - apply (clarsimp simp: dc_simp neq_Nil_conv[where xs=extraCaps] - excaps_in_mem_def slotcap_in_mem_def + apply (clarsimp simp: neq_Nil_conv[where xs=extraCaps] excaps_in_mem_def slotcap_in_mem_def dest!: sym[where s="ArchObjectCap cp" for cp]) apply (clarsimp simp: word_less_nat_alt hd_conv_nth dest!: length_ineq_not_Nil) apply (rule conjI, fastforce simp: mask_def) @@ -3888,7 +3886,7 @@ lemma decodeX64MMUInvocation_ccorres: throwError_bind invocationCatch_def split: invocation_label.split arch_invocation_label.split) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) (* X64ASIDControlMakePool *) apply (clarsimp simp: decodeX64MMUInvocation_def decodeX64ASIDControlInvocation_def isCap_simps) @@ -3899,7 +3897,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (rule ccorres_cond_true_seq | simp)+ apply (simp add: throwError_bind invocationCatch_def) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply (simp add: interpret_excaps_test_null excaps_map_def) apply csymbr @@ -3908,14 +3906,14 @@ lemma decodeX64MMUInvocation_ccorres: apply (rule ccorres_cond_true_seq | simp)+ apply (simp add: throwError_bind invocationCatch_def) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply csymbr apply (simp add: interpret_excaps_test_null[OF Suc_leI]) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: length_ineq_not_Nil throwError_bind invocationCatch_def) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (subgoal_tac "1 < length extraCaps") prefer 2 @@ -4020,7 +4018,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (clarsimp split: list.split) apply (fastforce dest!: filter_eq_ConsD) apply (simp add: throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply (rule ccorres_Guard_Seq)+ apply (simp add: invocationCatch_use_injection_handler @@ -4045,7 +4043,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (clarsimp simp: to_bool_if cond_throw_whenE bindE_assoc) apply (rule ccorres_split_when_throwError_cond[where Q = \ and Q' = \]) apply fastforce - apply (rule syscall_error_throwError_ccorres_n[simplified id_def]) + apply (rule 
syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_rel_def shiftL_nat syscall_error_to_H_cases) prefer 2 apply vcg @@ -4163,7 +4161,7 @@ lemma decodeX64MMUInvocation_ccorres: apply ccorres_rewrite apply (clarsimp simp: isCap_simps decodeX64ASIDPoolInvocation_def throwError_bind invocationCatch_def) - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply (clarsimp simp: isCap_simps decodeX64ASIDPoolInvocation_def split: list.split) apply csymbr @@ -4388,7 +4386,7 @@ lemma decodeX64MMUInvocation_ccorres: apply (rule_tac t=b and s="snd (extraCaps ! 0)" in subst, fastforce) apply vcg (* Mode stuff *) - apply (rule ccorres_trim_returnE; simp) + apply (rule ccorres_trim_returnE; simp?) apply (rule ccorres_call, rule decodeX64ModeMMUInvocation_ccorres; simp) @@ -4574,7 +4572,7 @@ lemma setMessageInfo_ksCurThread_ccorres: done lemma invokeX86PortIn8_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (valid_objs' and valid_queues and ct_in_state' ((=) Restart) and @@ -4608,7 +4606,7 @@ lemma invokeX86PortIn8_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) @@ -4631,7 +4629,7 @@ lemma invokeX86PortIn8_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp @@ -4644,10 +4642,10 @@ lemma invokeX86PortIn8_ccorres: Kernel_C.msgInfoRegister_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply (clarsimp simp: ThreadState_Running_def) apply (vcg exspec=lookupIPCBuffer_modifies) apply (wpsimp wp: hoare_vcg_imp_lift hoare_vcg_all_lift) apply (vcg exspec=in8_modifies) @@ -4662,7 +4660,7 @@ lemma invokeX86PortIn8_ccorres: simplified, symmetric]) lemma invokeX86PortIn16_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (valid_objs' and valid_queues and ct_in_state' ((=) Restart) and @@ -4696,7 +4694,7 @@ lemma invokeX86PortIn16_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) @@ -4719,7 +4717,7 @@ lemma invokeX86PortIn16_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setThreadState_modifies) 
apply wpsimp @@ -4732,10 +4730,10 @@ lemma invokeX86PortIn16_ccorres: Kernel_C.msgInfoRegister_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply (clarsimp simp: ThreadState_Running_def) apply (vcg exspec=lookupIPCBuffer_modifies) apply (wpsimp wp: hoare_vcg_imp_lift hoare_vcg_all_lift) apply (vcg exspec=in16_modifies) @@ -4750,7 +4748,7 @@ lemma invokeX86PortIn16_ccorres: simplified, symmetric]) lemma invokeX86PortIn32_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres ((intr_and_se_rel \ Inr) \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') (valid_objs' and valid_queues and ct_in_state' ((=) Restart) and @@ -4782,7 +4780,7 @@ lemma invokeX86PortIn32_ccorres: apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply clarsimp apply (rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_simp) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (rule ccorres_rhs_assoc)+ apply (clarsimp simp: replyOnRestart_def liftE_def bind_assoc) @@ -4805,7 +4803,7 @@ lemma invokeX86PortIn32_ccorres: apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: return_def dc_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setThreadState_modifies) apply wpsimp @@ -4818,10 +4816,10 @@ lemma invokeX86PortIn32_ccorres: Kernel_C.msgInfoRegister_def) apply (vcg exspec=setMR_modifies) apply wpsimp - apply (clarsimp simp: dc_def) + apply clarsimp apply (vcg exspec=setRegister_modifies) apply wpsimp - apply (clarsimp simp: dc_def ThreadState_Running_def) + apply (clarsimp simp: ThreadState_Running_def) apply (vcg exspec=lookupIPCBuffer_modifies) apply (wpsimp wp: hoare_vcg_imp_lift hoare_vcg_all_lift) apply (vcg exspec=in32_modifies) @@ -4836,7 +4834,7 @@ lemma invokeX86PortIn32_ccorres: simplified, symmetric]) lemma invokeX86PortOut8_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') invs' @@ -4855,7 +4853,7 @@ lemma invokeX86PortOut8_ccorres: done lemma invokeX86PortOut16_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') invs' @@ -4874,7 +4872,7 @@ lemma invokeX86PortOut16_ccorres: done lemma invokeX86PortOut32_ccorres: - notes Collect_const[simp del] dc_simp[simp del] + notes Collect_const[simp del] shows "ccorres (cintr \ (\rv rv'. 
rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') invs' @@ -5340,7 +5338,7 @@ proof - apply (rule ccorres_equals_throwError) apply (fastforce simp: whenE_def throwError_bind invocationCatch_def) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_to_H_cases) apply (clarsimp simp: ucast_drop_big_mask) apply (clarsimp simp: invocationCatch_use_injection_handler injection_bindE[OF refl refl] @@ -5352,7 +5350,7 @@ proof - apply (rule ccorres_Cond_rhs_Seq) apply (clarsimp simp: from_bool_0 injection_handler_throwError) apply ccorres_rewrite - apply (rule syscall_error_throwError_ccorres_n[simplified dc_def id_def o_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (clarsimp simp: syscall_error_to_H_cases) apply (clarsimp simp: from_bool_neq_0 injection_handler_returnOk) apply (ctac add: ccorres_injection_handler_csum1 @@ -5365,8 +5363,7 @@ proof - apply ccorres_rewrite apply (rule_tac P="\s. thread = ksCurThread s" in ccorres_cross_over_guard) apply (ctac add: setThreadState_ccorres) - apply (ctac(no_vcg) add: invokeX86PortControl_ccorres - [simplified dc_def o_def id_def]) + apply (ctac(no_vcg) add: invokeX86PortControl_ccorres) apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE, simp+)[1] @@ -5422,7 +5419,7 @@ proof - apply (clarsimp simp: interpret_excaps_eq rf_sr_ksCurThread ThreadState_Restart_def mask_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) apply clarsimp - apply (rule conjI, clarsimp simp: sysargs_rel_to_n o_def dest!: unat_length_4_helper) + apply (rule conjI, clarsimp simp: sysargs_rel_to_n dest!: unat_length_4_helper) apply (clarsimp simp: o_def) done qed diff --git a/proof/crefine/X64/CSpaceAcc_C.thy b/proof/crefine/X64/CSpaceAcc_C.thy index 79d4e7a937..7c199a74aa 100644 --- a/proof/crefine/X64/CSpaceAcc_C.thy +++ b/proof/crefine/X64/CSpaceAcc_C.thy @@ -274,7 +274,7 @@ lemma array_assertion_abs_cnode_ctes: apply (metis array_assertion_shrink_right) done -lemmas ccorres_move_array_assertion_cnode_ctes [corres_pre] +lemmas ccorres_move_array_assertion_cnode_ctes [ccorres_pre] = ccorres_move_Guard_Seq [OF array_assertion_abs_cnode_ctes] ccorres_move_Guard [OF array_assertion_abs_cnode_ctes] diff --git a/proof/crefine/X64/CSpace_All.thy b/proof/crefine/X64/CSpace_All.thy index 9262e81db1..45a8087a89 100644 --- a/proof/crefine/X64/CSpace_All.thy +++ b/proof/crefine/X64/CSpace_All.thy @@ -25,9 +25,9 @@ abbreviation (* FIXME: move *) lemma ccorres_return_into_rel: - "ccorres (\rv rv'. r (f rv) rv') xf G G' hs a c + "ccorres (r \ f) xf G G' hs a c \ ccorres r xf G G' hs (a >>= (\rv. return (f rv))) c" - by (simp add: liftM_def[symmetric] o_def) + by (simp add: liftM_def[symmetric]) lemma lookupCap_ccorres': "ccorres (lookup_failure_rel \ ccap_relation) lookupCap_xf diff --git a/proof/crefine/X64/CSpace_C.thy b/proof/crefine/X64/CSpace_C.thy index d02b699fe7..e8b39a5233 100644 --- a/proof/crefine/X64/CSpace_C.thy +++ b/proof/crefine/X64/CSpace_C.thy @@ -770,7 +770,7 @@ lemma update_freeIndex': show ?thesis apply (cinit lift: cap_ptr_' v64_') apply (rule ccorres_pre_getCTE) - apply (rule_tac P="\s. ctes_of s srcSlot = Some rv \ (\i. cteCap rv = UntypedCap d p sz i)" + apply (rule_tac P="\s. ctes_of s srcSlot = Some cte \ (\i. 
cteCap cte = UntypedCap d p sz i)" in ccorres_from_vcg[where P' = UNIV]) apply (rule allI) apply (rule conseqPre) @@ -894,7 +894,7 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (rule ccorres_Guard) apply (rule ccorres_call) - apply (rule update_freeIndex [unfolded dc_def]) + apply (rule update_freeIndex) apply simp apply simp apply simp @@ -920,14 +920,14 @@ lemma setUntypedCapAsFull_ccorres [corres]: apply csymbr apply (clarsimp simp: cap_get_tag_to_H cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap split: if_split_asm) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) + apply (rule ccorres_return_Skip) apply clarsimp apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip [unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: cap_get_tag_isCap[symmetric] cap_get_tag_UntypedCap) apply (frule(1) cte_wp_at_valid_objs_valid_cap') apply (clarsimp simp: untypedBits_defs) @@ -1043,19 +1043,17 @@ lemma cteInsert_ccorres: apply csymbr apply simp apply (rule ccorres_move_c_guard_cte) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres:ccorres_updateMDB_set_mdbPrev) - apply (simp add:dc_def[symmetric]) apply (ctac ccorres: ccorres_updateMDB_skip) - apply (wp static_imp_wp)+ - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp)+ + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg - apply (wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply vcg apply (clarsimp simp:cmdb_node_relation_mdbNext) - apply (wp setUntypedCapAsFull_cte_at_wp static_imp_wp) - apply (clarsimp simp: Collect_const_mem dc_def split del: if_split) + apply (wp setUntypedCapAsFull_cte_at_wp hoare_weak_lift_imp) + apply (clarsimp simp: Collect_const_mem split del: if_split) apply (vcg exspec=setUntypedCapAsFull_modifies) apply wp apply vcg @@ -2438,8 +2436,8 @@ lemma Arch_postCapDeletion_ccorres: prefer 3 (* IOPort case *) apply (rule ccorres_rhs_assoc)+ apply csymbr+ - apply (ctac add: freeIOPortRange_ccorres[simplified dc_def]) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (ctac add: freeIOPortRange_ccorres) + apply (rule ccorres_return_Skip)+ apply (clarsimp simp: arch_cleanup_info_wf'_def split: arch_capability.splits) apply (frule cap_get_tag_isCap_unfolded_H_cap) by (clarsimp simp: ccap_relation_def cap_io_port_cap_lift cap_to_H_def) @@ -2464,7 +2462,6 @@ lemma postCapDeletion_ccorres: apply (rule ccorres_symb_exec_r) apply (rule_tac xf'=irq_' in ccorres_abstract, ceqv) apply (rule_tac P="rv' = ucast (capIRQ cap)" in ccorres_gen_asm2) - apply (fold dc_def) apply (frule cap_get_tag_to_H, solves \clarsimp simp: cap_get_tag_isCap_unfolded_H_cap\) apply (clarsimp simp: cap_irq_handler_cap_lift) apply (ctac(no_vcg) add: deletedIRQHandler_ccorres) @@ -2475,9 +2472,9 @@ lemma postCapDeletion_ccorres: apply (clarsimp simp: cap_get_tag_isCap) apply (rule ccorres_Cond_rhs) apply (wpc; clarsimp simp: isCap_simps) - apply (ctac(no_vcg) add: Arch_postCapDeletion_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: 
Arch_postCapDeletion_ccorres) apply (simp add: not_irq_or_arch_cap_case) - apply (rule ccorres_return_Skip[unfolded dc_def])+ + apply (rule ccorres_return_Skip) apply clarsimp apply (rule conjI, clarsimp simp: isCap_simps Kernel_C.maxIRQ_def) apply (frule cap_get_tag_isCap_unfolded_H_cap(5)) @@ -2527,7 +2524,7 @@ lemma emptySlot_ccorres: \ \*** proof for the 'else' branch (return () and SKIP) ***\ prefer 2 - apply (ctac add: ccorres_return_Skip[unfolded dc_def]) + apply (ctac add: ccorres_return_Skip) \ \*** proof for the 'then' branch ***\ @@ -2572,7 +2569,7 @@ lemma emptySlot_ccorres: \ \the post_cap_deletion case\ - apply (ctac(no_vcg) add: postCapDeletion_ccorres [unfolded dc_def]) + apply (ctac(no_vcg) add: postCapDeletion_ccorres) \ \Haskell pre/post for y \ updateMDB slot (\a. nullMDBNode);\ apply wp @@ -2645,8 +2642,8 @@ lemma capSwapForDelete_ccorres: \ \--- instruction: when (slot1 \ slot2) \ / IF Ptr slot1 = Ptr slot2 THEN \\ apply (simp add:when_def) apply (rule ccorres_if_cond_throws2 [where Q = \ and Q' = \]) - apply (case_tac "slot1=slot2", simp+) - apply (rule ccorres_return_void_C [simplified dc_def]) + apply (case_tac "slot1=slot2"; simp) + apply (rule ccorres_return_void_C) \ \***Main goal***\ \ \--- ccorres goal with 2 affectations (cap1 and cap2) on both on Haskell and C\ @@ -2655,7 +2652,7 @@ lemma capSwapForDelete_ccorres: apply (rule ccorres_pre_getCTE)+ apply (rule ccorres_move_c_guard_cte, rule ccorres_symb_exec_r)+ \ \***Main goal***\ - apply (ctac (no_vcg) add: cteSwap_ccorres [unfolded dc_def] ) + apply (ctac (no_vcg) add: cteSwap_ccorres) \ \C Hoare triple for \cap2 :== \\ apply vcg \ \C existential Hoare triple for \cap2 :== \\ diff --git a/proof/crefine/X64/CSpace_RAB_C.thy b/proof/crefine/X64/CSpace_RAB_C.thy index 0aa928b72f..c7838029cd 100644 --- a/proof/crefine/X64/CSpace_RAB_C.thy +++ b/proof/crefine/X64/CSpace_RAB_C.thy @@ -54,7 +54,7 @@ lemma ccorres_remove_bind_returnOk_noguard: apply clarsimp apply (drule not_snd_bindE_I1) apply (erule (4) ccorresE[OF ac]) - apply (clarsimp simp add: bindE_def returnOk_def NonDetMonad.lift_def bind_def return_def + apply (clarsimp simp add: bindE_def returnOk_def Nondet_Monad.lift_def bind_def return_def split_def) apply (rule bexI [rotated], assumption) apply (simp add: throwError_def return_def unif_rrel_def @@ -208,10 +208,8 @@ next apply (simp add: cap_get_tag_isCap split del: if_split) apply (thin_tac "ret__unsigned_longlong = X" for X) apply (rule ccorres_split_throws [where P = "?P"]) - apply (rule_tac G' = "\w_rightsMask. ({s. nodeCap_' s = nodeCap} - \ {s. unat (n_bits_' s) = guard'})" - in ccorres_abstract [where xf' = w_rightsMask_']) - apply (rule ceqv_refl) + apply (rule_tac P'="{s. nodeCap_' s = nodeCap} \ {s. 
unat (n_bits_' s) = guard'}" + in ccorres_inst) apply (rule_tac r' = "?rvr" in ccorres_rel_imp [where xf' = rab_xf]) defer diff --git a/proof/crefine/X64/Ctac_lemmas_C.thy b/proof/crefine/X64/Ctac_lemmas_C.thy index 3fd06ef556..2dd3547fc5 100644 --- a/proof/crefine/X64/Ctac_lemmas_C.thy +++ b/proof/crefine/X64/Ctac_lemmas_C.thy @@ -23,7 +23,7 @@ lemma c_guard_abs_cte: apply (simp add: typ_heap_simps') done -lemmas ccorres_move_c_guard_cte [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] +lemmas ccorres_move_c_guard_cte [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_cte] lemma c_guard_abs_tcb: fixes p :: "tcb_C ptr" @@ -33,7 +33,7 @@ lemma c_guard_abs_tcb: apply simp done -lemmas ccorres_move_c_guard_tcb [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] +lemmas ccorres_move_c_guard_tcb [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb] lemma cte_array_relation_array_assertion: "gsCNodes s p = Some n \ cte_array_relation s cstate @@ -96,7 +96,7 @@ lemma array_assertion_abs_tcb_ctes_add': lemmas array_assertion_abs_tcb_ctes_add = array_assertion_abs_tcb_ctes_add'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_array_assertion_tcb_ctes [corres_pre] +lemmas ccorres_move_array_assertion_tcb_ctes [ccorres_pre] = ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(1)] ccorres_move_array_assertions [OF array_assertion_abs_tcb_ctes(2)] ccorres_move_Guard_Seq[OF array_assertion_abs_tcb_ctes_add] @@ -119,7 +119,7 @@ lemma c_guard_abs_tcb_ctes': done lemmas c_guard_abs_tcb_ctes = c_guard_abs_tcb_ctes'[simplified objBits_defs mask_def, simplified] -lemmas ccorres_move_c_guard_tcb_ctes [corres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] +lemmas ccorres_move_c_guard_tcb_ctes [ccorres_pre] = ccorres_move_c_guards [OF c_guard_abs_tcb_ctes] lemma c_guard_abs_pte: "\s s'. 
(s, s') \ rf_sr \ pte_at' (ptr_val p) s \ True diff --git a/proof/crefine/X64/Delete_C.thy b/proof/crefine/X64/Delete_C.thy index aa48fdea03..fa30db71c0 100644 --- a/proof/crefine/X64/Delete_C.thy +++ b/proof/crefine/X64/Delete_C.thy @@ -856,7 +856,7 @@ lemma finaliseSlot_ccorres: ccorres_seq_skip) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) apply (rule hyps[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def], - unfolded split_def, unfolded K_def], + unfolded split_def], (simp add: in_monad)+) apply (simp add: from_bool_0) apply simp @@ -878,7 +878,7 @@ lemma finaliseSlot_ccorres: apply (simp add: guard_is_UNIV_def) apply (simp add: conj_comms) apply (wp make_zombie_invs' updateCap_cte_wp_at_cases - updateCap_cap_to' hoare_vcg_disj_lift static_imp_wp)+ + updateCap_cap_to' hoare_vcg_disj_lift hoare_weak_lift_imp)+ apply (simp add: guard_is_UNIV_def) apply wp apply (simp add: guard_is_UNIV_def) @@ -912,7 +912,7 @@ lemma finaliseSlot_ccorres: apply (erule(1) cmap_relationE1 [OF cmap_relation_cte]) apply (frule valid_global_refsD_with_objSize, clarsimp) apply (auto simp: typ_heap_simps dest!: ccte_relation_ccap_relation)[1] - apply (wp isFinalCapability_inv static_imp_wp | wp (once) isFinal[where x=slot'])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | wp (once) isFinal[where x=slot'])+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -1007,26 +1007,23 @@ lemma cteRevoke_ccorres1: apply (rule ccorres_drop_cutMon_bindE) apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg) add: cteDelete_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon, simp only: cutMon_walk_bindE) apply (rule ccorres_drop_cutMon_bindE) apply (ctac(no_vcg) add: preemptionPoint_ccorres) - apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs - dc_def[symmetric]) + apply (simp del: Collect_const add: Collect_False ccorres_cond_iffs) apply (rule ccorres_cutMon) apply (rule rsubst[where P="ccorres r xf' P P' hs a" for r xf' P P' hs a]) - apply (rule hyps[unfolded K_def], - (fastforce simp: in_monad)+)[1] + apply (rule hyps; fastforce simp: in_monad) apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp preemptionPoint_invR) apply simp apply simp apply (simp, rule ccorres_split_throws) - apply (rule ccorres_return_C_errorE, simp+)[1] + apply (rule ccorres_return_C_errorE; simp) apply vcg apply (wp cteDelete_invs' cteDelete_sch_act_simple) apply (rule ccorres_cond_false) diff --git a/proof/crefine/X64/Detype_C.thy b/proof/crefine/X64/Detype_C.thy index 898306d688..bd677c7ffa 100644 --- a/proof/crefine/X64/Detype_C.thy +++ b/proof/crefine/X64/Detype_C.thy @@ -1548,7 +1548,7 @@ lemma deleteObjects_ccorres': apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: in_monad) apply (rule bexI [rotated]) - apply (rule iffD2 [OF in_monad(20)]) + apply (rule iffD2 [OF in_monad(21)]) apply (rule conjI [OF refl refl]) apply (clarsimp simp: simpler_modify_def) proof - diff --git a/proof/crefine/X64/Finalise_C.thy b/proof/crefine/X64/Finalise_C.thy index 78e3fa1bd6..7425df66fc 100644 --- a/proof/crefine/X64/Finalise_C.thy +++ b/proof/crefine/X64/Finalise_C.thy @@ -201,8 +201,7 @@ proof (induct ts) apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (rule ccorres_tmp_lift2[where G'=UNIV and G''="\x. 
UNIV", simplified]) apply ceqv - apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def - dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs mapM_x_def sequence_x_def) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip) apply simp done @@ -211,7 +210,7 @@ next show ?case apply (rule iffD1 [OF ccorres_expand_while_iff]) apply (simp del: Collect_const - add: dc_def[symmetric] mapM_x_Cons) + add: mapM_x_Cons) apply (rule ccorres_guard_imp2) apply (rule_tac xf'=thread_' in ccorres_abstract) apply ceqv @@ -279,10 +278,10 @@ lemma cancelAllIPC_ccorres: apply (cinit lift: epptr_') apply (rule ccorres_symb_exec_l [OF _ getEndpoint_inv _ empty_fail_getEndpoint]) apply (rule_tac xf'=ret__unsigned_longlong_' - and val="case rv of IdleEP \ scast EPState_Idle + and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv | SendEP _ \ scast EPState_Send" - and R="ko_at' rv epptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ep epptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ep]) @@ -291,8 +290,8 @@ lemma cancelAllIPC_ccorres: apply (simp add: cendpoint_relation_def Let_def split: endpoint.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv epptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ko_at' ep epptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc apply (rename_tac list) apply (simp add: endpoint_state_defs @@ -327,7 +326,7 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -344,12 +343,10 @@ lemma cancelAllIPC_ccorres: apply (wp set_ep_valid_objs' hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear) apply vcg - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: endpoint_state_defs - Collect_False Collect_True - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: endpoint_state_defs Collect_False Collect_True ccorres_cond_iffs del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -379,7 +376,7 @@ lemma cancelAllIPC_ccorres: subgoal by (simp add: cendpoint_relation_def endpoint_state_defs) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -405,11 +402,6 @@ lemma cancelAllIPC_ccorres: apply clarsimp done -lemma empty_fail_getNotification: - "empty_fail (getNotification ep)" - unfolding getNotification_def - by (auto intro: empty_fail_getObject) - lemma cancelAllSignals_ccorres: "ccorres dc xfdc (invs') (UNIV \ {s. 
ntfnPtr_' s = Ptr ntfnptr}) [] @@ -417,10 +409,10 @@ lemma cancelAllSignals_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule_tac xf'=ret__unsigned_longlong_' - and val="case ntfnObj rv of IdleNtfn \ scast NtfnState_Idle + and val="case ntfnObj ntfn of IdleNtfn \ scast NtfnState_Idle | ActiveNtfn _ \ scast NtfnState_Active | WaitingNtfn _ \ scast NtfnState_Waiting" - and R="ko_at' rv ntfnptr" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and R="ko_at' ntfn ntfnptr" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1 [OF cmap_relation_ntfn]) @@ -429,18 +421,15 @@ lemma cancelAllSignals_ccorres: apply (simp add: cnotification_relation_def Let_def split: ntfn.split_asm) apply ceqv - apply (rule_tac A="invs' and ko_at' rv ntfnptr" - in ccorres_guard_imp2[where A'=UNIV]) + apply (rule_tac A="invs' and ko_at' ntfn ntfnptr" + in ccorres_guard_imp2[where A'=UNIV]) apply wpc - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric]) + apply (simp add: notification_state_defs ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) - apply (simp add: notification_state_defs ccorres_cond_iffs - dc_def[symmetric] Collect_True + apply (simp add: notification_state_defs ccorres_cond_iffs Collect_True del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -448,8 +437,8 @@ lemma cancelAllSignals_ccorres: apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule_tac r'=dc and xf'=xfdc in ccorres_split_nothrow) - apply (rule_tac P="ko_at' rv ntfnptr and invs'" - in ccorres_from_vcg[where P'=UNIV]) + apply (rule_tac P="ko_at' ntfn ntfnptr and invs'" + in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp apply (rule_tac x=ntfnptr in cmap_relationE1 [OF cmap_relation_ntfn], assumption) @@ -468,7 +457,7 @@ lemma cancelAllSignals_ccorres: subgoal by (simp add: cnotification_relation_def notification_state_defs Let_def) subgoal by simp apply (rule ceqv_refl) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply (rule ccorres_split_nothrow_novcg) apply (rule cancel_all_ccorres_helper) apply ceqv @@ -694,8 +683,8 @@ lemma doUnbindNotification_ccorres: (Call doUnbindNotification_'proc)" apply (cinit' lift: ntfnPtr_' tcbptr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr" and P'=UNIV - in ccorres_split_nothrow_novcg) + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV + in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: option_to_ptr_def option_to_0_def) @@ -714,7 +703,7 @@ lemma doUnbindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv", ((simp add: option_to_ctcb_ptr_def)+)[4]) + apply (case_tac "ntfnObj ntfn", ((simp add: option_to_ctcb_ptr_def)+)[4]) subgoal by (simp add: carch_state_relation_def global_ioport_bitmap_heap_update_tag_disj_simps 
fpu_null_state_heap_update_tag_disj_simps) @@ -727,7 +716,7 @@ lemma doUnbindNotification_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -779,7 +768,7 @@ lemma doUnbindNotification_ccorres': apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) apply (rule_tac P'="\" and P="\" - in threadSet_ccorres_lemma3[unfolded dc_def]) + in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule(1) rf_sr_tcb_update_no_queue2) @@ -814,9 +803,9 @@ lemma unbindNotification_ccorres: apply simp apply wpc apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (rule ccorres_cond_true) - apply (ctac (no_vcg) add: doUnbindNotification_ccorres[unfolded dc_def, simplified]) + apply (ctac (no_vcg) add: doUnbindNotification_ccorres[simplified]) apply (wp gbn_wp') apply vcg apply (clarsimp simp: option_to_ptr_def option_to_0_def pred_tcb_at'_def @@ -833,13 +822,13 @@ lemma unbindMaybeNotification_ccorres: apply (cinit lift: ntfnPtr_') apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) apply (rule ccorres_rhs_assoc2) - apply (rule_tac P="ntfnBoundTCB rv \ None \ - option_to_ctcb_ptr (ntfnBoundTCB rv) \ NULL" - in ccorres_gen_asm) + apply (rule_tac P="ntfnBoundTCB ntfn \ None \ + option_to_ctcb_ptr (ntfnBoundTCB ntfn) \ NULL" + in ccorres_gen_asm) apply (rule_tac xf'=boundTCB_' - and val="option_to_ctcb_ptr (ntfnBoundTCB rv)" - and R="ko_at' rv ntfnptr and valid_bound_tcb' (ntfnBoundTCB rv)" - in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) + and val="option_to_ctcb_ptr (ntfnBoundTCB ntfn)" + and R="ko_at' ntfn ntfnptr and valid_bound_tcb' (ntfnBoundTCB ntfn)" + in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg apply clarsimp apply (erule cmap_relationE1[OF cmap_relation_ntfn]) @@ -1029,8 +1018,7 @@ lemma deleteASIDPool_ccorres: apply (rule ccorres_gen_asm) apply (cinit lift: asid_base_' pool_' simp: whileAnno_def) apply (rule ccorres_assert) - apply (clarsimp simp: liftM_def dc_def[symmetric] fun_upd_def[symmetric] - when_def + apply (clarsimp simp: liftM_def fun_upd_def[symmetric] when_def simp del: Collect_const) apply (rule ccorres_Guard)+ apply (rule ccorres_pre_gets_x86KSASIDTable_ksArchState) @@ -1192,12 +1180,10 @@ lemma deleteASID_ccorres: apply (simp add: asid_high_bits_def) apply ceqv apply wpc - apply (simp add: ccorres_cond_iffs dc_def[symmetric] - Collect_False + apply (simp add: ccorres_cond_iffs Collect_False cong: call_ignore_cong) apply (rule ccorres_return_Skip) - apply (clarsimp simp: dc_def[symmetric] when_def - liftM_def + apply (clarsimp simp: when_def liftM_def cong: conj_cong call_ignore_cong) apply ccorres_rewrite apply (rule ccorres_rhs_assoc)+ @@ -1297,7 +1283,7 @@ lemma deleteASID_ccorres: lemma setObject_ccorres_lemma: fixes val :: "'a :: pspace_storable" shows - "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := ksPSpace s (ptr \ injectKO val) \, s') \ rf_sr},{}; + "\ \s. \ \ (Q s) c {s'. (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val) \, s') \ rf_sr},{}; \s s' val (val' :: 'a). \ ko_at' val' ptr s; (s, s') \ rf_sr \ \ s' \ Q s; \val :: 'a. 
updateObject val = updateObject_default val; @@ -1463,16 +1449,16 @@ lemma unmapPageTable_ccorres: apply (simp add: from_bool_0) apply ccorres_rewrite apply (clarsimp simp: throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply (simp add: from_bool_0) - apply (rule ccorres_liftE[simplified dc_def]) + apply (rule ccorres_liftE', simp) apply (ctac add: flushTable_ccorres) apply (csymbr, rename_tac invalidPDE) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def) apply (csymbr, rename_tac root) - apply (ctac add: invalidatePageStructureCacheASID_ccorres[simplified dc_def]) + apply (ctac add: invalidatePageStructureCacheASID_ccorres) apply wp apply (clarsimp simp add: guard_is_UNIV_def) apply wp @@ -1480,14 +1466,14 @@ lemma unmapPageTable_ccorres: apply (vcg exspec=flushTable_modifies) apply (clarsimp simp: guard_is_UNIV_def) apply (simp,ccorres_rewrite,simp add:throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply (clarsimp,wp) apply (rule_tac Q'="\_ s. invs' s \ page_table_at' ptPtr s" in hoare_post_imp_R) apply wp apply clarsimp apply (vcg exspec=lookupPDSlot_modifies) apply (simp,ccorres_rewrite,simp add:throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply wp apply vcg apply (auto simp add: asid_wf_def mask_def) @@ -1509,12 +1495,6 @@ lemma no_0_pml4_at'[elim!]: apply (drule spec[where x=0], clarsimp simp: bit_simps) done -lemma ccte_relation_ccap_relation: - "ccte_relation cte cte' \ ccap_relation (cteCap cte) (cte_C.cap_C cte')" - by (clarsimp simp: ccte_relation_def ccap_relation_def - cte_to_H_def map_option_Some_eq2 - c_valid_cte_def) - lemma isFinalCapability_ccorres: "ccorres ((=) \ from_bool) ret__unsigned_long_' (cte_wp_at' ((=) cte) slot and invs') @@ -1611,7 +1591,7 @@ lemma cteDeleteOne_ccorres: erule_tac t="ret__unsigned_longlong = scast cap_null_cap" and s="cteCap cte = NullCap" in ssubst) - apply (clarsimp simp only: when_def unless_def dc_def[symmetric]) + apply (clarsimp simp only: when_def unless_def) apply (rule ccorres_cond2[where R=\]) apply (clarsimp simp: Collect_const_mem) apply (rule ccorres_rhs_assoc)+ @@ -1622,12 +1602,11 @@ lemma cteDeleteOne_ccorres: apply (ctac(no_vcg) add: isFinalCapability_ccorres[where slot=slot]) apply (rule_tac A="invs' and cte_wp_at' ((=) cte) slot" in ccorres_guard_imp2[where A'=UNIV]) - apply (simp add: split_def dc_def[symmetric] - del: Collect_const) + apply (simp add: split_def del: Collect_const) apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg) add: finaliseCap_True_standin_ccorres) apply (rule ccorres_assert) - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (ctac add: emptySlot_ccorres) apply (simp add: pred_conj_def finaliseCapTrue_standin_simple_def) @@ -1663,7 +1642,7 @@ lemma deletingIRQHandler_ccorres: (UNIV \ {s. irq_opt_relation (Some irq) (irq_' s)}) [] (deletingIRQHandler irq) (Call deletingIRQHandler_'proc)" apply (cinit lift: irq_' cong: call_ignore_cong) - apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def dc_def[symmetric] + apply (clarsimp simp: irq_opt_relation_def ptr_add_assertion_def cong: call_ignore_cong ) apply (rule_tac r'="\rv rv'. 
rv' = Ptr rv" and xf'="slot_'" in ccorres_split_nothrow) @@ -1751,7 +1730,7 @@ lemma option_to_ctcb_ptr_not_0: done lemma update_tcb_map_to_tcb: - "map_to_tcbs (ksPSpace s(p \ KOTCB tcb)) + "map_to_tcbs ((ksPSpace s)(p \ KOTCB tcb)) = (map_to_tcbs (ksPSpace s))(p \ tcb)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) @@ -1791,7 +1770,7 @@ lemma sched_queue_relation_shift: lemma cendpoint_relation_udpate_arch: "\ cslift x p = Some tcb ; cendpoint_relation (cslift x) v v' \ - \ cendpoint_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cendpoint_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cendpoint_relation_def Let_def tcb_queue_relation'_def split: endpoint.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1802,7 +1781,7 @@ lemma cendpoint_relation_udpate_arch: lemma cnotification_relation_udpate_arch: "\ cslift x p = Some tcb ; cnotification_relation (cslift x) v v' \ - \ cnotification_relation (cslift x(p \ tcbArch_C_update f tcb)) v v'" + \ cnotification_relation ((cslift x)(p \ tcbArch_C_update f tcb)) v v'" apply (clarsimp simp: cnotification_relation_def Let_def tcb_queue_relation'_def split: notification.splits ntfn.splits) apply (subst ep_queue_relation_shift2; simp add: fun_eq_iff) @@ -1880,16 +1859,16 @@ lemma unmapPageDirectory_ccorres: apply (simp add: from_bool_0) apply ccorres_rewrite apply (clarsimp simp: throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply (simp add: from_bool_0) - apply (rule ccorres_liftE[simplified dc_def]) + apply (rule ccorres_liftE', simp) apply (ctac add: flushPD_ccorres) apply (csymbr, rename_tac invalidPDPTE) apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePDPTE_Basic_ccorres) apply (simp add: cpdpte_relation_def Let_def) apply (csymbr, rename_tac root) - apply (ctac add: invalidatePageStructureCacheASID_ccorres[simplified dc_def]) + apply (ctac add: invalidatePageStructureCacheASID_ccorres) apply wp apply (clarsimp simp add: guard_is_UNIV_def) apply wp @@ -1897,11 +1876,11 @@ lemma unmapPageDirectory_ccorres: apply (vcg exspec=flushPD_modifies) apply (clarsimp simp: guard_is_UNIV_def) apply (simp,ccorres_rewrite,simp add:throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply wpsimp apply (vcg exspec=lookupPDPTSlot_modifies) apply (simp,ccorres_rewrite,simp add:throwError_def) - apply (rule ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply wp apply vcg apply (auto simp add: asid_wf_def mask_def) @@ -1942,7 +1921,7 @@ lemma unmapPDPointerTable_ccorres: apply ccorres_rewrite apply (clarsimp simp: from_bool_0 isPDPointerTablePML4E_def split: pml4e.splits; clarsimp simp: throwError_def; - rule ccorres_return_void_C[simplified dc_def]) + rule ccorres_return_void_C) apply (clarsimp simp: isPDPointerTablePML4E_def liftE_def bind_assoc split: pml4e.split_asm) apply (ctac add: flushPDPT_ccorres) apply csymbr @@ -1950,7 +1929,7 @@ lemma unmapPDPointerTable_ccorres: apply (rule ccorres_split_nothrow_novcg_dc) apply (rule storePML4E_Basic_ccorres') apply (fastforce simp: cpml4e_relation_def) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply wp apply (fastforce simp: guard_is_UNIV_def) apply wp @@ -1958,7 +1937,7 @@ lemma unmapPDPointerTable_ccorres: apply vcg apply ccorres_rewrite apply (clarsimp simp: throwError_def) - apply (rule 
ccorres_return_void_C[simplified dc_def]) + apply (rule ccorres_return_void_C) apply (wpsimp wp: hoare_drop_imps) apply (vcg exspec=findVSpaceForASID_modifies) apply (auto simp: invs_arch_state' invs_no_0_obj' asid_wf_def mask_def typ_heap_simps @@ -2180,7 +2159,7 @@ lemma Mode_finaliseCap_ccorres_page_cap: dest!: x_less_2_0_1) lemma Arch_finaliseCap_ccorres: - notes dc_simp[simp del] Collect_const[simp del] + notes Collect_const[simp del] shows "ccorres (\rv rv'. ccap_relation (fst rv) (remainder_C rv') \ ccap_relation (snd rv) (finaliseCap_ret_C.cleanupInfo_C rv')) @@ -2406,7 +2385,7 @@ lemma fpuThreadDelete_ccorres: (invs' and tcb_at' thread) (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) hs (fpuThreadDelete thread) (Call fpuThreadDelete_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit lift: thread_') apply clarsimp apply (ctac (no_vcg) add: nativeThreadUsingFPU_ccorres) @@ -2423,7 +2402,6 @@ lemma prepareThreadDelete_ccorres: (invs' and tcb_at' thread) (UNIV \ {s. thread_' s = tcb_ptr_to_ctcb_ptr thread}) hs (prepareThreadDelete thread) (Call Arch_prepareThreadDelete_'proc)" - supply dc_simp[simp del] apply (cinit lift: thread_', rename_tac cthread) apply (ctac add: fpuThreadDelete_ccorres) apply fastforce @@ -2578,18 +2556,18 @@ lemma finaliseCap_ccorres: apply (rule ccorres_fail) apply (rule ccorres_add_return, rule ccorres_split_nothrow_novcg[where r'=dc and xf'=xfdc]) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ccorres_Cond_rhs) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply simp apply (rule ccorres_Cond_rhs) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rule ceqv_refl) apply (rule ccorres_from_vcg_throws[where P=\ and P'=UNIV]) diff --git a/proof/crefine/X64/Interrupt_C.thy b/proof/crefine/X64/Interrupt_C.thy index 9572bf16bb..f84f8f8825 100644 --- a/proof/crefine/X64/Interrupt_C.thy +++ b/proof/crefine/X64/Interrupt_C.thy @@ -75,7 +75,7 @@ proof - apply (rule ccorres_symb_exec_r) apply (ctac(no_vcg) add: cteDeleteOne_ccorres[where w="-1"]) apply (rule ccorres_call) - apply (rule cteInsert_ccorres[simplified dc_def]) + apply (rule cteInsert_ccorres) apply simp apply simp apply simp @@ -112,7 +112,7 @@ lemma invokeIRQHandler_ClearIRQHandler_ccorres: apply (simp add: ucast_up_ucast is_up) apply (ctac(no_vcg) add: getIRQSlot_ccorres[simplified]) apply (rule ccorres_symb_exec_r) - apply (ctac add: cteDeleteOne_ccorres[where w="-1",simplified dc_def]) + apply (ctac add: cteDeleteOne_ccorres[where w="-1"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) @@ -349,7 +349,7 @@ lemma invokeIRQControl_ccorres: (performIRQControl (IssueIRQHandler irq slot parent)) (Call invokeIRQControl_'proc)" by (clarsimp simp: performIRQControl_def liftE_def bind_assoc - intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def K_def, simplified]) + intro!: invokeIRQControl_expanded_ccorres[simplified liftE_def, simplified]) lemma isIRQActive_ccorres: "ccorres (\rv rv'. 
rv' = from_bool rv) ret__unsigned_long_' @@ -719,7 +719,7 @@ from assms show ?thesis apply (rule ccorres_Cond_rhs_Seq) apply ccorres_rewrite apply (auto split: invocation_label.split arch_invocation_label.split - intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def dc_def id_def] + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] simp: throwError_def invocationCatch_def syscall_error_to_H_cases invocation_eq_use_types)[1] apply clarsimp apply (rule ccorres_rhs_assoc2) @@ -739,13 +739,13 @@ from assms show ?thesis apply (erule ccorres_disj_division; clarsimp split: invocation_label.split simp: invocation_eq_use_types) apply (auto split: list.split - intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def dc_def id_def] + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] simp: throwError_def invocationCatch_def syscall_error_to_H_cases)[2] (* Insufficient extra caps *) apply (erule ccorres_disj_division; clarsimp split: invocation_label.split simp: invocation_eq_use_types) apply (auto split: list.split - intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def dc_def id_def] + intro: syscall_error_throwError_ccorres_n[simplified throwError_def o_def] simp: throwError_def invocationCatch_def syscall_error_to_H_cases)[2] (* Arguments OK *) apply ccorres_rewrite @@ -772,7 +772,7 @@ from assms show ?thesis word_sless_alt is_down sint_ucast_eq_uint word_le_not_less invocationCatch_use_injection_handler injection_handler_throwError syscall_error_to_H_cases - intro: syscall_error_throwError_ccorres_n[simplified id_def dc_def]) | + intro: syscall_error_throwError_ccorres_n) | ccorres_rewrite)+)[2] apply (erule ccorres_disj_division; clarsimp simp: invocation_eq_use_types) (* X64IRQIssueIRQHandlerIOAPIC *) @@ -792,7 +792,7 @@ from assms show ?thesis apply (simp add: injection_handler_whenE injection_handler_throwError) apply (rule ccorres_split_when_throwError_cond[where Q=\ and Q'=\]) apply clarsimp - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply csymbr apply (ctac add: ccorres_injection_handler_csum1 @@ -828,8 +828,7 @@ from assms show ?thesis where g="\_. 
injection_handler P Q >>=E R" for P Q R]) apply (clarsimp simp: injection_handler_returnOk) apply (simp only: bindE_K_bind) - apply (ctac add: ioapic_decode_map_pin_to_vector_ccorres - [simplified o_def id_def dc_def K_def]) + apply (ctac add: ioapic_decode_map_pin_to_vector_ccorres) apply ccorres_rewrite apply (simp add: ccorres_invocationCatch_Inr performInvocation_def returnOk_bind liftE_bindE bindE_assoc @@ -895,7 +894,7 @@ from assms show ?thesis apply (simp add: injection_handler_whenE injection_handler_throwError) apply (rule ccorres_split_when_throwError_cond[where Q=\ and Q'=\]) apply clarsimp - apply (rule syscall_error_throwError_ccorres_n[simplified id_def dc_def]) + apply (rule syscall_error_throwError_ccorres_n) apply (fastforce simp: syscall_error_to_H_cases) apply csymbr apply (ctac add: ccorres_injection_handler_csum1 @@ -931,7 +930,7 @@ from assms show ?thesis (* Handle the conditional checks on PCI bus/dev/func *) apply ((rule_tac Q=\ and Q'=\ in ccorres_split_when_throwError_cond, fastforce, - rule syscall_error_throwError_ccorres_n[simplified id_def dc_def], + rule syscall_error_throwError_ccorres_n, fastforce simp: syscall_error_to_H_cases)+)[3] apply ccorres_rewrite apply csymbr diff --git a/proof/crefine/X64/Invoke_C.thy b/proof/crefine/X64/Invoke_C.thy index 581d8dfd75..ac13823e21 100644 --- a/proof/crefine/X64/Invoke_C.thy +++ b/proof/crefine/X64/Invoke_C.thy @@ -65,10 +65,10 @@ lemma setDomain_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_return_Skip) apply (simp add: when_def) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (rule_tac R="\s. curThread = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply simp apply (wp hoare_drop_imps weak_sch_act_wf_lift_linear) @@ -76,13 +76,16 @@ lemma setDomain_ccorres: apply simp apply wp apply (rule_tac Q="\_. all_invs_but_sch_extra and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s)" in hoare_strengthen_post) + and (\s. curThread = ksCurThread s)" + in hoare_strengthen_post) apply (wp threadSet_all_invs_but_sch_extra) - apply (clarsimp simp:valid_pspace_valid_objs' st_tcb_at_def[symmetric] - sch_act_simple_def st_tcb_at'_def o_def weak_sch_act_wf_def split:if_splits) + apply (clarsimp simp: valid_pspace_valid_objs' st_tcb_at_def[symmetric] + sch_act_simple_def st_tcb_at'_def weak_sch_act_wf_def + split: if_splits) apply (simp add: guard_is_UNIV_def) apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple - and (\s. rv = ksCurThread s \ (\p. t \ set (ksReadyQueues s p)))" in hoare_strengthen_post) + and (\s. curThread = ksCurThread s \ (\p. 
t \ set (ksReadyQueues s p)))" + in hoare_strengthen_post) apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_not_queued tcbSchedDequeue_not_in_queue hoare_vcg_imp_lift hoare_vcg_all_lift) apply (clarsimp simp: invs'_def valid_pspace'_def valid_state'_def) @@ -381,7 +384,7 @@ lemma invokeCNodeRotate_ccorres: apply clarsimp apply (simp add: return_def) apply wp - apply (simp add: guard_is_UNIV_def dc_def xfdc_def) + apply (simp add: guard_is_UNIV_def) apply (clarsimp simp: valid_pspace'_def) apply (rule conjI, clarsimp) apply (clarsimp simp:cte_wp_at_ctes_of) @@ -626,9 +629,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const cong: call_ignore_cong) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc | csymbr)+ - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] - if_1_0_0 dc_def[symmetric] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) apply (simp add:if_P del: Collect_const) @@ -711,8 +712,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: Collect_const[symmetric] del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] - if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: list_case_helper injection_handler_returnOk @@ -739,8 +739,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError whenE_def - dc_def[symmetric]) + apply (simp add: injection_handler_throwError whenE_def) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -816,8 +815,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: whenE_def injection_handler_returnOk - invocationCatch_def injection_handler_throwError - dc_def[symmetric]) + invocationCatch_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -896,7 +894,7 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: flip: Collect_const cong: call_ignore_cong) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: injection_handler_throwError dc_def[symmetric] if_P) + apply (simp add: injection_handler_throwError if_P) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: if_not_P del: Collect_const) @@ -915,8 +913,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric] numeral_eqs) + apply (simp add: whenE_def injection_handler_throwError numeral_eqs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1015,13 +1012,11 @@ lemma decodeCNodeInvocation_ccorres: apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_returnOk bindE_assoc - injection_bindE[OF refl refl] split_def - dc_def[symmetric]) + 
injection_bindE[OF refl refl] split_def) apply (rule ccorres_split_throws) apply (rule ccorres_rhs_assoc)+ apply (ctac add: ccorres_injection_handler_csum1 [OF ensureEmptySlot_ccorres]) - apply (simp add: ccorres_invocationCatch_Inr performInvocation_def - dc_def[symmetric] bindE_assoc) + apply (simp add: ccorres_invocationCatch_Inr performInvocation_def bindE_assoc) apply (ctac add: setThreadState_ccorres) apply (ctac(no_vcg) add: invokeCNodeSaveCaller_ccorres) apply (rule ccorres_alternative2) @@ -1030,7 +1025,7 @@ lemma decodeCNodeInvocation_ccorres: apply (wp sts_valid_pspace_hangers)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_split_throws) apply (rule ccorres_return_C_errorE, simp+)[1] apply vcg @@ -1060,8 +1055,7 @@ lemma decodeCNodeInvocation_ccorres: in ccorres_gen_asm2) apply (simp del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: unlessE_def whenE_def injection_handler_throwError - dc_def[symmetric] from_bool_0) + apply (simp add: unlessE_def whenE_def injection_handler_throwError from_bool_0) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: unlessE_def whenE_def injection_handler_returnOk @@ -1105,12 +1099,10 @@ lemma decodeCNodeInvocation_ccorres: apply (simp add: throwError_def return_def exception_defs syscall_error_rel_def syscall_error_to_H_cases) apply clarsimp - apply (simp add: invocationCatch_use_injection_handler - [symmetric, unfolded o_def] + apply (simp add: invocationCatch_use_injection_handler[symmetric] del: Collect_const) apply csymbr apply (simp add: interpret_excaps_test_null excaps_map_def - if_1_0_0 dc_def[symmetric] del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: throwError_bind invocationCatch_def) @@ -1170,8 +1162,7 @@ lemma decodeCNodeInvocation_ccorres: del: Collect_const) apply csymbr apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1233,8 +1224,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def[where P=False] injection_handler_returnOk @@ -1242,8 +1232,7 @@ lemma decodeCNodeInvocation_ccorres: apply csymbr apply (simp add: cap_get_tag_NullCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def injection_handler_throwError - dc_def[symmetric]) + apply (simp add: whenE_def injection_handler_throwError) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) apply (simp add: whenE_def injection_handler_returnOk @@ -1257,7 +1246,7 @@ lemma decodeCNodeInvocation_ccorres: apply (rule ccorres_return_C_errorE, simp+)[1] apply wp apply (vcg exspec=invokeCNodeRotate_modifies) - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (simp add: Collect_const_mem) apply (vcg exspec=setThreadState_modifies) apply (simp add: Collect_const_mem) @@ -1321,16 
+1310,16 @@ lemma decodeCNodeInvocation_ccorres: apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (vcg exspec=getSyscallArg_modifies) apply wp @@ -1345,7 +1334,7 @@ lemma decodeCNodeInvocation_ccorres: apply vcg apply simp apply (wp injection_wp_E[OF refl] hoare_vcg_const_imp_lift_R - hoare_vcg_all_lift_R lsfco_cte_at' static_imp_wp + hoare_vcg_all_lift_R lsfco_cte_at' hoare_weak_lift_imp | simp add: hasCancelSendRights_not_Null ctes_of_valid_strengthen cong: conj_cong | wp (once) hoare_drop_imps)+ @@ -1464,7 +1453,7 @@ lemma seL4_MessageInfo_lift_def2: lemma globals_update_id: "globals_update (t_hrs_'_update (hrs_htd_update id)) x = x" - by (simp add:id_def hrs_htd_update_def) + by (simp add: hrs_htd_update_def) lemma getObjectSize_spec: "\s. \\\s. \t \ of_nat (length (enum::object_type list) - 1)\ Call getObjectSize_'proc @@ -1521,7 +1510,7 @@ shows "\ctes_of (s::kernel_state) (ptr_val p) = Some cte; is_aligned ptr bits; bits < word_bits; {ptr..ptr + 2 ^ bits - 1} \ {ptr_val p..ptr_val p + mask cteSizeBits} = {}; ((clift hp) :: (cte_C ptr \ cte_C)) p = Some to\ \ (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: (cte_C ptr \ cte_C)) p = Some to" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def restrict_map_def split:if_splits) + apply (clarsimp simp:lift_t_def lift_typ_heap_def restrict_map_def split:if_splits) apply (intro conjI impI) apply (case_tac hp) apply (clarsimp simp:typ_clear_region_def hrs_htd_update_def) @@ -1850,8 +1839,7 @@ lemma resetUntypedCap_ccorres: apply (rule ccorres_Guard_Seq[where S=UNIV])? 
apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow) - apply (rule_tac idx="capFreeIndex (cteCap cte)" - in deleteObjects_ccorres[where p=slot, unfolded o_def]) + apply (rule_tac idx="capFreeIndex (cteCap cte)" in deleteObjects_ccorres[where p=slot]) apply ceqv apply clarsimp apply (simp only: ccorres_seq_cond_raise) @@ -2825,7 +2813,6 @@ lemma Arch_isFrameType_spec: apply (auto simp: object_type_from_H_def ) done - lemma decodeUntypedInvocation_ccorres_helper: notes TripleSuc[simp] notes valid_untyped_inv_wcap'.simps[simp del] tl_drop_1[simp] @@ -3005,8 +2992,8 @@ lemma decodeUntypedInvocation_ccorres_helper: [OF lookupTargetSlot_ccorres, unfolded lookupTargetSlot_def]) apply (simp add: injection_liftE[OF refl]) - apply (simp add: liftE_liftM o_def split_def withoutFailure_def - hd_drop_conv_nth2 numeral_eqs[symmetric]) + apply (simp add: liftE_liftM split_def hd_drop_conv_nth2 + cong: ccorres_all_cong) apply (rule ccorres_nohs) apply (rule ccorres_getSlotCap_cte_at) apply (rule ccorres_move_c_guard_cte) @@ -3229,8 +3216,7 @@ lemma decodeUntypedInvocation_ccorres_helper: performInvocation_def liftE_bindE bind_assoc) apply (ctac add: setThreadState_ccorres) apply (rule ccorres_trim_returnE, (simp (no_asm))+) - apply (simp (no_asm) add: o_def dc_def[symmetric] bindE_assoc - id_def[symmetric] bind_bindE_assoc) + apply (simp (no_asm) add: bindE_assoc bind_bindE_assoc) apply (rule ccorres_seq_skip'[THEN iffD1]) apply (ctac(no_vcg) add: invokeUntyped_Retype_ccorres[where start = "args!4"]) apply (rule ccorres_alternative2) @@ -3279,7 +3265,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply vcg apply (rule ccorres_guard_imp [where Q =\ and Q' = UNIV,rotated],assumption+) - apply (simp add: o_def) + apply simp apply (simp add: liftE_validE) apply (rule checkFreeIndex_wp) apply (clarsimp simp: ccap_relation_untyped_CL_simps shiftL_nat cap_get_tag_isCap @@ -3346,7 +3332,7 @@ lemma decodeUntypedInvocation_ccorres_helper: apply (rule validE_R_validE) apply (wp injection_wp_E[OF refl]) apply clarsimp - apply (simp add: ccHoarePost_def xfdc_def) + apply (simp add: ccHoarePost_def) apply (simp only: whileAnno_def[where I=UNIV and V=UNIV, symmetric]) apply (rule_tac V=UNIV in HoarePartial.reannotateWhileNoGuard) @@ -3476,7 +3462,7 @@ shows apply (rule ccorres_guard_imp2) apply (rule monadic_rewrite_ccorres_assemble) apply (rule_tac isBlocking=isBlocking and isCall=isCall and buffer=buffer - in decodeUntypedInvocation_ccorres_helper[unfolded K_def]) + in decodeUntypedInvocation_ccorres_helper) apply assumption apply (rule monadic_rewrite_trans[rotated]) apply (rule monadic_rewrite_bind_head) diff --git a/proof/crefine/X64/IpcCancel_C.thy b/proof/crefine/X64/IpcCancel_C.thy index 0b49ce0650..cca60bcadd 100644 --- a/proof/crefine/X64/IpcCancel_C.thy +++ b/proof/crefine/X64/IpcCancel_C.thy @@ -220,7 +220,7 @@ lemma cancelSignal_ccorres_helper: apply (drule (2) ntfn_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) - apply (frule null_ep_queue [simplified Fun.comp_def]) + apply (frule null_ep_queue [simplified comp_def]) apply (intro impI conjI allI) \ \empty case\ apply clarsimp @@ -1054,7 +1054,7 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) + apply simp apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1176,7 +1176,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded 
dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -1413,7 +1413,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1689,7 +1688,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -1821,7 +1820,6 @@ proof - apply (rule ccorres_split_nothrow_novcg_dc) prefer 2 apply (rule ccorres_move_c_guard_tcb) - apply (simp only: dc_def[symmetric]) apply ctac prefer 2 apply (wp, clarsimp, wp+) @@ -1927,7 +1925,7 @@ proof - apply simp apply (wp threadGet_wp) apply vcg - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply simp apply (wp threadGet_wp) apply vcg @@ -2320,11 +2318,6 @@ lemma getCurDomain_maxDom_ccorres_dom_': rf_sr_ksCurDomain) done -lemma rf_sr_cscheduler_action_relation: - "(s, s') \ rf_sr - \ cscheduler_action_relation (ksSchedulerAction s) (ksSchedulerAction_' (globals s'))" - by (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - lemma threadGet_get_obj_at'_has_domain: "\ tcb_at' t \ threadGet tcbDomain t \\rv. obj_at' (\tcb. rv = tcbDomain tcb) t\" by (wp threadGet_obj_at') (simp add: obj_at'_def) @@ -2341,7 +2334,6 @@ lemma possibleSwitchTo_ccorres: (Call possibleSwitchTo_'proc)" supply if_split [split del] supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) supply from_bool_eq_if[simp] from_bool_eq_if'[simp] from_bool_0[simp] @@ -2366,7 +2358,7 @@ lemma possibleSwitchTo_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule_tac R="\s. sact = ksSchedulerAction s \ weak_sch_act_wf (ksSchedulerAction s) s" in ccorres_cond) - apply (fastforce dest!: rf_sr_cscheduler_action_relation pred_tcb_at' tcb_at_not_NULL + apply (fastforce dest!: rf_sr_sched_action_relation pred_tcb_at' tcb_at_not_NULL simp: cscheduler_action_relation_def weak_sch_act_wf_def split: scheduler_action.splits) apply (ctac add: rescheduleRequired_ccorres) @@ -2690,7 +2682,7 @@ lemma cancelSignal_ccorres [corres]: | drule_tac x=thread in bspec)+ (* FIXME: MOVE *) -lemma ccorres_pre_getEndpoint [corres_pre]: +lemma ccorres_pre_getEndpoint [ccorres_pre]: assumes cc: "\rv. ccorres r xf (P rv) (P' rv) hs (f rv) c" shows "ccorres r xf (ep_at' p and (\s. \ep. ko_at' ep p s \ P ep s)) @@ -2832,8 +2824,8 @@ lemma cpspace_relation_ep_update_an_ep: and pal: "pspace_aligned' s" "pspace_distinct' s" and others: "\epptr' ep'. 
\ ko_at' ep' epptr' s; epptr' \ epptr; ep' \ IdleEP \ \ set (epQueue ep') \ (ctcb_ptr_to_tcb_ptr ` S) = {}" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using cp koat pal rel unfolding cmap_relation_def apply - apply (clarsimp elim!: obj_atE' simp: map_comp_update projectKO_opts_defs) @@ -2855,8 +2847,8 @@ lemma cpspace_relation_ep_update_ep: and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" using invs apply (intro cpspace_relation_ep_update_an_ep[OF koat cp rel mpeq]) apply clarsimp+ @@ -2868,15 +2860,15 @@ lemma cpspace_relation_ep_update_ep': fixes ep :: "endpoint" and ep' :: "endpoint" and epptr :: "machine_word" and s :: "kernel_state" defines "qs \ if (isSendEP ep' \ isRecvEP ep') then set (epQueue ep') else {}" - defines "s' \ s\ksPSpace := ksPSpace s(epptr \ KOEndpoint ep')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(epptr \ KOEndpoint ep')\" assumes koat: "ko_at' ep epptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_eps (ksPSpace s)) (cslift t) Ptr (cendpoint_relation mp)" and srs: "sym_refs (state_refs_of' s')" and rel: "cendpoint_relation mp' ep' endpoint" and mpeq: "(mp' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (mp |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" + shows "cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(Ptr epptr \ endpoint)) Ptr (cendpoint_relation mp')" proof - from koat have koat': "ko_at' ep' epptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -2955,7 +2947,7 @@ lemma cancelIPC_ccorres_helper: apply (rule allI) apply (rule conseqPre) apply vcg - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ep_blocked_in_queueD) apply (frule (1) ko_at_valid_ep' [OF _ invs_valid_objs']) apply (elim conjE) @@ -2973,7 +2965,7 @@ lemma cancelIPC_ccorres_helper: apply assumption+ apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) - apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split simp del: comp_def) + apply (clarsimp simp: typ_heap_simps cong: imp_cong split del: if_split) apply (frule null_ep_queue [simplified comp_def] null_ep_queue) apply (intro impI conjI allI) \ \empty case\ @@ -3129,7 +3121,6 @@ lemma cancelIPC_ccorres1: apply wpc \ \BlockedOnReceive\ apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs cong: call_ignore_cong) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply csymbr apply csymbr @@ -3158,7 +3149,6 @@ lemma cancelIPC_ccorres1: apply (simp add: "StrictC'_thread_state_defs" ccorres_cond_iffs Collect_False Collect_True word_sle_def cong: call_ignore_cong del: Collect_const) - apply (fold dc_def) apply (rule ccorres_rhs_assoc)+ apply 
csymbr apply csymbr @@ -3198,14 +3188,12 @@ lemma cancelIPC_ccorres1: apply (rule ccorres_Cond_rhs) apply (simp add: nullPointer_def when_def) apply (rule ccorres_symb_exec_l[OF _ _ _ empty_fail_stateAssert]) - apply (simp only: dc_def[symmetric]) apply (rule ccorres_symb_exec_r) apply (ctac add: cteDeleteOne_ccorres[where w1="scast cap_reply_cap"]) apply vcg apply (rule conseqPre, vcg, clarsimp simp: rf_sr_def gs_set_assn_Delete_cstate_relation[unfolded o_def]) apply (wp | simp)+ - apply (simp add: when_def nullPointer_def dc_def[symmetric]) apply (rule ccorres_return_Skip) apply (simp add: guard_is_UNIV_def ghost_assertion_data_get_def ghost_assertion_data_set_def cap_tag_defs) @@ -3218,7 +3206,8 @@ lemma cancelIPC_ccorres1: apply (clarsimp simp add: guard_is_UNIV_def tcbReplySlot_def Kernel_C.tcbReply_def tcbCNodeEntries_def) \ \BlockedOnNotification\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong) apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg)) apply clarsimp @@ -3227,10 +3216,12 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Running, Inactive, and Idle\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip)+ \ \BlockedOnSend\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong) + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong) \ \clag\ apply (rule ccorres_rhs_assoc)+ apply csymbr @@ -3256,7 +3247,8 @@ lemma cancelIPC_ccorres1: apply (rule conseqPre, vcg) apply clarsimp \ \Restart\ - apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs dc_def [symmetric] cong: call_ignore_cong, + apply (simp add: word_sle_def "StrictC'_thread_state_defs" ccorres_cond_iffs + cong: call_ignore_cong, rule ccorres_return_Skip) \ \Post wp proofs\ apply vcg diff --git a/proof/crefine/X64/Ipc_C.thy b/proof/crefine/X64/Ipc_C.thy index 0a69ed15c3..2adbeff243 100644 --- a/proof/crefine/X64/Ipc_C.thy +++ b/proof/crefine/X64/Ipc_C.thy @@ -724,7 +724,7 @@ begin (* FIXME: move *) lemma ccorres_merge_return: - "ccorres (\a c. r (f a) c) xf P P' hs H C \ + "ccorres (r \ f) xf P P' hs H C \ ccorres r xf P P' hs (do x \ H; return (f x) od) C" by (rule ccorres_return_into_rel) @@ -1597,53 +1597,54 @@ proof - apply ceqv apply (rule ccorres_Cond_rhs) apply (simp del: Collect_const) - apply (rule ccorres_rel_imp[where r = "\rv rv'. True", simplified]) - apply (rule_tac F="\_. obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) X64_H.syscallMessage = msg) - sender and valid_pspace' - and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" - in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) - apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters - option_to_0_def liftM_def[symmetric] - split: option.split_asm) - apply (rule ccorres_guard_imp2) - apply (rule_tac t=sender and r="X64_H.syscallMessage ! (n + unat n_msgRegisters)" - in ccorres_add_getRegister) - apply (ctac(no_vcg)) - apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" - in ccorres_cross_over_guard) - apply (rule ccorres_move_array_assertion_ipc_buffer - | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ - apply (simp add: storeWordUser_def) - apply (rule ccorres_pre_stateAssert) - apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) - apply (simp add: pred_conj_def) - apply (wp user_getreg_rv) - apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def - syscallMessage_ccorres msgRegisters_ccorres - unat_add_lem[THEN iffD1] unat_of_nat64 - word_bits_def word_size_def) - apply (simp only:field_simps imp_ex imp_conjL) - apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def - pointerInUserData_h_t_valid - atcbContextGet_def - projectKOs objBits_simps word_less_nat_alt - unat_add_lem[THEN iffD1] unat_of_nat) - apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def - MessageID_Syscall_def - msg_align_bits valid_ipc_buffer_ptr'_def) - apply (erule aligned_add_aligned) - apply (rule aligned_add_aligned[where n=3]) - apply (simp add: is_aligned_def) - apply (rule is_aligned_mult_triv2 [where n=3, simplified]) - apply (simp add: wb_gt_2)+ - apply (simp add: n_msgRegisters_def) - apply (vcg exspec=getRegister_modifies) - apply simp - apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) - apply (rule hoare_pre) - apply (wp hoare_case_option_wp | wpc)+ - apply clarsimp - apply (simp add: n_msgRegisters_def word_bits_def) + apply (rule ccorres_rel_imp) + apply (rule_tac F="\_. obj_at' (\tcb. map ((user_regs o atcbContext o tcbArch) tcb) X64_H.syscallMessage = msg) + sender and valid_pspace' + and (case recvBuffer of Some x \ valid_ipc_buffer_ptr' x | None \ \)" + in ccorres_mapM_x_while'[where i="unat n_msgRegisters"]) + apply (clarsimp simp: setMR_def n_msgRegisters_def length_msgRegisters + option_to_0_def liftM_def[symmetric] + split: option.split_asm) + apply (rule ccorres_guard_imp2) + apply (rule_tac t=sender and r="X64_H.syscallMessage ! (n + unat n_msgRegisters)" + in ccorres_add_getRegister) + apply (ctac(no_vcg)) + apply (rule_tac P="\s. rv = msg ! 
(n + unat n_msgRegisters)" + in ccorres_cross_over_guard) + apply (rule ccorres_move_array_assertion_ipc_buffer + | (rule ccorres_flip_Guard, rule ccorres_move_array_assertion_ipc_buffer))+ + apply (simp add: storeWordUser_def) + apply (rule ccorres_pre_stateAssert) + apply (ctac add: storeWord_ccorres[unfolded fun_app_def]) + apply (simp add: pred_conj_def) + apply (wp user_getreg_rv) + apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def + syscallMessage_ccorres msgRegisters_ccorres + unat_add_lem[THEN iffD1] unat_of_nat64 + word_bits_def word_size_def) + apply (simp only:field_simps imp_ex imp_conjL) + apply (clarsimp simp: pointerInUserData_c_guard obj_at'_def + pointerInUserData_h_t_valid + atcbContextGet_def + projectKOs objBits_simps word_less_nat_alt + unat_add_lem[THEN iffD1] unat_of_nat) + apply (clarsimp simp: pointerInUserData_h_t_valid rf_sr_def + MessageID_Syscall_def + msg_align_bits valid_ipc_buffer_ptr'_def) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n=3]) + apply (simp add: is_aligned_def) + apply (rule is_aligned_mult_triv2 [where n=3, simplified]) + apply (simp add: wb_gt_2)+ + apply (simp add: n_msgRegisters_def) + apply (vcg exspec=getRegister_modifies) + apply simp + apply (simp add: setMR_def n_msgRegisters_def length_msgRegisters) + apply (rule hoare_pre) + apply (wp hoare_case_option_wp | wpc)+ + apply clarsimp + apply (simp add: n_msgRegisters_def word_bits_def) + apply simp apply (simp add: n_msgRegisters_def) apply (frule (1) option_to_0_imp) apply (subst drop_zip) @@ -1651,7 +1652,7 @@ proof - apply (clarsimp simp: n_msgRegisters_def numeral_eqs mapM_cong[OF msg_aux, simplified numeral_eqs]) apply (subst mapM_x_return_gen[where w2="()"]) - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp) apply (rule hoare_impI) apply (wp mapM_x_wp_inv setMR_atcbContext_obj_at[simplified atcbContextGet_def, simplified] @@ -1741,7 +1742,7 @@ proof - split: list.split_asm) apply (simp split: list.split) apply (wp setMR_tcbFault_obj_at asUser_inv[OF getRestartPC_inv] - hoare_case_option_wp static_imp_wp + hoare_case_option_wp hoare_weak_lift_imp | simp add: option_to_ptr_def guard_is_UNIVI seL4_VMFault_PrefetchFault_def seL4_VMFault_Addr_def @@ -2043,7 +2044,7 @@ lemma doFaultTransfer_ccorres [corres]: apply ceqv apply csymbr apply (ctac (no_vcg, c_lines 2) add: setMessageInfo_ccorres) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply wp apply (simp add: badgeRegister_def X64.badgeRegister_def X64.capRegister_def Kernel_C.badgeRegister_def "StrictC'_register_defs") @@ -2081,7 +2082,7 @@ lemma unifyFailure_ccorres: assumes corr_ac: "ccorres (f \ r) xf P P' hs a c" shows "ccorres ((\_. dc) \ r) xf P P' hs (unifyFailure a) c" using corr_ac - apply (simp add: unifyFailure_def rethrowFailure_def const_def o_def + apply (simp add: unifyFailure_def rethrowFailure_def const_def handleE'_def throwError_def) apply (clarsimp simp: ccorres_underlying_def bind_def split_def return_def split: xstate.splits sum.splits) @@ -3123,10 +3124,11 @@ lemma ccorres_sequenceE_while': Basic (\s. i_'_update (\_. 
i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp @@ -3180,9 +3182,10 @@ proof - apply csymbr apply csymbr apply (rename_tac "lngth") - apply (simp add: mi_from_H_def mapME_def del: Collect_const cong: bind_apply_cong) + apply (unfold mapME_def)[1] + apply (simp add: mi_from_H_def del: Collect_const) apply (rule ccorres_symb_exec_l) - apply (rule_tac P="length rv = unat word2" in ccorres_gen_asm) + apply (rule_tac P="length xs = unat word2" in ccorres_gen_asm) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_add_returnOk2, rule ccorres_splitE_novcg) @@ -3191,7 +3194,7 @@ proof - and Q="UNIV" and F="\n s. valid_pspace' s \ tcb_at' thread s \ (case buffer of Some x \ valid_ipc_buffer_ptr' x | _ \ \) s \ - (\m < length rv. user_word_at (rv ! m) + (\m < length xs. user_word_at (xs ! m) (x2 + (of_nat m + (msgMaxLength + 2)) * 8) s)" in ccorres_sequenceE_while') apply (simp add: split_def) @@ -3201,7 +3204,7 @@ proof - apply (rule_tac xf'=cptr_' in ccorres_abstract, ceqv) apply (ctac add: capFaultOnFailure_ccorres [OF lookupSlotForThread_ccorres']) - apply (rule_tac P="is_aligned rva 5" in ccorres_gen_asm) + apply (rule_tac P="is_aligned rv 5" in ccorres_gen_asm) apply (simp add: ccorres_cond_iffs liftE_bindE) apply (rule ccorres_symb_exec_l [OF _ _ _ empty_fail_getSlotCap]) apply (rule_tac P'="UNIV \ {s. excaps_map ys @@ -3222,7 +3225,7 @@ proof - apply (clarsimp simp: ccorres_cond_iffs) apply (rule_tac P= \ and P'="{x. errstate x= lu_ret___struct_lookupSlot_raw_ret_C \ - rv' = (rv ! length ys)}" + rv' = (xs ! length ys)}" in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: throwError_def return_def) @@ -3263,9 +3266,8 @@ proof - apply ceqv apply (simp del: Collect_const) apply (rule_tac P'="{s. snd rv'=?curr s}" - and P="\s. length rva = length rv - \ (\x \ set rva. snd x \ 0)" - in ccorres_from_vcg_throws) + and P="\s. length rv = length xs \ (\x \ set rv. snd x \ 0)" + in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: returnOk_def return_def seL4_MsgExtraCapBits_def) @@ -3359,7 +3361,7 @@ proof - apply (cinit lift: sender_' receiver_' sendBuffer_' receiveBuffer_' canGrant_' badge_' endpoint_' cong: call_ignore_cong) - apply (clarsimp cong: call_ignore_cong simp del: dc_simp) + apply (clarsimp cong: call_ignore_cong) apply (ctac(c_lines 2, no_vcg) add: getMessageInfo_ccorres') apply (rule_tac xf'="\s. current_extra_caps_' (globals s)" and r'="\c c'. interpret_excaps c' = excaps_map c" @@ -3405,7 +3407,7 @@ proof - apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: seL4_MessageInfo_lift_def message_info_to_H_def mask_def msgLengthBits_def word_bw_assocs) - apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] static_imp_wp + apply (wp getMessageInfo_le3 getMessageInfo_msgLength[unfolded K_def] hoare_weak_lift_imp | simp)+ apply (auto simp: excaps_in_mem_def valid_ipc_buffer_ptr'_def option_to_0_def option_to_ptr_def @@ -3468,7 +3470,6 @@ lemma replyFromKernel_error_ccorres [corres]: apply ((rule ccorres_Guard_Seq)+)? 
apply csymbr apply (rule ccorres_abstract_cleanup) - apply (fold dc_def)[1] apply (rule setMessageInfo_ccorres) apply wp apply (simp add: Collect_const_mem) @@ -3537,12 +3538,10 @@ lemma doIPCTransfer_ccorres [corres]: apply simp_all[3] apply ceqv apply csymbr - apply (fold dc_def)[1] apply ctac apply (wp lookupIPCBuffer_not_Some_0 lookupIPCBuffer_aligned) apply (clarsimp simp: seL4_Fault_NullFault_def ccorres_cond_iffs fault_to_fault_tag_nonzero) - apply (fold dc_def)[1] apply ctac apply (clarsimp simp: guard_is_UNIV_def option_to_ptr_def split: option.splits) apply (rule_tac Q="\rv. valid_pspace' and cur_tcb' and tcb_at' sender @@ -3604,7 +3603,7 @@ proof - apply (rule ccorres_rhs_assoc2) apply (simp add: MessageID_Exception_def) apply ccorres_rewrite - apply (subst bind_return_unit) + apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_zipWithM_x_while) apply clarsimp @@ -3657,7 +3656,7 @@ proof - n_msgRegisters_def of_nat_less_iff) apply ccorres_rewrite - apply (rule ccorres_return_Skip[simplified dc_def]) + apply (rule ccorres_return_Skip) apply (wp mapM_wp') apply clarsimp+ apply (clarsimp simp: guard_is_UNIV_def message_info_to_H_def @@ -3813,7 +3812,6 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (subst aligned_add_aligned, assumption) apply (rule is_aligned_mult_triv2[where n=3, simplified]) apply (simp add: msg_align_bits) - apply (simp add: of_nat_unat[simplified comp_def]) apply (simp only: n_msgRegisters_def) apply (clarsimp simp: n_syscallMessage_def n_msgRegisters_def word_unat.Rep_inverse[of "scast _ :: 'a word"] @@ -3852,8 +3850,8 @@ lemma copyMRsFaultReply_ccorres_syscall: apply simp apply (subst option.split[symmetric,where P=id, simplified]) apply (rule valid_drop_case) - apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified K_def] - lookupIPCBuffer_not_Some_0[simplified K_def]) + apply (wp hoare_drop_imps hoare_vcg_all_lift lookupIPCBuffer_aligned[simplified] + lookupIPCBuffer_not_Some_0[simplified]) apply (simp add: length_syscallMessage length_msgRegisters n_syscallMessage_def @@ -3865,7 +3863,7 @@ lemma copyMRsFaultReply_ccorres_syscall: apply (rule ccorres_guard_imp) apply (rule ccorres_symb_exec_l) apply (case_tac rva ; clarsimp) - apply (rule ccorres_return_Skip[simplified dc_def])+ + apply (rule ccorres_return_Skip)+ apply (wp mapM_x_wp_inv user_getreg_inv' | clarsimp simp: zipWithM_x_mapM_x split: prod.split)+ apply (cases "4 < len") @@ -3955,7 +3953,7 @@ lemma handleFaultReply_ccorres [corres]: apply (unfold K_def, rule ccorres_gen_asm) apply (rule monadic_rewrite_ccorres_assemble_nodrop[OF _ handleFaultReply',rotated], simp) apply (cinit lift: sender_' receiver_' simp: whileAnno_def) - apply (clarsimp simp del: dc_simp) + apply clarsimp apply (ctac(c_lines 2) add: getMessageInfo_ccorres') apply (rename_tac tag tag') apply csymbr @@ -4001,7 +3999,7 @@ lemma handleFaultReply_ccorres [corres]: split del: if_split) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) apply (subst take_min_len[symmetric,where n="unat (msgLength _)"]) - apply (fold bind_assoc id_def) + apply (fold bind_assoc) apply (ctac add: copyMRsFaultReply_ccorres_syscall[simplified bind_assoc[symmetric]]) apply (ctac add: ccorres_return_C) apply wp @@ -4094,7 +4092,7 @@ lemma cteDeleteOne_tcbFault: apply (wp emptySlot_tcbFault cancelAllIPC_tcbFault getCTE_wp' cancelAllSignals_tcbFault unbindNotification_tcbFault isFinalCapability_inv unbindMaybeNotification_tcbFault - static_imp_wp + hoare_weak_lift_imp | wpc 
| simp add: Let_def)+ apply (clarsimp split: if_split) done @@ -4217,7 +4215,6 @@ proof - apply csymbr apply wpc apply (clarsimp simp: ccorres_cond_iffs split del: if_split) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac(no_vcg)) apply (rule ccorres_symb_exec_r) @@ -4241,7 +4238,6 @@ proof - fault_to_fault_tag_nonzero split del: if_split) apply (rule ccorres_rhs_assoc)+ - apply (fold dc_def)[1] apply (rule ccorres_symb_exec_r) apply (ctac (no_vcg) add: cteDeleteOne_ccorres[where w="scast cap_reply_cap"]) apply (rule_tac A'=UNIV in stronger_ccorres_guard_imp) @@ -4271,10 +4267,9 @@ proof - apply (simp only: K_bind_def) apply (ctac add: possibleSwitchTo_ccorres) apply (wp sts_running_valid_queues setThreadState_st_tcb | simp)+ - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres_valid_queues'_simple) apply wp - apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' static_imp_wp + apply ((wp threadSet_valid_queues threadSet_sch_act threadSet_valid_queues' hoare_weak_lift_imp threadSet_valid_objs' threadSet_weak_sch_act_wf | simp add: valid_tcb_state'_def)+)[1] apply (clarsimp simp: guard_is_UNIV_def ThreadState_Restart_def @@ -4343,8 +4338,7 @@ lemma setupCallerCap_ccorres [corres]: apply (frule_tac p=sender in is_aligned_tcb_ptr_to_ctcb_ptr) apply (cinit lift: sender_' receiver_' canGrant_') apply (clarsimp simp: word_sle_def - tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]] - , fold dc_def)[1] + tcb_cnode_index_defs[THEN ptr_add_assertion_positive[OF ptr_add_assertion_positive_helper]]) apply ccorres_remove_UNIV_guard apply (ctac(no_vcg)) apply (rule ccorres_move_array_assertion_tcb_ctes) @@ -4365,7 +4359,7 @@ lemma setupCallerCap_ccorres [corres]: apply (rule ccorres_move_c_guard_cte) apply (ctac(no_vcg)) apply (rule ccorres_assert) - apply (simp only: ccorres_seq_skip dc_def[symmetric]) + apply (simp only: ccorres_seq_skip) apply csymbr apply (ctac add: cteInsert_ccorres) apply simp @@ -4420,7 +4414,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -4441,7 +4435,7 @@ lemma sendIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -4810,7 +4804,7 @@ lemma sendIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -4826,12 +4820,12 @@ lemma sendIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac 
"ko_at' (SendEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (SendEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (SendEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5029,12 +5023,9 @@ lemma sendIPC_ccorres [corres]: apply (clarsimp simp: disj_imp[symmetric] split del: if_split) apply (wpc ; clarsimp) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setupCallerCap_ccorres) apply ccorres_rewrite - apply (fold dc_def)[1] apply (ctac add: setThreadState_ccorres) - apply (fold dc_def)[1] apply (rule ccorres_return_Skip) apply (wpsimp wp: hoare_drop_imps hoare_vcg_all_lift possibleSwitchTo_sch_act_not possibleSwitchTo_sch_act_not sts_st_tcb' @@ -5234,7 +5225,7 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ep) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -5250,12 +5241,12 @@ lemma receiveIPC_enqueue_ccorres_helper: apply (simp add: cendpoint_relation_def Let_def) apply (case_tac ep, simp_all add: init_def valid_ep'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\))") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (RecvEP queue) epptr (\\ksPSpace := - ksPSpace \(epptr \ KOEndpoint (RecvEP queue))\)") + (ksPSpace \)(epptr \ KOEndpoint (RecvEP queue))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -5382,7 +5373,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule ep_blocked_in_queueD [OF pred_tcb'_weakenE]) apply simp apply assumption+ @@ -5403,7 +5394,7 @@ lemma receiveIPC_dequeue_ccorres_helper: apply (drule (2) ep_to_ep_queue) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cendpoint_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -5558,7 +5549,7 @@ lemma completeSignal_ccorres: apply (erule(1) cmap_relation_ko_atE[OF cmap_relation_ntfn]) apply (clarsimp simp: cnotification_relation_def Let_def typ_heap_simps) apply ceqv - apply (fold dc_def, ctac(no_vcg)) + apply (ctac(no_vcg)) apply (rule_tac P="invs' and ko_at' ntfn ntfnptr" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp) @@ -5674,7 +5665,7 @@ lemma receiveIPC_ccorres [corres]: apply ceqv apply (rule ccorres_cond[where R=\]) apply (simp add: Collect_const_mem) - apply (ctac add: completeSignal_ccorres[unfolded dc_def]) + apply (ctac add: completeSignal_ccorres) apply (rule_tac xf'=ret__unsigned_longlong_' and val="case ep of IdleEP \ scast EPState_Idle | RecvEP _ \ scast EPState_Recv @@ -5704,20 +5695,18 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule 
receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp apply (rename_tac list NOo) - apply (rule_tac ep="RecvEP list" - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep="RecvEP list" in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (rename_tac list) apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \IdleEP case\ apply (rule ccorres_cond_true) apply csymbr @@ -5729,18 +5718,16 @@ lemma receiveIPC_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp split del: if_split) apply (rule receiveIPC_block_ccorres_helper[unfolded ptr_val_def, simplified]) apply ceqv apply simp - apply (rule_tac ep=IdleEP - in receiveIPC_enqueue_ccorres_helper[simplified, unfolded dc_def]) + apply (rule_tac ep=IdleEP in receiveIPC_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ep'_def) apply (wp sts_st_tcb') apply (clarsimp simp: obj_at'_def ko_wp_at'_def projectKOs) apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \SendEP case\ apply (thin_tac "isBlockinga = from_bool P" for P) apply (rule ccorres_cond_false) @@ -5818,8 +5805,6 @@ lemma receiveIPC_ccorres [corres]: split: Structures_H.thread_state.splits) apply ceqv - apply (fold dc_def) - supply dc_simp[simp del] apply (clarsimp simp: from_bool_0 disj_imp[symmetric] simp del: Collect_const) apply wpc (* blocking ipc call *) @@ -5899,12 +5884,12 @@ lemma receiveIPC_ccorres [corres]: apply (clarsimp simp:st_tcb_at_refs_of_rev') apply (erule_tac x=x and P="\x. 
st_tcb_at' P x s" for P in ballE) apply (drule_tac t=x in valid_queues_not_runnable'_not_ksQ) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (subgoal_tac "sch_act_not x s") prefer 2 apply (frule invs_sch_act_wf') apply (clarsimp simp:sch_act_wf_def) - apply (clarsimp simp: st_tcb_at'_def obj_at'_def o_def) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def) apply (clarsimp simp: obj_at'_def st_tcb_at'_def projectKOs isBlockedOnSend_def split: list.split | rule conjI)+ @@ -5932,11 +5917,10 @@ lemma sendSignal_dequeue_ccorres_helper: IF head_C \ntfn_queue = Ptr 0 THEN CALL notification_ptr_set_state(Ptr ntfn,scast NtfnState_Idle) FI)" - apply (rule ccorres_from_vcg) apply (rule allI) apply (rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule (2) ntfn_blocked_in_queueD) apply (frule (1) ko_at_valid_ntfn' [OF _ invs_valid_objs']) apply (elim conjE) @@ -5956,7 +5940,7 @@ lemma sendSignal_dequeue_ccorres_helper: apply (drule ntfn_to_ep_queue, (simp add: isWaitingNtfn_def)+) apply (simp add: tcb_queue_relation'_def) apply (clarsimp simp: typ_heap_simps cnotification_relation_def Let_def - cong: imp_cong split del: if_split simp del: comp_def) + cong: imp_cong split del: if_split) apply (intro conjI impI allI) apply (fastforce simp: h_t_valid_clift) apply (fastforce simp: h_t_valid_clift) @@ -6144,7 +6128,7 @@ lemma sendSignal_ccorres [corres]: apply wpc apply (simp add: option_to_ctcb_ptr_def split del: if_split) apply (rule ccorres_cond_false) - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (rule ccorres_cond_true) apply (rule getThreadState_ccorres_foo) apply (rule ccorres_Guard_Seq) @@ -6159,7 +6143,7 @@ lemma sendSignal_ccorres [corres]: apply (ctac(no_vcg) add: cancelIPC_ccorres1[OF cteDeleteOne_ccorres]) apply (ctac(no_vcg) add: setThreadState_ccorres) apply (ctac(no_vcg) add: setRegister_ccorres) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: possibleSwitchTo_ccorres) apply (wp sts_running_valid_queues sts_st_tcb_at'_cases | simp add: option_to_ctcb_ptr_def split del: if_split)+ apply (rule_tac Q="\_. 
tcb_at' (the (ntfnBoundTCB ntfn)) and invs'" @@ -6167,7 +6151,7 @@ lemma sendSignal_ccorres [corres]: apply auto[1] apply wp apply simp - apply (ctac add: ntfn_set_active_ccorres[unfolded dc_def]) + apply (ctac add: ntfn_set_active_ccorres) apply (clarsimp simp: guard_is_UNIV_def option_to_ctcb_ptr_def X64_H.badgeRegister_def Kernel_C.badgeRegister_def X64.badgeRegister_def X64.capRegister_def @@ -6226,7 +6210,7 @@ lemma sendSignal_ccorres [corres]: apply ceqv apply (simp only: K_bind_def) apply (ctac (no_vcg)) - apply (simp, fold dc_def) + apply simp apply (ctac (no_vcg)) apply (ctac add: possibleSwitchTo_ccorres) apply (simp) @@ -6307,16 +6291,17 @@ lemma cpspace_relation_ntfn_update_ntfn': fixes ntfn :: "Structures_H.notification" and ntfn' :: "Structures_H.notification" and ntfnptr :: "machine_word" and s :: "kernel_state" defines "qs \ if isWaitingNtfn (ntfnObj ntfn') then set (ntfnQueue (ntfnObj ntfn')) else {}" - defines "s' \ s\ksPSpace := ksPSpace s(ntfnptr \ KONotification ntfn')\" + defines "s' \ s\ksPSpace := (ksPSpace s)(ntfnptr \ KONotification ntfn')\" assumes koat: "ko_at' ntfn ntfnptr s" and vp: "valid_pspace' s" and cp: "cmap_relation (map_to_ntfns (ksPSpace s)) (cslift t) Ptr (cnotification_relation (cslift t))" and srs: "sym_refs (state_refs_of' s')" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr - (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) + Ptr + (cnotification_relation (cslift t'))" proof - from koat have koat': "ko_at' ntfn' ntfnptr s'" by (clarsimp simp: obj_at'_def s'_def objBitsKO_def ps_clear_def projectKOs) @@ -6376,7 +6361,7 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (rule ccorres_gen_asm) apply (rule ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp split del: if_split simp del: comp_def) + apply (clarsimp split del: if_split) apply (frule cmap_relation_ntfn) apply (erule (1) cmap_relation_ko_atE) apply (rule conjI) @@ -6392,12 +6377,12 @@ lemma receiveSignal_enqueue_ccorres_helper: apply (simp add: cnotification_relation_def Let_def) apply (case_tac "ntfnObj ntfn", simp_all add: init_def valid_ntfn'_def)[1] apply (subgoal_tac "sym_refs (state_refs_of' (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\))") prefer 2 apply (clarsimp simp: state_refs_of'_upd ko_wp_at'_def ntfnBound_state_refs_equivalence obj_at'_def projectKOs objBitsKO_def) apply (subgoal_tac "ko_at' (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)) ntfnptr (\\ksPSpace := - ksPSpace \(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") + (ksPSpace \)(ntfnptr \ KONotification (NTFN (WaitingNtfn queue) (ntfnBoundTCB ntfn)))\)") prefer 2 apply (clarsimp simp: obj_at'_def projectKOs objBitsKO_def ps_clear_upd) apply (intro conjI impI allI) @@ -6564,11 +6549,10 @@ lemma receiveSignal_ccorres [corres]: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow_novcg) - apply (simp) apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule 
receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6579,7 +6563,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) \ \ActiveNtfn case\ apply (rename_tac badge) apply (rule ccorres_cond_false) @@ -6636,8 +6620,7 @@ lemma receiveSignal_ccorres [corres]: apply (rule receiveSignal_block_ccorres_helper[simplified]) apply ceqv apply (simp only: K_bind_def) - apply (rule_tac ntfn="ntfn" - in receiveSignal_enqueue_ccorres_helper[unfolded dc_def, simplified]) + apply (rule_tac ntfn="ntfn" in receiveSignal_enqueue_ccorres_helper[simplified]) apply (simp add: valid_ntfn'_def) apply (wp sts_st_tcb') apply (rule_tac Q="\rv. ko_wp_at' (\x. projectKO_opt x = Some ntfn @@ -6649,7 +6632,7 @@ lemma receiveSignal_ccorres [corres]: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply simp - apply (ctac add: doNBRecvFailedTransfer_ccorres[unfolded dc_def]) + apply (ctac add: doNBRecvFailedTransfer_ccorres) apply (clarsimp simp: guard_is_UNIV_def NtfnState_Active_def NtfnState_Waiting_def NtfnState_Idle_def) apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/X64/IsolatedThreadAction.thy b/proof/crefine/X64/IsolatedThreadAction.thy index 62ce61e6be..737a6b0999 100644 --- a/proof/crefine/X64/IsolatedThreadAction.thy +++ b/proof/crefine/X64/IsolatedThreadAction.thy @@ -461,7 +461,7 @@ lemma modify_isolatable: liftM_def bind_assoc) apply (clarsimp simp: monadic_rewrite_def exec_gets getSchedulerAction_def) - apply (simp add: simpler_modify_def o_def) + apply (simp add: simpler_modify_def) apply (subst swap) apply (simp add: obj_at_partial_overwrite_If) apply (simp add: ksPSpace_update_partial_id o_def) @@ -973,7 +973,7 @@ lemma setThreadState_no_sch_change: "\\s. P (ksSchedulerAction s) \ (runnable' st \ t \ ksCurThread s)\ setThreadState st t \\rv s. P (ksSchedulerAction s)\" - (is "NonDetMonadVCG.valid ?P ?f ?Q") + (is "Nondet_VCG.valid ?P ?f ?Q") apply (simp add: setThreadState_def setSchedulerAction_def) apply (wp hoare_pre_cont[where f=rescheduleRequired]) apply (rule_tac Q="\_. ?P and st_tcb_at' ((=) st) t" in hoare_post_imp) @@ -1159,8 +1159,7 @@ lemma setCTE_isolatable: apply (erule notE[rotated], erule (3) tcb_ctes_clear[rotated]) apply (simp add: select_f_returns select_f_asserts split: if_split) apply (intro conjI impI) - apply (clarsimp simp: simpler_modify_def fun_eq_iff - partial_overwrite_fun_upd2 o_def + apply (clarsimp simp: simpler_modify_def fun_eq_iff partial_overwrite_fun_upd2 intro!: kernel_state.fold_congs[OF refl refl]) apply (clarsimp simp: obj_at'_def projectKOs objBits_simps) apply (erule notE[rotated], rule tcb_ctes_clear[rotated 2], assumption+) diff --git a/proof/crefine/X64/PSpace_C.thy b/proof/crefine/X64/PSpace_C.thy index 14a69364ef..1c5b5ee0ba 100644 --- a/proof/crefine/X64/PSpace_C.thy +++ b/proof/crefine/X64/PSpace_C.thy @@ -47,7 +47,7 @@ lemma setObject_ccorres_helper: fixes ko :: "'a :: pspace_storable" assumes valid: "\\ (ko' :: 'a). \ \ {s. (\, s) \ rf_sr \ P \ \ s \ P' \ ko_at' ko' p \} - c {s. (\\ksPSpace := ksPSpace \ (p \ injectKO ko)\, s) \ rf_sr}" + c {s. (\\ksPSpace := (ksPSpace \)(p \ injectKO ko)\, s) \ rf_sr}" shows "\ \ko :: 'a. 
updateObject ko = updateObject_default ko; \ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko \ \ ccorres dc xfdc P P' hs (setObject p ko) c" diff --git a/proof/crefine/X64/Recycle_C.thy b/proof/crefine/X64/Recycle_C.thy index e658ab91a7..1ef25598af 100644 --- a/proof/crefine/X64/Recycle_C.thy +++ b/proof/crefine/X64/Recycle_C.thy @@ -454,7 +454,7 @@ lemma mapM_x_store_memset_ccorres_assist: "\ko :: 'a. (1 :: machine_word) < 2 ^ objBits ko" assumes restr: "set slots \ S" assumes worker: "\ptr s s' (ko :: 'a). \ (s, s') \ rf_sr; ko_at' ko ptr s; ptr \ S \ - \ (s \ ksPSpace := ksPSpace s (ptr \ injectKO val)\, + \ (s \ ksPSpace := (ksPSpace s)(ptr \ injectKO val)\, globals_update (t_hrs_'_update (hrs_mem_update (heap_update_list ptr (replicateHider (2 ^ objBits val) (ucast c))))) s') \ rf_sr" @@ -793,8 +793,8 @@ lemma cpspace_relation_ep_update_ep2: (cslift t) ep_Ptr (cendpoint_relation (cslift t)); cendpoint_relation (cslift t') ep' endpoint; (cslift t' :: tcb_C ptr \ tcb_C) = cslift t \ - \ cmap_relation (map_to_eps (ksPSpace s(epptr \ KOEndpoint ep'))) - (cslift t(ep_Ptr epptr \ endpoint)) + \ cmap_relation (map_to_eps ((ksPSpace s)(epptr \ KOEndpoint ep'))) + ((cslift t)(ep_Ptr epptr \ endpoint)) ep_Ptr (cendpoint_relation (cslift t'))" apply (rule cmap_relationE1, assumption, erule ko_at_projectKO_opt) apply (rule_tac P="\a. cmap_relation a b c d" for b c d in rsubst, @@ -903,8 +903,8 @@ lemma cancelBadgedSends_ccorres: cong: list.case_cong Structures_H.endpoint.case_cong call_ignore_cong del: Collect_const) apply (rule ccorres_pre_getEndpoint) - apply (rule_tac R="ko_at' rv ptr" and xf'="ret__unsigned_longlong_'" - and val="case rv of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle + apply (rule_tac R="ko_at' ep ptr" and xf'="ret__unsigned_longlong_'" + and val="case ep of RecvEP q \ scast EPState_Recv | IdleEP \ scast EPState_Idle | SendEP q \ scast EPState_Send" in ccorres_symb_exec_r_known_rv_UNIV[where R'=UNIV]) apply vcg @@ -914,22 +914,22 @@ lemma cancelBadgedSends_ccorres: split: Structures_H.endpoint.split_asm) apply ceqv apply wpc - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) - apply (simp add: dc_def[symmetric] ccorres_cond_iffs) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_return_Skip) apply (rename_tac list) apply (simp add: Collect_True Collect_False endpoint_state_defs - ccorres_cond_iffs dc_def[symmetric] + ccorres_cond_iffs del: Collect_const cong: call_ignore_cong) apply (rule ccorres_rhs_assoc)+ apply (csymbr, csymbr) - apply (drule_tac s = rv in sym, simp only:) - apply (rule_tac P="ko_at' rv ptr and invs'" in ccorres_cross_over_guard) + apply (drule_tac s = ep in sym, simp only:) + apply (rule_tac P="ko_at' ep ptr and invs'" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (rule ccorres_split_nothrow[where r'=dc and xf'=xfdc, OF _ ceqv_refl]) - apply (rule_tac P="ko_at' rv ptr" + apply (rule_tac P="ko_at' ep ptr" in ccorres_from_vcg[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply clarsimp @@ -1008,7 +1008,7 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: mask_def) subgoal by (auto split: if_split) subgoal by simp - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (rule hoare_pre, wp weak_sch_act_wf_lift_linear set_ep_valid_objs') apply (clarsimp simp: weak_sch_act_wf_def sch_act_wf_def) apply (fastforce 
simp: valid_ep'_def pred_tcb_at' split: list.splits) @@ -1018,7 +1018,7 @@ lemma cancelBadgedSends_ccorres: apply (rule iffD1 [OF ccorres_expand_while_iff_Seq]) apply (rule ccorres_init_tmp_lift2, ceqv) apply (rule ccorres_guard_imp2) - apply (simp add: bind_assoc dc_def[symmetric] + apply (simp add: bind_assoc del: Collect_const) apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ @@ -1043,9 +1043,9 @@ lemma cancelBadgedSends_ccorres: subgoal by (simp add: rf_sr_def) apply simp apply ceqv - apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rva" in ccorres_gen_asm2) + apply (rule_tac P="ret__unsigned_longlong=blockingIPCBadge rv" in ccorres_gen_asm2) apply (rule ccorres_if_bind, rule ccorres_if_lhs) - apply (simp add: bind_assoc dc_def[symmetric]) + apply (simp add: bind_assoc) apply (rule ccorres_rhs_assoc)+ apply (ctac add: setThreadState_ccorres) apply (ctac add: tcbSchedEnqueue_ccorres) @@ -1115,9 +1115,9 @@ lemma cancelBadgedSends_ccorres: apply (wp hoare_vcg_const_Ball_lift sts_st_tcb_at'_cases sts_sch_act sts_valid_queues setThreadState_oa_queued) apply (vcg exspec=setThreadState_cslift_spec) - apply (simp add: ccorres_cond_iffs dc_def[symmetric]) + apply (simp add: ccorres_cond_iffs) apply (rule ccorres_symb_exec_r2) - apply (drule_tac x="x @ [a]" in spec, simp add: dc_def[symmetric]) + apply (drule_tac x="x @ [a]" in spec, simp) apply vcg apply (vcg spec=modifies) apply (thin_tac "\x. P x" for P) diff --git a/proof/crefine/X64/Refine_C.thy b/proof/crefine/X64/Refine_C.thy index b8d8a8263d..d1464904b7 100644 --- a/proof/crefine/X64/Refine_C.thy +++ b/proof/crefine/X64/Refine_C.thy @@ -458,7 +458,7 @@ lemma ccorres_corres_u_xf: apply (drule (1) bspec) apply (clarsimp simp: exec_C_def no_fail_def) apply (drule_tac x = a in spec) - apply (clarsimp simp:gets_def NonDetMonad.bind_def get_def return_def) + apply (clarsimp simp:gets_def Nondet_Monad.bind_def get_def return_def) apply (rule conjI) apply clarsimp apply (erule_tac x=0 in allE) @@ -611,9 +611,9 @@ lemma callKernel_withFastpath_corres_C: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_symb_exec_r)+ apply (rule ccorres_Cond_rhs) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_call_ccorres_callKernel]) - apply (simp add: dc_def[symmetric]) + apply simp apply (ctac add: ccorres_get_registers[OF fastpath_reply_recv_ccorres_callKernel]) apply vcg apply (rule conseqPre, vcg, clarsimp) @@ -642,7 +642,7 @@ lemma threadSet_all_invs_triv': apply (simp add: tcb_cte_cases_def) apply (simp add: exst_same_def) apply (wp thread_set_invs_trivial thread_set_ct_running thread_set_not_state_valid_sched - threadSet_invs_trivial threadSet_ct_running' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp thread_set_ct_in_state | simp add: tcb_cap_cases_def tcb_arch_ref_def | rule threadSet_ct_in_state' @@ -694,13 +694,13 @@ lemma entry_corres_C: apply simp apply (rule corres_split) (* FIXME: fastpath - apply (rule corres_cases[where R=fp], simp_all add: dc_def[symmetric])[1] - apply (rule callKernel_withFastpath_corres_C, simp) + apply (rule corres_cases[where R=fp]; simp) + apply (rule callKernel_withFastpath_corres_C) *) - apply (rule callKernel_corres_C[unfolded dc_def], simp) + apply (rule callKernel_corres_C) apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. 
t' = tcb_ptr_to_ctcb_ptr t"]) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def) - apply (rule getContext_corres[unfolded o_def], simp) + apply (rule getContext_corres, simp) apply (wp threadSet_all_invs_triv' callKernel_cur)+ apply (clarsimp simp: all_invs'_def invs'_def cur_tcb'_def valid_state'_def) apply simp @@ -802,7 +802,7 @@ lemma user_memory_update_corres_C: prefer 2 apply (clarsimp simp add: doMachineOp_def user_memory_update_def simpler_modify_def simpler_gets_def select_f_def - NonDetMonad.bind_def return_def) + Nondet_Monad.bind_def return_def) apply (thin_tac P for P)+ apply (case_tac a, clarsimp) apply (case_tac ksMachineState, clarsimp) @@ -829,7 +829,7 @@ lemma device_update_corres_C: apply (clarsimp simp add: setDeviceState_C_def simpler_modify_def) apply (rule ballI) apply (clarsimp simp: simpler_modify_def setDeviceState_C_def) - apply (clarsimp simp: doMachineOp_def device_memory_update_def NonDetMonad.bind_def in_monad + apply (clarsimp simp: doMachineOp_def device_memory_update_def Nondet_Monad.bind_def in_monad gets_def get_def return_def simpler_modify_def select_f_def) apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) @@ -897,7 +897,7 @@ lemma do_user_op_corres_C: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) apply (drule(1) device_mem_C_relation[symmetric]) - apply (simp add: comp_def) + apply simp apply (rule_tac P=valid_state' and P'=\ and r'="(=)" in corres_split) apply (clarsimp simp: cstate_relation_def rf_sr_def Let_def cmachine_state_relation_def) @@ -917,7 +917,7 @@ lemma do_user_op_corres_C: apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (intro conjI allI ballI impI) apply ((clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def)+)[5] apply (clarsimp simp: ex_abs_def restrict_map_def diff --git a/proof/crefine/X64/Retype_C.thy b/proof/crefine/X64/Retype_C.thy index 863aac468f..4f37ce630d 100644 --- a/proof/crefine/X64/Retype_C.thy +++ b/proof/crefine/X64/Retype_C.thy @@ -1097,7 +1097,7 @@ lemma ptr_add_to_new_cap_addrs: shows "(CTypesDefs.ptr_add (Ptr ptr :: 'a :: mem_type ptr) \ of_nat) ` {k. 
k < n} = Ptr ` set (new_cap_addrs n ptr ko)" unfolding new_cap_addrs_def - apply (simp add: comp_def image_image shiftl_t2n size_of_m field_simps) + apply (simp add: image_image shiftl_t2n size_of_m field_simps) apply (clarsimp simp: atLeastLessThan_def lessThan_def) done @@ -3676,8 +3676,7 @@ proof - apply (simp add: hrs_mem_def, subst rep0) apply (simp only: take_replicate, simp add: cte_C_size objBits_simps') apply (simp add: cte_C_size objBits_simps') - apply (simp add: fun_eq_iff o_def - split: if_split) + apply (simp add: fun_eq_iff split: if_split) apply (simp add: hrs_comm packed_heap_update_collapse typ_heap_simps) apply (subst clift_heap_update_same_td_name', simp_all, @@ -4370,7 +4369,7 @@ lemma mapM_x_storeWord_step: apply (subst if_not_P) apply (subst not_less) apply (erule is_aligned_no_overflow) - apply (simp add: mapM_x_map comp_def upto_enum_word del: upt.simps) + apply (simp add: mapM_x_map upto_enum_word del: upt.simps) apply (subst div_power_helper_64 [OF sz2, simplified]) apply assumption apply (simp add: word_bits_def unat_minus_one del: upt.simps) @@ -4800,7 +4799,7 @@ lemma copyGlobalMappings_ccorres: apply (cinit lift: new_vspace_' simp:) apply csymbr apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState, rename_tac skimPM) - apply (rule ccorres_rel_imp[where r=dc, OF _ dc_simp]) + apply (rule ccorres_rel_imp[where r=dc, simplified]) apply (clarsimp simp: whileAnno_def objBits_simps archObjSize_def getPML4Index_def bit_simps X64.pptrBase_def mask_def) apply csymbr @@ -4993,12 +4992,10 @@ lemma ccorres_placeNewObject_endpoint: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ep [where ptr=regionBase and n="1" and sz="objBitsKO (KOEndpoint makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -5031,12 +5028,10 @@ lemma ccorres_placeNewObject_notification: apply (clarsimp simp: new_cap_addrs_def) apply (cut_tac createObjects_ccorres_ntfn [where ptr=regionBase and n="1" and sz="objBitsKO (KONotification makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ - apply (clarsimp simp: split_def Let_def - Fun.comp_def rf_sr_def new_cap_addrs_def - region_actually_is_bytes ptr_retyps_gen_def - objBits_simps' - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp simp: split_def Let_def rf_sr_def new_cap_addrs_def + region_actually_is_bytes ptr_retyps_gen_def objBits_simps' + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv) apply (clarsimp simp: range_cover.aligned objBits_simps) apply (clarsimp simp: no_fail_def) @@ -5096,11 +5091,10 @@ lemma ccorres_placeNewObject_captable: apply (clarsimp simp: split_def new_cap_addrs_def) apply (cut_tac 
createObjects_ccorres_cte [where ptr=regionBase and n="2 ^ unat userSize" and sz="unat userSize + objBitsKO (KOCTE makeObject)"]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ - apply (clarsimp simp: split_def objBitsKO_def - Fun.comp_def rf_sr_def split_def Let_def cteSizeBits_def - new_cap_addrs_def field_simps power_add ptr_retyps_gen_def - elim!: rsubst[where P="cstate_relation s'" for s']) + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def cteSizeBits_def)+ + apply (clarsimp simp: split_def objBitsKO_def rf_sr_def split_def Let_def cteSizeBits_def + new_cap_addrs_def field_simps power_add ptr_retyps_gen_def + elim!: rsubst[where P="cstate_relation s'" for s']) apply (clarsimp simp: word_bits_conv range_cover_def) apply (clarsimp simp: objBitsKO_def objBits_simps' range_cover.aligned) apply (clarsimp simp: no_fail_def) @@ -5308,11 +5302,11 @@ lemma placeNewObject_pte: apply (clarsimp simp: split_def new_cap_addrs_def) apply (cut_tac s=\ in createObjects_ccorres_pte [where ptr=regionBase and sz=pageBits]) apply (erule_tac x=\ in allE, erule_tac x=x in allE) - apply (clarsimp elim!:is_aligned_weaken simp: objBitsKO_def word_bits_def)+ + apply (clarsimp elim!: is_aligned_weaken simp: objBitsKO_def word_bits_def)+ apply (clarsimp simp: split_def objBitsKO_def archObjSize_def - Fun.comp_def rf_sr_def split_def Let_def ptr_retyps_gen_def - new_cap_addrs_def field_simps power_add - cong: globals.unfold_congs) + rf_sr_def split_def Let_def ptr_retyps_gen_def + new_cap_addrs_def field_simps power_add + cong: globals.unfold_congs) apply (simp add: Int_ac bit_simps) apply (clarsimp simp: word_bits_conv range_cover_def archObjSize_def bit_simps) apply (clarsimp simp: objBitsKO_def range_cover.aligned archObjSize_def bit_simps) @@ -6127,7 +6121,7 @@ lemma gsCNodes_update_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -6308,7 +6302,7 @@ proof - apply (simp add: obj_at'_real_def) apply (wp placeNewObject_ko_wp_at') apply (vcg exspec=Arch_initContext_modifies) - apply (clarsimp simp: dc_def) + apply clarsimp apply vcg apply (rule conseqPre, vcg, clarsimp) apply (clarsimp simp: createObject_hs_preconds_def @@ -6488,7 +6482,7 @@ lemma ccorres_guard_impR: lemma typ_clear_region_dom: "dom (clift (hrs_htd_update (typ_clear_region ptr bits) hp) :: 'b :: mem_type typ_heap) \ dom ((clift hp) :: 'b :: mem_type typ_heap)" - apply (clarsimp simp:lift_t_def lift_typ_heap_def Fun.comp_def) + apply (clarsimp simp:lift_t_def lift_typ_heap_def comp_def) apply (clarsimp simp:lift_state_def) apply (case_tac hp) apply (clarsimp simp:) @@ -8412,7 +8406,7 @@ shows "ccorres dc xfdc apply (rule_tac P="rv' = of_nat n" in ccorres_gen_asm2, simp) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_add_return) - apply (simp only: dc_def[symmetric] hrs_htd_update) + apply (simp only: hrs_htd_update) apply ((rule ccorres_Guard_Seq[where S=UNIV])+)? apply (rule ccorres_split_nothrow, rule_tac S="{ptr .. 
ptr + of_nat (length destSlots) * 2^ (getObjectSize newType userSize) - 1}" @@ -8573,9 +8567,9 @@ shows "ccorres dc xfdc including no_pre apply (wp insertNewCap_invs' insertNewCap_valid_pspace' insertNewCap_caps_overlap_reserved' insertNewCap_pspace_no_overlap' insertNewCap_caps_no_overlap'' insertNewCap_descendants_range_in' - insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at static_imp_wp) + insertNewCap_untypedRange hoare_vcg_all_lift insertNewCap_cte_at hoare_weak_lift_imp) apply (wp insertNewCap_cte_wp_at_other) - apply (wp hoare_vcg_all_lift static_imp_wp insertNewCap_cte_at) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp insertNewCap_cte_at) apply (clarsimp simp:conj_comms | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_distinct')+ @@ -8609,7 +8603,7 @@ shows "ccorres dc xfdc hoare_vcg_prop createObject_gsCNodes_p createObject_cnodes_have_size) apply (rule hoare_vcg_conj_lift[OF createObject_capRange_helper]) apply (wp createObject_cte_wp_at' createObject_ex_cte_cap_wp_to - createObject_no_inter[where sz = sz] hoare_vcg_all_lift static_imp_wp)+ + createObject_no_inter[where sz = sz] hoare_vcg_all_lift hoare_weak_lift_imp)+ apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace' field_simps range_cover.sz conj_comms range_cover.aligned range_cover_sz' is_aligned_shiftl_self aligned_add_aligned[OF range_cover.aligned]) @@ -8771,7 +8765,7 @@ shows "ccorres dc xfdc apply (simp add: o_def) apply (case_tac newType, simp_all add: object_type_from_H_def Kernel_C_defs - nAPIObjects_def APIType_capBits_def o_def split:apiobject_type.splits)[1] + nAPIObjects_def APIType_capBits_def split:apiobject_type.splits)[1] subgoal by (simp add:unat_eq_def word_unat.Rep_inverse' word_less_nat_alt) subgoal by (clarsimp simp:objBits_simps', unat_arith) apply (fold_subgoals (prefix))[3] diff --git a/proof/crefine/X64/SR_lemmas_C.thy b/proof/crefine/X64/SR_lemmas_C.thy index c440778aa1..8263af60a8 100644 --- a/proof/crefine/X64/SR_lemmas_C.thy +++ b/proof/crefine/X64/SR_lemmas_C.thy @@ -307,7 +307,7 @@ lemma tcb_cte_cases_proj_eq [simp]: (* NOTE: 5 = cte_level_bits *) lemma map_to_ctes_upd_cte': "\ ksPSpace s p = Some (KOCTE cte'); is_aligned p cte_level_bits; ps_clear p cte_level_bits s \ - \ map_to_ctes (ksPSpace s(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" + \ map_to_ctes ((ksPSpace s)(p |-> KOCTE cte)) = (map_to_ctes (ksPSpace s))(p |-> cte)" apply (erule (1) map_to_ctes_upd_cte) apply (simp add: field_simps ps_clear_def3 cte_level_bits_def mask_def) done @@ -315,7 +315,7 @@ lemma map_to_ctes_upd_cte': lemma map_to_ctes_upd_tcb': "[| ksPSpace s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; ps_clear p tcbBlockSizeBits s |] -==> map_to_ctes (ksPSpace s(p |-> KOTCB tcb)) = +==> map_to_ctes ((ksPSpace s)(p |-> KOTCB tcb)) = (%x. if EX getF setF. tcb_cte_cases (x - p) = Some (getF, setF) & getF tcb ~= getF tcb' @@ -442,7 +442,7 @@ lemma fst_setCTE: assumes ct: "cte_at' dest s" and rl: "\s'. 
\ ((), s') \ fst (setCTE dest cte s); (s' = s \ ksPSpace := ksPSpace s' \); - (ctes_of s' = ctes_of s(dest \ cte)); + (ctes_of s' = (ctes_of s)(dest \ cte)); (map_to_eps (ksPSpace s) = map_to_eps (ksPSpace s')); (map_to_ntfns (ksPSpace s) = map_to_ntfns (ksPSpace s')); (map_to_pml4es (ksPSpace s) = map_to_pml4es (ksPSpace s')); @@ -470,7 +470,7 @@ proof - by clarsimp note thms = this - have ceq: "ctes_of s' = ctes_of s(dest \ cte)" + have ceq: "ctes_of s' = (ctes_of s)(dest \ cte)" by (rule use_valid [OF thms(1) setCTE_ctes_of_wp]) simp show ?thesis @@ -672,7 +672,6 @@ proof (rule cor_map_relI [OF map_option_eq_dom_eq]) hence "tcb_no_ctes_proj tcb = tcb_no_ctes_proj tcb'" using om apply - - apply (simp add: o_def) apply (drule fun_cong [where x = x]) apply simp done @@ -1494,7 +1493,7 @@ lemma ntfnQueue_tail_mask_4 [simp]: lemma map_to_ctes_upd_tcb_no_ctes: "\ko_at' tcb thread s ; \x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x \ - \ map_to_ctes (ksPSpace s(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(thread \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" apply (erule obj_atE') apply (simp add: projectKOs objBits_simps) apply (subst map_to_ctes_upd_tcb') @@ -1508,16 +1507,16 @@ lemma map_to_ctes_upd_tcb_no_ctes: lemma update_ntfn_map_tos: fixes P :: "Structures_H.notification \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KONotification ko)) = map_to_eps (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KONotification ko)) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KONotification ko)) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KONotification ko)) = map_to_eps (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KONotification ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KONotification ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KONotification ko)) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KONotification ko)) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KONotification ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KONotification ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KONotification ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KONotification ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1525,16 +1524,16 @@ lemma update_ntfn_map_tos: lemma update_ep_map_tos: fixes P :: "endpoint \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs 
(ksPSpace s(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOEndpoint ko)) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOEndpoint ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1542,15 +1541,15 @@ lemma update_ep_map_tos: lemma update_tcb_map_tos: fixes P :: "tcb \ bool" assumes at: "obj_at' P p s" - shows "map_to_eps (ksPSpace s(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" - and "map_to_ntfns (ksPSpace s(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOTCB ko)) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOTCB ko)) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" + shows "map_to_eps ((ksPSpace s)(p \ KOTCB ko)) = map_to_eps (ksPSpace s)" + and "map_to_ntfns ((ksPSpace s)(p \ KOTCB ko)) = map_to_ntfns (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOTCB ko)) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOTCB ko)) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOTCB ko)) = map_to_ptes (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ KOTCB ko)) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOTCB ko)) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI simp: projectKOs projectKO_opts_defs split: kernel_object.splits if_split_asm)+ @@ -1558,16 +1557,16 @@ lemma 
update_tcb_map_tos: lemma update_asidpool_map_tos: fixes P :: "asidpool \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOArch (KOASIDPool ko))) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOArch (KOASIDPool ko))) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOASIDPool ko))) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOASIDPool ko))) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_eps (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_to_ctes_upd_other map_comp_eqI @@ -1576,28 +1575,28 @@ lemma update_asidpool_map_tos: arch_kernel_object.split_asm) lemma update_asidpool_map_to_asidpools: - "map_to_asidpools (ksPSpace s(p \ KOArch (KOASIDPool ap))) + "map_to_asidpools ((ksPSpace s)(p \ KOArch (KOASIDPool ap))) = (map_to_asidpools (ksPSpace s))(p \ ap)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_to_ptes: - "map_to_ptes (ksPSpace s(p \ KOArch (KOPTE pte))) + "map_to_ptes ((ksPSpace s)(p \ KOArch (KOPTE pte))) = (map_to_ptes (ksPSpace s))(p \ pte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pte_map_tos: fixes P :: "pte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOArch (KOPTE ko))) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOArch (KOPTE ko))) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" - and 
"map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOPTE ko))) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOPTE ko))) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_pdes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPTE pte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1605,23 +1604,23 @@ lemma update_pte_map_tos: auto simp: projectKO_opts_defs) lemma update_pde_map_to_pdes: - "map_to_pdes (ksPSpace s(p \ KOArch (KOPDE pde))) + "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPDE pde))) = (map_to_pdes (ksPSpace s))(p \ pde)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pde_map_tos: fixes P :: "pde \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOArch (KOPDE ko))) = map_to_pml4es (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOArch (KOPDE ko))) = map_to_pdptes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOPDE ko))) = map_to_pml4es (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOPDE ko))) = map_to_pdptes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPDE pde)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1629,23 +1628,23 @@ lemma update_pde_map_tos: auto simp: 
projectKO_opts_defs) lemma update_pdpte_map_to_pdptes: - "map_to_pdptes (ksPSpace s(p \ KOArch (KOPDPTE pdpte))) + "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOPDPTE pdpte))) = (map_to_pdptes (ksPSpace s))(p \ pdpte)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pdpte_map_tos: fixes P :: "pdpte \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ctes (ksPSpace s)" - and "map_to_pml4es (ksPSpace s(p \ KOArch (KOPDPTE ko))) = map_to_pml4es (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOPDPTE ko))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPDPTE pdpte)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ctes (ksPSpace s)" + and "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOPDPTE ko))) = map_to_pml4es (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPDPTE ko))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPDPTE pdpte)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -1653,23 +1652,23 @@ lemma update_pdpte_map_tos: auto simp: projectKO_opts_defs) lemma update_pml4e_map_to_pml4es: - "map_to_pml4es (ksPSpace s(p \ KOArch (KOPML4E pml4e))) + "map_to_pml4es ((ksPSpace s)(p \ KOArch (KOPML4E pml4e))) = (map_to_pml4es (ksPSpace s))(p \ pml4e)" by (rule ext, clarsimp simp: projectKOs map_comp_def split: if_split) lemma update_pml4e_map_tos: fixes P :: "pml4e \ bool" assumes at: "obj_at' P p s" - shows "map_to_ntfns (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_ntfns (ksPSpace s)" - and "map_to_tcbs (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_tcbs (ksPSpace s)" - and "map_to_ctes (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_ctes (ksPSpace s)" - and "map_to_pdptes (ksPSpace s(p \ KOArch (KOPML4E ko))) = map_to_pdptes (ksPSpace s)" - and "map_to_pdes (ksPSpace s(p \ KOArch (KOPML4E ko))) = map_to_pdes (ksPSpace s)" - and "map_to_ptes (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_ptes (ksPSpace s)" - and "map_to_eps (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_eps (ksPSpace s)" - and "map_to_asidpools (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_asidpools (ksPSpace s)" - and "map_to_user_data 
(ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_user_data (ksPSpace s)" - and "map_to_user_data_device (ksPSpace s(p \ (KOArch (KOPML4E pml4e)))) = map_to_user_data_device (ksPSpace s)" + shows "map_to_ntfns ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_ntfns (ksPSpace s)" + and "map_to_tcbs ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_tcbs (ksPSpace s)" + and "map_to_ctes ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_ctes (ksPSpace s)" + and "map_to_pdptes ((ksPSpace s)(p \ KOArch (KOPML4E ko))) = map_to_pdptes (ksPSpace s)" + and "map_to_pdes ((ksPSpace s)(p \ KOArch (KOPML4E ko))) = map_to_pdes (ksPSpace s)" + and "map_to_ptes ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_ptes (ksPSpace s)" + and "map_to_eps ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_eps (ksPSpace s)" + and "map_to_asidpools ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_asidpools (ksPSpace s)" + and "map_to_user_data ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_user_data (ksPSpace s)" + and "map_to_user_data_device ((ksPSpace s)(p \ (KOArch (KOPML4E pml4e)))) = map_to_user_data_device (ksPSpace s)" using at by (auto elim!: obj_atE' intro!: map_comp_eqI map_to_ctes_upd_other split: if_split_asm if_split @@ -2168,7 +2167,7 @@ lemma gs_set_assn_Delete_cstate_relation: lemma update_typ_at: assumes at: "obj_at' P p s" and tp: "\obj. P obj \ koTypeOf (injectKOS obj) = koTypeOf ko" - shows "typ_at' T p' (s \ksPSpace := ksPSpace s(p \ ko)\) = typ_at' T p' s" + shows "typ_at' T p' (s \ksPSpace := (ksPSpace s)(p \ ko)\) = typ_at' T p' s" using at by (auto elim!: obj_atE' simp: typ_at'_def ko_wp_at'_def dest!: tp[rule_format] diff --git a/proof/crefine/X64/Schedule_C.thy b/proof/crefine/X64/Schedule_C.thy index 022167afe8..ec4adcf619 100644 --- a/proof/crefine/X64/Schedule_C.thy +++ b/proof/crefine/X64/Schedule_C.thy @@ -182,14 +182,14 @@ lemmas ccorres_remove_tail_Guard_Skip = ccorres_abstract[where xf'="\_. ()", OF ceqv_remove_tail_Guard_Skip] lemma switchToThread_ccorres': - "ccorres (\_ _. True) xfdc + "ccorres dc xfdc (all_invs_but_ct_idle_or_in_cur_domain' and tcb_at' t) (UNIV \ \\thread = tcb_ptr_to_ctcb_ptr t\) hs (switchToThread t) (Call switchToThread_'proc)" apply (rule ccorres_guard_imp2) - apply (ctac (no_vcg) add: switchToThread_ccorres[simplified dc_def]) + apply (ctac (no_vcg) add: switchToThread_ccorres) apply auto done @@ -283,14 +283,14 @@ proof - apply (intro conjI impI) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) apply (fastforce dest: lookupBitmapPriority_obj_at' - simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + simp: pred_conj_def obj_at'_def st_tcb_at'_def) apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) - apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (clarsimp simp: pred_conj_def obj_at'_def st_tcb_at'_def) apply (clarsimp simp: not_less le_maxDomain_eq_less_numDomains) apply (prop_tac "ksCurDomain s = 0") using unsigned_eq_0_iff apply force apply (cut_tac s=s in lookupBitmapPriority_obj_at'; simp?) 
- apply (clarsimp simp: pred_conj_def comp_def obj_at'_def st_tcb_at'_def) + apply (clarsimp simp: pred_conj_def obj_at'_def st_tcb_at'_def) done qed @@ -371,7 +371,6 @@ lemma isHighestPrio_ccorres: (isHighestPrio d p) (Call isHighestPrio_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -412,7 +411,6 @@ lemma isHighestPrio_ccorres: lemma schedule_ccorres: "ccorres dc xfdc invs' UNIV [] schedule (Call schedule_'proc)" supply Collect_const [simp del] - supply dc_simp [simp del] supply prio_and_dom_limit_helpers[simp] supply Collect_const_mem [simp] (* FIXME: these should likely be in simpset for CRefine, or even in general *) @@ -426,7 +424,7 @@ lemma schedule_ccorres: apply (rule ccorres_cond_false_seq) apply simp apply (rule_tac P=\ and P'="{s. ksSchedulerAction_' (globals s) = NULL }" in ccorres_from_vcg) - apply (clarsimp simp: dc_def return_def split: prod.splits) + apply (clarsimp simp: return_def split: prod.splits) apply (rule conseqPre, vcg, clarsimp) (* toplevel case: action is choose new thread *) apply (rule ccorres_cond_true_seq) @@ -443,7 +441,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_true_seq) (* isolate haskell part before setting thread action *) apply (simp add: scheduleChooseNewThread_def) @@ -471,7 +469,7 @@ lemma schedule_ccorres: apply (ctac add: tcbSchedEnqueue_ccorres) apply (rule ccorres_from_vcg[where P=\ and P'=UNIV]) apply (clarsimp, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule ccorres_cond_false_seq) apply (rule_tac xf'=was_runnable_' in ccorres_abstract, ceqv) @@ -491,7 +489,7 @@ lemma schedule_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule ccorres_rhs_assoc2) apply (rule_tac r'="\rv rv'. rv = to_bool rv'" and xf'=fastfail_' in ccorres_split_nothrow) - apply (clarsimp simp: scheduleSwitchThreadFastfail_def dc_simp) + apply (clarsimp simp: scheduleSwitchThreadFastfail_def) apply (rule ccorres_cond_seq2[THEN iffD1]) apply (rule_tac xf'=ret__int_' and val="from_bool (curThread = it)" and R="\s. it = ksIdleThread s \ curThread = ksCurThread s" and R'=UNIV @@ -528,18 +526,17 @@ lemma schedule_ccorres: apply (rule ccorres_move_c_guard_tcb) apply (rule ccorres_add_return2) apply (ctac add: isHighestPrio_ccorres, clarsimp) - apply (clarsimp simp: to_bool_def) apply (rule ccorres_inst[where P=\ and P'=UNIV]) apply (rule ccorres_return) apply (rule conseqPre, vcg) - apply clarsimp + apply (clarsimp simp: to_bool_def) apply (rule wp_post_taut) apply (vcg exspec=isHighestPrio_modifies) apply (rule_tac P=\ and P'="{s. 
ret__int_' s = 0}" in ccorres_from_vcg) apply clarsimp apply (rule conseqPre, vcg) apply (fastforce simp: isHighestPrio_def' gets_def return_def get_def - NonDetMonad.bind_def + Nondet_Monad.bind_def split: prod.split) apply ceqv apply (clarsimp simp: to_bool_def) @@ -632,13 +629,12 @@ lemma schedule_ccorres: apply (clarsimp simp: invs'_bitmapQ_no_L1_orphans invs_ksCurDomain_maxDomain') apply (fastforce dest: invs_sch_act_wf') - apply (wp | clarsimp simp: dc_def)+ + apply wpsimp+ apply (vcg exspec=tcbSchedEnqueue_modifies) apply wp apply vcg - apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs' - dc_def)+ + apply (clarsimp simp: tcb_at_invs' rf_sr_ksCurThread if_apply_def2 invs_queues invs_valid_objs') apply (frule invs_sch_act_wf') apply (frule tcb_at_invs') apply (rule conjI) @@ -656,7 +652,7 @@ lemma schedule_ccorres: (* FIXME: move *) lemma map_to_tcbs_upd: - "map_to_tcbs (ksPSpace s(t \ KOTCB tcb')) = map_to_tcbs (ksPSpace s)(t \ tcb')" + "map_to_tcbs ((ksPSpace s)(t \ KOTCB tcb')) = (map_to_tcbs (ksPSpace s))(t \ tcb')" apply (rule ext) apply (clarsimp simp: map_comp_def projectKOs split: option.splits if_splits) done @@ -710,7 +706,7 @@ lemma timerTick_ccorres: apply (ctac add: get_tsType_ccorres2 [where f="\s. ksCurThread_' (globals s)"]) apply (rule ccorres_split_nothrow_novcg) apply wpc - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip)+ (* thread_state.Running *) apply simp apply (rule ccorres_cond_true) @@ -732,17 +728,17 @@ lemma timerTick_ccorres: apply (rule_tac P="cur_tcb'" and P'=\ in ccorres_move_c_guards(8)) apply (clarsimp simp: cur_tcb'_def) apply (fastforce simp: rf_sr_def cstate_relation_def Let_def typ_heap_simps dest: tcb_at_h_t_valid) - apply (ctac add: threadSet_timeSlice_ccorres[unfolded dc_def]) + apply (ctac add: threadSet_timeSlice_ccorres) apply (rule ccorres_rhs_assoc)+ apply (ctac) apply simp apply (ctac (no_vcg) add: tcbSchedAppend_ccorres) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply (wp weak_sch_act_wf_lift_linear threadSet_valid_queues threadSet_pred_tcb_at_state tcbSchedAppend_valid_objs' threadSet_valid_objs' threadSet_tcbDomain_triv | clarsimp simp: st_tcb_at'_def o_def split: if_splits)+ apply (vcg exspec=tcbSchedDequeue_modifies) - apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip[unfolded dc_def])+ + apply (simp add: "StrictC'_thread_state_defs", rule ccorres_cond_false, rule ccorres_return_Skip)+ apply ceqv apply (clarsimp simp: decDomainTime_def numDomains_sge_1_simp) apply (rule ccorres_when[where R=\]) @@ -754,7 +750,6 @@ lemma timerTick_ccorres: apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def carch_state_relation_def cmachine_state_relation_def) apply ceqv - apply (fold dc_def) apply (rule ccorres_pre_getDomainTime) apply (rename_tac rva rv'a rvb) apply (rule_tac P'="{s. 
ksDomainTime_' (globals s) = rvb}" in ccorres_inst, simp) @@ -762,13 +757,13 @@ lemma timerTick_ccorres: apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_true) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) apply clarsimp apply assumption apply clarsimp apply (rule ccorres_guard_imp2) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply clarsimp apply wp apply (clarsimp simp: guard_is_UNIV_def) diff --git a/proof/crefine/X64/SyscallArgs_C.thy b/proof/crefine/X64/SyscallArgs_C.thy index 511394ad1d..4be2242bd4 100644 --- a/proof/crefine/X64/SyscallArgs_C.thy +++ b/proof/crefine/X64/SyscallArgs_C.thy @@ -47,7 +47,7 @@ lemma replyOnRestart_invs'[wp]: "\invs'\ replyOnRestart thread reply isCall \\rv. invs'\" including no_pre apply (simp add: replyOnRestart_def) - apply (wp setThreadState_nonqueued_state_update rfk_invs' static_imp_wp) + apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_weak_lift_imp) apply (rule hoare_vcg_all_lift) apply (wp setThreadState_nonqueued_state_update rfk_invs' hoare_vcg_all_lift rfk_ksQ) apply (rule hoare_strengthen_post, rule gts_sp') @@ -289,7 +289,7 @@ lemma ccorres_invocationCatch_Inr: if reply = [] then liftE (replyOnRestart thread [] isCall) \ returnOk () else liftE (replyOnRestart thread reply isCall) odE od) c" - apply (simp add: invocationCatch_def liftE_bindE o_xo_injector) + apply (simp add: invocationCatch_def liftE_bindE o_xo_injector cong: ccorres_all_cong) apply (subst ccorres_liftM_simp[symmetric]) apply (simp add: liftM_def bind_assoc bindE_def) apply (rule_tac f="\f. ccorres rvr xs P P' hs f c" for rvr xs in arg_cong) @@ -657,7 +657,7 @@ lemma getMRs_tcbContext: apply (wp|wpc)+ apply (rule_tac P="n < length x" in hoare_gen_asm) apply (clarsimp simp: nth_append) - apply (wp mapM_wp' static_imp_wp)+ + apply (wp mapM_wp' hoare_weak_lift_imp)+ apply simp apply (rule asUser_cur_obj_at') apply (simp add: getRegister_def msgRegisters_unfold) @@ -783,12 +783,12 @@ lemma lookupIPCBuffer_ccorres[corres]: apply (rule ccorres_move_array_assertion_tcb_ctes) apply (ctac (no_vcg)) apply csymbr - apply (rule_tac b="isArchObjectCap rva \ isPageCap (capCap rva)" in ccorres_case_bools') + apply (rule_tac b="isArchObjectCap rv \ isPageCap (capCap rv)" in ccorres_case_bools') apply simp apply (rule ccorres_cond_false_seq) apply (simp(no_asm)) apply csymbr - apply (rule_tac b="isDeviceCap rva" in ccorres_case_bools') + apply (rule_tac b="isDeviceCap rv" in ccorres_case_bools') apply (rule ccorres_cond_true_seq) apply (rule ccorres_from_vcg_split_throws[where P=\ and P'=UNIV]) apply vcg @@ -994,7 +994,7 @@ lemma getMRs_rel: getMRs thread buffer mi \\args. getMRs_rel args buffer\" apply (simp add: getMRs_rel_def) apply (rule hoare_pre) - apply (rule_tac x=mi in hoare_vcg_exI) + apply (rule_tac x=mi in hoare_exI) apply wp apply (rule_tac Q="\rv s. 
thread = ksCurThread s \ fst (getMRs thread buffer mi s) = {(rv,s)}" in hoare_strengthen_post) apply (wp det_result det_wp_getMRs) diff --git a/proof/crefine/X64/Syscall_C.thy b/proof/crefine/X64/Syscall_C.thy index c7cbb8e6e9..e2ddf59ee8 100644 --- a/proof/crefine/X64/Syscall_C.thy +++ b/proof/crefine/X64/Syscall_C.thy @@ -268,22 +268,22 @@ lemma decodeInvocation_ccorres: apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, rule decodeTCBInvocation_ccorres) apply assumption apply (simp+)[3] apply (rule ccorres_Cond_rhs) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeDomainInvocation_ccorres[unfolded o_def], + erule decodeDomainInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply (simp add: if_to_top_of_bind) apply (rule ccorres_trim_returnE, simp+) - apply (simp add: liftME_invocationCatch o_def) + apply (simp add: liftME_invocationCatch) apply (rule ccorres_call, - erule decodeCNodeInvocation_ccorres[unfolded o_def], + erule decodeCNodeInvocation_ccorres, simp+)[1] apply (rule ccorres_Cond_rhs) apply simp @@ -717,7 +717,7 @@ lemma handleFault_ccorres: apply (rule ccorres_return_Skip') apply clarsimp apply (rule ccorres_cond_univ) - apply (ctac (no_vcg) add: handleDoubleFault_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: handleDoubleFault_ccorres) apply (simp add: sendFaultIPC_def) apply wp apply ((wp hoare_vcg_all_lift_R hoare_drop_impE_R |wpc |simp add: throw_def)+)[1] @@ -891,8 +891,7 @@ lemma handleInvocation_ccorres: apply (rule_tac Q="\rv'. invs' and tcb_at' rv" and E="\ft. invs' and tcb_at' rv" in hoare_post_impErr) - apply (wp hoare_split_bind_case_sumE - alternative_wp hoare_drop_imps + apply (wp hoare_split_bind_case_sumE hoare_drop_imps setThreadState_nonqueued_state_update ct_in_state'_set setThreadState_st_tcb hoare_vcg_all_lift sts_ksQ' @@ -1057,7 +1056,7 @@ lemma handleReply_ccorres: apply (rule ccorres_cond_true) apply simp apply (rule ccorres_return_void_catchbrk) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply (vcg exspec=doReplyTransfer_modifies) apply (rule ccorres_fail)+ apply (wpc, simp_all) @@ -1075,7 +1074,6 @@ lemma handleReply_ccorres: apply (csymbr, csymbr, csymbr) apply simp apply (rule ccorres_assert2) - apply (fold dc_def) apply (rule ccorres_add_return2) apply (ctac (no_vcg)) apply (rule ccorres_return_void_catchbrk) @@ -1236,7 +1234,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1254,8 +1252,8 @@ lemma handleRecv_ccorres: apply (simp add: liftE_bind) apply (ctac) - apply (rule_tac P="\s. ksCurThread s = rv" in ccorres_cross_over_guard) - apply (ctac add: receiveIPC_ccorres[unfolded dc_def]) + apply (rule_tac P="\s. 
ksCurThread s = thread" in ccorres_cross_over_guard) + apply (ctac add: receiveIPC_ccorres) apply (wp deleteCallerCap_ksQ_ct' hoare_vcg_all_lift) apply (rule conseqPost[where Q'=UNIV and A'="{}"], vcg exspec=deleteCallerCap_modifies) @@ -1303,7 +1301,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_add_return2) apply (rule ccorres_split_nothrow_call[where xf'=xfdc and d'="\_. break_C" and Q="\_ _. True" and Q'="\_ _. UNIV"]) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply simp+ apply ceqv apply (rule ccorres_break_return) @@ -1320,7 +1318,7 @@ lemma handleRecv_ccorres: apply (clarsimp simp: rf_sr_upd_safe) apply (simp add: liftE_bind) - apply (ctac add: receiveSignal_ccorres[unfolded dc_def]) + apply (ctac add: receiveSignal_ccorres) apply clarsimp apply (vcg exspec=handleFault_modifies) apply (rule ccorres_cond_true_seq) @@ -1333,7 +1331,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) apply (rule ccorres_add_return2) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_break_return[where P=\ and P'=UNIV]) apply simp+ apply wp @@ -1354,7 +1352,7 @@ lemma handleRecv_ccorres: apply (rule ccorres_symb_exec_r) apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply vcg apply (rule conseqPre, vcg) apply (clarsimp simp: rf_sr_upd_safe) @@ -1367,9 +1365,9 @@ lemma handleRecv_ccorres: apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_cross_over_guard[where P=\]) apply (rule ccorres_symb_exec_r) - apply (ctac add: handleFault_ccorres[unfolded dc_def]) + apply (ctac add: handleFault_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C [unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=handleFault_modifies) @@ -1596,7 +1594,6 @@ lemma ccorres_handleReservedIRQ: (\p. ksCurThread s \ set (ksReadyQueues s p)))) (UNIV \ {s. 
irq_' s = ucast irq}) hs (handleReservedIRQ irq) (Call handleReservedIRQ_'proc)" - supply dc_simp[simp del] apply (cinit lift: irq_') apply (rule ccorres_return_Skip) apply clarsimp @@ -1619,11 +1616,11 @@ lemma handleInterrupt_ccorres: apply (subst doMachineOp_bind) apply (rule maskInterrupt_empty_fail) apply (rule ackInterrupt_empty_fail) - apply (ctac add: maskInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: maskInterrupt_ccorres) apply (subst bind_return_unit[where f="doMachineOp (ackInterrupt irq)"]) - apply (ctac add: ackInterrupt_ccorres[unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wp apply (vcg exspec=ackInterrupt_modifies) @@ -1663,7 +1660,7 @@ lemma handleInterrupt_ccorres: apply (ctac (no_vcg) add: sendSignal_ccorres) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp+ apply (simp del: Collect_const) apply (rule ccorres_cond_true_seq) @@ -1672,7 +1669,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (simp add: maskIrqSignal_def) apply (ctac (no_vcg) add: maskInterrupt_ccorres) - apply (ctac add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac add: ackInterrupt_ccorres) apply wp apply (rule_tac P=\ and P'="{s. ret__int_' s = 0 \ cap_get_tag cap \ scast cap_notification_cap}" in ccorres_inst) apply (clarsimp simp: isCap_simps simp del: Collect_const) @@ -1684,7 +1681,7 @@ lemma handleInterrupt_ccorres: rule ccorres_cond_false_seq, simp, rule ccorres_cond_false_seq, simp, ctac (no_vcg) add: maskInterrupt_ccorres, - ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def], + ctac (no_vcg) add: ackInterrupt_ccorres, wp, simp)+) apply (wp getSlotCap_wp) apply simp @@ -1693,7 +1690,6 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_move_const_guards)+ apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) - apply (fold dc_def)[1] apply (rule ccorres_rhs_assoc)+ apply (ctac (no_vcg) add: timerTick_ccorres) apply (ctac (no_vcg) add: resetTimer_ccorres) @@ -1705,7 +1701,7 @@ lemma handleInterrupt_ccorres: apply (rule ccorres_cond_false_seq) apply (rule ccorres_cond_true_seq) apply (ctac add: ccorres_handleReservedIRQ) - apply (ctac (no_vcg) add: ackInterrupt_ccorres [unfolded dc_def]) + apply (ctac (no_vcg) add: ackInterrupt_ccorres) apply wp apply (vcg exspec=handleReservedIRQ_modifies) apply (simp add: sint_ucast_eq_uint is_down uint_up_ucast is_up ) diff --git a/proof/crefine/X64/TcbAcc_C.thy b/proof/crefine/X64/TcbAcc_C.thy index 8dd61964ef..31ce2674f0 100644 --- a/proof/crefine/X64/TcbAcc_C.thy +++ b/proof/crefine/X64/TcbAcc_C.thy @@ -177,7 +177,7 @@ lemma threadSet_corres_lemma: assumes spec: "\s. \\ \s. P s\ Call f {t. Q s t}" and mod: "modifies_heap_spec f" and rl: "\\ x t ko. \(\, x) \ rf_sr; Q x t; x \ P'; ko_at' ko thread \\ - \ (\\ksPSpace := ksPSpace \(thread \ KOTCB (g ko))\, + \ (\\ksPSpace := (ksPSpace \)(thread \ KOTCB (g ko))\, t\globals := globals x\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" and g: "\s x. \tcb_at' thread s; x \ P'; (s, x) \ rf_sr\ \ P x" shows "ccorres dc xfdc (tcb_at' thread) P' [] (threadSet g thread) (Call f)" @@ -206,7 +206,7 @@ lemma threadSet_corres_lemma: lemma threadSet_ccorres_lemma4: - "\ \s tcb. \ \ (Q s tcb) c {s'. 
(s \ksPSpace := ksPSpace s(thread \ injectKOS (F tcb))\, s') \ rf_sr}; + "\ \s tcb. \ \ (Q s tcb) c {s'. (s \ksPSpace := (ksPSpace s)(thread \ injectKOS (F tcb))\, s') \ rf_sr}; \s s' tcb tcb'. \ (s, s') \ rf_sr; P tcb; ko_at' tcb thread s; cslift s' (tcb_ptr_to_ctcb_ptr thread) = Some tcb'; ctcb_relation tcb tcb'; P' s ; s' \ R\ \ s' \ Q s tcb \ diff --git a/proof/crefine/X64/TcbQueue_C.thy b/proof/crefine/X64/TcbQueue_C.thy index d81d6b6baa..0e1843dd58 100644 --- a/proof/crefine/X64/TcbQueue_C.thy +++ b/proof/crefine/X64/TcbQueue_C.thy @@ -1090,8 +1090,8 @@ lemma cpspace_relation_ntfn_update_ntfn: and cp: "cpspace_ntfn_relation (ksPSpace s) (t_hrs_' (globals t))" and rel: "cnotification_relation (cslift t') ntfn' notification" and mpeq: "(cslift t' |` (- (tcb_ptr_to_ctcb_ptr ` qs))) = (cslift t |` (- (tcb_ptr_to_ctcb_ptr ` qs)))" - shows "cmap_relation (map_to_ntfns (ksPSpace s(ntfnptr \ KONotification ntfn'))) - (cslift t(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" + shows "cmap_relation (map_to_ntfns ((ksPSpace s)(ntfnptr \ KONotification ntfn'))) + ((cslift t)(Ptr ntfnptr \ notification)) Ptr (cnotification_relation (cslift t'))" using koat invs cp rel apply - apply (subst map_comp_update) @@ -1383,7 +1383,7 @@ lemma user_fpu_state_C_in_tcb_C_offset: "(typ_uinfo_t TYPE(user_fpu_state_C), n) \ td_set (typ_uinfo_t TYPE(tcb_C)) 0 \ n = 0" \ \Examine the fields of tcb_C.\ apply (simp add: typ_uinfo_t_def tcb_C_typ_info_unfold td_set_export_uinfo_eq td_set_adjust_ti_eq - image_comp image_Un apfst_comp o_def[where f=export_uinfo] + image_comp image_Un apfst_comp del: export_uinfo_typdesc_simp) apply (elim disjE) apply (all \drule td_set_image_field_lookup[rotated]; clarsimp\) @@ -1462,7 +1462,7 @@ lemma rf_sr_tcb_update_no_queue: (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes @@ -1512,7 +1512,7 @@ lemma rf_sr_tcb_update_not_in_queue: \ live' (KOTCB tcb); invs' s; (\x\ran tcb_cte_cases. (\(getF, setF). getF tcb' = getF tcb) x); ctcb_relation tcb' ctcb \ - \ (s\ksPSpace := ksPSpace s(thread \ KOTCB tcb')\, + \ (s\ksPSpace := (ksPSpace s)(thread \ KOTCB tcb')\, x\globals := globals s'\t_hrs_' := t_hrs_' (globals t)\\) \ rf_sr" unfolding rf_sr_def state_relation_def cstate_relation_def cpspace_relation_def apply (clarsimp simp: Let_def update_tcb_map_tos map_to_ctes_upd_tcb_no_ctes diff --git a/proof/crefine/X64/Tcb_C.thy b/proof/crefine/X64/Tcb_C.thy index 6baa4b5b45..2625d70415 100644 --- a/proof/crefine/X64/Tcb_C.thy +++ b/proof/crefine/X64/Tcb_C.thy @@ -97,8 +97,8 @@ lemma getMRs_rel_sched: lemma getObject_state: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbState_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. 
st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -156,8 +156,8 @@ lemma getObject_state: lemma threadGet_state: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) t' s); ko_at' ko t s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_state [where st=st]) apply (rule exI) @@ -167,8 +167,8 @@ lemma threadGet_state: lemma asUser_state: "\(x,s) \ fst (asUser t' f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ \ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) \ - fst (asUser t' f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) \ + fst (asUser t' f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -265,8 +265,8 @@ lemma asUser_state: lemma doMachineOp_state: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -299,7 +299,7 @@ lemma getMRs_rel_state: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s \ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbState_update (\_. st) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbState_update (\_. st) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -412,8 +412,8 @@ lemma setPriority_ccorres: apply (rule ccorres_pre_getCurThread) apply (rule_tac R = "\s. 
rv = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: rescheduleRequired_ccorres[unfolded dc_def]) - apply (ctac add: possibleSwitchTo_ccorres[unfolded dc_def]) + apply (ctac add: rescheduleRequired_ccorres) + apply (ctac add: possibleSwitchTo_ccorres) apply (rule ccorres_return_Skip') apply (wp isRunnable_wp) apply (wpsimp wp: hoare_drop_imps threadSet_valid_queues threadSet_valid_objs' @@ -437,7 +437,7 @@ lemma setPriority_ccorres: apply (frule (1) valid_objs'_maxDomain[where t=t]) apply (frule (1) valid_objs'_maxPriority[where t=t]) apply simp -done + done lemma setMCPriority_ccorres: "ccorres dc xfdc @@ -518,7 +518,7 @@ lemma cteInsert_cap_to'2: apply (simp add: cteInsert_def ex_nonz_cap_to'_def setUntypedCapAsFull_def) apply (rule hoare_vcg_ex_lift) apply (wp updateMDB_weak_cte_wp_at - updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp) + updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of) apply auto done @@ -619,7 +619,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply csymbr apply (simp add: liftE_bindE[symmetric] bindE_assoc getThreadBufferSlot_def - locateSlot_conv o_def + locateSlot_conv del: Collect_const) apply (simp add: liftE_bindE del: Collect_const) apply (ctac(no_vcg) add: cteDelete_ccorres) @@ -665,7 +665,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -674,7 +674,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+)[1] apply (wp (once)) apply (clarsimp simp: guard_is_UNIV_def) - apply (wpsimp wp: when_def static_imp_wp) + apply (wpsimp wp: when_def hoare_weak_lift_imp) apply (strengthen sch_act_wf_weak, wp) apply clarsimp apply wp @@ -688,7 +688,7 @@ lemma invokeTCB_ThreadControl_ccorres: tcb_at' target s \ ksCurDomain s \ maxDomain \ valid_queues' s \ fst (the priority) \ maxPriority)"]) apply (strengthen sch_act_wf_weak) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (clarsimp split: if_splits) apply (wp empty_fail_stateAssert hoare_case_option_wp | simp del: Collect_const)+ apply csymbr @@ -703,7 +703,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac (no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac (no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -713,7 +713,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply (simp add: when_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbBuffer_def size_of_def cte_level_bits_def @@ -732,7 +732,7 @@ lemma 
invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -741,7 +741,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule ccorres_return_CE, simp+) apply wp apply (clarsimp simp: guard_is_UNIV_def) - apply (wp hoare_vcg_if_lift2(1) static_imp_wp, strengthen sch_act_wf_weak; wp) + apply (wp hoare_vcg_if_lift2(1) hoare_weak_lift_imp, strengthen sch_act_wf_weak; wp) apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (simp add: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: ccap_relation_def cap_thread_cap_lift cap_to_H_def canonical_address_bitfield_extract_tcb) @@ -759,7 +759,7 @@ lemma invokeTCB_ThreadControl_ccorres: and Q'=UNIV in ccorres_rewrite_cond_sr) apply (clarsimp simp: Collect_const_mem rf_sr_ksCurThread) apply (rule ccorres_Cond_rhs; clarsimp) - apply (ctac(no_vcg) add: rescheduleRequired_ccorres[unfolded dc_def]) + apply (ctac(no_vcg) add: rescheduleRequired_ccorres) apply (rule ccorres_return_Skip') apply (rule ccorres_split_nothrow_novcg_dc) apply (rule ccorres_cond2[where R=\], simp add: Collect_const_mem) @@ -769,7 +769,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply wp apply (clarsimp simp: guard_is_UNIV_def) apply wpsimp - apply (wp static_imp_wp, strengthen sch_act_wf_weak, wp ) + apply (wp hoare_weak_lift_imp, strengthen sch_act_wf_weak, wp ) apply wp apply (clarsimp simp : guard_is_UNIV_def Collect_const_mem) apply (simp cong: conj_cong) @@ -806,7 +806,7 @@ lemma invokeTCB_ThreadControl_ccorres: simp add: o_def) apply (rule_tac P="is_aligned (fst (the buf)) msg_align_bits" in hoare_gen_asm) - apply (wp threadSet_ipcbuffer_trivial static_imp_wp + apply (wp threadSet_ipcbuffer_trivial hoare_weak_lift_imp | simp | strengthen invs_sch_act_wf' invs_valid_objs' invs_weak_sch_act_wf invs_queues invs_valid_queues' | wp hoare_drop_imps)+ @@ -847,11 +847,10 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -869,8 +868,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: Collect_True assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) @@ -878,14 +876,14 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem tcbVTable_def tcbVTableSlot_def Kernel_C.tcbVTable_def cte_level_bits_def size_of_def 
option_to_0_def objBits_defs mask_def) apply csymbr apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def Collect_const_mem canonical_address_bitfield_extract_tcb) apply simp @@ -910,12 +908,11 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (ctac(no_vcg) add: cteDelete_ccorres) apply (simp add: liftE_bindE Collect_False ccorres_cond_iffs - dc_def del: Collect_const) apply ((rule ccorres_split_nothrow_novcg_dc[rotated], assumption) | rule ccorres_rhs_assoc2)+ apply (simp add: conj_comms pred_conj_def) - apply (simp add: o_def cong: conj_cong option.case_cong) + apply (simp cong: conj_cong option.case_cong) apply (wp checked_insert_tcb_invs' hoare_case_option_wp checkCap_inv [where P="tcb_at' p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] @@ -936,8 +933,7 @@ lemma invokeTCB_ThreadControl_ccorres: apply (rule checkCapAt_ccorres2) apply ceqv apply csymbr - apply (simp add: Collect_True assertDerived_def bind_assoc - ccorres_cond_iffs dc_def[symmetric] + apply (simp add: Collect_True assertDerived_def bind_assoc ccorres_cond_iffs del: Collect_const) apply (rule ccorres_symb_exec_l) apply (ctac add: cteInsert_ccorres) @@ -945,14 +941,14 @@ lemma invokeTCB_ThreadControl_ccorres: apply csymbr apply (simp add: Collect_False ccorres_cond_iffs del: Collect_const) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem Kernel_C.tcbCTable_def tcbCTableSlot_def if_1_0_0 cte_level_bits_def size_of_def option_to_0_def mask_def objBits_defs) apply csymbr apply (simp add: Collect_False del: Collect_const) apply (rule ccorres_cond_false) - apply (rule ccorres_return_Skip[unfolded dc_def]) + apply (rule ccorres_return_Skip) apply (clarsimp simp: guard_is_UNIV_def ccap_relation_def cap_thread_cap_lift cap_to_H_def Collect_const_mem canonical_address_bitfield_extract_tcb) apply simp @@ -969,13 +965,13 @@ lemma invokeTCB_ThreadControl_ccorres: apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def tcbCTableSlot_def Kernel_C.tcbCTable_def cte_level_bits_def size_of_def word_sle_def option_to_0_def cintr_def objBits_defs mask_def) apply (simp add: conj_comms) apply (wp hoare_case_option_wp threadSet_invs_trivial - threadSet_cap_to' static_imp_wp | simp)+ + threadSet_cap_to' hoare_weak_lift_imp | simp)+ apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem) apply (clarsimp simp: inQ_def) apply (subst is_aligned_neg_mask_eq) @@ -1002,7 +998,7 @@ lemma setupReplyMaster_ccorres: apply (cinit lift: thread_') apply (rule ccorres_move_array_assertion_tcb_ctes ccorres_Guard_Seq)+ apply ctac - apply (simp del: Collect_const add: dc_def[symmetric]) + apply (simp del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) apply (rule_tac F="\rv'. (rv' = scast cap_null_cap) = (cteCap oldCTE = NullCap)" @@ -1148,10 +1144,10 @@ lemma postModifyRegisters_ccorres: apply (simp add: if_distrib[where f="asUser t" for t] asUser_return) apply (rule_tac R="\s. 
ct = ksCurThread s" in ccorres_cond2) apply (clarsimp simp: rf_sr_ksCurThread) - apply (ctac add: setRegister_ccorres[unfolded dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_add_return2) apply (rule ccorres_stateAssert) - apply (rule ccorres_return_Skip'[unfolded dc_def]) + apply (rule ccorres_return_Skip') by simp+ lemma invokeTCB_CopyRegisters_ccorres: @@ -1227,7 +1223,7 @@ lemma invokeTCB_CopyRegisters_ccorres: apply (rule ccorres_pre_getCurThread) apply (ctac add: postModifyRegisters_ccorres) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rvd = ksCurThread s" + apply (rule_tac R="\s. rvc = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp @@ -1293,8 +1289,8 @@ lemma invokeTCB_WriteRegisters_ccorres_helper: lemma doMachineOp_context: "(rv,s') \ fst (doMachineOp f s) \ - (rv,s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (doMachineOp f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + (rv,s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (doMachineOp f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (clarsimp simp: doMachineOp_def split_def in_monad select_f_def) apply fastforce done @@ -1303,8 +1299,8 @@ lemma doMachineOp_context: lemma getObject_context: " \(x, s') \ fst (getObject t' s); ko_at' ko t s\ \ (if t = t' then tcbContext_update (\_. st) x else x, - s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\) - \ fst (getObject t' (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbContext_update (\_. st) ko))\))" + s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\) + \ fst (getObject t' (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbContext_update (\_. st) ko))\))" apply (simp split: if_split) apply (rule conjI) apply clarsimp @@ -1363,8 +1359,8 @@ lemma getObject_context: lemma threadGet_context: "\ (uc, s') \ fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) s); ko_at' ko t s; t \ ksCurThread s \ \ - (uc, s'\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (uc, s'\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (threadGet (atcbContextGet o tcbArch) (ksCurThread s) (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: threadGet_def liftM_def in_monad) apply (drule (1) getObject_context [where st=st]) apply (rule exI) @@ -1376,8 +1372,8 @@ done lemma asUser_context: "\(x,s) \ fst (asUser (ksCurThread s) f s); ko_at' ko t s; \s. \(=) s\ f \\_. (=) s\ ; t \ ksCurThread s\ \ - (x,s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ - fst (asUser (ksCurThread s) f (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\))" + (x,s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\) \ + fst (asUser (ksCurThread s) f (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. 
atcbContextSet st (tcbArch ko)) ko))\))" apply (clarsimp simp: asUser_def in_monad select_f_def) apply (frule use_valid, rule threadGet_inv [where P="(=) s"], rule refl) apply (frule use_valid, assumption, rule refl) @@ -1448,7 +1444,7 @@ lemma getMRs_rel_context: "\getMRs_rel args buffer s; (cur_tcb' and case_option \ valid_ipc_buffer_ptr' buffer) s; ko_at' ko t s ; t \ ksCurThread s\ \ - getMRs_rel args buffer (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" + getMRs_rel args buffer (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbArch_update (\_. atcbContextSet st (tcbArch ko)) ko))\)" apply (clarsimp simp: getMRs_rel_def) apply (rule exI, erule conjI) apply (subst (asm) det_wp_use, rule det_wp_getMRs) @@ -1508,7 +1504,7 @@ lemma asUser_getMRs_rel: apply (erule getMRs_rel_context, simp) apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def projectKOs) apply simp -done + done lemma asUser_sysargs_rel: @@ -1533,7 +1529,7 @@ lemma asUser_setRegister_ko_at': done lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: - notes static_imp_wp [wp] word_less_1[simp del] + notes hoare_weak_lift_imp [wp] word_less_1[simp del] shows "ccorres (cintr \ (\rv rv'. rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs' and tcb_at' dst and ex_nonz_cap_to' dst and sch_act_simple @@ -1640,14 +1636,14 @@ lemma invokeTCB_WriteRegisters_ccorres[where S=UNIV]: apply (rule ccorres_split_nothrow_novcg) apply (rule ccorres_when[where R=\]) apply (simp add: from_bool_0 Collect_const_mem) - apply (rule_tac xf'="\_. 0" in ccorres_call) - apply (rule restart_ccorres) + apply (rule_tac xf'=Corres_C.xfdc in ccorres_call) + apply (rule restart_ccorres) + apply simp apply simp - apply (simp add: xfdc_def) apply simp apply (rule ceqv_refl) apply (rule ccorres_split_nothrow_novcg_dc) - apply (rule_tac R="\s. rv = ksCurThread s" + apply (rule_tac R="\s. self = ksCurThread s" in ccorres_when) apply (clarsimp simp: rf_sr_ksCurThread) apply clarsimp @@ -1827,6 +1823,7 @@ shows apply (rule ccorres_rhs_assoc)+ apply csymbr apply (ctac add: lookupIPCBuffer_ccorres) + apply (rename_tac state destIPCBuffer ipcBuffer) apply (ctac add: setRegister_ccorres) apply (rule ccorres_stateAssert) apply (rule ccorres_rhs_assoc2) @@ -1886,15 +1883,15 @@ shows apply (rule bind_apply_cong[OF _ refl]) apply (rule_tac n1="min (unat n_frameRegisters - unat n_msgRegisters) (unat n)" in fun_cong [OF mapM_x_split_append]) - apply (rule_tac P="rva \ Some 0" in ccorres_gen_asm) - apply (subgoal_tac "(ipcBuffer = NULL) = (rva = None)") + apply (rule_tac P="destIPCBuffer \ Some 0" in ccorres_gen_asm) + apply (subgoal_tac "(ipcBuffer = NULL) = (destIPCBuffer = None)") prefer 2 apply (clarsimp simp: option_to_ptr_def option_to_0_def split: option.split_asm) apply (simp add: bind_assoc del: Collect_const) apply (rule_tac xf'=i_' and r'="\_ rv. unat rv = min (unat n_frameRegisters) (min (unat n) - (case rva of None \ unat n_msgRegisters + (case destIPCBuffer of None \ unat n_msgRegisters | _ \ unat n_frameRegisters))" in ccorres_split_nothrow_novcg) apply (rule ccorres_Cond_rhs) @@ -1902,7 +1899,7 @@ shows rule_tac F="\m s. obj_at' (\tcb. 
map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (X64_H.frameRegisters @ X64_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="unat n_msgRegisters" in ccorres_mapM_x_while') @@ -2011,11 +2008,10 @@ shows apply (rename_tac i_c, rule_tac P="i_c = 0" in ccorres_gen_asm2) apply (simp add: drop_zip del: Collect_const) apply (rule ccorres_Cond_rhs) - apply (simp del: Collect_const) apply (rule_tac F="\m s. obj_at' (\tcb. map ((user_regs o atcbContextGet o tcbArch) tcb) (genericTake n (X64_H.frameRegisters @ X64_H.gpRegisters)) = reply) target s - \ valid_ipc_buffer_ptr' (the rva) s \ valid_pspace' s" + \ valid_ipc_buffer_ptr' (the destIPCBuffer) s \ valid_pspace' s" and i="0" in ccorres_mapM_x_while') apply (clarsimp simp: less_diff_conv drop_zip) apply (rule ccorres_guard_imp2) @@ -2088,11 +2084,11 @@ shows apply (simp add: min_less_iff_disj less_imp_diff_less) apply (simp add: drop_zip n_gpRegisters_def) apply (elim disjE impCE) - apply (clarsimp simp: mapM_x_Nil) + apply (clarsimp simp: mapM_x_Nil cong: ccorres_all_cong) apply (rule ccorres_return_Skip') - apply (simp add: linorder_not_less word_le_nat_alt - drop_zip mapM_x_Nil n_frameRegisters_def - min.absorb1 n_msgRegisters_def) + apply (simp add: linorder_not_less word_le_nat_alt drop_zip + mapM_x_Nil n_frameRegisters_def n_msgRegisters_def + cong: ccorres_all_cong) apply (rule ccorres_guard_imp2, rule ccorres_return_Skip') apply simp apply ceqv @@ -2124,15 +2120,15 @@ shows apply (clarsimp simp: min_def iffD2 [OF mask_eq_iff_w2p] word_size word_less_nat_alt split: if_split_asm dest!: word_unat.Rep_inverse') - apply simp - apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift static_imp_wp + apply (simp add: pred_conj_def) + apply (wp mapM_x_wp' sch_act_wf_lift valid_queues_lift hoare_weak_lift_imp tcb_in_cur_domain'_lift) apply (simp add: n_frameRegisters_def n_msgRegisters_def guard_is_UNIV_def) apply simp apply (rule mapM_x_wp') apply (rule hoare_pre) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply clarsimp apply (clarsimp simp: guard_is_UNIV_def Collect_const_mem @@ -2141,7 +2137,7 @@ shows msgMaxLength_def msgLengthBits_def word_less_nat_alt unat_of_nat) apply (wp (once) hoare_drop_imps) - apply (wp asUser_obj_at'[where t'=target] static_imp_wp + apply (wp asUser_obj_at'[where t'=target] hoare_weak_lift_imp asUser_valid_ipc_buffer_ptr') apply (vcg exspec=setRegister_modifies) apply simp @@ -2161,12 +2157,12 @@ shows apply (simp cong: rev_conj_cong) apply wp apply (wp asUser_inv mapM_wp' getRegister_inv - asUser_get_registers[simplified] static_imp_wp)+ + asUser_get_registers[simplified] hoare_weak_lift_imp)+ apply (rule hoare_strengthen_post, rule asUser_get_registers) apply (clarsimp simp: obj_at'_def genericTake_def frame_gp_registers_convs) apply arith - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply simp apply (rule ccorres_inst[where P=\ and P'=UNIV], simp) apply (simp add: performTransfer_def) @@ -2182,7 +2178,7 @@ shows apply (vcg exspec=suspend_modifies) apply vcg apply (rule conseqPre, vcg, clarsimp) - apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def dc_def + apply (clarsimp simp: rf_sr_ksCurThread ct_in_state'_def split: if_split) done @@ -2247,7 +2243,8 @@ lemma decodeReadRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule 
ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2258,13 +2255,13 @@ lemma decodeReadRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2359,7 +2356,8 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getCurThread) apply (rule ccorres_cond_seq) - apply (rule_tac R="\s. rv = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = rv" in ccorres_cond_both) + apply (rule_tac R="\s. self = ksCurThread s \ isThreadCap cp" and P="\s. capTCBPtr cp = self" + in ccorres_cond_both) apply clarsimp apply (frule rf_sr_ksCurThread) apply clarsimp @@ -2370,13 +2368,13 @@ lemma decodeWriteRegisters_ccorres: apply (drule_tac t="ksCurThread s" in sym) apply simp apply simp - apply (rule_tac P="capTCBPtr cp = rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp = self" in ccorres_gen_asm) apply simp apply (simp add: throwError_bind invocationCatch_def cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule syscall_error_throwError_ccorres_n) apply (simp add: syscall_error_to_H_cases) - apply (rule_tac P="capTCBPtr cp \ rv" in ccorres_gen_asm) + apply (rule_tac P="capTCBPtr cp \ self" in ccorres_gen_asm) apply (simp add: returnOk_bind) apply (simp add: ccorres_invocationCatch_Inr del: Collect_const) apply (ctac add: setThreadState_ccorres) @@ -2384,7 +2382,7 @@ lemma decodeWriteRegisters_ccorres: apply (simp add: performInvocation_def) apply (ctac(no_vcg) add: invokeTCB_WriteRegisters_ccorres [where args=args and someNum="unat (args ! 1)"]) - apply (simp add: dc_def[symmetric] o_def) + apply simp apply (rule ccorres_alternative2, rule ccorres_return_CE, simp+) apply (rule ccorres_return_C_errorE, simp+)[1] apply wp[1] @@ -2404,7 +2402,7 @@ lemma decodeWriteRegisters_ccorres: WriteRegisters_resume_def word_sle_def word_sless_def numeral_eqs) apply (frule arg_cong[where f="\x. 
unat (of_nat x :: machine_word)"], - simp(no_asm_use) only: word_unat.Rep_inverse o_def, + simp(no_asm_use) only: word_unat.Rep_inverse, simp) apply (rule conjI) apply clarsimp @@ -2619,7 +2617,7 @@ lemma slotCapLongRunningDelete_ccorres: apply (simp add: case_Null_If del: Collect_const) apply (rule ccorres_pre_getCTE) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_cross_over_guard) apply (rule ccorres_symb_exec_r) apply (rule ccorres_if_lhs) @@ -2640,7 +2638,7 @@ lemma slotCapLongRunningDelete_ccorres: apply vcg apply (simp del: Collect_const) apply (rule ccorres_move_c_guard_cte) - apply (rule_tac P="cte_wp_at' ((=) rv) slot" + apply (rule_tac P="cte_wp_at' ((=) cte) slot" in ccorres_from_vcg_throws[where P'=UNIV]) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of return_def) @@ -3202,7 +3200,6 @@ lemma decodeSetMCPriority_ccorres: >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetMCPriority_'proc)" supply Collect_const[simp del] - supply dc_simp[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetMCPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3270,8 +3267,7 @@ lemma decodeSetMCPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3336,7 +3332,7 @@ lemma decodeSetPriority_ccorres: (decodeSetPriority args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetPriority_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetPriority_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3404,8 +3400,7 @@ lemma decodeSetPriority_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; simp) @@ -3483,7 +3478,7 @@ lemma decodeSetSchedParams_ccorres: (decodeSetSchedParams args cp extraCaps >>= invocationCatch thread isBlocking isCall InvokeTCB) (Call decodeSetSchedParams_'proc)" - supply Collect_const[simp del] dc_simp[simp del] + supply Collect_const[simp del] apply (cinit' lift: cap_' length___unsigned_long_' current_extra_caps_' buffer_' simp: decodeSetSchedParams_def) apply (simp cong: StateSpace.state.fold_congs globals.fold_congs) apply (rule ccorres_rhs_assoc2) @@ -3570,8 +3565,7 @@ lemma decodeSetSchedParams_ccorres: apply csymbr apply csymbr apply (ctac (no_vcg) add: invokeTCB_ThreadControl_ccorres) - (* HACK: delete rules from the simpset to avoid the RVRs getting out of sync *) - apply (clarsimp simp del: intr_and_se_rel_simps comp_apply dc_simp) + apply clarsimp apply (rule ccorres_alternative2) apply (rule ccorres_return_CE; simp) apply (rule ccorres_return_C_errorE; 
simp) @@ -3803,7 +3797,7 @@ lemma bindNotification_ccorres: (Call bindNotification_'proc)" apply (cinit lift: tcb_' ntfnPtr_' simp: bindNotification_def) apply (rule ccorres_symb_exec_l [OF _ get_ntfn_inv' _ empty_fail_getNotification]) - apply (rule_tac P="invs' and ko_at' rv ntfnptr and tcb_at' tcb" and P'=UNIV + apply (rule_tac P="invs' and ko_at' ntfn ntfnptr and tcb_at' tcb" and P'=UNIV in ccorres_split_nothrow_novcg) apply (rule ccorres_from_vcg[where rrel=dc and xf=xfdc]) apply (rule allI, rule conseqPre, vcg) @@ -3823,7 +3817,7 @@ lemma bindNotification_ccorres: apply (rule cpspace_relation_ntfn_update_ntfn, assumption+) apply (clarsimp simp: cnotification_relation_def Let_def mask_def [where n=2] NtfnState_Waiting_def) - apply (case_tac "ntfnObj rv") + apply (case_tac "ntfnObj ntfn") apply ((clarsimp simp: option_to_ctcb_ptr_canonical[OF invs_pspace_canonical'])+)[3] apply (auto simp: option_to_ctcb_ptr_def objBits_simps' bindNTFN_alignment_junk)[1] @@ -3837,7 +3831,7 @@ lemma bindNotification_ccorres: apply ceqv apply (rule ccorres_move_c_guard_tcb) apply (simp add: setBoundNotification_def) - apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3[unfolded dc_def]) + apply (rule_tac P'=\ and P=\ in threadSet_ccorres_lemma3) apply vcg apply simp apply (erule (1) rf_sr_tcb_update_no_queue2, @@ -3903,7 +3897,7 @@ lemma decodeUnbindNotification_ccorres: apply (rule ccorres_Guard_Seq) apply (simp add: liftE_bindE bind_assoc) apply (rule ccorres_pre_getBoundNotification) - apply (rule_tac P="\s. rv \ Some 0" in ccorres_cross_over_guard) + apply (rule_tac P="\s. ntfn \ Some 0" in ccorres_cross_over_guard) apply (simp add: bindE_bind_linearise) apply wpc apply (simp add: bindE_bind_linearise[symmetric] @@ -4300,7 +4294,7 @@ lemma decodeSetSpace_ccorres: apply (simp add: Collect_False del: Collect_const) apply csymbr apply csymbr - apply (simp add: cnode_cap_case_if cap_get_tag_isCap dc_def[symmetric] + apply (simp add: cnode_cap_case_if cap_get_tag_isCap del: Collect_const) apply (rule ccorres_Cond_rhs_Seq) apply (simp add: injection_handler_throwError @@ -4435,7 +4429,7 @@ lemma decodeSetSpace_ccorres: done lemma invokeTCB_SetTLSBase_ccorres: - notes static_imp_wp [wp] + notes hoare_weak_lift_imp [wp] shows "ccorres (cintr \ (\rv rv'. 
rv = [])) (liftxf errstate id (K ()) ret__unsigned_long_') (invs') @@ -4446,7 +4440,7 @@ lemma invokeTCB_SetTLSBase_ccorres: apply (cinit lift: thread_' tls_base_') apply (simp add: liftE_def bind_assoc del: Collect_const) - apply (ctac add: setRegister_ccorres[simplified dc_def]) + apply (ctac add: setRegister_ccorres) apply (rule ccorres_pre_getCurThread) apply (rename_tac cur_thr) apply (rule ccorres_split_nothrow_novcg_dc) diff --git a/proof/crefine/X64/VSpace_C.thy b/proof/crefine/X64/VSpace_C.thy index a8d5c8c9ab..e8d8827bfb 100644 --- a/proof/crefine/X64/VSpace_C.thy +++ b/proof/crefine/X64/VSpace_C.thy @@ -288,8 +288,8 @@ lemma handleVMFault_ccorres: apply simp apply terminates_trivial apply (drule sym, clarsimp) - apply (wpc; simp add: vm_fault_type_from_H_def X86InstructionFault_def X86DataFault_def - bind_assoc) + apply (corres_cases; simp add: vm_fault_type_from_H_def X86InstructionFault_def X86DataFault_def + bind_assoc) apply (rule returnVMFault_corres; clarsimp simp: exception_defs mask_twice lift_rv_def)+ apply wpsimp+ @@ -610,7 +610,7 @@ lemma lookupPDPTSlot_ccorres: apply (rule corres_symb_exec_lookupPML4Slot'; rename_tac pml4e_ptr) apply (rule corres_symb_exec_unknown_r; rename_tac undefined) apply (rule corres_symb_exec_pml4e_ptr_get_present'; rename_tac present) - apply wpc + apply corres_cases apply (rule_tac F="present = 0" in corres_gen_asm2) apply (simp add: bind_assoc) apply (rule corres_symb_exec_lookup_fault_missing_capability_new'; rename_tac lookup_fault) @@ -654,7 +654,7 @@ lemma lookupPDPTSlot_ccorres': apply csymbr apply csymbr apply (rule ccorres_abstract_cleanup) - apply (rule_tac P="(ret__unsigned_longlong = 0) = (rv = X64_H.InvalidPML4E)" + apply (rule_tac P="(ret__unsigned_longlong = 0) = (pml4e = X64_H.InvalidPML4E)" in ccorres_gen_asm2) apply (wpc; ccorres_rewrite) apply (rule_tac P=\ and P' =UNIV in ccorres_from_vcg_throws) @@ -666,9 +666,9 @@ lemma lookupPDPTSlot_ccorres': apply (thin_tac "_ = PDPointerTablePML4E _ _ _ _ _ _") apply (simp add: bind_liftE_distrib liftE_bindE returnOk_liftE[symmetric]) apply (rule ccorres_stateAssert) - apply (rule_tac P="pd_pointer_table_at' (ptrFromPAddr (pml4eTable rv)) - and ko_at' rv (lookup_pml4_slot pm vptr) - and K (isPDPointerTablePML4E rv)" + apply (rule_tac P="pd_pointer_table_at' (ptrFromPAddr (pml4eTable pml4e)) + and ko_at' pml4e (lookup_pml4_slot pm vptr) + and K (isPDPointerTablePML4E pml4e)" and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) @@ -892,7 +892,7 @@ lemma findVSpaceForASID_ccorres: apply clarsimp apply (rule_tac P="valid_arch_state' and _" and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: throwError_def return_def bindE_def NonDetMonad.lift_def + apply (clarsimp simp: throwError_def return_def bindE_def Nondet_Monad.lift_def EXCEPTION_NONE_def EXCEPTION_LOOKUP_FAULT_def lookup_fault_lift_invalid_root) apply (frule rf_sr_asidTable_None[where asid=asid, THEN iffD2], simp add: asid_wf_def3, assumption, assumption) @@ -1190,12 +1190,12 @@ lemma setVMRoot_ccorres: apply csymbr apply (simp add: cap_get_tag_isCap_ArchObject2) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: throwError_def catch_def dc_def[symmetric]) + apply (simp add: throwError_def catch_def) apply (rule ccorres_cond_true_seq, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_x64KSSKIMPML4) apply csymbr - apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState[unfolded comp_def]) + apply (rule 
ccorres_pre_gets_x64KSSKIMPML4_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentUserVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) @@ -1206,13 +1206,13 @@ lemma setVMRoot_ccorres: apply (rule_tac P="to_bool (capPML4IsMapped_CL (cap_pml4_cap_lift vRootCap')) = (capPML4MappedASID (capCap vRootCap) \ None)" in ccorres_gen_asm2) - apply (clarsimp simp: to_bool_def dc_def[symmetric]) + apply (clarsimp simp: to_bool_def) apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: throwError_def catch_def dc_def[symmetric], ccorres_rewrite) + apply (simp add: throwError_def catch_def, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_x64KSSKIMPML4) apply csymbr - apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentUserVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) @@ -1232,11 +1232,11 @@ lemma setVMRoot_ccorres: in ccorres_gen_asm2) apply simp apply (rule ccorres_Cond_rhs_Seq) - apply (simp add: whenE_def throwError_def dc_def[symmetric], ccorres_rewrite) + apply (simp add: whenE_def throwError_def, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_x64KSSKIMPML4) apply csymbr - apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentUserVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) @@ -1245,7 +1245,7 @@ lemma setVMRoot_ccorres: apply (csymbr, rename_tac base_addr) apply (rule ccorres_symb_exec_r) apply (ctac add: getCurrentUserCR3_ccorres, rename_tac currentCR3 currentCR3') - apply (rule ccorres_if_bind, rule ccorres_if_lhs; simp add: dc_def[symmetric]) + apply (rule ccorres_if_bind, rule ccorres_if_lhs; simp) apply (rule ccorres_cond_true) apply (ctac add: setCurrentUserCR3_ccorres) apply (rule ccorres_cond_false) @@ -1254,11 +1254,11 @@ lemma setVMRoot_ccorres: apply vcg apply vcg apply (rule conseqPre, vcg, clarsimp) - apply (rule ccorres_cond_true_seq, simp add: dc_def[symmetric], ccorres_rewrite) + apply (rule ccorres_cond_true_seq, simp, ccorres_rewrite) apply (rule ccorres_rhs_assoc) apply (rule ccorres_h_t_valid_x64KSSKIMPML4) apply csymbr - apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState[unfolded comp_def]) + apply (rule ccorres_pre_gets_x64KSSKIMPML4_ksArchState) apply (rule ccorres_add_return2) apply (ctac (no_vcg) add: setCurrentUserVSpaceRoot_ccorres) apply (rule ccorres_return_void_C) @@ -1329,12 +1329,12 @@ lemma setRegister_ccorres: (asUser thread (setRegister reg val)) (Call setRegister_'proc)" apply (cinit' lift: thread_' reg_' w_') - apply (simp add: asUser_def dc_def[symmetric] split_def split del: if_split) + apply (simp add: asUser_def split_def) apply (rule ccorres_pre_threadGet) apply (rule ccorres_Guard) apply (simp add: setRegister_def simpler_modify_def exec_select_f_singleton) - apply (rule_tac P="\tcb. (atcbContextGet o tcbArch) tcb = rv" - in threadSet_ccorres_lemma2 [unfolded dc_def]) + apply (rule_tac P="\tcb. 
(atcbContextGet o tcbArch) tcb = uc" + in threadSet_ccorres_lemma2) apply vcg apply (clarsimp simp: setRegister_def HaskellLib_H.runState_def simpler_modify_def typ_heap_simps) @@ -1365,8 +1365,6 @@ lemma msgRegisters_ccorres: (* usually when we call setMR directly, we mean to only set a registers, which will fit in actual registers *) lemma setMR_as_setRegister_ccorres: - notes dc_simp[simp del] - shows "ccorres (\rv rv'. rv' = of_nat offset + 1) ret__unsigned_' (tcb_at' thread and K (TCB_H.msgRegisters ! offset = reg \ offset < length msgRegisters)) (UNIV \ \\reg___unsigned_long = val\ @@ -1383,7 +1381,7 @@ lemma setMR_as_setRegister_ccorres: apply (ctac add: setRegister_ccorres) apply (rule ccorres_from_vcg_throws[where P'=UNIV and P=\]) apply (rule allI, rule conseqPre, vcg) - apply (clarsimp simp: dc_def return_def) + apply (clarsimp simp: return_def) apply (rule hoare_post_taut[of \]) apply (vcg exspec=setRegister_modifies) apply (clarsimp simp: n_msgRegisters_def length_of_msgRegisters not_le conj_commute) @@ -1696,14 +1694,15 @@ lemma modeUnmapPage_ccorres: rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (simp only: bindE_assoc[symmetric]) apply (rule ccorres_splitE_novcg) - apply (clarsimp simp: inl_rrel_def) - apply (rule checkMappingPPtr_pdpte_ccorres[simplified inl_rrel_def]) - apply (rule conseqPre, vcg) - apply (clarsimp simp: typ_heap_simps') - apply (intro conjI impI) - apply (auto simp: pdpte_pdpte_1g_lift_def pdpte_lift_def cpdpte_relation_def - isHugePagePDPTE_def pdpteFrame_def - split: if_split_asm pdpte.split_asm pdpte.split)[5] + apply (rule ccorres_rel_imp2) + apply (rule checkMappingPPtr_pdpte_ccorres) + apply (rule conseqPre, vcg) + apply (clarsimp simp: typ_heap_simps') + apply (auto simp: pdpte_pdpte_1g_lift_def pdpte_lift_def cpdpte_relation_def + isHugePagePDPTE_def pdpteFrame_def + split: if_split_asm pdpte.split_asm pdpte.split)[1] + apply fastforce + apply (fastforce simp: inl_rrel_def split: sum.splits) apply ceqv apply csymbr apply (rule ccorres_add_returnOk) @@ -1750,11 +1749,11 @@ lemma unmapPage_ccorres: apply (rule ccorres_splitE_novcg[where r'=dc and xf'=xfdc]) \ \X64SmallPage\ apply (rule ccorres_Cond_rhs) - apply (simp add: framesize_to_H_def dc_def[symmetric]) + apply (simp add: framesize_to_H_def) apply (rule ccorres_rhs_assoc)+ apply (ctac add: lookupPTSlot_ccorres) apply (rename_tac pt_slot pt_slot') - apply (simp add: dc_def[symmetric]) + apply simp apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) apply (simp only: bindE_assoc[symmetric]) @@ -1768,11 +1767,9 @@ lemma unmapPage_ccorres: split: if_split_asm pte.split_asm) apply (rule ceqv_refl) apply (simp add: unfold_checkMapping_return liftE_liftM - Collect_const[symmetric] dc_def[symmetric] del: Collect_const) apply (rule ccorres_handlers_weaken2) apply csymbr - apply (simp add: dc_def[symmetric]) apply (rule storePTE_Basic_ccorres) apply (simp add: cpte_relation_def Let_def) apply wp @@ -1786,12 +1783,11 @@ lemma unmapPage_ccorres: apply (vcg exspec=lookupPTSlot_modifies) \ \X64LargePage\ apply (rule ccorres_Cond_rhs) - apply (simp add: framesize_to_H_def dc_def[symmetric] - del: Collect_const) + apply (simp add: framesize_to_H_def del: Collect_const) apply (rule ccorres_rhs_assoc)+ apply (ctac add: lookupPDSlot_ccorres) apply (rename_tac pd_slot pd_slot') - apply (simp add: dc_def[symmetric]) + apply simp apply csymbr apply (rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2, rule ccorres_rhs_assoc2) @@ -1806,11 +1802,9 @@ 
lemma unmapPage_ccorres: split: if_split_asm pde.split_asm) apply (rule ceqv_refl) apply (simp add: unfold_checkMapping_return liftE_liftM - Collect_const[symmetric] dc_def[symmetric] del: Collect_const) apply (rule ccorres_handlers_weaken2) apply csymbr - apply (simp add: dc_def[symmetric]) apply (rule storePDE_Basic_ccorres) apply (simp add: cpde_relation_def Let_def) apply wp @@ -1823,7 +1817,7 @@ lemma unmapPage_ccorres: apply simp apply (vcg exspec=lookupPDSlot_modifies) \ \X64HugePage\ - apply (simp add: framesize_to_H_def dc_def[symmetric]) + apply (simp add: framesize_to_H_def) apply (rule ccorres_add_return2) apply (ctac add: modeUnmapPage_ccorres) apply (rule ccorres_from_vcg_might_throw[where P="\" and P'=UNIV]) @@ -1838,13 +1832,13 @@ lemma unmapPage_ccorres: apply clarsimp apply ccorres_rewrite apply (clarsimp simp: liftE_liftM) - apply (ctac add: invalidateTranslationSingleASID_ccorres[simplified dc_def]) + apply (ctac add: invalidateTranslationSingleASID_ccorres) apply clarsimp apply clarsimp apply (clarsimp simp: guard_is_UNIV_def conj_comms tcb_cnode_index_defs) apply (simp add: throwError_def) apply (rule ccorres_split_throws) - apply (rule ccorres_return_void_C[unfolded dc_def]) + apply (rule ccorres_return_void_C) apply vcg apply wpsimp apply (simp add: Collect_const_mem) @@ -2064,7 +2058,7 @@ lemma performPageInvocationUnmap_ccorres: apply (rule ccorres_rhs_assoc) apply (drule_tac s=cap in sym, simp) (* schematic ugliness *) apply ccorres_rewrite - apply (ctac add: performPageInvocationUnmap_ccorres'[simplified K_def, simplified]) + apply (ctac add: performPageInvocationUnmap_ccorres') apply (rule_tac P=\ and P'=UNIV in ccorres_from_vcg_throws) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: return_def) @@ -2343,13 +2337,13 @@ lemma performASIDPoolInvocation_ccorres: apply (rule ccorres_rhs_assoc2) apply (rule_tac ccorres_split_nothrow [where r'=dc and xf'=xfdc]) apply (simp add: updateCap_def) - apply (rule_tac A="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPML4Cap' rv \ asid \ mask asid_bits \ asid \ ucast asidInvalid)" + apply (rule_tac A="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPML4Cap' oldcap \ asid \ mask asid_bits \ asid \ ucast asidInvalid)" and A'=UNIV in ccorres_guard_imp2) apply (rule ccorres_pre_getCTE) - apply (rule_tac P="cte_wp_at' ((=) rv o cteCap) ctSlot - and K (isPML4Cap' rv \ asid \ mask asid_bits \ asid \ ucast asidInvalid) - and cte_wp_at' ((=) rva) ctSlot" + apply (rule_tac P="cte_wp_at' ((=) oldcap o cteCap) ctSlot + and K (isPML4Cap' oldcap \ asid \ mask asid_bits \ asid \ ucast asidInvalid) + and cte_wp_at' ((=) rv) ctSlot" and P'=UNIV in ccorres_from_vcg) apply (rule allI, rule conseqPre, vcg) apply (clarsimp simp: cte_wp_at_ctes_of) @@ -2420,7 +2414,7 @@ lemma performASIDPoolInvocation_ccorres: apply (rule conseqPre, vcg) apply clarsimp apply (wpsimp wp: liftM_wp) - apply (wpsimp wp: getASID_wp simp: o_def inv_def) + apply (wpsimp wp: getASID_wp simp: inv_def) apply (clarsimp simp: empty_fail_getObject) apply (wpsimp wp: udpateCap_asidpool' hoare_vcg_all_lift hoare_vcg_imp_lift') apply vcg diff --git a/proof/crefine/autocorres-test/AutoCorresTest.thy b/proof/crefine/autocorres-test/AutoCorresTest.thy index e2ccc77cb8..01ee243362 100644 --- a/proof/crefine/autocorres-test/AutoCorresTest.thy +++ b/proof/crefine/autocorres-test/AutoCorresTest.thy @@ -77,7 +77,7 @@ lemma reorder_gets: (do g; x \ gets f; h x od)" - by (fastforce simp: bind_def' NonDetMonadVCG.valid_def gets_def get_def return_def) + by (fastforce simp: 
bind_def' Nondet_VCG.valid_def gets_def get_def return_def) thm (* no arguments, no precondition, dc return *) @@ -100,7 +100,7 @@ lemma (* handleYield_ccorres: *) (* Show that current thread is unmodified. * FIXME: proper way to do this? *) apply (subst reorder_gets[symmetric, unfolded K_bind_def]) - using tcbSchedDequeue'_modifies apply (fastforce simp: NonDetMonadVCG.valid_def) + using tcbSchedDequeue'_modifies apply (fastforce simp: Nondet_VCG.valid_def) apply (subst double_gets_drop_regets) apply (rule corres_pre_getCurThread_wrapper) apply (rule corres_split[OF tcbSchedDequeue_ccorres[ac]]) @@ -146,7 +146,7 @@ lemma corres_noop2_no_exs: apply (clarsimp simp: corres_underlying_def) apply (rule conjI) apply (drule x, drule y) - apply (clarsimp simp: NonDetMonadVCG.valid_def empty_fail_def Ball_def Bex_def) + apply (clarsimp simp: Nondet_VCG.valid_def empty_fail_def Ball_def Bex_def) apply fast apply (insert z) apply (clarsimp simp: no_fail_def) diff --git a/proof/crefine/lib/AutoCorresModifiesProofs.thy b/proof/crefine/lib/AutoCorresModifiesProofs.thy index 1ae3af10be..bbd0fb16f4 100644 --- a/proof/crefine/lib/AutoCorresModifiesProofs.thy +++ b/proof/crefine/lib/AutoCorresModifiesProofs.thy @@ -32,7 +32,7 @@ text \ (via L1_call_simpl), so the limitations of ac_corres do not apply. \ lemma autocorres_modifies_transfer: - notes select_wp[wp] hoare_seq_ext[wp] + notes hoare_seq_ext[wp] fixes \ globals f' f_'proc modifies_eqn P xf assumes f'_def: "f' \ AC_call_L1 P globals xf (L1_call_simpl check_termination \ f_'proc)" assumes f_modifies: "\\. \\\<^bsub>/UNIV\<^esub> {\} Call f_'proc {t. modifies_eqn (globals t) (globals \)}" @@ -413,7 +413,7 @@ fun modifies_call_tac (callee_modifies: incr_net) ctxt n = DETERM ( (* VCG for trivial state invariants, such as globals modifies specs. * Takes vcg rules from "valid_inv". *) -val valid_invN = Context.theory_name @{theory} ^ ".valid_inv" +val valid_invN = Context.theory_name { long=true } @{theory} ^ ".valid_inv" fun modifies_vcg_tac leaf_tac ctxt n = let val vcg_rules = Named_Theorems.get ctxt valid_invN |> Tactic.build_net; fun vcg n st = Seq.make (fn () => let diff --git a/proof/crefine/lib/AutoCorres_C.thy b/proof/crefine/lib/AutoCorres_C.thy index e32ea07f89..6b3f6b7abf 100644 --- a/proof/crefine/lib/AutoCorres_C.thy +++ b/proof/crefine/lib/AutoCorres_C.thy @@ -69,10 +69,10 @@ FIXME: Move this change into AutoCorres itself, or the underlying VCG library. lemmas [wp del] = NonDetMonadEx.validE_whenE - NonDetMonadVCG.whenE_wps + Nondet_VCG.whenE_wps lemmas hoare_whenE_wp2 [wp] = - NonDetMonadVCG.whenE_wps[simplified if_apply_def2] + Nondet_VCG.whenE_wps[simplified if_apply_def2] section \Rules for proving @{term ccorres_underlying} goals\ @@ -296,8 +296,8 @@ lemma ccorres_to_corres_with_termination: "\s s'. \ cstate_relation s (globals s'); P s; \ snd (dspec_f s); G s' \ \ \ \ Call f_'proc \ Normal s'" shows "corres_underlying {(s, s'). cstate_relation s s'} True True R P Q dspec_f ac_f" - using ccorres ret pre unfolding ac_def ccorres_to_corres_pre_def - apply (clarsimp simp: corres_underlying_def ccorres_underlying_def rf_sr_def) + using ccorres ret pre unfolding ac_def ccorres_to_corres_pre_def rf_sr_def + apply (clarsimp simp: corres_underlying_def ccorres_underlying_def) apply (rule conjI) apply (fastforce simp: unif_rrel_def intro: EHOther dest: in_AC_call_simpl) apply (clarsimp simp: AC_call_L1_def L2_call_L1_def L1_call_simpl_def) @@ -334,8 +334,8 @@ lemma ccorres_to_corres_no_termination: assumes pre: "\s'. 
G s' \ ccorres_to_corres_pre Q \ Q' s'" assumes ret: "\r s'. R r (ret_xf s') \ R' r (ret_xf' s')" shows "corres_underlying {(s, s'). cstate_relation s s'} True True R P Q dspec_f ac_f" - using ccorres ret pre unfolding ac_def ccorres_to_corres_pre_def - apply (clarsimp simp: ac_def corres_underlying_def ccorres_underlying_def rf_sr_def) + using ccorres ret pre unfolding ac_def ccorres_to_corres_pre_def rf_sr_def + apply (clarsimp simp: ac_def corres_underlying_def ccorres_underlying_def) apply (rule conjI) apply (fastforce simp: unif_rrel_def intro: EHOther dest: in_AC_call_simpl) apply (clarsimp simp: AC_call_L1_def L2_call_L1_def L1_call_simpl_def) @@ -694,7 +694,7 @@ lemma exec_no_fault: using valid ce asms apply - apply (frule hoare_sound) - apply (clarsimp simp: NonDetMonad.bind_def cvalid_def split_def HoarePartialDef.valid_def) + apply (clarsimp simp: Nondet_Monad.bind_def cvalid_def split_def HoarePartialDef.valid_def) apply (drule spec, drule spec, drule (1) mp) apply auto done @@ -707,7 +707,7 @@ lemma exec_no_stuck: using valid ce asms apply - apply (frule hoare_sound) - apply (clarsimp simp: NonDetMonad.bind_def cvalid_def split_def HoarePartialDef.valid_def) + apply (clarsimp simp: Nondet_Monad.bind_def cvalid_def split_def HoarePartialDef.valid_def) apply (drule spec, drule spec, drule (1) mp) apply auto done @@ -805,17 +805,6 @@ section \Additional infrastructure\ context kernel begin -lemma wpc_helper_corres_final: - "corres_underlying sr nf nf' rv Q Q' f f' - \ wpc_helper (P, P') (Q, {s. Q' s}) (corres_underlying sr nf nf' rv P (\s. s \ P') f f')" - apply (clarsimp simp: wpc_helper_def) - apply (erule corres_guard_imp) - apply auto - done - -wpc_setup "\m. corres_underlying sr nf nf' rv P P' m f'" wpc_helper_corres_final -wpc_setup "\m. corres_underlying sr nf nf' rv P P' (m >>= f) f'" wpc_helper_corres_final - lemma condition_const: "condition (\_. P) L R = (if P then L else R)" by (simp add: condition_def split: if_splits) @@ -932,7 +921,7 @@ lemma terminates_spec_no_fail: using spec_result_Normal p_spec by simp have L1_call_simpl_no_fail: "no_fail (\s. P s s) (L1_call_simpl check_termination \ f_'proc)" - apply (wpsimp simp: L1_call_simpl_def wp: no_fail_select select_wp) + apply (wpsimp simp: L1_call_simpl_def wp: no_fail_select) using terminates normal by auto have select_f_L1_call_simpl_no_fail: "\s. no_fail (\_. P s s) (select_f (L1_call_simpl check_termination \ f_'proc s))" @@ -948,7 +937,7 @@ lemma terminates_spec_no_fail: apply (wpsimp wp: select_f_L1_call_simpl_no_fail no_fail_select wp_del: select_f_wp) apply (rule hoare_strengthen_post[OF select_f_L1_call_simpl_rv], fastforce) - apply (wpsimp wp: select_wp nf_pre)+ + apply (wpsimp wp: nf_pre)+ done qed diff --git a/proof/crefine/lib/Corres_C.thy b/proof/crefine/lib/Corres_C.thy index 49ae2eab3c..b3dc183795 100644 --- a/proof/crefine/lib/Corres_C.thy +++ b/proof/crefine/lib/Corres_C.thy @@ -190,11 +190,22 @@ lemma ccorres_split_nothrow_novcgE: apply (clarsimp simp: guard_is_UNIV_def split: sum.split) done -(* Unit would be more appropriate, but the record package will simplify xfdc to () *) +\ \Unit would be more appropriate, but the record package will rewrite xfdc to (). + This can happen even when protected by a cong rule, as seen in the following example. +definition + "xfdc \ \(t :: cstate). ()" +lemma + "\x b. \snd x = b\ + \ ccorres_underlying rf_sr \ + (\a b. dc a b) (\a. 
xfdc a) dc xfdc + \ UNIV [SKIP] a c" + supply ccorres_weak_cong[cong] + apply clarify + oops\ definition "xfdc (t :: cstate) \ (0 :: nat)" -lemma xfdc_equal [simp]: +lemma xfdc_equal[simp]: "xfdc t = xfdc s" unfolding xfdc_def by simp @@ -655,7 +666,7 @@ lemma cte_C_cap_C_update: fixes val :: "cap_C" and ptr :: "cte_C ptr" assumes cl: "clift hp ptr = Some z" shows "(clift (hrs_mem_update (heap_update (Ptr &(ptr\[''cap_C''])) val) hp)) = - clift hp(ptr \ cte_C.cap_C_update (\_. val) z)" + (clift hp)(ptr \ cte_C.cap_C_update (\_. val) z)" using cl by (simp add: clift_field_update) @@ -826,7 +837,7 @@ lemma ccorres_sequence_x_while_genQ': \n. Suc n < length xs \ \F (n * j)\ xs ! n \\_. F (Suc n * j)\; i + length xs * j < 2 ^ len_of TYPE('c); \s f. xf (xf_update f s) = f (xf s) \ globals (xf_update f s) = globals s; j > 0 \ \ ccorres (\rv i'. i' = of_nat (i + length xs * of_nat j)) xf (\s. P 0 \ F 0 s) ({s. xf s = of_nat i} \ Q) hs - (NonDetMonad.sequence_x xs) + (Nondet_Monad.sequence_x xs) (While {s. P (xf s)} (body;; Basic (\s. xf_update (\_. xf s + of_nat j) s)))" apply (simp add: sequence_x_sequence liftM_def[symmetric] @@ -834,7 +845,7 @@ lemma ccorres_sequence_x_while_genQ': apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_while_genQ [where xf'=xfdc and r'=dc and xf_update=xf_update, simplified], - (simp add: dc_def)+) + (simp add: dc_def cong: ccorres_all_cong)+) done lemma ccorres_sequence_x_while_gen': @@ -845,7 +856,7 @@ lemma ccorres_sequence_x_while_gen': \n. Suc n < length xs \ \F (n * j)\ xs ! n \\_. F (Suc n * j)\; i + length xs * j < 2 ^ len_of TYPE('c); \s f. xf (xf_update f s) = f (xf s) \ globals (xf_update f s) = globals s; 0 < j \ \ ccorres (\rv i'. i' = of_nat (i + length xs * of_nat j)) xf (F 0) {s. xf s = of_nat i} hs - (NonDetMonad.sequence_x xs) + (Nondet_Monad.sequence_x xs) (While {s. P (xf s)} (body;; Basic (\s. xf_update (\_. xf s + of_nat j) s)))" apply (simp add: sequence_x_sequence liftM_def[symmetric] @@ -853,17 +864,13 @@ lemma ccorres_sequence_x_while_gen': apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_while_gen' [where xf'=xfdc and r'=dc and xf_update=xf_update, simplified], - (simp add: dc_def)+) + (simp add: dc_def cong: ccorres_all_cong)+) done lemma i_xf_for_sequence: "\s f. i_' (i_'_update f s) = f (i_' s) \ globals (i_'_update f s) = globals s" by simp -lemmas ccorres_sequence_x_while' - = ccorres_sequence_x_while_gen' [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_sequence_x_while_genQ: fixes xf :: "globals myvars \ ('c :: len) word" assumes one: "\n < length xs. ccorres dc xfdc (F (n * j) ) ({s. xf s = of_nat n * of_nat j} \ Q) hs (xs ! n) body" @@ -881,8 +888,9 @@ lemma ccorres_sequence_x_while_genQ: (While {s. P (xf s)} (body ;; Basic (\s. xf_update (\_. xf s + of_nat j) s))))" apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequence_x_while_genQ' [where i=0 and xf_update=xf_update and Q=Q, simplified]) - apply (simp add: assms hi[simplified])+ + apply (rule ccorres_rel_imp) + apply (rule ccorres_sequence_x_while_genQ' [where i=0 and xf_update=xf_update and Q=Q, simplified]) + apply (simp add: assms hi[simplified])+ apply (rule conseqPre, vcg) apply (clarsimp simp add: xf) apply (rule conseqPre, vcg) @@ -904,22 +912,15 @@ lemma ccorres_sequence_x_while_gen: (While {s. P (xf s)} (body ;; Basic (\s. xf_update (\_. 
xf s + of_nat j) s))))" apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequence_x_while_gen' [where i=0 and xf_update=xf_update, simplified]) - apply (simp add: assms hi[simplified])+ + apply (rule ccorres_rel_imp) + apply (rule ccorres_sequence_x_while_gen' [where i=0 and xf_update=xf_update, simplified]) + apply (simp add: assms hi[simplified])+ apply vcg apply (simp add: xf) apply vcg apply (simp add: xf rf_sr_def) done -lemmas ccorres_sequence_x_while - = ccorres_sequence_x_while_gen [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - -lemmas ccorres_sequence_x_whileQ - = ccorres_sequence_x_while_genQ [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_mapM_x_while_gen: fixes xf :: "globals myvars \ ('c :: len) word" assumes rl: "\n. n < length xs \ ccorres dc xfdc (F (n * j)) {s. xf s = of_nat n * of_nat j} hs (f (xs ! n)) body" @@ -937,13 +938,9 @@ lemma ccorres_mapM_x_while_gen: unfolding mapM_x_def apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_x_while_gen[where xf_update=xf_update]) - apply (simp add: assms hi[simplified])+ + apply (simp add: assms hi[simplified])+ done -lemmas ccorres_mapM_x_while - = ccorres_mapM_x_while_gen [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_mapM_x_while_genQ: fixes xf :: "globals myvars \ ('c :: len) word" assumes rl: "\n. n < length xs \ ccorres dc xfdc (F (n * j)) ({s. xf s = of_nat n * of_nat j} \ Q) hs (f (xs ! n)) body" @@ -963,13 +960,9 @@ lemma ccorres_mapM_x_while_genQ: unfolding mapM_x_def apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_x_while_genQ[where xf_update=xf_update]) - apply (simp add: assms hi[simplified])+ + apply (simp add: assms hi[simplified])+ done -lemmas ccorres_mapM_x_whileQ - = ccorres_mapM_x_while_genQ [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_mapM_x_while_gen': fixes xf :: "globals myvars \ ('c :: len) word" assumes rl: "\n. n < length xs \ @@ -988,14 +981,10 @@ lemma ccorres_mapM_x_while_gen': unfolding mapM_x_def apply (rule ccorres_rel_imp) apply (rule ccorres_sequence_x_while_gen'[where xf_update=xf_update]) - apply (clarsimp simp only: length_map nth_map rl) - apply (simp add: assms hi[simplified])+ + apply (clarsimp simp only: length_map nth_map rl) + apply (simp add: assms hi[simplified])+ done -lemmas ccorres_mapM_x_while' - = ccorres_mapM_x_while_gen' [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, - where j=1, simplified] - lemma ccorres_zipWithM_x_while_genQ: fixes xf :: "globals myvars \ ('c :: len) word" assumes rl: "\n. n < length xs \ n < length ys \ ccorres dc xfdc (F (n * j)) ({s. xf s = of_nat n * of_nat j} \ Q) @@ -1015,21 +1004,53 @@ lemma ccorres_zipWithM_x_while_genQ: (body ;; Basic (\s. xf_update (\_. 
xf s + of_nat j) s))))" unfolding zipWithM_x_def apply (rule ccorres_guard_imp) - apply (rule ccorres_rel_imp [OF ccorres_sequence_x_while_genQ[where F=F, OF _ _ _ _ _ xf j]], - simp_all add: length_zipWith) - apply (simp add: length_zipWith zipWith_nth) - apply (rule rl) - apply (rule guard) - apply (rule bodyi) - apply (simp add: zipWith_nth hi[simplified]) - apply (rule wb) + apply (rule ccorres_rel_imp [OF ccorres_sequence_x_while_genQ[where F=F, OF _ _ _ _ _ xf j]]; + simp) + apply (simp add: zipWith_nth) + apply (rule rl) + apply (rule guard) + apply (rule bodyi) + apply (simp add: zipWith_nth hi[simplified]) + apply (rule wb) + apply simp+ done +\ \Temporarily remove ccorres_weak_cong, so that the following lemmas can be constructed + with simplified return relations. + We do not use ccorres_all_cong due to it causing unexpected eta-expansion.\ +context +notes ccorres_weak_cong[cong del] +begin +lemmas ccorres_sequence_x_while' + = ccorres_sequence_x_while_gen' [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_sequence_x_while + = ccorres_sequence_x_while_gen [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_sequence_x_whileQ + = ccorres_sequence_x_while_genQ [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_mapM_x_while + = ccorres_mapM_x_while_gen [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_mapM_x_whileQ + = ccorres_mapM_x_while_genQ [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + +lemmas ccorres_mapM_x_while' + = ccorres_mapM_x_while_gen' [OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, + where j=1, simplified] + lemmas ccorres_zipWithM_x_while_gen = ccorres_zipWithM_x_while_genQ[where Q=UNIV, simplified] lemmas ccorres_zipWithM_x_while = ccorres_zipWithM_x_while_gen[OF _ _ _ _ _ i_xf_for_sequence, folded word_bits_def, where j=1, simplified] +end end @@ -1114,7 +1135,7 @@ proof - apply (rule ccorres_cond_true) apply (rule ccorres_rhs_assoc)+ apply (rule ccorres_splitE) - apply (simp add: inl_rrel_inl_rrel) + apply simp apply (rule_tac ys="zs" in one'') apply simp apply (rule ceqv_refl) @@ -1141,7 +1162,8 @@ proof - qed thus ?thesis by (clarsimp simp: init_xs_def dest!: spec[where x=Nil] - elim!: ccorres_rel_imp2 inl_inrE) + elim!: ccorres_rel_imp2 inl_inrE + cong: ccorres_all_cong) qed lemma ccorres_sequenceE_while_down: @@ -1223,10 +1245,11 @@ lemma ccorres_sequenceE_while: Basic (\s. i_'_update (\_. 
i_' s + 1) s)))" apply (rule ccorres_guard_imp2) apply (rule ccorres_symb_exec_r) - apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], - (assumption | simp)+) - apply (simp add: word_bits_def) - apply simp+ + apply (rule ccorres_rel_imp2) + apply (rule ccorres_sequenceE_while_gen'[where i=0, simplified, where xf_update=i_'_update], + (assumption | simp)+) + apply (simp add: word_bits_def) + apply simp+ apply vcg apply (rule conseqPre, vcg) apply clarsimp diff --git a/proof/crefine/lib/Ctac.thy b/proof/crefine/lib/Ctac.thy index 4de8f947ed..f07db59f25 100644 --- a/proof/crefine/lib/Ctac.thy +++ b/proof/crefine/lib/Ctac.thy @@ -1755,7 +1755,7 @@ next apply (simp add: simpl_sequence_Cons sequenceE_Cons) apply (rule ccorres_guard_imp2) apply (rule ccorres_splitE) - apply (simp add: inl_rrel_inl_rrel) + apply simp apply (rule Cons.prems(1)[where zs=Nil, simplified]) apply (rule ceqv_refl) apply (simp add: liftME_def[symmetric] liftME_liftM) @@ -1809,7 +1809,7 @@ lemma mapME_x_simpl_sequence_fun_related: clarsimp elim!: inl_inrE) apply (erule_tac x="length zs" in meta_allE | erule_tac x="xs ! length zs" in meta_allE)+ - apply (simp add: dc_def) + apply (simp add: dc_def cong: ccorres_all_cong) done lemmas mapME_x_simpl_sequence_same @@ -1818,8 +1818,8 @@ lemmas mapME_x_simpl_sequence_same lemmas call_ignore_cong = refl[of "call i f g r" for i f g r] (* These could be done with ML patterns, but this fits in better with tactics *) -lemmas match_valid = trivial[of "NonDetMonadVCG.valid P a P'" for P a P'] -lemmas match_validE = trivial[of "NonDetMonadVCG.validE P a P' P''" for P a P' P''] +lemmas match_valid = trivial[of "Nondet_VCG.valid P a P'" for P a P'] +lemmas match_validE = trivial[of "Nondet_VCG.validE P a P' P''" for P a P' P''] lemmas match_hoare = trivial[of "HoarePartialDef.hoarep G T F P C P' A" for G T F P C P' A] lemmas match_all_hoare = trivial[of "\x. HoarePartialDef.hoarep G T F (P x) C (P' x) (A x)" for G T F P C P' A] lemmas match_xpres = trivial[of "xpres xf v \ c" for xf v \ c] @@ -1858,11 +1858,10 @@ method_setup ctac_print_xf = \CtacImpl.corres_print_xf\ "Print out what ctac thinks is the current xf" (* Set up wpc *) -lemma - wpc_helper_ccorres_final: - "ccorres_underlying sr G rv xf arrel axf Q Q' hs f f' - \ wpc_helper (P, P') (Q, Q') - (ccorres_underlying sr G rv xf arrel axf P P' hs f f')" +lemma wpc_helper_ccorres_final: + "ccorres_underlying sr G rv xf arrel axf Q Q'' hs f f' + \ wpc_helper (P, P', P'') (Q, Q', Q'') + (ccorres_underlying sr G rv xf arrel axf P P'' hs f f')" apply (clarsimp simp: wpc_helper_def) apply (erule ccorres_guard_imp) apply auto @@ -1870,14 +1869,15 @@ lemma wpc_setup "\m. ccorres_underlying sr G rv xf arrel axf P P' hs m conc" wpc_helper_ccorres_final wpc_setup "\m. ccorres_underlying sr G rv xf arrel axf P P' hs (m >>= a) conc" wpc_helper_ccorres_final +wpc_setup "\m. ccorres_underlying sr G rv xf arrel axf P P' hs (m >>=E a) conc" wpc_helper_ccorres_final context kernel begin (* Set up ctac proof sets. 
These are tried in reverse order (further down is tried first) *) -declare ccorres_Guard [corres_pre] -declare ccorres_Guard_Seq [corres_pre] +declare ccorres_Guard [ccorres_pre] +declare ccorres_Guard_Seq [ccorres_pre] lemma c_guard_field_abs: fixes p :: "'a :: mem_type ptr" @@ -2031,6 +2031,7 @@ fun tac ctxt = ORELSE (resolve_tac ctxt [@{thm xpresI}] THEN' simp_tac (ctxt |> Splitter.del_split @{thm "if_split"})) 1 )) THEN simp_tac (put_simpset HOL_basic_ss ctxt addsimps @{thms com.case}) 1 + THEN no_name_eta_tac ctxt \ end diff --git a/proof/crefine/lib/ctac-method.ML b/proof/crefine/lib/ctac-method.ML index 71651db945..eca4ad1ab4 100644 --- a/proof/crefine/lib/ctac-method.ML +++ b/proof/crefine/lib/ctac-method.ML @@ -123,7 +123,7 @@ fun ceqv_simpl_seq ctxt = Config.get ctxt (fst ceqv_simpl_sequence_pair) val setup = Attrib.setup @{binding "corres"} (Attrib.add_del ctac_add ctac_del) "correspondence rules" - #> Attrib.setup @{binding "corres_pre"} + #> Attrib.setup @{binding "ccorres_pre"} (Attrib.add_del ctac_pre_add ctac_pre_del) "correspondence preprocessing rules" #> Attrib.setup @{binding "corres_post"} @@ -1107,7 +1107,9 @@ fun shorten_names mp = mp -- Shorten_Names.shorten_names_preserve_new >> MethodExtras.then_all_new val corres_ctac_tactic = let - fun tac upds ctxt = Method.SIMPLE_METHOD' (corres_ctac (apply upds default_ctac_opts) ctxt); + fun tac upds ctxt + = Method.SIMPLE_METHOD' (corres_ctac (apply upds default_ctac_opts) ctxt + THEN_ALL_NEW (fn _ => no_name_eta_tac ctxt)); val option_args = Args.parens (P.list (Scan.first ctac_options)) val opt_option_args = Scan.lift (Scan.optional option_args []) @@ -1133,7 +1135,9 @@ val corres_abstract_args = corres_pre_abstract_args corres_pre_lift_tac_clift; val corres_abstract_init_args = corres_pre_abstract_args corres_pre_lift_tac_cinit; val corres_symb_rhs = let - fun tac upds ctxt = Method.SIMPLE_METHOD' (corres_symb_rhs_tac (apply upds default_csymbr_opts) ctxt); + fun tac upds ctxt + = Method.SIMPLE_METHOD' (corres_symb_rhs_tac (apply upds default_csymbr_opts) ctxt + THEN_ALL_NEW (fn _ => no_name_eta_tac ctxt)); val option_args = Args.parens (P.list (Scan.first csymbr_options)) val opt_option_args = Scan.lift (Scan.optional option_args []) @@ -1143,7 +1147,8 @@ in end; val corres_ceqv = let - fun tac upds ctxt = Method.SIMPLE_METHOD' (corres_solve_ceqv (#trace (apply upds default_ceqv_opts)) 0 ctxt); + fun tac upds ctxt + = Method.SIMPLE_METHOD' (corres_solve_ceqv (#trace (apply upds default_ceqv_opts)) 0 ctxt); val option_args = Args.parens (P.list (Scan.first ceqv_options)) val opt_option_args = Scan.lift (Scan.optional option_args []) @@ -1156,7 +1161,8 @@ end; * We should be able to get the xfs from the goal ... 
*) fun corres_boilerplate unfold_haskell_p = let fun tac (upds, xfs : string list) ctxt - = Method.SIMPLE_METHOD' (corres_boilerplate_tac (apply upds default_cinit_opts) unfold_haskell_p xfs ctxt) + = Method.SIMPLE_METHOD' (corres_boilerplate_tac (apply upds default_cinit_opts) unfold_haskell_p xfs ctxt + THEN_ALL_NEW (fn _ => no_name_eta_tac ctxt)) val var_lift_args = Args.$$$ liftoptN |-- Args.colon |-- Scan.repeat (Scan.unless (Scan.first boilerplate_modifiers) Args.name) diff --git a/proof/drefine/Arch_DR.thy b/proof/drefine/Arch_DR.thy index 85ddf58a12..7ac7351dd6 100644 --- a/proof/drefine/Arch_DR.thy +++ b/proof/drefine/Arch_DR.thy @@ -363,7 +363,7 @@ proof - apply (clarsimp simp add: corres_alternate2 split: ARM_A.pde.split) apply (rule corres_alternate1) apply (rule corres_from_rdonly, simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def in_monad select_def, wp) apply (clarsimp simp: transform_pt_slot_ref_def all_pd_pt_slots_def opt_object_page_directory @@ -409,7 +409,7 @@ proof - apply (rename_tac word1 set word2) apply (rule corres_alternate1) apply (rule corres_from_rdonly, simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def in_monad select_def, wp) apply (clarsimp simp: pd_aligned obj_at_def lookup_pd_slot_pd a_type_simps) @@ -458,7 +458,7 @@ proof - lookup_error_injection dc_def[symmetric]) apply (rule corres_alternate1) apply (rule corres_from_rdonly, simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def in_monad select_def, wp) apply (clarsimp simp: transform_pde_def obj_at_def opt_object_page_directory @@ -477,7 +477,7 @@ proof - lookup_error_injection dc_def[symmetric]) apply (rule corres_alternate1) apply (rule corres_from_rdonly, simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def in_monad select_def, wp) apply (clarsimp simp: transform_pde_def obj_at_def opt_object_page_directory @@ -557,7 +557,6 @@ lemma select_ret_or_throw_twiceE: done crunch inv[wp]: select_ret_or_throw "P" - (wp: select_wp) lemma corres_initial_bindE_rdonly_select_ret_or_throw: assumes y: "\rv'. 
corres_underlying sr nf nf' (e \ r) P P' (select_ret_or_throw S X) (d rv')" @@ -659,7 +658,7 @@ proof (induct x) apply (rule ucast_up_inj[where 'b=32]) apply (simp add: ucast_ucast_mask is_aligned_mask asid_low_bits_def) apply simp - apply (wp select_wp | simp add:valid_cap_def split del: if_split)+ + apply (wp | simp add:valid_cap_def split del: if_split)+ done next case ASIDControlCap @@ -737,7 +736,7 @@ next apply (rule less_trans) apply simp apply simp - apply (wp lsfco_not_idle select_inv select_wp | simp)+ + apply (wp lsfco_not_idle select_inv | simp)+ apply (simp add: cte_wp_at_caps_of_state neq_Nil_conv invs_mdb_cte mdb_cte_at_rewrite) apply auto done @@ -948,7 +947,7 @@ next corres_alternate2) apply (rule corres_alternate1) apply (rule corres_from_rdonly,simp_all)[1] - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (simp add: returnOk_def, wp) apply (clarsimp simp: in_monad select_def arch_invocation_relation_def translate_arch_invocation_def transform_page_table_inv_def @@ -1105,7 +1104,7 @@ lemma set_cap_opt_cap': apply (rule hoare_seq_ext [OF _ dget_object_sp]) apply (case_tac obj; simp add: KHeap_D.set_object_def has_slots_def update_slots_def object_slots_def split del: if_split cong: if_cong bind_cong; - wpsimp wp: select_wp) + wpsimp) by (auto elim!:rsubst[where P=P] simp: opt_cap_def slots_of_def object_slots_def) lemma set_cap_opt_cap: @@ -1206,7 +1205,7 @@ lemma invoke_page_table_corres: apply clarsimp apply (wp store_pte_cte_wp_at) apply fastforce - apply (wp hoare_post_taut)+ + apply wpsimp+ apply (rule_tac Q="\rv s. invs s \ valid_etcbs s \ a \ idle_thread s \ cte_wp_at \ (a,b) s \ caps_of_state s' = caps_of_state s" in hoare_strengthen_post) apply wp @@ -1588,23 +1587,20 @@ lemma valid_etcbs_clear_um_detype: by (clarsimp simp: valid_etcbs_def st_tcb_at_def is_etcb_at_def st_tcb_at_kh_def obj_at_kh_def obj_at_def detype_def detype_ext_def clear_um_def) - lemma unat_map_upd: - "unat_map (Some \ transform_asid_table_entry \ arm_asid_table - as (asid_high_bits_of base \ frame)) = - unat_map (Some \ transform_asid_table_entry \ arm_asid_table as) - (unat (asid_high_bits_of base) \ AsidPoolCap frame 0)" + "unat_map (Some \ transform_asid_table_entry \ (asid_table as)(asid_high_bits_of base \ frame)) = + (unat_map (Some \ transform_asid_table_entry \ asid_table as)) + (unat (asid_high_bits_of base) \ AsidPoolCap frame 0)" apply (rule ext) - apply (clarsimp simp:unat_map_def asid_high_bits_of_def - transform_asid_table_entry_def) + apply (clarsimp simp:unat_map_def asid_high_bits_of_def transform_asid_table_entry_def) apply (intro impI conjI) apply (subgoal_tac "x<256") - apply (clarsimp simp:unat_map_def asid_high_bits_of_def asid_low_bits_def - transform_asid_table_entry_def transform_asid_def) + apply (clarsimp simp: unat_map_def asid_high_bits_of_def asid_low_bits_def + transform_asid_table_entry_def transform_asid_def) apply (drule_tac x="of_nat x" in unat_cong) apply (subst (asm) word_unat.Abs_inverse) apply (clarsimp simp:unats_def unat_ucast)+ -done + done declare descendants_of_empty[simp] diff --git a/proof/drefine/CNode_DR.thy b/proof/drefine/CNode_DR.thy index 0845255a68..f8bf527b4f 100644 --- a/proof/drefine/CNode_DR.thy +++ b/proof/drefine/CNode_DR.thy @@ -121,7 +121,7 @@ lemma dcorres_opt_parent_set_parent_helper: "dcorres dc \ P (gets (opt_parent (transform_cslot_ptr src)) >>= case_option (return ()) - (\parent. modify (\s. s\cdl_cdt := cdl_cdt s(transform_cslot_ptr child \ parent)\))) + (\parent. modify (\s. 
s\cdl_cdt := (cdl_cdt s)(transform_cslot_ptr child \ parent)\))) g \ dcorres dc \ (\s. cdt s child = None \ cte_at child s \ mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s) \ P s) @@ -143,7 +143,7 @@ lemma dcorres_opt_parent_set_parent_helper: lemma dcorres_set_parent_helper: "dcorres dc \ P - (modify (\s. s\cdl_cdt := cdl_cdt s(transform_cslot_ptr child \ parent)\)) + (modify (\s. s\cdl_cdt := (cdl_cdt s)(transform_cslot_ptr child \ parent)\)) g \ dcorres dc \ (\s. cdt s child = None \ cte_at child s \ mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s) \ P s) @@ -218,7 +218,7 @@ lemma insert_cap_sibling_corres: apply (rule_tac s=s' in transform_cdt_slot_inj_on_cte_at[where P=\]) apply (auto simp: swp_def dest: mdb_cte_atD elim!: ranE)[1] - apply ((wp set_cap_caps_of_state2 get_cap_wp static_imp_wp + apply ((wp set_cap_caps_of_state2 get_cap_wp hoare_weak_lift_imp | simp add: swp_def cte_wp_at_caps_of_state)+) apply (wp set_cap_idle | simp add:set_untyped_cap_as_full_def split del: if_split)+ @@ -231,7 +231,7 @@ lemma insert_cap_sibling_corres: cte_wp_at_caps_of_state has_parent_cte_at is_physical_def dest!:is_untyped_cap_eqD) apply fastforce - apply (wp get_cap_wp set_cap_idle static_imp_wp + apply (wp get_cap_wp set_cap_idle hoare_weak_lift_imp | simp add:set_untyped_cap_as_full_def split del: if_split)+ apply (rule_tac Q = "\r s. cdt s sibling = None @@ -303,7 +303,7 @@ lemma insert_cap_child_corres: apply (rule_tac s=s' in transform_cdt_slot_inj_on_cte_at[where P=\]) apply (auto simp: swp_def dest: mdb_cte_atD elim!: ranE)[1] - apply (wp set_cap_caps_of_state2 get_cap_wp static_imp_wp + apply (wp set_cap_caps_of_state2 get_cap_wp hoare_weak_lift_imp | simp add: swp_def cte_wp_at_caps_of_state)+ apply (wp set_cap_idle | simp add:set_untyped_cap_as_full_def split del:if_split)+ @@ -314,14 +314,14 @@ lemma insert_cap_child_corres: apply (wp set_cap_mdb_cte_at | simp add:not_idle_thread_def)+ apply (clarsimp simp:mdb_cte_at_def cte_wp_at_caps_of_state) apply fastforce - apply (wp get_cap_wp set_cap_idle static_imp_wp + apply (wp get_cap_wp set_cap_idle hoare_weak_lift_imp | simp split del:if_split add:set_untyped_cap_as_full_def)+ apply (rule_tac Q = "\r s. not_idle_thread (fst child) s \ (\cap. caps_of_state s src = Some cap) \ should_be_parent_of src_capa (is_original_cap s src) cap (cap_insert_dest_original cap src_capa) \ mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" in hoare_strengthen_post) - apply (wp set_cap_mdb_cte_at static_imp_wp | simp add:not_idle_thread_def)+ + apply (wp set_cap_mdb_cte_at hoare_weak_lift_imp | simp add:not_idle_thread_def)+ apply (clarsimp simp:mdb_cte_at_def cte_wp_at_caps_of_state) apply fastforce apply clarsimp @@ -756,7 +756,7 @@ lemma cap_revoke_corres_helper: apply (erule cte_wp_at_weakenE, simp) apply (simp,blast) apply simp+ - apply (wp select_wp,(clarsimp simp: select_ext_def in_monad)+) + apply (wp, (clarsimp simp: select_ext_def in_monad)+) apply (rule dcorres_expand_pfx) apply (rule_tac r'="\cap cap'. cap = transform_cap cap'" and Q ="\r. \" and Q'="\r s. cte_wp_at (\x. 
x = r) (aa,ba) s \ s = sfix" in corres_split_forwards') @@ -792,7 +792,7 @@ lemma cap_revoke_corres_helper: in corres_split_forwards') apply (rule corres_guard_imp[OF corres_trivial[OF preemption_corres]]) apply simp+ - apply (wp alternative_wp) + apply wp apply (simp add:valid_def throwError_def return_def) apply (simp add:valid_def returnOk_def return_def) apply fastforce @@ -878,21 +878,21 @@ lemma corres_mapM_to_mapM_x: by (simp add: mapM_x_mapM liftM_def[symmetric]) lemma ep_waiting_set_recv_upd_kh: - "ep_at epptr s \ (ep_waiting_set_recv epptr (update_kheap (kheap s(epptr \ kernel_object.Endpoint X)) s)) + "ep_at epptr s \ (ep_waiting_set_recv epptr (update_kheap ((kheap s)(epptr \ kernel_object.Endpoint X)) s)) = (ep_waiting_set_recv epptr s)" apply (rule set_eqI) apply (clarsimp simp:ep_waiting_set_recv_def obj_at_def is_ep_def) done lemma ep_waiting_set_send_upd_kh: - "ep_at epptr s \ (ep_waiting_set_send epptr (update_kheap (kheap s(epptr \ kernel_object.Endpoint X)) s)) + "ep_at epptr s \ (ep_waiting_set_send epptr (update_kheap ((kheap s)(epptr \ kernel_object.Endpoint X)) s)) = (ep_waiting_set_send epptr s)" apply (rule set_eqI) apply (clarsimp simp:ep_waiting_set_send_def obj_at_def is_ep_def) done lemma ntfn_waiting_set_upd_kh: - "ep_at epptr s \ (ntfn_waiting_set epptr (update_kheap (kheap s(epptr \ kernel_object.Endpoint X)) s)) + "ep_at epptr s \ (ntfn_waiting_set epptr (update_kheap ((kheap s)(epptr \ kernel_object.Endpoint X)) s)) = (ntfn_waiting_set epptr s)" apply (rule set_eqI) apply (clarsimp simp:ntfn_waiting_set_def obj_at_def is_ep_def) diff --git a/proof/drefine/Corres_D.thy b/proof/drefine/Corres_D.thy index 17afc63fad..db947dae27 100644 --- a/proof/drefine/Corres_D.thy +++ b/proof/drefine/Corres_D.thy @@ -84,7 +84,7 @@ lemma corres_free_return: lemma corres_free_set_object: "\ \ s s'. s = transform s' \ P s \ P' s' \ - s = transform ((\s. s \kheap := kheap s (ptr \ obj)\) s')\ \ + s = transform ((\s. s \kheap := (kheap s)(ptr \ obj)\) s')\ \ dcorres dc P P' (return a) (set_object ptr obj )" by (clarsimp simp: corres_underlying_def put_def return_def modify_def bind_def get_def set_object_def get_object_def in_monad) @@ -244,7 +244,7 @@ lemma dcorres_gets_the: lemma wpc_helper_dcorres: "dcorres r Q Q' f f' - \ wpc_helper (P, P') (Q, {s. Q' s}) (dcorres r P (\s. 
s \ P') f f')" + \ wpc_helper (P, P', P'') (Q, Q', Q'') (dcorres r P P' f f')" apply (clarsimp simp: wpc_helper_def) apply (erule corres_guard_imp) apply simp diff --git a/proof/drefine/Finalise_DR.thy b/proof/drefine/Finalise_DR.thy index 415e0c9180..4ce1997a8b 100644 --- a/proof/drefine/Finalise_DR.thy +++ b/proof/drefine/Finalise_DR.thy @@ -542,7 +542,7 @@ lemma flush_space_dwp[wp]: apply (clarsimp split:option.splits) apply (rule do_machine_op_wp) apply clarsimp - apply (wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp)+ apply (rule do_machine_op_wp) apply clarsimp apply wp @@ -650,7 +650,7 @@ lemma opt_object_asid_pool: lemma transform_asid_pool_contents_upd: "transform_asid_pool_contents (pool(ucast asid := pd)) = - transform_asid_pool_contents pool(snd (transform_asid asid) \ transform_asid_pool_entry pd)" + (transform_asid_pool_contents pool)(snd (transform_asid asid) \ transform_asid_pool_entry pd)" apply (clarsimp simp:transform_asid_pool_contents_def transform_asid_def) apply (rule ext) apply (case_tac x) @@ -1148,7 +1148,7 @@ lemma dcorres_delete_cap_simple_set_pt: lemma transform_page_table_contents_upd: - "transform_page_table_contents fun(unat (y && mask pt_bits >> 2) \ transform_pte pte) = + "(transform_page_table_contents fun)(unat (y && mask pt_bits >> 2) \ transform_pte pte) = transform_page_table_contents (fun(ucast ((y::word32) && mask pt_bits >> 2) := pte))" apply (rule ext) apply (clarsimp simp: transform_page_table_contents_def unat_map_def) @@ -1167,7 +1167,7 @@ lemma transform_page_table_contents_upd: lemma transform_page_directory_contents_upd: "ucast ((ptr::word32) && mask pd_bits >> 2) \ kernel_mapping_slots - \ transform_page_directory_contents f(unat (ptr && mask pd_bits >> 2) \ transform_pde a_pde) + \ (transform_page_directory_contents f)(unat (ptr && mask pd_bits >> 2) \ transform_pde a_pde) = transform_page_directory_contents (f(ucast (ptr && mask pd_bits >> 2) := a_pde))" apply (rule ext) apply (simp (no_asm) add: transform_page_directory_contents_def unat_map_def) @@ -3602,7 +3602,7 @@ next | simp add: not_idle_thread_def del: gets_to_return)+ apply (simp add: conj_comms) apply (wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at - hoare_vcg_const_Ball_lift set_cap_cte_cap_wp_to static_imp_wp + hoare_vcg_const_Ball_lift set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master[simplified in_monad, simplified] | simp only: not_idle_thread_def pred_conj_def simp_thms)+ apply (rule hoare_strengthen_post) diff --git a/proof/drefine/Ipc_DR.thy b/proof/drefine/Ipc_DR.thy index e9fed4fa54..7e753044d2 100644 --- a/proof/drefine/Ipc_DR.thy +++ b/proof/drefine/Ipc_DR.thy @@ -794,7 +794,7 @@ lemma not_idle_after_reply_cancel_ipc: apply (simp add:cap_delete_one_def unless_def) apply wp+ apply (simp add:IpcCancel_A.empty_slot_def) - apply (wp set_cap_idle select_wp | simp add: if_apply_def2 imp_conjR + apply (wp set_cap_idle | simp add: if_apply_def2 imp_conjR | strengthen imp_consequent[where P="invs s" for s] imp_consequent[where P="valid_idle s" for s])+ apply (strengthen invs_valid_idle) apply (wp thread_set_invs_trivial | simp add: ran_tcb_cap_cases)+ @@ -1233,7 +1233,7 @@ lemma cap_insert_cte_wp_at_masked_as_full: shows "\\s. if slot = dest then P cap else cte_wp_at P slot s\ cap_insert cap src dest \\uu. 
cte_wp_at P slot\" apply (simp add:cap_insert_def set_untyped_cap_as_full_def) - apply (wp set_cap_cte_wp_at hoare_vcg_if_lift get_cap_wp static_imp_wp dxo_wp_weak + apply (wp set_cap_cte_wp_at hoare_vcg_if_lift get_cap_wp hoare_weak_lift_imp dxo_wp_weak | simp split del:if_split)+ apply (intro conjI impI allI | clarsimp simp:cte_wp_at_caps_of_state)+ diff --git a/proof/drefine/KHeap_DR.thy b/proof/drefine/KHeap_DR.thy index 408ea41880..dbf6089e4c 100644 --- a/proof/drefine/KHeap_DR.thy +++ b/proof/drefine/KHeap_DR.thy @@ -82,11 +82,10 @@ termination CSpace_D.resolve_address_bits end -crunch cdl_cdt [wp]: "KHeap_D.set_cap" "\s. P (cdl_cdt s)" - (wp: crunch_wps select_wp simp: crunch_simps) - -crunch cdl_cdt [wp]: "PageTableUnmap_D.cancel_all_ipc", "PageTableUnmap_D.unbind_maybe_notification" "\s. P (cdl_cdt s)" - (wp: crunch_wps select_wp simp: crunch_simps) +crunches + "KHeap_D.set_cap", "PageTableUnmap_D.cancel_all_ipc", "PageTableUnmap_D.unbind_maybe_notification" + for cdl_cdt [wp]: "\s. P (cdl_cdt s)" + (wp: crunch_wps simp: crunch_simps) lemma descendants_cdl_cdt_lift: "(\P. \\s. P (cdl_cdt s)\ f \\_ s. P (cdl_cdt s)\) \ @@ -602,7 +601,7 @@ lemma xf_cnode_contents: lemma transform_cnode_contents_upd: "\well_formed_cnode_n sz cn; cn sl' = Some ocap'\ \ - transform_cnode_contents sz cn(nat (bl_to_bin sl') \ transform_cap cap') = + (transform_cnode_contents sz cn)(nat (bl_to_bin sl') \ transform_cap cap') = transform_cnode_contents sz (cn(sl' \ cap'))" apply (rule ext) apply clarsimp @@ -621,7 +620,7 @@ lemma transform_cnode_contents_upd: lemma caps_of_state_cnode_upd: "\ kheap s p' = Some (CNode sz cn); well_formed_cnode_n sz cn; cn sl' = Some ocap' \ \ - caps_of_state (update_kheap (kheap s(p' \ CNode sz (cn(sl' \ cap')))) s) = + caps_of_state (update_kheap ((kheap s)(p' \ CNode sz (cn(sl' \ cap')))) s) = (caps_of_state s) ((p',sl') \ cap')" apply (rule ext) apply (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases wf_cs_upd) @@ -2715,7 +2714,7 @@ lemma set_parent_corres: get_def set_cdt_def return_def bind_def) apply (simp add:transform_current_thread_def weak_valid_mdb_def) apply (rename_tac s') - apply (subgoal_tac "transform s'\cdl_cdt:=cdl_cdt(transform s') + apply (subgoal_tac "transform s'\cdl_cdt:=(cdl_cdt(transform s')) (transform_cslot_ptr slot' \ transform_cslot_ptr pslot')\ = cdl_cdt_single_update (transform s') (transform_cslot_ptr slot') (transform_cslot_ptr pslot')") apply (clarsimp simp:cdl_cdt_transform) @@ -2820,7 +2819,7 @@ done lemma transform_objects_update_kheap_simp: "\kheap s ptr = Some ko; ekheap s ptr = opt_etcb\ - \ transform_objects (update_kheap (kheap s(ptr \ obj)) s) = + \ transform_objects (update_kheap ((kheap s)(ptr \ obj)) s) = (\x. if x \ ptr then transform_objects s x else (if ptr = idle_thread s then None else Some (transform_object (machine_state s) ptr opt_etcb obj)))" diff --git a/proof/drefine/Schedule_DR.thy b/proof/drefine/Schedule_DR.thy index cbec79861b..2ef3fefa7d 100644 --- a/proof/drefine/Schedule_DR.thy +++ b/proof/drefine/Schedule_DR.thy @@ -139,7 +139,7 @@ lemma corrupt_intents_current_thread: by (simp add: corrupt_intents_def) crunch cdl_cur: corrupt_frame "\s. cdl_current_thread s = x" - (wp: select_wp simp: corrupt_intents_current_thread) + (simp: corrupt_intents_current_thread) (* Switching to the active thread has no effect. 
*) lemma switch_to_thread_idempotent_corres: diff --git a/proof/drefine/StateTranslationProofs_DR.thy b/proof/drefine/StateTranslationProofs_DR.thy index dfcd7e6a77..d2913c5452 100644 --- a/proof/drefine/StateTranslationProofs_DR.thy +++ b/proof/drefine/StateTranslationProofs_DR.thy @@ -66,7 +66,7 @@ abbreviation "update_tcb_boundntfn ntfn_opt tcb \ tcb \tcb_bound_notification := ntfn_opt\" abbreviation -"dupdate_cdl_object ptr obj s \ cdl_objects_update (\_. cdl_objects s(ptr \ obj)) s" +"dupdate_cdl_object ptr obj s \ cdl_objects_update (\_. (cdl_objects s)(ptr \ obj)) s" abbreviation "dupdate_tcb_intent intent tcb\ tcb \cdl_tcb_intent := intent\" diff --git a/proof/drefine/StateTranslation_D.thy b/proof/drefine/StateTranslation_D.thy index 24e957f0ed..335c5d8c2a 100644 --- a/proof/drefine/StateTranslation_D.thy +++ b/proof/drefine/StateTranslation_D.thy @@ -53,12 +53,12 @@ where else if x = 2 then Some EndpointType else if x = 3 then Some NotificationType else if x = 4 then Some CNodeType - else if x = 5 then Some (FrameType 12) - else if x = 6 then Some (FrameType 16) - else if x = 7 then Some (FrameType 20) - else if x = 8 then Some (FrameType 24) - else if x = 9 then Some PageTableType - else if x = 10 then Some PageDirectoryType + else if x = 5 then Some PageDirectoryType + else if x = 6 then Some (FrameType 12) + else if x = 7 then Some (FrameType 16) + else if x = 8 then Some (FrameType 20) + else if x = 9 then Some (FrameType 24) + else if x = 10 then Some PageTableType else None" definition diff --git a/proof/drefine/Tcb_DR.thy b/proof/drefine/Tcb_DR.thy index 5cfaa6ca65..6effc4ba93 100644 --- a/proof/drefine/Tcb_DR.thy +++ b/proof/drefine/Tcb_DR.thy @@ -427,7 +427,9 @@ lemma dcorres_idempotent_as_user: done lemma transform_full_intent_kheap_update_eq: - "\ q \ u' \ \ transform_full_intent (machine_state (s\kheap := kheap s(u' \ x')\)) q = transform_full_intent (machine_state s) q" + "q \ u' \ + transform_full_intent (machine_state (s\kheap := (kheap s)(u' \ x')\)) q = + transform_full_intent (machine_state s) q" by simp (* Suspend functions correspond. *) diff --git a/proof/drefine/Untyped_DR.thy b/proof/drefine/Untyped_DR.thy index b9e65d608d..ef6cb7d92f 100644 --- a/proof/drefine/Untyped_DR.thy +++ b/proof/drefine/Untyped_DR.thy @@ -65,7 +65,7 @@ next apply (rule someI2_ex, fastforce+)+ done - (* FIXME: For some reason In_Monad.in_fail doesn't fire below. This version would probably + (* FIXME: For some reason Nondet_In_Monad.in_fail doesn't fire below. This version would probably have been better in the first place. *) have [simp]: "\s. fst (fail s) = {}" by (simp add: fail_def) @@ -736,7 +736,7 @@ lemma init_arch_objects_corres_noop: done lemma monad_commute_set_cap_cdt: - "monad_commute \ (KHeap_D.set_cap ptr cap) (modify (\s. s\cdl_cdt := cdl_cdt s(ptr2 \ ptr3)\))" + "monad_commute \ (KHeap_D.set_cap ptr cap) (modify (\s. s\cdl_cdt := (cdl_cdt s)(ptr2 \ ptr3)\))" apply (clarsimp simp:monad_commute_def) apply (rule sym) apply (subst bind_assoc[symmetric]) @@ -865,7 +865,7 @@ lemma create_cap_mdb_cte_at: \ cte_wp_at ((\)cap.NullCap) parent s \ cte_at (fst tup) s\ create_cap type sz parent dev tup \\rv s. 
mdb_cte_at (swp (cte_wp_at ((\)cap.NullCap)) s) (cdt s)\" apply (simp add: create_cap_def split_def mdb_cte_at_def) - apply (wp hoare_vcg_all_lift set_cap_default_not_none set_cdt_cte_wp_at static_imp_wp dxo_wp_weak + apply (wp hoare_vcg_all_lift set_cap_default_not_none set_cdt_cte_wp_at hoare_weak_lift_imp dxo_wp_weak | simp | wps)+ apply (fastforce simp: cte_wp_at_caps_of_state) done diff --git a/proof/infoflow/ADT_IF.thy b/proof/infoflow/ADT_IF.thy index 2bfa4eeedd..bb7d5fd295 100644 --- a/proof/infoflow/ADT_IF.thy +++ b/proof/infoflow/ADT_IF.thy @@ -787,7 +787,7 @@ lemma kernel_entry_if_invs: kernel_entry_if e tc \\_. invs\" unfolding kernel_entry_if_def - by (wpsimp wp: thread_set_invs_trivial static_imp_wp + by (wpsimp wp: thread_set_invs_trivial hoare_weak_lift_imp simp: arch_tcb_update_aux2 ran_tcb_cap_cases)+ lemma kernel_entry_if_globals_equiv: @@ -796,7 +796,7 @@ lemma kernel_entry_if_globals_equiv: kernel_entry_if e tc \\_. globals_equiv st\" apply (simp add: kernel_entry_if_def) - apply (wp static_imp_wp handle_event_globals_equiv + apply (wp hoare_weak_lift_imp handle_event_globals_equiv thread_set_invs_trivial thread_set_context_globals_equiv | simp add: ran_tcb_cap_cases arch_tcb_update_aux2)+ apply (clarsimp simp: cur_thread_idle) @@ -831,7 +831,7 @@ lemma kernel_entry_silc_inv[wp]: \\_. silc_inv aag st\" unfolding kernel_entry_if_def by (wpsimp simp: ran_tcb_cap_cases arch_tcb_update_aux2 - wp: static_imp_wp handle_event_silc_inv thread_set_silc_inv thread_set_invs_trivial + wp: hoare_weak_lift_imp handle_event_silc_inv thread_set_silc_inv thread_set_invs_trivial thread_set_not_state_valid_sched thread_set_pas_refined | wp (once) hoare_vcg_imp_lift | force)+ @@ -1016,7 +1016,7 @@ lemma kernel_entry_pas_refined[wp]: \\_. pas_refined aag\" unfolding kernel_entry_if_def by (wpsimp simp: ran_tcb_cap_cases schact_is_rct_def arch_tcb_update_aux2 - wp: static_imp_wp handle_event_pas_refined thread_set_pas_refined + wp: hoare_weak_lift_imp handle_event_pas_refined thread_set_pas_refined guarded_pas_domain_lift thread_set_invs_trivial thread_set_not_state_valid_sched | force)+ @@ -1026,7 +1026,7 @@ lemma kernel_entry_if_domain_sep_inv: \\_. domain_sep_inv irqs st\" unfolding kernel_entry_if_def by (wpsimp simp: ran_tcb_cap_cases arch_tcb_update_aux2 - wp: handle_event_domain_sep_inv static_imp_wp + wp: handle_event_domain_sep_inv hoare_weak_lift_imp thread_set_invs_trivial thread_set_not_state_valid_sched)+ lemma kernel_entry_if_valid_sched: @@ -1037,7 +1037,7 @@ lemma kernel_entry_if_valid_sched: by (wpsimp simp: kernel_entry_if_def ran_tcb_cap_cases arch_tcb_update_aux2 wp: handle_event_valid_sched thread_set_invs_trivial hoare_vcg_disj_lift thread_set_no_change_tcb_state ct_in_state_thread_state_lift - thread_set_not_state_valid_sched static_imp_wp)+ + thread_set_not_state_valid_sched hoare_weak_lift_imp)+ lemma kernel_entry_if_irq_masks: "\(\s. 
P (irq_masks_of_state s)) and domain_sep_inv False st and invs\ @@ -2505,7 +2505,7 @@ proof(induct rule: cap_revoke.induct[where ?a1.0=s]) apply (wp drop_spec_validE[OF preemption_point_irq_state_inv[simplified validE_R_def]] drop_spec_validE[OF preemption_point_irq_state_inv'[where irq=irq]] drop_spec_validE[OF valid_validE[OF preemption_point_domain_sep_inv]] - cap_delete_domain_sep_inv cap_delete_irq_state_inv select_wp + cap_delete_domain_sep_inv cap_delete_irq_state_inv drop_spec_validE[OF assertE_wp] drop_spec_validE[OF returnOk_wp] drop_spec_validE[OF liftE_wp] drop_spec_validE[OF hoare_vcg_conj_liftE1] | simp | wp (once) hoare_drop_imps)+ @@ -2643,7 +2643,7 @@ lemma handle_invocation_irq_state_inv: split del: if_split) apply (wp syscall_valid) apply ((wp irq_state_inv_triv | wpc | simp)+)[2] - apply (wp static_imp_wp perform_invocation_irq_state_inv hoare_vcg_all_lift + apply (wp hoare_weak_lift_imp perform_invocation_irq_state_inv hoare_vcg_all_lift hoare_vcg_ex_lift decode_invocation_IRQHandlerCap | wpc | wp (once) hoare_drop_imps diff --git a/proof/infoflow/ARM/ArchADT_IF.thy b/proof/infoflow/ARM/ArchADT_IF.thy index 152d537534..e0438c9dec 100644 --- a/proof/infoflow/ARM/ArchADT_IF.thy +++ b/proof/infoflow/ARM/ArchADT_IF.thy @@ -67,7 +67,7 @@ lemma do_user_op_if_invs[ADT_IF_assms]: do_user_op_if f tc \\_. invs and ct_running\" apply (simp add: do_user_op_if_def split_def) - apply (wp do_machine_op_ct_in_state select_wp device_update_invs | wp (once) dmo_invs | simp)+ + apply (wp do_machine_op_ct_in_state device_update_invs | wp (once) dmo_invs | simp)+ apply (clarsimp simp: user_mem_def user_memory_update_def simpler_modify_def restrict_map_def invs_def cur_tcb_def ptable_rights_s_def ptable_lift_s_def) apply (frule ptable_rights_imp_frame) @@ -77,31 +77,31 @@ lemma do_user_op_if_invs[ADT_IF_assms]: done crunch domain_sep_inv[ADT_IF_assms, wp]: do_user_op_if "domain_sep_inv irqs st" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) crunch valid_sched[ADT_IF_assms, wp]: do_user_op_if "valid_sched" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) crunch irq_masks[ADT_IF_assms, wp]: do_user_op_if "\s. P (irq_masks_of_state s)" - (ignore: user_memory_update wp: select_wp dmo_wp no_irq) + (ignore: user_memory_update wp: dmo_wp no_irq) crunch valid_list[ADT_IF_assms, wp]: do_user_op_if "valid_list" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) lemma do_user_op_if_scheduler_action[ADT_IF_assms, wp]: "do_user_op_if f tc \\s. P (scheduler_action s)\" - by (simp add: do_user_op_if_def | wp select_wp | wpc)+ + by (simp add: do_user_op_if_def | wp | wpc)+ lemma do_user_op_silc_inv[ADT_IF_assms, wp]: "do_user_op_if f tc \silc_inv aag st\" apply (simp add: do_user_op_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma do_user_op_pas_refined[ADT_IF_assms, wp]: "do_user_op_if f tc \pas_refined aag\" apply (simp add: do_user_op_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done crunches do_user_op_if @@ -109,7 +109,7 @@ crunches do_user_op_if and cur_domain[ADT_IF_assms, wp]: "\s. P (cur_domain s)" and idle_thread[ADT_IF_assms, wp]: "\s. 
P (idle_thread s)" and domain_fields[ADT_IF_assms, wp]: "domain_fields P" - (wp: select_wp ignore: user_memory_update) + (ignore: user_memory_update) lemma do_use_op_guarded_pas_domain[ADT_IF_assms, wp]: "do_user_op_if f tc \guarded_pas_domain aag\" @@ -235,7 +235,7 @@ lemma do_user_op_if_idle_equiv[ADT_IF_assms, wp]: do_user_op_if uop tc \\_. idle_equiv st\" unfolding do_user_op_if_def - by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv select_wp) + by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv) lemma not_in_global_refs_vs_lookup: "\ (\\p) s; valid_vs_lookup s; valid_global_refs s; valid_arch_state s; valid_global_objs s \ @@ -254,7 +254,7 @@ lemma kernel_entry_if_valid_pdpt_objs[wp]: apply (simp add: kernel_entry_if_def) apply (wp | wpc | simp add: kernel_entry_if_def)+ apply (wpsimp simp: ran_tcb_cap_cases arch_tcb_update_aux2 - wp: static_imp_wp thread_set_invs_trivial)+ + wp: hoare_weak_lift_imp thread_set_invs_trivial)+ done lemma kernel_entry_if_valid_vspace_objs_if[ADT_IF_assms, wp]: @@ -273,7 +273,7 @@ lemma schedule_if_valid_pdpt_objs[ADT_IF_assms, wp]: lemma do_user_op_if_valid_pdpt_objs[ADT_IF_assms, wp]: "\valid_vspace_objs_if\ do_user_op_if a b \\rv s. valid_vspace_objs_if s\" - by (simp add: do_user_op_if_def | wp select_wp | wpc)+ + by (simp add: do_user_op_if_def | wp | wpc)+ lemma valid_vspace_objs_if_ms_update[ADT_IF_assms, simp]: "valid_vspace_objs_if (machine_state_update f s) = valid_vspace_objs_if s" @@ -282,20 +282,20 @@ lemma valid_vspace_objs_if_ms_update[ADT_IF_assms, simp]: lemma do_user_op_if_irq_state_of_state[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_state_of_state s)\" apply (rule hoare_pre) - apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp select_wp | wpc)+ + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ done lemma do_user_op_if_irq_masks_of_state[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_masks_of_state s)\" apply (rule hoare_pre) - apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp select_wp | wpc)+ + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ done lemma do_user_op_if_irq_measure_if[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_measure_if s)\" apply (rule hoare_pre) apply (simp add: do_user_op_if_def user_memory_update_def irq_measure_if_def - | wps |wp dmo_wp select_wp | wpc)+ + | wps |wp dmo_wp | wpc)+ done lemma invoke_tcb_irq_state_inv[ADT_IF_assms]: diff --git a/proof/infoflow/ARM/ArchArch_IF.thy b/proof/infoflow/ARM/ArchArch_IF.thy index 92215d8e0d..0f42093362 100644 --- a/proof/infoflow/ARM/ArchArch_IF.thy +++ b/proof/infoflow/ARM/ArchArch_IF.thy @@ -76,7 +76,7 @@ crunch irq_state_of_state[wp]: arch_perform_invocation "\s. P (irq_state crunch irq_state_of_state[Arch_IF_assms, wp]: arch_finalise_cap, prepare_thread_delete "\s :: det_state. 
P (irq_state_of_state s)" - (wp: select_wp modify_wp crunch_wps dmo_wp + (wp: modify_wp crunch_wps dmo_wp simp: crunch_simps invalidateLocalTLB_ASID_def dsb_def cleanCaches_PoU_def invalidate_I_PoU_def clean_D_PoU_def) @@ -765,9 +765,9 @@ lemma perform_page_invocation_reads_respects: lemma equiv_asids_arm_asid_table_update: "\ equiv_asids R s t; kheap s pool_ptr = kheap t pool_ptr \ \ equiv_asids R - (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s) + (s\arch_state := arch_state s\arm_asid_table := (asid_table s) (asid_high_bits_of asid \ pool_ptr)\\) - (t\arch_state := arch_state t\arm_asid_table := arm_asid_table (arch_state t) + (t\arch_state := arch_state t\arm_asid_table := (asid_table t) (asid_high_bits_of asid \ pool_ptr)\\)" by (clarsimp simp: equiv_asids_def equiv_asid_def asid_pool_at_kheap) @@ -1428,8 +1428,8 @@ lemma set_mrs_globals_equiv: apply (clarsimp) apply (insert length_msg_lt_msg_max) apply (simp) - apply (wp set_object_globals_equiv static_imp_wp) - apply (wp hoare_vcg_all_lift set_object_globals_equiv static_imp_wp)+ + apply (wp set_object_globals_equiv hoare_weak_lift_imp) + apply (wp hoare_vcg_all_lift set_object_globals_equiv hoare_weak_lift_imp)+ apply (clarsimp simp:arm_global_pd_not_tcb)+ done @@ -1444,7 +1444,7 @@ lemma perform_page_invocation_globals_equiv: apply (wp mapM_swp_store_pte_globals_equiv hoare_vcg_all_lift dmo_cacheRangeOp_lift mapM_swp_store_pde_globals_equiv mapM_x_swp_store_pte_globals_equiv mapM_x_swp_store_pde_globals_equiv set_cap_globals_equiv'' - unmap_page_globals_equiv store_pte_globals_equiv store_pde_globals_equiv static_imp_wp + unmap_page_globals_equiv store_pte_globals_equiv store_pde_globals_equiv hoare_weak_lift_imp do_flush_globals_equiv set_mrs_globals_equiv set_message_info_globals_equiv | wpc | simp add: do_machine_op_bind cleanByVA_PoU_def)+ by (auto simp: cte_wp_parent_not_global_pd authorised_for_globals_page_inv_def valid_page_inv_def @@ -1479,7 +1479,7 @@ lemma perform_asid_control_invocation_globals_equiv: max_index_upd_invs_simple set_cap_no_overlap set_cap_caps_no_overlap max_index_upd_caps_overlap_reserved region_in_kernel_window_preserved - hoare_vcg_all_lift get_cap_wp static_imp_wp + hoare_vcg_all_lift get_cap_wp hoare_weak_lift_imp set_cap_idx_up_aligned_area[where dev = False,simplified] | simp)+ (* factor out the implication -- we know what the relevant components of the diff --git a/proof/infoflow/ARM/ArchDecode_IF.thy b/proof/infoflow/ARM/ArchDecode_IF.thy index 4905fef5ae..4a257da281 100644 --- a/proof/infoflow/ARM/ArchDecode_IF.thy +++ b/proof/infoflow/ARM/ArchDecode_IF.thy @@ -204,7 +204,7 @@ lemma arch_decode_invocation_reads_respects_f[Decode_IF_assms]: apply (wp check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] reads_respects_f_inv'[OF ensure_empty_rev] reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] - reads_respects_f_inv'[OF ensure_no_children_rev] select_wp + reads_respects_f_inv'[OF ensure_no_children_rev] reads_respects_f_inv'[OF ensure_safe_mapping_reads_respects] reads_respects_f_inv'[OF resolve_vaddr_reads_respects] reads_respects_f_inv'[OF create_mapping_entries_rev] diff --git a/proof/infoflow/ARM/ArchFinalCaps.thy b/proof/infoflow/ARM/ArchFinalCaps.thy index 261ac95d0c..63ffbffb7c 100644 --- a/proof/infoflow/ARM/ArchFinalCaps.thy +++ b/proof/infoflow/ARM/ArchFinalCaps.thy @@ -181,7 +181,7 @@ lemma perform_page_invocation_silc_inv: apply (wp mapM_wp[OF _ subset_refl] set_cap_silc_inv mapM_x_wp[OF _ subset_refl] 
perform_page_table_invocation_silc_inv_get_cap_helper'[where st=st] - hoare_vcg_all_lift hoare_vcg_if_lift static_imp_wp + hoare_vcg_all_lift hoare_vcg_if_lift hoare_weak_lift_imp | wpc | simp only: swp_def o_def fun_app_def K_def | wp (once) hoare_drop_imps)+ @@ -212,7 +212,7 @@ lemma perform_asid_control_invocation_silc_inv: apply (rule hoare_pre) apply (wp modify_wp cap_insert_silc_inv' retype_region_silc_inv[where sz=pageBits] set_cap_silc_inv get_cap_slots_holding_overlapping_caps[where st=st] - delete_objects_silc_inv static_imp_wp + delete_objects_silc_inv hoare_weak_lift_imp | wpc | simp )+ apply (clarsimp simp: authorised_asid_control_inv_def silc_inv_def valid_aci_def ptr_range_def page_bits_def) apply (rule conjI) @@ -275,15 +275,15 @@ lemma arch_invoke_irq_control_silc_inv[FinalCaps_assms]: done lemma invoke_tcb_silc_inv[FinalCaps_assms]: - notes static_imp_wp [wp] - static_imp_conj_wp [wp] + notes hoare_weak_lift_imp [wp] + hoare_weak_lift_imp_conj [wp] shows "\silc_inv aag st and einvs and simple_sched_action and pas_refined aag and tcb_inv_wf tinv and K (authorised_tcb_inv aag tinv)\ invoke_tcb tinv \\_. silc_inv aag st\" apply (case_tac tinv) apply ((wp restart_silc_inv hoare_vcg_if_lift suspend_silc_inv mapM_x_wp[OF _ subset_refl] - static_imp_wp + hoare_weak_lift_imp | wpc | simp split del: if_split add: authorised_tcb_inv_def check_cap_at_def | clarsimp diff --git a/proof/infoflow/ARM/ArchIRQMasks_IF.thy b/proof/infoflow/ARM/ArchIRQMasks_IF.thy index d532b90317..b9819e009f 100644 --- a/proof/infoflow/ARM/ArchIRQMasks_IF.thy +++ b/proof/infoflow/ARM/ArchIRQMasks_IF.thy @@ -31,7 +31,7 @@ crunch irq_masks[IRQMasks_IF_assms, wp]: invoke_untyped "\s. P (irq_mask mapM_x_def_bak unless_def) crunch irq_masks[IRQMasks_IF_assms, wp]: finalise_cap "\s. P (irq_masks_of_state s)" - (wp: select_wp crunch_wps dmo_wp no_irq + (wp: crunch_wps dmo_wp no_irq simp: crunch_simps no_irq_setHardwareASID no_irq_invalidateLocalTLB_ASID no_irq_set_current_pd no_irq_invalidateLocalTLB_VAASID no_irq_cleanByVA_PoU) @@ -80,14 +80,14 @@ lemma dmo_getActiveIRQ_return_axiom[IRQMasks_IF_assms, wp]: apply (simp add: getActiveIRQ_def) apply (rule hoare_pre, rule dmo_wp) apply (insert irq_oracle_max_irq) - apply (wp alternative_wp select_wp dmo_getActiveIRQ_irq_masks) + apply (wp dmo_getActiveIRQ_irq_masks) apply clarsimp done crunch irq_masks[IRQMasks_IF_assms, wp]: activate_thread "\s. P (irq_masks_of_state s)" crunch irq_masks[IRQMasks_IF_assms, wp]: schedule "\s. P (irq_masks_of_state s)" - (wp: dmo_wp alternative_wp select_wp crunch_wps simp: crunch_simps clearExMonitor_def) + (wp: dmo_wp crunch_wps simp: crunch_simps clearExMonitor_def) end @@ -139,13 +139,13 @@ lemma invoke_tcb_irq_masks[IRQMasks_IF_assms]: apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) apply fastforce apply blast - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ apply (rule_tac Q="\ r s. domain_sep_inv False st s \ P (irq_masks_of_state s)" and E="\_ s. 
P (irq_masks_of_state s)" in hoare_post_impErr) apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) apply fastforce apply blast - apply (simp add: option_update_thread_def | wp static_imp_wp hoare_vcg_all_lift | wpc)+ + apply (simp add: option_update_thread_def | wp hoare_weak_lift_imp hoare_vcg_all_lift | wpc)+ by fastforce+ end diff --git a/proof/infoflow/ARM/ArchIpc_IF.thy b/proof/infoflow/ARM/ArchIpc_IF.thy index 1f1c607746..00429cf5e8 100644 --- a/proof/infoflow/ARM/ArchIpc_IF.thy +++ b/proof/infoflow/ARM/ArchIpc_IF.thy @@ -420,7 +420,7 @@ lemma set_mrs_equiv_but_for_labels[Ipc_IF_assms]: apply (simp add: word_size_def) apply (erule is_aligned_no_overflow') apply simp - apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift static_imp_wp | simp)+ + apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift hoare_weak_lift_imp | simp)+ apply (fastforce dest: get_tcb_not_asid_pool_at)+ done diff --git a/proof/infoflow/ARM/ArchNoninterference.thy b/proof/infoflow/ARM/ArchNoninterference.thy index e4dfcc2a17..b9958a3093 100644 --- a/proof/infoflow/ARM/ArchNoninterference.thy +++ b/proof/infoflow/ARM/ArchNoninterference.thy @@ -23,9 +23,10 @@ lemma do_user_op_if_integrity[Noninterference_assms]: \\_. integrity aag X st\" apply (simp add: do_user_op_if_def) apply (wpsimp wp: dmo_user_memory_update_respects_Write dmo_device_update_respects_Write - hoare_vcg_all_lift hoare_vcg_imp_lift) + hoare_vcg_all_lift hoare_vcg_imp_lift + wp_del: select_wp) apply (rule hoare_pre_cont) - apply (wp select_wp | wpc | clarsimp)+ + apply (wp | wpc | clarsimp)+ apply (rule conjI) apply clarsimp apply (simp add: restrict_map_def ptable_lift_s_def ptable_rights_s_def split: if_splits) @@ -53,12 +54,12 @@ lemma do_user_op_if_globals_equiv_scheduler[Noninterference_assms]: \\_. 
globals_equiv_scheduler st\" apply (simp add: do_user_op_if_def) apply (wpsimp wp: dmo_user_memory_update_globals_equiv_scheduler - dmo_device_memory_update_globals_equiv_scheduler select_wp)+ + dmo_device_memory_update_globals_equiv_scheduler)+ apply (auto simp: ptable_lift_s_def ptable_rights_s_def) done crunch silc_dom_equiv[Noninterference_assms, wp]: do_user_op_if "silc_dom_equiv aag st" - (ignore: do_machine_op user_memory_update wp: crunch_wps select_wp) + (ignore: do_machine_op user_memory_update wp: crunch_wps) lemma sameFor_scheduler_affects_equiv[Noninterference_assms]: "\ (s,s') \ same_for aag PSched; (s,s') \ same_for aag (Partition l); @@ -350,7 +351,7 @@ lemma getActiveIRQ_ret_no_dmo[Noninterference_assms, wp]: apply (simp add: getActiveIRQ_def) apply (rule hoare_pre) apply (insert irq_oracle_max_irq) - apply (wp alternative_wp select_wp dmo_getActiveIRQ_irq_masks) + apply (wp dmo_getActiveIRQ_irq_masks) apply clarsimp done @@ -375,7 +376,7 @@ lemma dmo_getActive_IRQ_reads_respect_scheduler[Noninterference_assms]: lemma integrity_asids_update_reference_state[Noninterference_assms]: "is_subject aag t - \ integrity_asids aag {pasSubject aag} x asid s (s\kheap := kheap s(t \ blah)\)" + \ integrity_asids aag {pasSubject aag} x asid s (s\kheap := (kheap s)(t \ blah)\)" by clarsimp lemma getActiveIRQ_no_non_kernel_IRQs[Noninterference_assms]: diff --git a/proof/infoflow/ARM/ArchPasUpdates.thy b/proof/infoflow/ARM/ArchPasUpdates.thy index 1c81b5482c..8f8aaaa622 100644 --- a/proof/infoflow/ARM/ArchPasUpdates.thy +++ b/proof/infoflow/ARM/ArchPasUpdates.thy @@ -14,7 +14,7 @@ named_theorems PasUpdates_assms crunches arch_post_cap_deletion, arch_finalise_cap, prepare_thread_delete for domain_fields[PasUpdates_assms, wp]: "domain_fields P" - ( wp: syscall_valid select_wp crunch_wps rec_del_preservation cap_revoke_preservation modify_wp + ( wp: syscall_valid crunch_wps rec_del_preservation cap_revoke_preservation modify_wp simp: crunch_simps check_cap_at_def filterM_mapM unless_def ignore: without_preemption filterM rec_del check_cap_at cap_revoke ignore_del: retype_region_ext create_cap_ext cap_insert_ext ethread_set cap_move_ext diff --git a/proof/infoflow/ARM/ArchScheduler_IF.thy b/proof/infoflow/ARM/ArchScheduler_IF.thy index f2406c7032..54cc779509 100644 --- a/proof/infoflow/ARM/ArchScheduler_IF.thy +++ b/proof/infoflow/ARM/ArchScheduler_IF.thy @@ -131,12 +131,12 @@ lemma thread_set_context_globals_equiv[Scheduler_IF_assms]: lemma arch_scheduler_affects_equiv_update[Scheduler_IF_assms]: "arch_scheduler_affects_equiv st s - \ arch_scheduler_affects_equiv st (s\kheap := kheap s(x \ TCB y')\)" + \ arch_scheduler_affects_equiv st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: arch_scheduler_affects_equiv_def) lemma equiv_asid_equiv_update[Scheduler_IF_assms]: "\ get_tcb x s = Some y; equiv_asid asid st s \ - \ equiv_asid asid st (s\kheap := kheap s(x \ TCB y')\)" + \ equiv_asid asid st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: equiv_asid_def obj_at_def get_tcb_def) end @@ -434,7 +434,7 @@ lemma thread_set_scheduler_affects_equiv[Scheduler_IF_assms, wp]: split: option.splits kernel_object.splits) apply (subst arch_tcb_update_aux) apply simp - apply (subgoal_tac "s = (s\kheap := kheap s(idle_thread s \ TCB y)\)", simp) + apply (subgoal_tac "s = (s\kheap := (kheap s)(idle_thread s \ TCB y)\)", simp) apply (rule state.equality) apply (rule ext) apply simp+ diff --git a/proof/infoflow/ARM/ArchSyscall_IF.thy b/proof/infoflow/ARM/ArchSyscall_IF.thy index 
bfaa82c626..709535dc31 100644 --- a/proof/infoflow/ARM/ArchSyscall_IF.thy +++ b/proof/infoflow/ARM/ArchSyscall_IF.thy @@ -120,7 +120,7 @@ lemma decode_arch_invocation_authorised_for_globals[Syscall_IF_assms]: apply (simp add: split_def Let_def cong: cap.case_cong arch_cap.case_cong if_cong option.case_cong split del: if_split) - apply (wp select_wp select_ext_weak_wp whenE_throwError_wp check_vp_wpR unlessE_wp get_pde_wp + apply (wp select_ext_weak_wp whenE_throwError_wp check_vp_wpR unlessE_wp get_pde_wp get_master_pde_wp find_pd_for_asid_authority3 create_mapping_entries_parent_for_refs | wpc | simp add: authorised_for_globals_page_inv_def diff --git a/proof/infoflow/ARM/ArchTcb_IF.thy b/proof/infoflow/ARM/ArchTcb_IF.thy index 2d068493a0..b5800ea077 100644 --- a/proof/infoflow/ARM/ArchTcb_IF.thy +++ b/proof/infoflow/ARM/ArchTcb_IF.thy @@ -121,7 +121,7 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid check_cap_inv2[where Q="\_. P"] cap_delete_P cap_insert_P thread_set_P thread_set_P' set_mcpriority_P set_mcpriority_idle_thread - dxo_wp_weak static_imp_wp) + dxo_wp_weak hoare_weak_lift_imp) | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def del: hoare_True_E_R | wpc @@ -144,7 +144,7 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: lemma tc_reads_respects_f[Tcb_IF_assms]: assumes domains_distinct[wp]: "pas_domains_distinct aag" and tc[simp]: "ti = ThreadControl x41 x42 x43 x44 x45 x46 x47 x48" - notes validE_valid[wp del] static_imp_wp [wp] + notes validE_valid[wp del] hoare_weak_lift_imp [wp] shows "reads_respects_f aag l (silc_inv aag st and only_timer_irq_inv irq st' and einvs and simple_sched_action @@ -221,7 +221,7 @@ lemma tc_reads_respects_f[Tcb_IF_assms]: invs_psp_aligned invs_vspace_objs invs_arch_state | wp (once) hoare_drop_imp)+ apply (simp add: option_update_thread_def tcb_cap_cases_def - | wp static_imp_wp static_imp_conj_wp thread_set_pas_refined + | wp hoare_weak_lift_imp hoare_weak_lift_imp_conj thread_set_pas_refined reads_respects_f[OF thread_set_reads_respects, where st=st and Q="\"] | wpc)+ apply (wp hoare_vcg_all_lift thread_set_tcb_fault_handler_update_invs diff --git a/proof/infoflow/ARM/ArchUserOp_IF.thy b/proof/infoflow/ARM/ArchUserOp_IF.thy index 02bf34edc7..394b5ac167 100644 --- a/proof/infoflow/ARM/ArchUserOp_IF.thy +++ b/proof/infoflow/ARM/ArchUserOp_IF.thy @@ -982,7 +982,7 @@ lemma do_user_op_reads_respects_g: apply (rule spec_equiv_valid_guard_imp) apply (wpsimp wp: dmo_user_memory_update_reads_respects_g dmo_device_state_update_reads_respects_g dmo_setExMonitor_reads_respects_g dmo_device_state_update_reads_respects_g - select_ev select_wp dmo_getExMonitor_reads_respects_g dmo_wp) + select_ev dmo_getExMonitor_reads_respects_g dmo_wp) apply clarsimp apply (rule conjI) apply clarsimp diff --git a/proof/infoflow/Arch_IF.thy b/proof/infoflow/Arch_IF.thy index 41907814de..20b15e6bf0 100644 --- a/proof/infoflow/Arch_IF.thy +++ b/proof/infoflow/Arch_IF.thy @@ -437,7 +437,7 @@ crunch irq_state_of_state[wp]: schedule "\s. P (irq_state_of_state s)" simp: machine_op_lift_def machine_rest_lift_def crunch_simps) crunch irq_state_of_state[wp]: finalise_cap "\s. P (irq_state_of_state s)" - (wp: select_wp modify_wp crunch_wps dmo_wp simp: crunch_simps) + (wp: modify_wp crunch_wps dmo_wp simp: crunch_simps) crunch irq_state_of_state[wp]: send_signal, restart "\s. 
P (irq_state_of_state s)" diff --git a/proof/infoflow/FinalCaps.thy b/proof/infoflow/FinalCaps.thy index 60706a9b88..c90d1c1e32 100644 --- a/proof/infoflow/FinalCaps.thy +++ b/proof/infoflow/FinalCaps.thy @@ -647,8 +647,7 @@ lemma set_cap_slots_holding_overlapping_caps_helper: obj_refs cap = {} \ cap_irqs cap \ {}; ko_at (TCB tcb) (fst slot) s; tcb_cap_cases (snd slot) = Some (getF, setF, blah) \ \ x \ slots_holding_overlapping_caps cap - (s\kheap := kheap s(fst slot \ - TCB (setF (\ x. capa) tcb))\)" + (s\kheap := (kheap s)(fst slot \ TCB (setF (\x. capa) tcb))\)" apply (clarsimp simp: slots_holding_overlapping_caps_def) apply (rule_tac x=cap' in exI) apply (clarsimp simp: get_cap_cte_wp_at') @@ -741,7 +740,7 @@ lemma set_cap_silc_inv: apply (rule equiv_forI) apply (erule use_valid) unfolding set_cap_def - apply (wp set_object_wp get_object_wp static_imp_wp | simp add: split_def | wpc)+ + apply (wp set_object_wp get_object_wp hoare_weak_lift_imp | simp add: split_def | wpc)+ apply clarsimp apply (rule conjI) apply fastforce @@ -919,7 +918,7 @@ lemma cap_swap_silc_inv: apply (rule hoare_gen_asm) unfolding cap_swap_def apply (rule hoare_pre) - apply (wp set_cap_silc_inv hoare_vcg_ex_lift static_imp_wp + apply (wp set_cap_silc_inv hoare_vcg_ex_lift hoare_weak_lift_imp set_cap_slots_holding_overlapping_caps_other[where aag=aag] set_cdt_silc_inv | simp split del: if_split)+ apply (rule conjI) @@ -955,7 +954,7 @@ lemma cap_move_silc_inv: apply (rule hoare_pre) apply (wp set_cap_silc_inv hoare_vcg_ex_lift set_cap_slots_holding_overlapping_caps_other[where aag=aag] - set_cdt_silc_inv static_imp_wp + set_cdt_silc_inv hoare_weak_lift_imp | simp)+ apply (rule conjI) apply (fastforce simp: cap_points_to_label_def) @@ -985,7 +984,7 @@ lemma cap_insert_silc_inv: \\_. silc_inv aag st\" unfolding cap_insert_def (* The order here matters. The first two need to be first. *) - apply (wp assert_wp static_imp_conj_wp set_cap_silc_inv hoare_vcg_ex_lift + apply (wp assert_wp hoare_weak_lift_imp_conj set_cap_silc_inv hoare_vcg_ex_lift set_untyped_cap_as_full_slots_holding_overlapping_caps_other[where aag=aag] get_cap_wp update_cdt_silc_inv | simp | wp (once) hoare_drop_imps)+ apply clarsimp @@ -1208,9 +1207,9 @@ lemma reply_cancel_ipc_silc_inv: reply_cancel_ipc t \\_. 
silc_inv aag st\" unfolding reply_cancel_ipc_def - apply (wp cap_delete_one_silc_inv select_wp hoare_vcg_if_lift | simp)+ + apply (wp cap_delete_one_silc_inv hoare_vcg_if_lift | simp)+ apply wps - apply (wp static_imp_wp hoare_vcg_all_lift hoare_vcg_ball_lift) + apply (wp hoare_weak_lift_imp hoare_vcg_all_lift hoare_vcg_ball_lift) apply clarsimp apply (rename_tac b a) apply (frule(1) descendants_of_owned_or_transferable, force, force, elim disjE) @@ -1569,7 +1568,7 @@ lemma rec_del_silc_inv': valid_validE_R[OF rec_del_respects(2)[simplified]] "2.hyps" drop_spec_validE[OF liftE_wp] set_cap_silc_inv set_cap_pas_refined replace_cap_invs final_cap_same_objrefs set_cap_cte_cap_wp_to - set_cap_cte_wp_at static_imp_wp hoare_vcg_ball_lift + set_cap_cte_wp_at hoare_weak_lift_imp hoare_vcg_ball_lift | simp add: finalise_cap_not_reply_master_unlifted split del: if_split)+ (* where the action is *) apply (simp cong: conj_cong add: conj_comms) @@ -1608,7 +1607,7 @@ lemma rec_del_silc_inv': finalise_cap_invs[where slot=slot] finalise_cap_replaceable[where sl=slot] finalise_cap_makes_halted[where slot=slot] - finalise_cap_auth' static_imp_wp) + finalise_cap_auth' hoare_weak_lift_imp) apply (wp drop_spec_validE[OF liftE_wp] get_cap_auth_wp[where aag=aag] | simp add: is_final_cap_def)+ @@ -1719,7 +1718,7 @@ lemma rec_del_silc_inv_CTEDelete_transferable': apply (wp rec_del_silc_inv_not_transferable) apply simp apply (subst rec_del.simps[abs_def]) - apply (wp add: hoare_K_bind without_preemption_wp empty_slot_silc_inv static_imp_wp wp_transferable + apply (wp add: hoare_K_bind without_preemption_wp empty_slot_silc_inv hoare_weak_lift_imp wp_transferable rec_del_Finalise_transferable del: wp_not_transferable | wpc)+ @@ -1773,7 +1772,7 @@ lemma cap_revoke_silc_inv': apply (rule spec_valid_conj_liftE1, (wp | simp)+) apply (rule drop_spec_validE[OF valid_validE[OF cap_delete_silc_inv]]) apply (wp drop_spec_validE[OF assertE_wp] drop_spec_validE[OF without_preemption_wp] - get_cap_wp select_wp drop_spec_validE[OF returnOk_wp])+ + get_cap_wp drop_spec_validE[OF returnOk_wp])+ apply clarsimp apply (clarsimp cong: conj_cong simp: conj_comms) apply (rule conjI) @@ -2161,7 +2160,7 @@ lemma cap_insert_silc_inv': apply (wp set_cap_silc_inv hoare_vcg_ex_lift set_untyped_cap_as_full_slots_holding_overlapping_caps_other[where aag=aag] get_cap_wp update_cdt_silc_inv set_cap_caps_of_state2 - set_untyped_cap_as_full_cdt_is_original_cap static_imp_wp + set_untyped_cap_as_full_cdt_is_original_cap hoare_weak_lift_imp | simp split del: if_split)+ apply (intro allI impI conjI) apply clarsimp @@ -2284,7 +2283,7 @@ lemma cap_insert_silc_inv''': apply (wp set_cap_silc_inv hoare_vcg_ex_lift set_untyped_cap_as_full_slots_holding_overlapping_caps_other[where aag=aag] get_cap_wp update_cdt_silc_inv set_cap_caps_of_state2 - set_untyped_cap_as_full_cdt_is_original_cap static_imp_wp + set_untyped_cap_as_full_cdt_is_original_cap hoare_weak_lift_imp | simp split del: if_split)+ apply (intro impI conjI allI) apply clarsimp @@ -2321,7 +2320,7 @@ lemma invoke_irq_handler_silc_inv: apply (rule hoare_gen_asm) apply (case_tac hi) apply (wp cap_insert_silc_inv'' cap_delete_one_silc_inv_subject cap_delete_one_cte_wp_at_other - static_imp_wp hoare_vcg_ex_lift + hoare_weak_lift_imp hoare_vcg_ex_lift slots_holding_overlapping_caps_from_silc_inv[where aag=aag and st=st] | simp add: authorised_irq_hdl_inv_def get_irq_slot_def conj_comms)+ apply (clarsimp simp: pas_refined_def irq_map_wellformed_aux_def) @@ -2489,7 +2488,7 @@ lemma send_ipc_silc_inv: 
send_ipc block call badge can_grant can_grant_reply thread epptr \\_. silc_inv aag st\" unfolding send_ipc_def - apply (wp setup_caller_cap_silc_inv static_imp_wp do_ipc_transfer_silc_inv gts_wp + apply (wp setup_caller_cap_silc_inv hoare_weak_lift_imp do_ipc_transfer_silc_inv gts_wp | wpc | simp add:st_tcb_at_tcb_states_of_state_eq | rule conjI impI @@ -2544,7 +2543,7 @@ lemma receive_ipc_base_silc_inv: \\_. silc_inv aag st\" apply (clarsimp simp: thread_get_def get_thread_state_def cong: endpoint.case_cong) apply (rule hoare_pre) - apply (wp setup_caller_cap_silc_inv static_imp_wp do_ipc_transfer_silc_inv + apply (wp setup_caller_cap_silc_inv hoare_weak_lift_imp do_ipc_transfer_silc_inv | wpc | simp split del: if_split)+ apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift set_simple_ko_get_tcb | wpc | simp split del: if_split)+ @@ -2632,7 +2631,7 @@ lemma setup_reply_master_silc_inv: unfolding setup_reply_master_def apply (wp set_cap_silc_inv hoare_vcg_ex_lift slots_holding_overlapping_caps_from_silc_inv[where aag=aag and st=st and P="\"] - get_cap_wp static_imp_wp + get_cap_wp hoare_weak_lift_imp | simp)+ apply (clarsimp simp: cap_points_to_label_def silc_inv_def) done @@ -2856,7 +2855,7 @@ lemma handle_event_silc_inv: crunch silc_inv[wp]: activate_thread "silc_inv aag st" crunch silc_inv[wp]: schedule "silc_inv aag st" - ( wp: alternative_wp OR_choice_weak_wp select_wp crunch_wps + ( wp: OR_choice_weak_wp crunch_wps ignore: set_scheduler_action simp: crunch_simps) diff --git a/proof/infoflow/Finalise_IF.thy b/proof/infoflow/Finalise_IF.thy index 1b39733a63..232c643622 100644 --- a/proof/infoflow/Finalise_IF.thy +++ b/proof/infoflow/Finalise_IF.thy @@ -601,7 +601,7 @@ lemma possible_switch_to_reads_respects: (possible_switch_to tptr)" apply (simp add: possible_switch_to_def ethread_get_def) apply (case_tac "aag_can_read aag tptr \ aag_can_affect aag l tptr") - apply (wp static_imp_wp tcb_sched_action_reads_respects | wpc | simp)+ + apply (wp hoare_weak_lift_imp tcb_sched_action_reads_respects | wpc | simp)+ apply (clarsimp simp: get_etcb_def) apply ((intro conjI impI allI | elim aag_can_read_self reads_equivE affects_equivE equiv_forE conjE disjE @@ -994,7 +994,7 @@ lemma reply_cancel_ipc_reads_respects_f: unfolding reply_cancel_ipc_def apply (rule gen_asm_ev) apply (wp cap_delete_one_reads_respects_f_transferable[where st=st] - select_singleton_ev select_inv select_wp assert_wp + select_singleton_ev select_inv assert_wp reads_respects_f[OF get_cap_rev, where st=st] reads_respects_f[OF thread_set_reads_respects, where st=st] reads_respects_f[OF gets_descendants_of_revrv[folded equiv_valid_def2]] @@ -1208,7 +1208,7 @@ next apply (wp drop_spec_ev[OF liftE_ev] set_cap_reads_respects_f[where st=st] set_cap_silc_inv[where st=st] | simp)+ apply (wp replace_cap_invs set_cap_cte_wp_at set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to hoare_vcg_const_Ball_lift static_imp_wp + set_cap_cte_cap_wp_to hoare_vcg_const_Ball_lift hoare_weak_lift_imp drop_spec_ev[OF liftE_ev] finalise_cap_reads_respects set_cap_silc_inv set_cap_only_timer_irq_inv set_cap_pas_refined_not_transferable | simp add: cte_wp_at_eq_simp diff --git a/proof/infoflow/IRQMasks_IF.thy b/proof/infoflow/IRQMasks_IF.thy index 897f34c316..026a172a59 100644 --- a/proof/infoflow/IRQMasks_IF.thy +++ b/proof/infoflow/IRQMasks_IF.thy @@ -169,7 +169,7 @@ end crunch irq_masks[wp]: cancel_ipc "\s. 
P (irq_masks_of_state s)" - (wp: select_wp crunch_wps simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_masks[wp]: restart, set_mcpriority "\s. P (irq_masks_of_state s)" @@ -212,7 +212,7 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) drop_spec_validE[OF valid_validE[OF preemption_point_domain_sep_inv]] cap_delete_domain_sep_inv cap_delete_irq_masks drop_spec_validE[OF assertE_wp] drop_spec_validE[OF returnOk_wp] - drop_spec_validE[OF liftE_wp] select_wp + drop_spec_validE[OF liftE_wp] drop_spec_validE[OF hoare_vcg_conj_liftE1] | simp | wp (once) hoare_drop_imps)+ apply fastforce @@ -306,7 +306,7 @@ lemma handle_invocation_irq_masks: \\rv s. P (irq_masks_of_state s)\" apply (simp add: handle_invocation_def ts_Restart_case_helper split_def liftE_liftM_liftME liftME_def bindE_assoc) - apply (wp static_imp_wp syscall_valid perform_invocation_irq_masks[where st=st] + apply (wp hoare_weak_lift_imp syscall_valid perform_invocation_irq_masks[where st=st] hoare_vcg_all_lift hoare_vcg_ex_lift decode_invocation_IRQHandlerCap | simp add: invs_valid_objs)+ done diff --git a/proof/infoflow/InfoFlow_IF.thy b/proof/infoflow/InfoFlow_IF.thy index 1b2e1eeb73..2de302236b 100644 --- a/proof/infoflow/InfoFlow_IF.thy +++ b/proof/infoflow/InfoFlow_IF.thy @@ -677,7 +677,7 @@ lemma requiv_wuc_eq[intro]: by (simp add: reads_equiv_def2) lemma update_object_noop: - "kheap s ptr = Some obj \ s\kheap := kheap s(ptr \ obj)\ = s" + "kheap s ptr = Some obj \ s\kheap := (kheap s)(ptr \ obj)\ = s" by (clarsimp simp: map_upd_triv) lemma set_object_rev: diff --git a/proof/infoflow/Interrupt_IF.thy b/proof/infoflow/Interrupt_IF.thy index be97fd3eca..862c1b8d81 100644 --- a/proof/infoflow/Interrupt_IF.thy +++ b/proof/infoflow/Interrupt_IF.thy @@ -44,7 +44,7 @@ lemma invoke_irq_handler_reads_respects_f: cap_delete_one_reads_respects_f[where st=st] reads_respects_f[OF get_irq_slot_reads_respects, where Q="\"] cap_insert_silc_inv'' cap_delete_one_silc_inv_subject - cap_delete_one_cte_wp_at_other static_imp_wp + cap_delete_one_cte_wp_at_other hoare_weak_lift_imp hoare_vcg_ex_lift slots_holding_overlapping_caps_from_silc_inv[where aag=aag and st=st] | simp | simp add: get_irq_slot_def)+ apply (clarsimp simp: pas_refined_def irq_map_wellformed_aux_def) diff --git a/proof/infoflow/Ipc_IF.thy b/proof/infoflow/Ipc_IF.thy index 66a0ec134d..80f3d615df 100644 --- a/proof/infoflow/Ipc_IF.thy +++ b/proof/infoflow/Ipc_IF.thy @@ -218,7 +218,7 @@ lemma update_waiting_ntfn_equiv_but_for_labels: update_waiting_ntfn nptr list boundtcb badge \\_. equiv_but_for_labels aag L st\" unfolding update_waiting_ntfn_def - apply (wp static_imp_wp as_user_equiv_but_for_labels set_thread_state_runnable_equiv_but_for_labels + apply (wp hoare_weak_lift_imp as_user_equiv_but_for_labels set_thread_state_runnable_equiv_but_for_labels set_thread_state_pas_refined set_notification_equiv_but_for_labels set_simple_ko_pred_tcb_at set_simple_ko_pas_refined hoare_vcg_disj_lift possible_switch_to_equiv_but_for_labels @@ -332,12 +332,12 @@ lemma sts_noop: lemma sts_to_modify': "monadic_rewrite True True (tcb_at tcb and (\s :: det_state. tcb \ cur_thread s)) (set_thread_state tcb st) - (modify (\s. s\kheap := kheap s(tcb \ TCB (the (get_tcb tcb s)\tcb_state := st\))\))" + (modify (\s. 
s\kheap := (kheap s)(tcb \ TCB (the (get_tcb tcb s)\tcb_state := st\))\))" apply (clarsimp simp: set_thread_state_def set_object_def) apply (monadic_rewrite_l sts_noop \wpsimp wp: get_object_wp\) apply (simp add: bind_assoc) apply monadic_rewrite_symb_exec_l+ - apply (rule_tac P="\s'. s' = s \ x = the (get_tcb tcb s)" in monadic_rewrite_pre_imp_eq) + apply (rule_tac P="\s'. s' = s \ tcba = the (get_tcb tcb s)" in monadic_rewrite_pre_imp_eq) apply (clarsimp simp: put_def modify_def get_def bind_def) apply (wpsimp wp: get_object_wp)+ by (clarsimp simp: get_tcb_def tcb_at_def) @@ -1136,7 +1136,7 @@ lemma transfer_caps_reads_respects: (transfer_caps mi caps endpoint receiver receive_buffer)" unfolding transfer_caps_def fun_app_def by (wp transfer_caps_loop_reads_respects get_receive_slots_rev - get_receive_slots_authorised hoare_vcg_all_lift static_imp_wp + get_receive_slots_authorised hoare_vcg_all_lift hoare_weak_lift_imp | wpc | simp add: ball_conj_distrib)+ lemma aag_has_auth_to_read_mrs: @@ -1360,7 +1360,7 @@ lemma receive_ipc_base_reads_respects: as_user_set_register_reads_respects' | simp | intro allI impI | rule pre_ev, wpc)+)[2] apply (intro allI impI) - apply (wp static_imp_wp set_simple_ko_reads_respects set_thread_state_reads_respects + apply (wp hoare_weak_lift_imp set_simple_ko_reads_respects set_thread_state_reads_respects setup_caller_cap_reads_respects do_ipc_transfer_reads_respects possible_switch_to_reads_respects gets_cur_thread_ev set_thread_state_pas_refined set_simple_ko_reads_respects hoare_vcg_all_lift @@ -1398,7 +1398,7 @@ lemma receive_ipc_reads_respects: apply (rename_tac epptr badge rights) apply (wp receive_ipc_base_reads_respects complete_signal_reads_respects - static_imp_wp set_simple_ko_reads_respects set_thread_state_reads_respects + hoare_weak_lift_imp set_simple_ko_reads_respects set_thread_state_reads_respects setup_caller_cap_reads_respects complete_signal_reads_respects thread_get_reads_respects get_thread_state_reads_respects diff --git a/proof/infoflow/Noninterference.thy b/proof/infoflow/Noninterference.thy index b4435fc1a8..681fa56e8a 100644 --- a/proof/infoflow/Noninterference.thy +++ b/proof/infoflow/Noninterference.thy @@ -325,7 +325,7 @@ lemma prop_of_two_valid: by (rule hoare_pre, wps f g, wp, simp) lemma thread_set_tcb_context_update_wp: - "\\s. P (s\kheap := kheap s(t \ TCB (tcb_arch_update f (the (get_tcb t s))))\)\ + "\\s. P (s\kheap := (kheap s)(t \ TCB (tcb_arch_update f (the (get_tcb t s))))\)\ thread_set (tcb_arch_update f) t \\_. 
P\" apply (simp add: thread_set_def) @@ -631,7 +631,7 @@ locale Noninterference_1 = "reads_respects_g aag l \ (do_machine_op (storeWord ptr w))" and integrity_asids_update_reference_state: "is_subject aag t - \ integrity_asids aag {pasSubject aag} x asid s (s\kheap := kheap s(t \ blah)\)" + \ integrity_asids aag {pasSubject aag} x asid s (s\kheap := (kheap s)(t \ blah)\)" and partitionIntegrity_subjectAffects_aobj: "\ partitionIntegrity aag s s'; kheap s x = Some (ArchObj ao); kheap s x \ kheap s' x; silc_inv aag st s; pas_refined aag s; pas_wellformed_noninterference aag \ @@ -685,7 +685,7 @@ locale Noninterference_1 = begin lemma integrity_update_reference_state: - "\ is_subject aag t; integrity aag X st s; st = st'\kheap := kheap st'(t \ blah)\ \ + "\ is_subject aag t; integrity aag X st s; st = st'\kheap := (kheap st')(t \ blah)\ \ \ integrity (aag :: 'a subject_label PAS) X st' s" apply (erule integrity_trans[rotated]) apply (clarsimp simp: integrity_def opt_map_def integrity_asids_update_reference_state) diff --git a/proof/infoflow/PasUpdates.thy b/proof/infoflow/PasUpdates.thy index 14b5ae3469..1d6ceed8db 100644 --- a/proof/infoflow/PasUpdates.thy +++ b/proof/infoflow/PasUpdates.thy @@ -45,7 +45,7 @@ crunch domain_fields[wp]: cap_swap_ext, set_thread_state_ext, tcb_sched_action, reschedule_required, cap_swap_for_delete, finalise_cap, cap_move, cap_swap, cap_delete, cancel_badged_sends, cap_insert "domain_fields P" - ( wp: syscall_valid select_wp crunch_wps rec_del_preservation cap_revoke_preservation modify_wp + ( wp: syscall_valid crunch_wps rec_del_preservation cap_revoke_preservation modify_wp simp: crunch_simps check_cap_at_def filterM_mapM unless_def ignore: without_preemption filterM rec_del check_cap_at cap_revoke ignore_del: retype_region_ext create_cap_ext cap_insert_ext ethread_set cap_move_ext diff --git a/proof/infoflow/RISCV64/ArchADT_IF.thy b/proof/infoflow/RISCV64/ArchADT_IF.thy index a70fe81d27..82a752388a 100644 --- a/proof/infoflow/RISCV64/ArchADT_IF.thy +++ b/proof/infoflow/RISCV64/ArchADT_IF.thy @@ -25,7 +25,7 @@ lemma do_user_op_if_invs[ADT_IF_assms]: do_user_op_if f tc \\_. invs and ct_running\" apply (simp add: do_user_op_if_def split_def) - apply (wp do_machine_op_ct_in_state select_wp device_update_invs | wp (once) dmo_invs | simp)+ + apply (wp do_machine_op_ct_in_state device_update_invs | wp (once) dmo_invs | simp)+ apply (clarsimp simp: user_mem_def user_memory_update_def simpler_modify_def restrict_map_def invs_def cur_tcb_def ptable_rights_s_def ptable_lift_s_def) apply (frule ptable_rights_imp_frame) @@ -35,31 +35,31 @@ lemma do_user_op_if_invs[ADT_IF_assms]: done crunch domain_sep_inv[ADT_IF_assms, wp]: do_user_op_if "domain_sep_inv irqs st" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) crunch valid_sched[ADT_IF_assms, wp]: do_user_op_if "valid_sched" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) crunch irq_masks[ADT_IF_assms, wp]: do_user_op_if "\s. P (irq_masks_of_state s)" - (ignore: user_memory_update wp: select_wp dmo_wp no_irq) + (ignore: user_memory_update wp: dmo_wp no_irq) crunch valid_list[ADT_IF_assms, wp]: do_user_op_if "valid_list" - (ignore: user_memory_update wp: select_wp) + (ignore: user_memory_update) lemma do_user_op_if_scheduler_action[ADT_IF_assms, wp]: "do_user_op_if f tc \\s. 
P (scheduler_action s)\" - by (simp add: do_user_op_if_def | wp select_wp | wpc)+ + by (simp add: do_user_op_if_def | wp | wpc)+ lemma do_user_op_silc_inv[ADT_IF_assms, wp]: "do_user_op_if f tc \silc_inv aag st\" apply (simp add: do_user_op_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma do_user_op_pas_refined[ADT_IF_assms, wp]: "do_user_op_if f tc \pas_refined aag\" apply (simp add: do_user_op_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done crunches do_user_op_if @@ -67,7 +67,7 @@ crunches do_user_op_if and cur_domain[ADT_IF_assms, wp]: "\s. P (cur_domain s)" and idle_thread[ADT_IF_assms, wp]: "\s. P (idle_thread s)" and domain_fields[ADT_IF_assms, wp]: "domain_fields P" - (wp: select_wp ignore: user_memory_update) + (ignore: user_memory_update) lemma do_use_op_guarded_pas_domain[ADT_IF_assms, wp]: "do_user_op_if f tc \guarded_pas_domain aag\" @@ -187,7 +187,7 @@ lemma do_user_op_if_idle_equiv[ADT_IF_assms, wp]: do_user_op_if uop tc \\_. idle_equiv st\" unfolding do_user_op_if_def - by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv select_wp) + by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv) lemma kernel_entry_if_valid_vspace_objs_if[ADT_IF_assms, wp]: "\valid_vspace_objs_if and invs and (\s. e \ Interrupt \ ct_active s)\ @@ -214,20 +214,20 @@ lemma valid_vspace_objs_if_ms_update[ADT_IF_assms, simp]: lemma do_user_op_if_irq_state_of_state[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_state_of_state s)\" apply (rule hoare_pre) - apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp select_wp | wpc)+ + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ done lemma do_user_op_if_irq_masks_of_state[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_masks_of_state s)\" apply (rule hoare_pre) - apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp select_wp | wpc)+ + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ done lemma do_user_op_if_irq_measure_if[ADT_IF_assms]: "do_user_op_if utf uc \\s. P (irq_measure_if s)\" apply (rule hoare_pre) apply (simp add: do_user_op_if_def user_memory_update_def irq_measure_if_def - | wps |wp dmo_wp select_wp | wpc)+ + | wps |wp dmo_wp | wpc)+ done lemma invoke_tcb_irq_state_inv[ADT_IF_assms]: diff --git a/proof/infoflow/RISCV64/ArchArch_IF.thy b/proof/infoflow/RISCV64/ArchArch_IF.thy index 3901077868..46de768008 100644 --- a/proof/infoflow/RISCV64/ArchArch_IF.thy +++ b/proof/infoflow/RISCV64/ArchArch_IF.thy @@ -74,7 +74,7 @@ crunch irq_state_of_state[wp]: arch_perform_invocation "\s. P (irq_state crunch irq_state_of_state[Arch_IF_assms, wp]: arch_finalise_cap, prepare_thread_delete "\s :: det_state. 
P (irq_state_of_state s)" - (wp: select_wp modify_wp crunch_wps dmo_wp + (wp: modify_wp crunch_wps dmo_wp simp: crunch_simps hwASIDFlush_def) lemma equiv_asid_machine_state_update[Arch_IF_assms, simp]: @@ -400,10 +400,10 @@ lemma perform_page_invocation_reads_respects: lemma equiv_asids_riscv_asid_table_update: "\ equiv_asids R s t; kheap s pool_ptr = kheap t pool_ptr \ \ equiv_asids R - (s\arch_state := arch_state s\riscv_asid_table := riscv_asid_table (arch_state s) - (asid_high_bits_of asid \ pool_ptr)\\) - (t\arch_state := arch_state t\riscv_asid_table := riscv_asid_table (arch_state t) - (asid_high_bits_of asid \ pool_ptr)\\)" + (s\arch_state := arch_state s\riscv_asid_table := (asid_table s) + (asid_high_bits_of asid \ pool_ptr)\\) + (t\arch_state := arch_state t\riscv_asid_table := (asid_table t) + (asid_high_bits_of asid \ pool_ptr)\\)" by (clarsimp simp: equiv_asids_def equiv_asid_def asid_pool_at_kheap opt_map_def) lemma riscv_asid_table_update_reads_respects: @@ -943,8 +943,8 @@ lemma set_mrs_globals_equiv: apply (clarsimp) apply (insert length_msg_lt_msg_max) apply (simp) - apply (wp set_object_globals_equiv static_imp_wp) - apply (wp hoare_vcg_all_lift set_object_globals_equiv static_imp_wp)+ + apply (wp set_object_globals_equiv hoare_weak_lift_imp) + apply (wp hoare_vcg_all_lift set_object_globals_equiv hoare_weak_lift_imp)+ apply (fastforce simp: valid_arch_state_def obj_at_def get_tcb_def dest: valid_global_arch_objs_pt_at) done @@ -981,7 +981,7 @@ lemma perform_pg_inv_unmap_globals_equiv: apply (rule hoare_weaken_pre) apply (wp mapM_swp_store_pte_globals_equiv hoare_vcg_all_lift mapM_x_swp_store_pte_globals_equiv set_cap_globals_equiv'' unmap_page_globals_equiv store_pte_globals_equiv - store_pte_globals_equiv static_imp_wp set_message_info_globals_equiv + store_pte_globals_equiv hoare_weak_lift_imp set_message_info_globals_equiv unmap_page_valid_arch_state perform_pg_inv_get_addr_globals_equiv | wpc | simp add: do_machine_op_bind sfence_def)+ apply (clarsimp simp: acap_map_data_def) @@ -998,7 +998,7 @@ lemma perform_pg_inv_map_globals_equiv: unfolding perform_pg_inv_map_def by (wp mapM_swp_store_pte_globals_equiv hoare_vcg_all_lift mapM_x_swp_store_pte_globals_equiv set_cap_globals_equiv'' unmap_page_globals_equiv store_pte_globals_equiv - store_pte_globals_equiv static_imp_wp set_message_info_globals_equiv + store_pte_globals_equiv hoare_weak_lift_imp set_message_info_globals_equiv unmap_page_valid_arch_state perform_pg_inv_get_addr_globals_equiv | wpc | simp add: do_machine_op_bind sfence_def | fastforce)+ @@ -1049,7 +1049,7 @@ lemma perform_asid_control_invocation_globals_equiv: max_index_upd_invs_simple set_cap_no_overlap set_cap_caps_no_overlap max_index_upd_caps_overlap_reserved region_in_kernel_window_preserved - hoare_vcg_all_lift get_cap_wp static_imp_wp + hoare_vcg_all_lift get_cap_wp hoare_weak_lift_imp set_cap_idx_up_aligned_area[where dev = False,simplified] | simp)+ (* factor out the implication -- we know what the relevant components of the diff --git a/proof/infoflow/RISCV64/ArchDecode_IF.thy b/proof/infoflow/RISCV64/ArchDecode_IF.thy index 657072fdb5..5be04e10b1 100644 --- a/proof/infoflow/RISCV64/ArchDecode_IF.thy +++ b/proof/infoflow/RISCV64/ArchDecode_IF.thy @@ -144,7 +144,7 @@ lemma decode_asid_control_invocation_reads_respects_f: apply (wp check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] reads_respects_f_inv'[OF ensure_empty_rev] reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] - reads_respects_f_inv'[OF ensure_no_children_rev] 
select_wp + reads_respects_f_inv'[OF ensure_no_children_rev] reads_respects_f_inv'[OF lookup_error_on_failure_rev] gets_apply_ev is_final_cap_reads_respects @@ -193,7 +193,7 @@ lemma decode_frame_invocation_reads_respects_f: reads_respects_f_inv'[OF ensure_empty_rev] reads_respects_f_inv'[OF get_pte_rev] reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] - reads_respects_f_inv'[OF ensure_no_children_rev] select_wp + reads_respects_f_inv'[OF ensure_no_children_rev] reads_respects_f_inv'[OF lookup_error_on_failure_rev] find_vspace_for_asid_reads_respects is_final_cap_reads_respects @@ -254,7 +254,7 @@ lemma decode_page_table_invocation_reads_respects_f: reads_respects_f_inv'[OF ensure_empty_rev] reads_respects_f_inv'[OF get_pte_rev] reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] - reads_respects_f_inv'[OF ensure_no_children_rev] select_wp + reads_respects_f_inv'[OF ensure_no_children_rev] reads_respects_f_inv'[OF lookup_error_on_failure_rev] find_vspace_for_asid_reads_respects is_final_cap_reads_respects diff --git a/proof/infoflow/RISCV64/ArchFinalCaps.thy b/proof/infoflow/RISCV64/ArchFinalCaps.thy index 4dd9854b4e..f10001e11f 100644 --- a/proof/infoflow/RISCV64/ArchFinalCaps.thy +++ b/proof/infoflow/RISCV64/ArchFinalCaps.thy @@ -160,7 +160,7 @@ lemma perform_page_invocation_silc_inv: apply (wp mapM_wp[OF _ subset_refl] set_cap_silc_inv mapM_x_wp[OF _ subset_refl] perform_page_table_invocation_silc_inv_get_cap_helper'[where st=st] - hoare_vcg_all_lift hoare_vcg_if_lift static_imp_wp + hoare_vcg_all_lift hoare_vcg_if_lift hoare_weak_lift_imp | wpc | simp only: swp_def o_def fun_app_def K_def | wp (once) hoare_drop_imps)+ @@ -186,7 +186,7 @@ lemma perform_asid_control_invocation_silc_inv: apply (rule hoare_pre) apply (wp modify_wp cap_insert_silc_inv' retype_region_silc_inv[where sz=pageBits] set_cap_silc_inv get_cap_slots_holding_overlapping_caps[where st=st] - delete_objects_silc_inv static_imp_wp + delete_objects_silc_inv hoare_weak_lift_imp | wpc | simp )+ apply (clarsimp simp: authorised_asid_control_inv_def silc_inv_def valid_aci_def ptr_range_def page_bits_def) apply (rule conjI) @@ -250,15 +250,15 @@ lemma arch_invoke_irq_control_silc_inv[FinalCaps_assms]: done lemma invoke_tcb_silc_inv[FinalCaps_assms]: - notes static_imp_wp [wp] - static_imp_conj_wp [wp] + notes hoare_weak_lift_imp [wp] + hoare_weak_lift_imp_conj [wp] shows "\silc_inv aag st and einvs and simple_sched_action and pas_refined aag and tcb_inv_wf tinv and K (authorised_tcb_inv aag tinv)\ invoke_tcb tinv \\_. silc_inv aag st\" apply (case_tac tinv) apply ((wp restart_silc_inv hoare_vcg_if_lift suspend_silc_inv mapM_x_wp[OF _ subset_refl] - static_imp_wp + hoare_weak_lift_imp | wpc | simp split del: if_split add: authorised_tcb_inv_def check_cap_at_def | clarsimp diff --git a/proof/infoflow/RISCV64/ArchIRQMasks_IF.thy b/proof/infoflow/RISCV64/ArchIRQMasks_IF.thy index 6e3ae7cd4f..bd11499548 100644 --- a/proof/infoflow/RISCV64/ArchIRQMasks_IF.thy +++ b/proof/infoflow/RISCV64/ArchIRQMasks_IF.thy @@ -30,7 +30,7 @@ crunch irq_masks[IRQMasks_IF_assms, wp]: invoke_untyped "\s. P (irq_mask simp: crunch_simps no_irq_clearMemory mapM_x_def_bak unless_def) crunch irq_masks[IRQMasks_IF_assms, wp]: finalise_cap "\s. P (irq_masks_of_state s)" - ( wp: select_wp crunch_wps dmo_wp no_irq + ( wp: crunch_wps dmo_wp no_irq simp: crunch_simps no_irq_setVSpaceRoot no_irq_hwASIDFlush) crunch irq_masks[IRQMasks_IF_assms, wp]: send_signal "\s. 
P (irq_masks_of_state s)" @@ -77,14 +77,14 @@ lemma dmo_getActiveIRQ_return_axiom[IRQMasks_IF_assms, wp]: apply (simp add: getActiveIRQ_def) apply (rule hoare_pre, rule dmo_wp) apply (insert irq_oracle_max_irq) - apply (wp alternative_wp select_wp dmo_getActiveIRQ_irq_masks) + apply (wp dmo_getActiveIRQ_irq_masks) apply clarsimp done crunch irq_masks[IRQMasks_IF_assms, wp]: activate_thread "\s. P (irq_masks_of_state s)" crunch irq_masks[IRQMasks_IF_assms, wp]: schedule "\s. P (irq_masks_of_state s)" - (wp: dmo_wp alternative_wp select_wp crunch_wps simp: crunch_simps) + (wp: dmo_wp crunch_wps simp: crunch_simps) end @@ -135,13 +135,13 @@ lemma invoke_tcb_irq_masks[IRQMasks_IF_assms]: apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) apply fastforce apply blast - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ apply (rule_tac Q="\ r s. domain_sep_inv False st s \ P (irq_masks_of_state s)" and E="\_ s. P (irq_masks_of_state s)" in hoare_post_impErr) apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) apply fastforce apply blast - apply (simp add: option_update_thread_def | wp static_imp_wp hoare_vcg_all_lift | wpc)+ + apply (simp add: option_update_thread_def | wp hoare_weak_lift_imp hoare_vcg_all_lift | wpc)+ by fastforce+ lemma init_arch_objects_irq_masks: diff --git a/proof/infoflow/RISCV64/ArchIpc_IF.thy b/proof/infoflow/RISCV64/ArchIpc_IF.thy index 7898354887..9e2e734276 100644 --- a/proof/infoflow/RISCV64/ArchIpc_IF.thy +++ b/proof/infoflow/RISCV64/ArchIpc_IF.thy @@ -419,7 +419,7 @@ lemma set_mrs_equiv_but_for_labels[Ipc_IF_assms]: apply (simp add: word_size_def) apply (erule is_aligned_no_overflow') apply simp - apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift static_imp_wp | simp)+ + apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift hoare_weak_lift_imp | simp)+ apply (fastforce dest: get_tcb_not_asid_pool_at)+ done diff --git a/proof/infoflow/RISCV64/ArchNoninterference.thy b/proof/infoflow/RISCV64/ArchNoninterference.thy index dcf4c00cfc..6ebdc9d167 100644 --- a/proof/infoflow/RISCV64/ArchNoninterference.thy +++ b/proof/infoflow/RISCV64/ArchNoninterference.thy @@ -19,9 +19,10 @@ lemma do_user_op_if_integrity[Noninterference_assms]: \\_. integrity aag X st\" apply (simp add: do_user_op_if_def) apply (wpsimp wp: dmo_user_memory_update_respects_Write dmo_device_update_respects_Write - hoare_vcg_all_lift hoare_vcg_imp_lift) + hoare_vcg_all_lift hoare_vcg_imp_lift + wp_del: select_wp) apply (rule hoare_pre_cont) - apply (wp select_wp | wpc | clarsimp)+ + apply (wp | wpc | clarsimp)+ apply (rule conjI) apply clarsimp apply (simp add: restrict_map_def ptable_lift_s_def ptable_rights_s_def split: if_splits) @@ -39,12 +40,12 @@ lemma do_user_op_if_globals_equiv_scheduler[Noninterference_assms]: \\_. 
globals_equiv_scheduler st\" apply (simp add: do_user_op_if_def) apply (wpsimp wp: dmo_user_memory_update_globals_equiv_scheduler - dmo_device_memory_update_globals_equiv_scheduler select_wp)+ + dmo_device_memory_update_globals_equiv_scheduler)+ apply (auto simp: ptable_lift_s_def ptable_rights_s_def) done crunch silc_dom_equiv[Noninterference_assms, wp]: do_user_op_if "silc_dom_equiv aag st" - (ignore: do_machine_op user_memory_update wp: crunch_wps select_wp) + (ignore: do_machine_op user_memory_update wp: crunch_wps) lemma sameFor_scheduler_affects_equiv[Noninterference_assms]: "\ (s,s') \ same_for aag PSched; (s,s') \ same_for aag (Partition l); @@ -93,7 +94,7 @@ lemma arch_globals_equiv_strengthener_thread_independent[Noninterference_assms]: lemma integrity_asids_update_reference_state[Noninterference_assms]: "is_subject aag t - \ integrity_asids aag {pasSubject aag} x a s (s\kheap := kheap s(t \ blah)\)" + \ integrity_asids aag {pasSubject aag} x a s (s\kheap := (kheap s)(t \ blah)\)" by (clarsimp simp: opt_map_def) lemma inte_obj_arch: @@ -352,7 +353,7 @@ lemma getActiveIRQ_ret_no_dmo[Noninterference_assms, wp]: apply (simp add: getActiveIRQ_def) apply (rule hoare_pre) apply (insert irq_oracle_max_irq) - apply (wp alternative_wp select_wp dmo_getActiveIRQ_irq_masks) + apply (wp dmo_getActiveIRQ_irq_masks) apply clarsimp done diff --git a/proof/infoflow/RISCV64/ArchPasUpdates.thy b/proof/infoflow/RISCV64/ArchPasUpdates.thy index a7392bebfd..f785571dc1 100644 --- a/proof/infoflow/RISCV64/ArchPasUpdates.thy +++ b/proof/infoflow/RISCV64/ArchPasUpdates.thy @@ -14,7 +14,7 @@ named_theorems PasUpdates_assms crunches arch_post_cap_deletion, arch_finalise_cap, prepare_thread_delete for domain_fields[PasUpdates_assms, wp]: "domain_fields P" - ( wp: syscall_valid select_wp crunch_wps rec_del_preservation cap_revoke_preservation modify_wp + ( wp: syscall_valid crunch_wps rec_del_preservation cap_revoke_preservation modify_wp simp: crunch_simps check_cap_at_def filterM_mapM unless_def ignore: without_preemption filterM rec_del check_cap_at cap_revoke ignore_del: retype_region_ext create_cap_ext cap_insert_ext ethread_set cap_move_ext diff --git a/proof/infoflow/RISCV64/ArchScheduler_IF.thy b/proof/infoflow/RISCV64/ArchScheduler_IF.thy index f39e0482a4..28df9083df 100644 --- a/proof/infoflow/RISCV64/ArchScheduler_IF.thy +++ b/proof/infoflow/RISCV64/ArchScheduler_IF.thy @@ -130,12 +130,12 @@ lemma thread_set_context_globals_equiv[Scheduler_IF_assms]: lemma arch_scheduler_affects_equiv_update[Scheduler_IF_assms]: "arch_scheduler_affects_equiv st s - \ arch_scheduler_affects_equiv st (s\kheap := kheap s(x \ TCB y')\)" + \ arch_scheduler_affects_equiv st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: arch_scheduler_affects_equiv_def) lemma equiv_asid_equiv_update[Scheduler_IF_assms]: "\ get_tcb x s = Some y; equiv_asid asid st s \ - \ equiv_asid asid st (s\kheap := kheap s(x \ TCB y')\)" + \ equiv_asid asid st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: equiv_asid_def obj_at_def get_tcb_def) end @@ -363,7 +363,7 @@ lemma thread_set_scheduler_affects_equiv[Scheduler_IF_assms, wp]: split: option.splits kernel_object.splits) apply (subst arch_tcb_update_aux) apply simp - apply (subgoal_tac "s = (s\kheap := kheap s(idle_thread s \ TCB y)\)", simp) + apply (subgoal_tac "s = (s\kheap := (kheap s)(idle_thread s \ TCB y)\)", simp) apply (rule state.equality) apply (rule ext) apply simp+ diff --git a/proof/infoflow/RISCV64/ArchTcb_IF.thy b/proof/infoflow/RISCV64/ArchTcb_IF.thy index 
1e17e1d882..2602fcb8f4 100644 --- a/proof/infoflow/RISCV64/ArchTcb_IF.thy +++ b/proof/infoflow/RISCV64/ArchTcb_IF.thy @@ -123,7 +123,7 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid check_cap_inv2[where Q="\_. P"] cap_delete_P cap_insert_P thread_set_P thread_set_P' set_mcpriority_P set_mcpriority_idle_thread - dxo_wp_weak static_imp_wp) + dxo_wp_weak hoare_weak_lift_imp) | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def option_update_thread_def del: hoare_True_E_R | wpc)+) (*slow*) @@ -140,7 +140,7 @@ lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: lemma tc_reads_respects_f[Tcb_IF_assms]: assumes domains_distinct[wp]: "pas_domains_distinct aag" and tc[simp]: "ti = ThreadControl x41 x42 x43 x44 x45 x46 x47 x48" - notes validE_valid[wp del] static_imp_wp [wp] + notes validE_valid[wp del] hoare_weak_lift_imp [wp] shows "reads_respects_f aag l (silc_inv aag st and only_timer_irq_inv irq st' and einvs and simple_sched_action @@ -217,7 +217,7 @@ lemma tc_reads_respects_f[Tcb_IF_assms]: invs_psp_aligned invs_vspace_objs invs_arch_state | wp (once) hoare_drop_imp)+ apply (simp add: option_update_thread_def tcb_cap_cases_def - | wp static_imp_wp static_imp_conj_wp thread_set_pas_refined + | wp hoare_weak_lift_imp hoare_weak_lift_imp_conj thread_set_pas_refined reads_respects_f[OF thread_set_reads_respects, where st=st and Q="\"] | wpc)+ apply (wp hoare_vcg_all_lift thread_set_tcb_fault_handler_update_invs diff --git a/proof/infoflow/RISCV64/ArchUserOp_IF.thy b/proof/infoflow/RISCV64/ArchUserOp_IF.thy index 33680e99cb..7e0168d38f 100644 --- a/proof/infoflow/RISCV64/ArchUserOp_IF.thy +++ b/proof/infoflow/RISCV64/ArchUserOp_IF.thy @@ -820,7 +820,7 @@ lemma do_user_op_reads_respects_g: apply (clarsimp simp: globals_equiv_def reads_equiv_g_def) apply (rule spec_equiv_valid_guard_imp) apply (wpsimp wp: dmo_user_memory_update_reads_respects_g dmo_device_state_update_reads_respects_g - dmo_device_state_update_reads_respects_g select_ev select_wp dmo_wp) + dmo_device_state_update_reads_respects_g select_ev dmo_wp) apply clarsimp apply (rule conjI) apply clarsimp diff --git a/proof/infoflow/Scheduler_IF.thy b/proof/infoflow/Scheduler_IF.thy index 0e55b21e0c..de3d3aeaa4 100644 --- a/proof/infoflow/Scheduler_IF.thy +++ b/proof/infoflow/Scheduler_IF.thy @@ -60,7 +60,7 @@ locale Scheduler_IF_1 = "arch_scheduler_affects_equiv s s' \ arch_scheduler_affects_equiv s' s" and arch_scheduler_affects_equiv_update: "arch_scheduler_affects_equiv st s - \ arch_scheduler_affects_equiv st (s\kheap := kheap s(x \ TCB y')\)" + \ arch_scheduler_affects_equiv st (s\kheap := (kheap s)(x \ TCB y')\)" and arch_scheduler_affects_equiv_sa_update[simp]: "\f. arch_scheduler_affects_equiv (scheduler_action_update f s) s' = arch_scheduler_affects_equiv s s'" @@ -106,7 +106,7 @@ locale Scheduler_IF_1 = "\P. arch_switch_to_idle_thread \\s. P (work_units_completed s)\" and equiv_asid_equiv_update: "\ get_tcb x s = Some y; equiv_asid asid st s \ - \ equiv_asid asid st (s\kheap := kheap s(x \ TCB y')\)" + \ equiv_asid asid st (s\kheap := (kheap s)(x \ TCB y')\)" and equiv_asid_cur_thread_update[simp]: "\f. equiv_asid asid (cur_thread_update f s) s' = equiv_asid asid s s'" "\f. 
equiv_asid asid s (cur_thread_update f s') = equiv_asid asid s s'" @@ -605,7 +605,7 @@ proof - apply (simp add: scheduler_affects_equiv_def[abs_def]) apply (rule hoare_pre) apply (wps c) - apply (wp static_imp_wp a silc_dom_equiv_states_equiv_lift d e s w i x hoare_vcg_imp_lift) + apply (wp hoare_weak_lift_imp a silc_dom_equiv_states_equiv_lift d e s w i x hoare_vcg_imp_lift) apply fastforce done qed @@ -671,7 +671,7 @@ proof - apply (simp add: asahi_scheduler_affects_equiv_def[abs_def]) apply (rule hoare_pre) apply (wps c) - apply (wp static_imp_wp a silc_dom_equiv_states_equiv_lift d w) + apply (wp hoare_weak_lift_imp a silc_dom_equiv_states_equiv_lift d w) apply clarsimp done qed @@ -731,7 +731,7 @@ proof - apply (simp add: asahi_ex_scheduler_affects_equiv_def[abs_def]) apply (rule hoare_pre) apply (wps c) - apply (wp static_imp_wp a silc_dom_equiv_states_equiv_lift d w x hoare_vcg_imp_lift') + apply (wp hoare_weak_lift_imp a silc_dom_equiv_states_equiv_lift d w x hoare_vcg_imp_lift') apply clarsimp done qed @@ -2221,7 +2221,7 @@ context Scheduler_IF_1 begin lemma scheduler_affects_equiv_update: "\ get_tcb x s = Some y; pasObjectAbs aag x \ reads_scheduler aag l; scheduler_affects_equiv aag l st s \ - \ scheduler_affects_equiv aag l st (s\kheap := kheap s(x \ TCB y')\)" + \ scheduler_affects_equiv aag l st (s\kheap := (kheap s)(x \ TCB y')\)" by (clarsimp simp: scheduler_affects_equiv_def equiv_for_def equiv_asids_def states_equiv_for_def scheduler_globals_frame_equiv_def arch_scheduler_affects_equiv_update equiv_asid_equiv_update) diff --git a/proof/infoflow/Syscall_IF.thy b/proof/infoflow/Syscall_IF.thy index b162edd9a5..eb4de64723 100644 --- a/proof/infoflow/Syscall_IF.thy +++ b/proof/infoflow/Syscall_IF.thy @@ -174,7 +174,7 @@ proof (induct rule: cap_revoke.induct[where ?a1.0=s]) cap_delete_pas_refined cap_delete_silc_inv[where st=st] cap_delete_only_timer_irq_inv[where st=st' and irq=irq] drop_spec_ev[OF assertE_ev] drop_spec_ev[OF liftE_ev] - get_cap_wp select_wp select_ev drop_spec_ev2_inv[OF liftE_ev2] + get_cap_wp select_ev drop_spec_ev2_inv[OF liftE_ev2] reads_respects_f[OF get_cap_rev, where st=st and aag=aag] | simp (no_asm) add: returnOk_def | rule next_revoke_eq' | (simp add: pred_conj_def, erule conjE, assumption) @@ -705,7 +705,6 @@ lemma handle_recv_reads_respects_f: simp: aag_cap_auth_def cap_auth_conferred_def cap_rights_to_auth_def)[1] apply (wp reads_respects_f[OF handle_fault_reads_respects,where st=st]) apply (wpsimp wp: get_simple_ko_wp get_cap_wp)+ - apply (rule VSpaceEntries_AI.hoare_vcg_all_liftE) apply (rule_tac Q="\r s. 
silc_inv aag st s \ einvs s \ pas_refined aag s \ tcb_at rv s \ pas_cur_domain aag s \ is_subject aag rv \ is_subject aag (cur_thread s) \ is_subject aag (fst (fst r))" diff --git a/proof/infoflow/Tcb_IF.thy b/proof/infoflow/Tcb_IF.thy index ad90ae59c8..7fb3dc8dba 100644 --- a/proof/infoflow/Tcb_IF.thy +++ b/proof/infoflow/Tcb_IF.thy @@ -90,7 +90,7 @@ next apply (simp add: conj_comms) apply (wp set_cap_P set_cap_Q replace_cap_invs final_cap_same_objrefs set_cap_cte_cap_wp_to - set_cap_cte_wp_at hoare_vcg_const_Ball_lift static_imp_wp + set_cap_cte_wp_at hoare_vcg_const_Ball_lift hoare_weak_lift_imp | rule finalise_cap_not_reply_master | simp add: in_monad)+ apply (rule hoare_strengthen_post) @@ -486,7 +486,7 @@ context Tcb_IF_2 begin lemma invoke_tcb_reads_respects_f: assumes domains_distinct[wp]: "pas_domains_distinct aag" - notes validE_valid[wp del] static_imp_wp [wp] + notes validE_valid[wp del] hoare_weak_lift_imp [wp] shows "reads_respects_f aag l (silc_inv aag st and only_timer_irq_inv irq st' and einvs diff --git a/proof/infoflow/refine/ADT_IF_Refine.thy b/proof/infoflow/refine/ADT_IF_Refine.thy index 5750ae76e7..cfeaaa4cb6 100644 --- a/proof/infoflow/refine/ADT_IF_Refine.thy +++ b/proof/infoflow/refine/ADT_IF_Refine.thy @@ -813,7 +813,6 @@ lemma abstract_invs: crunches checkActiveIRQ_if for ksDomainTime_inv[wp]: "\s. P (ksDomainTime s)" and ksDomSchedule_inv[wp]: "\s. P (ksDomSchedule s)" - (wp: select_wp) lemma kernelEntry_if_valid_domain_time: "e \ Interrupt \ \\\ kernelEntry_if e tc \\_ s. 0 < ksDomainTime s \ valid_domain_list' s\" diff --git a/proof/infoflow/refine/ARM/ArchADT_IF_Refine.thy b/proof/infoflow/refine/ARM/ArchADT_IF_Refine.thy index 2d6be286d0..09a27e4a9c 100644 --- a/proof/infoflow/refine/ARM/ArchADT_IF_Refine.thy +++ b/proof/infoflow/refine/ARM/ArchADT_IF_Refine.thy @@ -24,7 +24,7 @@ lemma kernelEntry_invs'[ADT_IF_Refine_assms, wp]: kernelEntry_if e tc \\_. invs'\" apply (simp add: kernelEntry_if_def) - apply (wp threadSet_invs_trivial threadSet_ct_running' static_imp_wp + apply (wp threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp | wp (once) hoare_drop_imps | clarsimp)+ done @@ -36,7 +36,7 @@ lemma kernelEntry_arch_extras[ADT_IF_Refine_assms, wp]: kernelEntry_if e tc \\_. arch_extras\" apply (simp add: kernelEntry_if_def) - apply (wp handleEvent_valid_duplicates' threadSet_invs_trivial threadSet_ct_running' static_imp_wp + apply (wp handleEvent_valid_duplicates' threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp | wp (once) hoare_drop_imps | clarsimp)+ done @@ -246,7 +246,7 @@ lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: apply (wp device_update_invs' dmo_setExMonitor_wp' dmo_invs' | simp)+ apply (clarsimp simp add: no_irq_modify user_memory_update_def) apply wpsimp - apply (wp select_wp)+ + apply wp+ apply (clarsimp simp: user_memory_update_def simpler_modify_def restrict_map_def split: option.splits) @@ -257,25 +257,25 @@ lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: lemma doUserOp_valid_duplicates[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \arch_extras\" apply (simp add: doUserOp_if_def split_def) - apply (wp dmo_setExMonitor_wp' dmo_invs' select_wp | simp)+ + apply (wp dmo_setExMonitor_wp' dmo_invs' | simp)+ done lemma doUserOp_if_schedact[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \\s. 
P (ksSchedulerAction s)\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma doUserOp_if_st_tcb_at[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \st_tcb_at' st t\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma doUserOp_if_cur_thread[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \\s. P (ksCurThread s)\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma do_user_op_if_corres'[ADT_IF_Refine_assms]: @@ -334,7 +334,7 @@ lemma do_user_op_if_corres'[ADT_IF_Refine_assms]: apply (rule corres_split[OF corres_machine_op', where r'="(=)"]) apply (rule corres_underlying_trivial, simp) apply (rule corres_return_same_trivial) - by (wp hoare_TrueI[where P = \] | simp)+ + by wpsimp+ lemma dmo_getActiveIRQ_corres[ADT_IF_Refine_assms]: "corres (=) \ \ (do_machine_op (getActiveIRQ in_kernel)) (doMachineOp (getActiveIRQ in_kernel'))" @@ -401,7 +401,6 @@ lemma handle_preemption_if_corres[ADT_IF_Refine_assms]: crunches doUserOp_if for ksDomainTime_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomainTime s)" and ksDomSchedule_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomSchedule s)" - (wp: select_wp) crunches checkActiveIRQ_if for arch_extras[ADT_IF_Refine_assms, wp]: arch_extras @@ -422,7 +421,7 @@ lemma doUserOp_if_no_interrupt[ADT_IF_Refine_assms]: doUserOp_if uop tc \\r s. (fst r) \ Some Interrupt\" apply (simp add: doUserOp_if_def del: split_paired_All) - apply (wp select_wp | wpc)+ + apply (wp | wpc)+ apply (clarsimp simp: uop_sane_def simp del: split_paired_All) done diff --git a/proof/infoflow/refine/ARM/ArchADT_IF_Refine_C.thy b/proof/infoflow/refine/ARM/ArchADT_IF_Refine_C.thy index bd82a221c7..18518c2964 100644 --- a/proof/infoflow/refine/ARM/ArchADT_IF_Refine_C.thy +++ b/proof/infoflow/refine/ARM/ArchADT_IF_Refine_C.thy @@ -118,7 +118,7 @@ lemma corres_dmo_getExMonitor_C: apply (rule_tac r'="\(r, ms) (r', ms'). 
r = r' \ ms = rv \ ms' = rv'" in corres_split) apply (rule corres_trivial, rule corres_select_f') - apply (clarsimp simp: getExMonitor_def machine_rest_lift_def NonDetMonad.bind_def gets_def + apply (clarsimp simp: getExMonitor_def machine_rest_lift_def Nondet_Monad.bind_def gets_def get_def return_def modify_def put_def select_f_def) apply (clarsimp simp: getExMonitor_no_fail[simplified no_fail_def]) apply (clarsimp simp: split_def) @@ -132,7 +132,7 @@ lemma corres_dmo_getExMonitor_C: cmachine_state_relation_def Let_def) apply (rule corres_trivial, clarsimp) apply (wp hoare_TrueI)+ - apply (rule TrueI conjI | clarsimp simp: getExMonitor_def machine_rest_lift_def NonDetMonad.bind_def + apply (rule TrueI conjI | clarsimp simp: getExMonitor_def machine_rest_lift_def Nondet_Monad.bind_def gets_def get_def return_def modify_def put_def select_f_def)+ done @@ -150,7 +150,7 @@ lemma corres_dmo_setExMonitor_C: ms' = rv'\exclusive_state := es\" in corres_split) apply (rule corres_trivial, rule corres_select_f') - apply (clarsimp simp: setExMonitor_def machine_rest_lift_def NonDetMonad.bind_def gets_def + apply (clarsimp simp: setExMonitor_def machine_rest_lift_def Nondet_Monad.bind_def gets_def get_def return_def modify_def put_def select_f_def) apply (clarsimp simp: setExMonitor_no_fail[simplified no_fail_def]) apply (simp add: split_def) @@ -162,7 +162,7 @@ lemma corres_dmo_setExMonitor_C: apply (clarsimp simp: rf_sr_def cstate_relation_def carch_state_relation_def cmachine_state_relation_def Let_def) apply (wp hoare_TrueI)+ - apply (rule TrueI conjI | clarsimp simp: setExMonitor_def machine_rest_lift_def NonDetMonad.bind_def + apply (rule TrueI conjI | clarsimp simp: setExMonitor_def machine_rest_lift_def Nondet_Monad.bind_def gets_def get_def return_def modify_def put_def select_f_def)+ done @@ -247,7 +247,7 @@ lemma do_user_op_if_C_corres[ADT_IF_Refine_assms]: apply (rule corres_split[OF device_update_corres_C]) apply (rule corres_split[OF corres_dmo_setExMonitor_C, where R="\\" and R'="\\"]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (clarsimp simp: ex_abs_def restrict_map_def invs_pspace_aligned' invs_pspace_distinct' ptable_lift_s'_def ptable_rights_s'_def split: if_splits) diff --git a/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine.thy b/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine.thy index fd13f3ae1f..56b80b2ac3 100644 --- a/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine.thy +++ b/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine.thy @@ -24,7 +24,7 @@ lemma kernelEntry_invs'[ADT_IF_Refine_assms, wp]: kernelEntry_if e tc \\_. invs'\" apply (simp add: kernelEntry_if_def) - apply (wp threadSet_invs_trivial threadSet_ct_running' static_imp_wp + apply (wp threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp | wp (once) hoare_drop_imps | clarsimp)+ done @@ -36,7 +36,7 @@ lemma kernelEntry_arch_extras[ADT_IF_Refine_assms, wp]: kernelEntry_if e tc \\_. 
arch_extras\" apply (simp add: kernelEntry_if_def) - apply (wp threadSet_invs_trivial threadSet_ct_running' static_imp_wp + apply (wp threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp | wp (once) hoare_drop_imps | clarsimp)+ done @@ -186,7 +186,7 @@ lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: apply (wp device_update_invs' dmo_invs' | simp)+ apply (clarsimp simp add: no_irq_modify user_memory_update_def) apply wpsimp - apply (wp select_wp)+ + apply wp+ apply (clarsimp simp: user_memory_update_def simpler_modify_def restrict_map_def split: option.splits) @@ -197,25 +197,25 @@ lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: lemma doUserOp_valid_duplicates[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \arch_extras\" apply (simp add: doUserOp_if_def split_def) - apply (wp dmo_invs' select_wp | simp)+ + apply (wp dmo_invs' | simp)+ done lemma doUserOp_if_schedact[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \\s. P (ksSchedulerAction s)\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma doUserOp_if_st_tcb_at[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \st_tcb_at' st t\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma doUserOp_if_cur_thread[ADT_IF_Refine_assms, wp]: "doUserOp_if f tc \\s. P (ksCurThread s)\" apply (simp add: doUserOp_if_def) - apply (wp select_wp | wpc | simp)+ + apply (wp | wpc | simp)+ done lemma do_user_op_if_corres'[ADT_IF_Refine_assms]: @@ -348,7 +348,6 @@ lemma handle_preemption_if_corres[ADT_IF_Refine_assms]: crunches doUserOp_if for ksDomainTime_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomainTime s)" and ksDomSchedule_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomSchedule s)" - (wp: select_wp) crunches checkActiveIRQ_if for arch_extras[ADT_IF_Refine_assms, wp]: arch_extras @@ -369,7 +368,7 @@ lemma doUserOp_if_no_interrupt[ADT_IF_Refine_assms]: doUserOp_if uop tc \\r s. 
(fst r) \ Some Interrupt\" apply (simp add: doUserOp_if_def del: split_paired_All) - apply (wp select_wp | wpc)+ + apply (wp | wpc)+ apply (clarsimp simp: uop_sane_def simp del: split_paired_All) done diff --git a/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine_C.thy b/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine_C.thy index 077a9129e1..7982e61afb 100644 --- a/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine_C.thy +++ b/proof/infoflow/refine/RISCV64/ArchADT_IF_Refine_C.thy @@ -169,7 +169,7 @@ lemma do_user_op_if_C_corres[ADT_IF_Refine_assms]: apply (rule corres_underlying_split4) apply (rule corres_split[OF user_memory_update_corres_C]) apply (rule corres_split[OF device_update_corres_C]) - apply (wp select_wp | simp)+ + apply (wp | simp)+ apply (clarsimp simp: ex_abs_def restrict_map_def invs_pspace_aligned' invs_pspace_distinct' ptable_lift_s'_def ptable_rights_s'_def split: if_splits) diff --git a/proof/invariant-abstract/AARCH64/ArchAcc_AI.thy b/proof/invariant-abstract/AARCH64/ArchAcc_AI.thy index 45bd52f4e6..d87ce20c4c 100644 --- a/proof/invariant-abstract/AARCH64/ArchAcc_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchAcc_AI.thy @@ -817,10 +817,6 @@ lemma set_asid_pool_valid_objs [wp]: unfolding set_asid_pool_def by (wpsimp wp: set_object_valid_objs simp: valid_obj_def) -lemma invs_valid_global_arch_objs: - "invs s \ valid_global_arch_objs s" - by (clarsimp simp: invs_def valid_state_def valid_arch_state_def) - lemma is_aligned_pt: "\ pt_at pt_t pt s; pspace_aligned s \ \ is_aligned pt (pt_bits pt_t)" apply (clarsimp simp: obj_at_def) @@ -1143,7 +1139,7 @@ lemma set_object_caps_of_state: done lemma set_pt_aobjs_of: - "\\s. aobjs_of s p \ None \ P (aobjs_of s(p \ PageTable pt)) \ set_pt p pt \\_ s. P (aobjs_of s)\" + "\\s. aobjs_of s p \ None \ P ((aobjs_of s)(p \ PageTable pt)) \ set_pt p pt \\_ s. P (aobjs_of s)\" unfolding set_pt_def supply fun_upd_apply[simp del] by (wpsimp wp: set_object_wp) @@ -1180,13 +1176,9 @@ lemma set_pt_global_objs [wp]: crunch v_ker_map[wp]: set_pt "valid_kernel_mappings" (ignore: set_object wp: set_object_v_ker_map crunch_wps) - -lemma set_pt_asid_map [wp]: - "\valid_asid_map\ set_pt p pt \\_. valid_asid_map\" - apply (simp add: valid_asid_map_def vspace_at_asid_def) - apply (rule hoare_lift_Pf2 [where f="arch_state"]) - apply wp+ - done +lemma set_pt_asid_map[wp]: + "set_pt p pt \valid_asid_map\" + by (wp valid_asid_map_lift_strong) crunches store_pte for pred_tcb[wp]: "\s. 
Q (pred_tcb_at proj P t s)" @@ -1275,7 +1267,7 @@ lemma pt_walk_upd_idem: \ pt_walk top_level level' pt_ptr vptr (ptes_of s) = Some (level', pt_ptr') \ pt_ptr' \ obj_ref; is_aligned pt_ptr (pt_bits top_level); top_level \ max_pt_level \ - \ pt_walk top_level level pt_ptr vptr (ptes_of (s\kheap := kheap s(obj_ref \ ko)\)) + \ pt_walk top_level level pt_ptr vptr (ptes_of (s\kheap := (kheap s)(obj_ref \ ko)\)) = pt_walk top_level level pt_ptr vptr (ptes_of s)" by (rule pt_walk_eqI; simp split del: if_split) (clarsimp simp: opt_map_def split: option.splits) @@ -1342,7 +1334,7 @@ lemma vs_lookup_table_upd_idem: \ vs_lookup_table level' asid vref s = Some (level', p') \ p' \ obj_ref; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (rule vs_lookup_table_eqI; simp split del: if_split) (clarsimp simp: opt_map_def split: option.splits) @@ -1351,7 +1343,7 @@ lemma vs_lookup_table_Some_upd_idem: "\ vs_lookup_table level asid vref s = Some (level, obj_ref); vref \ user_region; pspace_aligned s; pspace_distinct s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (subst vs_lookup_table_upd_idem; simp?) (fastforce dest: no_loop_vs_lookup_table) @@ -1360,7 +1352,7 @@ lemma ex_vs_lookup_upd_idem: "\ \\ (level, p) s; pspace_aligned s; pspace_distinct s; valid_vspace_objs s; valid_asid_table s; unique_table_refs s; valid_vs_lookup s; valid_caps (caps_of_state s) s \ - \ \\ (level, p) (s\kheap := kheap s(p \ ko)\) = \\ (level, p) s" + \ \\ (level, p) (s\kheap := (kheap s)(p \ ko)\) = \\ (level, p) s" apply (rule iffI; clarsimp) apply (rule_tac x=asid in exI) apply (rule_tac x=vref in exI) @@ -1438,7 +1430,7 @@ lemma pt_lookup_target_pt_upd_eq: by (rule pt_lookup_target_pt_eqI; clarsimp) lemma kheap_pt_upd_simp[simp]: - "(kheap s(p \ ArchObj (PageTable pt)) |> aobj_of |> pt_of) + "((kheap s)(p \ ArchObj (PageTable pt)) |> aobj_of |> pt_of) = (kheap s |> aobj_of |> pt_of)(p \ pt)" unfolding aobj_of_def opt_map_def by (auto split: kernel_object.split) @@ -1518,7 +1510,7 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1526,7 +1518,7 @@ lemma in_user_frame_same_type_upd: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1551,7 +1543,7 @@ lemma load_word_offs_in_user_frame[wp]: lemma valid_machine_state_heap_updI: "\ valid_machine_state s; typ_at type p s; a_type obj = type \ - \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" by (fastforce simp: valid_machine_state_def intro: in_user_frame_same_type_upd elim: valid_machine_stateE) @@ -1672,7 +1664,7 @@ lemma set_asid_pool_valid_global [wp]: lemma 
vs_lookup_table_unreachable_upd_idem: "\ \level. vs_lookup_table level asid vref s \ Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" apply (subst vs_lookup_table_upd_idem; fastforce) done @@ -1680,14 +1672,14 @@ lemma vs_lookup_table_unreachable_upd_idem: lemma vs_lookup_table_unreachable_upd_idem': "\ \(\level. \\ (level, obj_ref) s); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (rule vs_lookup_table_unreachable_upd_idem; fastforce) lemma vs_lookup_target_unreachable_upd_idem: "\ \level. vs_lookup_table level asid vref s \ Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_target level asid vref s" supply fun_upd_apply[simp del] apply (clarsimp simp: vs_lookup_target_def vs_lookup_slot_def obind_assoc) @@ -1722,12 +1714,12 @@ lemma vs_lookup_target_unreachable_upd_idem: lemma vs_lookup_target_unreachable_upd_idem': "\ \(\level. \\ (level, obj_ref) s); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_target level asid vref s" by (rule vs_lookup_target_unreachable_upd_idem; fastforce) lemma vs_lookup_table_fun_upd_deep_idem: - "\ vs_lookup_table level asid vref (s\kheap := kheap s(p \ ko)\) = Some (level, p'); + "\ vs_lookup_table level asid vref (s\kheap := (kheap s)(p \ ko)\) = Some (level, p'); vs_lookup_table level' asid vref s = Some (level', p); level' \ level; vref \ user_region; valid_vspace_objs s; valid_asid_table s; pspace_aligned s; pspace_distinct s \ @@ -1816,8 +1808,8 @@ lemma vs_lookup_target_pt_levelI: lemma vs_lookup_target_asid_pool_level_upd_helper: "\ graph_of ap \ graph_of ap'; kheap s p = Some (ArchObj (ASIDPool ap')); vref \ user_region; - vspace_for_pool pool_ptr asid (asid_pools_of s(p \ ap)) = Some pt_ptr; - pool_for_asid asid (s\kheap := kheap s(p \ ArchObj (ASIDPool ap))\) = Some pool_ptr\ + vspace_for_pool pool_ptr asid ((asid_pools_of s)(p \ ap)) = Some pt_ptr; + pool_for_asid asid (s\kheap := (kheap s)(p \ ArchObj (ASIDPool ap))\) = Some pool_ptr\ \ vs_lookup_target asid_pool_level asid vref s = Some (asid_pool_level, pt_ptr)" apply (clarsimp simp: pool_for_asid_vs_lookup vspace_for_pool_def entry_for_pool_def in_omonad) apply (clarsimp split: if_splits) @@ -1828,7 +1820,7 @@ lemma vs_lookup_target_asid_pool_level_upd_helper: done lemma vs_lookup_target_None_upd_helper: - "\ vs_lookup_table level asid vref (s\kheap := kheap s(p \ ArchObj (ASIDPool ap))\) = + "\ vs_lookup_table level asid vref (s\kheap := (kheap s)(p \ ArchObj (ASIDPool ap))\) = Some (level, table_ptr); ((\pa. 
level_pte_of (level_type level) pa ((pts_of s)(p := None))) |> pte_ref) (pt_slot_offset level table_ptr vref) @@ -1920,7 +1912,7 @@ lemma set_asid_pool_equal_mappings[wp]: lemma translate_address_asid_pool_upd: "pts_of s p = None \ translate_address pt_ptr vref - (\pt_t pa. level_pte_of pt_t pa (kheap s(p \ ArchObj (ASIDPool ap)) |> aobj_of |> pt_of)) + (\pt_t pa. level_pte_of pt_t pa ((kheap s)(p \ ArchObj (ASIDPool ap)) |> aobj_of |> pt_of)) = translate_address pt_ptr vref (ptes_of s)" by simp @@ -1988,6 +1980,16 @@ lemma set_asid_pool_valid_asid_pool_caps[wp]: unfolding valid_asid_pool_caps_def by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift') +lemma set_asid_pool_None_valid_asid_map[wp]: + "\ valid_asid_map and (\s. asid_pools_of s p = Some ap) \ + set_asid_pool p (ap (asid_low := None)) + \\_. valid_asid_map\" + unfolding valid_asid_map_def entry_for_asid_def + apply (clarsimp simp: obind_None_eq pool_for_asid_def) + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift get_object_wp) + apply (fastforce simp: entry_for_pool_def obind_None_eq in_omonad split: if_split_asm) + done + lemma set_asid_pool_invs_unmap: "\invs and (\s. asid_pools_of s p = Some ap) and @@ -1996,7 +1998,7 @@ lemma set_asid_pool_invs_unmap: set_asid_pool p (ap (asid_low := None)) \\_. invs\" apply (simp add: invs_def valid_state_def valid_pspace_def - valid_arch_caps_def valid_asid_map_def) + valid_arch_caps_def) apply (wp valid_irq_node_typ set_asid_pool_typ_at set_asid_pool_vspace_objs_unmap valid_irq_handlers_lift @@ -2251,7 +2253,7 @@ lemma pt_walk_below_pt_upd_idem: pt_walk (level' - 1) level (pptr_from_pte (pt_apply (pt_upd pt (table_index (level_type level') p) pte) (pt_index level' vref))) vref - (\pt_t pa. level_pte_of pt_t pa (pts_of s(table_base (level_type level') p \ + (\pt_t pa. level_pte_of pt_t pa ((pts_of s)(table_base (level_type level') p \ pt_upd pt (table_index (level_type level') p) pte))) = pt_walk (level' - 1) level (pptr_from_pte (pt_apply (pt_upd pt (table_index (level_type level') p) pte) @@ -2820,7 +2822,7 @@ crunches do_machine_op and pspace_in_kernel_window[wp]: pspace_in_kernel_window and cap_refs_in_kernel_window[wp]: cap_refs_in_kernel_window and vspace_at_asid[wp]: "\s. P (vspace_at_asid a pt s)" - and valid_vs_lookup[wp]: "valid_vs_lookup" + and valid_vs_lookup[wp]: "\s. P (valid_vs_lookup s)" and valid_obj[wp]: "valid_obj t obj" (simp: valid_kernel_mappings_def wp: valid_obj_typ) diff --git a/proof/invariant-abstract/AARCH64/ArchArch_AI.thy b/proof/invariant-abstract/AARCH64/ArchArch_AI.thy index 23d6aa1694..0d9d21e69f 100644 --- a/proof/invariant-abstract/AARCH64/ArchArch_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchArch_AI.thy @@ -78,56 +78,10 @@ lemma check_vp_inv: "\P\ check_vp_alignment sz w \\ - (- dom (\a::asid_low_index. p (ucast a :: machine_word)) \ {x. ucast x + (y::AARCH64_A.asid) \ 0} = {}) = - (- dom p \ {x. 
x \ 2 ^ asid_low_bits - 1 \ x + ucast y \ 0} = {})" - apply safe - apply clarsimp - apply (rule ccontr) - apply (erule_tac x="ucast x" in in_emptyE) - apply (clarsimp simp: p2_low_bits_max) - apply (rule conjI) - apply (clarsimp simp: ucast_ucast_mask) - apply (subst (asm) less_mask_eq) - apply (rule word_less_sub_le [THEN iffD1]) - apply (simp add: word_bits_def) - apply (simp add: asid_low_bits_def) - apply simp - apply (clarsimp simp: mask_2pm1[symmetric] ucast_ucast_mask2 is_down is_aligned_mask) - apply (frule and_mask_eq_iff_le_mask[THEN iffD2]) - apply (simp add: asid_low_bits_def) - apply (erule notE) - apply (subst word_plus_and_or_coroll) - apply word_eqI_solve - apply (subst (asm) word_plus_and_or_coroll; word_bitwise, clarsimp simp: word_size) - apply (clarsimp simp: p2_low_bits_max) - apply (rule ccontr) - apply simp - apply (erule_tac x="ucast x" in in_emptyE) - apply clarsimp - apply (rule conjI, blast) - apply (rule conjI) - apply (rule word_less_sub_1) - apply (rule order_less_le_trans) - apply (rule ucast_less, simp) - apply (simp add: asid_low_bits_def) - apply clarsimp - apply (erule notE) - apply (simp add: is_aligned_mask asid_low_bits_def) - apply (subst word_plus_and_or_coroll) - apply word_eqI_solve - apply (subst (asm) word_plus_and_or_coroll) - apply (word_bitwise, clarsimp simp: word_size) - apply (word_bitwise) - done - - lemma asid_high_bits_max_word: "(2 ^ asid_high_bits - 1) = (max_word :: asid_high_index)" by (simp add: asid_high_bits_def) @@ -438,7 +392,8 @@ lemma valid_arch_caps: lemma valid_asid_map': "valid_asid_map s \ valid_asid_map s'" - by (clarsimp simp: valid_asid_map_def) + by (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq pool_for_asid_def s'_def + entry_for_pool_def ko) lemma vspace_for_asid[simp]: "vspace_for_asid asid s' = vspace_for_asid asid s" @@ -462,7 +417,7 @@ context Arch begin global_naming AARCH64 lemma vmid_for_asid_empty_update: "\ asid_table s asid_high = None; asid_pools_of s ap = Some Map.empty \ \ - vmid_for_asid_2 asid (asid_table s(asid_high \ ap)) (asid_pools_of s) = vmid_for_asid s asid" + vmid_for_asid_2 asid ((asid_table s)(asid_high \ ap)) (asid_pools_of s) = vmid_for_asid s asid" by (clarsimp simp: vmid_for_asid_2_def obind_def entry_for_pool_def opt_map_def split: option.splits) @@ -496,7 +451,7 @@ lemma valid_asid_pool_caps_upd_strg: (\ptr cap. caps_of_state s ptr = Some cap \ obj_refs cap = {ap} \ vs_cap_ref cap = Some (ucast asid << asid_low_bits, 0)) \ - valid_asid_pool_caps_2 (caps_of_state s) (asid_table s(asid \ ap))" + valid_asid_pool_caps_2 (caps_of_state s) ((asid_table s)(asid \ ap))" apply clarsimp apply (prop_tac "asid_update ap asid s", (unfold_locales; assumption)) apply (fastforce dest: asid_update.valid_asid_pool_caps') @@ -589,7 +544,7 @@ lemma cap_insert_simple_arch_caps_ap: and K (cap = ArchObjectCap (ASIDPoolCap ap asid) \ is_aligned asid asid_low_bits) \ cap_insert cap src dest \\rv s. 
valid_arch_caps (s\arch_state := arch_state s - \arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := (asid_table s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg valid_asid_pool_caps_upd_strg) @@ -602,7 +557,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -624,7 +579,8 @@ lemma valid_asid_map_asid_upd_strg: asid_pools_of s ap = Some Map.empty \ asid_table s asid = None \ valid_asid_map (asid_table_update asid ap s)" - by (simp add: valid_asid_map_def) + by (simp add: valid_asid_map_def entry_for_asid_def obind_None_eq entry_for_pool_def + pool_for_asid_def) lemma valid_vspace_objs_asid_upd_strg: "valid_vspace_objs s \ @@ -934,7 +890,8 @@ lemmas aci_invs[wp] = aci_invs'[where Q=\,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified] lemma obj_at_upd2: - "obj_at P t' (s\kheap := kheap s(t \ v, x \ v')\) = (if t' = x then P v' else obj_at P t' (s\kheap := kheap s(t \ v)\))" + "obj_at P t' (s\kheap := (kheap s)(t \ v, x \ v')\) = + (if t' = x then P v' else obj_at P t' (s\kheap := (kheap s)(t \ v)\))" by (simp add: obj_at_update obj_at_def) lemma vcpu_invalidate_active_hyp_refs_empty[wp]: @@ -1008,7 +965,7 @@ lemma ex_nonz_cap_to_vcpu_udpate[simp]: by (simp add: ex_nonz_cap_to_def) lemma caps_of_state_VCPU_update: - "vcpu_at a s \ caps_of_state (s\kheap := kheap s(a \ ArchObj (VCPU b))\) = caps_of_state s" + "vcpu_at a s \ caps_of_state (s\kheap := (kheap s)(a \ ArchObj (VCPU b))\) = caps_of_state s" by (rule ext) (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases obj_at_def) lemma set_vcpu_ex_nonz_cap_to[wp]: @@ -1018,7 +975,7 @@ lemma set_vcpu_ex_nonz_cap_to[wp]: done lemma caps_of_state_tcb_arch_update: - "ko_at (TCB y) t' s \ caps_of_state (s\kheap := kheap s(t' \ TCB (y\tcb_arch := f (tcb_arch y)\))\) = caps_of_state s" + "ko_at (TCB y) t' s \ caps_of_state (s\kheap := (kheap s)(t' \ TCB (y\tcb_arch := f (tcb_arch y)\))\) = caps_of_state s" by (rule ext) (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases obj_at_def tcb_cap_cases_def) lemma arch_thread_set_ex_nonz_cap_to[wp]: @@ -1336,7 +1293,7 @@ crunch_ignore (add: select_ext find_vspace_for_asid) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp hoare_vcg_all_lift + (wp: crunch_wps select_ext_weak_wp hoare_vcg_all_lift hoare_vcg_all_lift_R hoare_drop_imps simp: crunch_simps) @@ -1390,6 +1347,16 @@ lemma vs_lookup_slot_pte_at: apply (rule is_aligned_add; simp add: is_aligned_shift) done +(* used in Refine *) +lemma pt_lookup_slot_pte_at: + "\ vspace_for_asid asid s = Some pt; pt_lookup_slot pt vref (ptes_of s) = Some (level, slot); + vref \ user_region; invs s\ + \ pte_at (level_type level) slot s" + apply (drule (1) pt_lookup_slot_vs_lookup_slotI) + apply clarsimp + apply (erule (3) vs_lookup_slot_pte_at) + done + lemma vmpage_size_of_level_pt_bits_left: "\ pt_bits_left level = pageBitsForSize vmpage_size; level \ max_pt_level \ \ vmsize_of_level level = vmpage_size" @@ -1432,6 +1399,11 @@ lemma pageBitsForSize_level_0_eq: by (simp add: 
pageBitsForSize_def pt_bits_left_def ptTranslationBits_def split: vmpage_size.splits if_split_asm) +(* FIXME AARCH64: replace user_vtop_canonical_user *) +lemma user_vtop_leq_canonical_user: + "vref \ user_vtop \ vref \ canonical_user" + using user_vtop_leq_canonical_user by simp + lemma decode_fr_inv_map_wf[wp]: assumes "arch_cap = FrameCap p rights vmpage_size dev option" shows @@ -1449,11 +1421,12 @@ proof - apply (clarsimp simp: if_distribR if_bool_simps disj_imp cong: if_cong split del: if_split) apply (rule pull_out) apply clarsimp - apply (prop_tac "\ user_vtop \ args ! 0 + mask (pageBitsForSize vmpage_size) \ args!0 \ user_region") + apply (prop_tac "\ user_vtop < args ! 0 + mask (pageBitsForSize vmpage_size) \ args!0 \ user_region") apply (clarsimp simp: user_region_def not_le) - apply (rule user_vtop_canonical_user) - apply (erule aligned_add_mask_lessD) - apply (simp add: vmsz_aligned_def) + apply (rule user_vtop_leq_canonical_user) + apply (simp add: vmsz_aligned_def not_less) + apply (drule is_aligned_no_overflow_mask) + apply simp apply (rename_tac pte_ptr level) apply (clarsimp simp: valid_arch_inv_def valid_page_inv_def neq_Nil_conv) apply (rename_tac cptr cidx excaps') @@ -1641,7 +1614,7 @@ lemma decode_asid_control_invocation_wf[wp]: apply (simp add: lookup_target_slot_def) apply wp apply (clarsimp simp: cte_wp_at_def) - apply (wpsimp wp: ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp)+ + apply (wpsimp wp: ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp)+ apply (rule conjI, fastforce) apply (cases excaps, simp) apply (case_tac list, simp) diff --git a/proof/invariant-abstract/AARCH64/ArchBits_AI.thy b/proof/invariant-abstract/AARCH64/ArchBits_AI.thy index 86f9d6b194..f5afcbc3fb 100644 --- a/proof/invariant-abstract/AARCH64/ArchBits_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchBits_AI.thy @@ -50,6 +50,18 @@ lemma invs_valid_vs_lookup[elim!]: "invs s \ valid_vs_lookup s " by (clarsimp simp: invs_def valid_state_def valid_arch_caps_def) +lemma invs_vmid_inv[elim!]: + "invs s \ vmid_inv s" + by (auto simp: invs_def valid_state_def valid_arch_state_def) + +lemma invs_valid_vmid_table[elim!]: + "invs s \ valid_vmid_table s" + by (auto simp: invs_def valid_state_def valid_arch_state_def) + +lemma invs_valid_global_arch_objs[elim!]: + "invs s \ valid_global_arch_objs s" + by (clarsimp simp: invs_def valid_state_def valid_arch_state_def) + lemma pbfs_atleast_pageBits: "pageBits \ pageBitsForSize sz" by (cases sz) (auto simp: pageBits_def) diff --git a/proof/invariant-abstract/AARCH64/ArchCNodeInv_AI.thy b/proof/invariant-abstract/AARCH64/ArchCNodeInv_AI.thy index 1829419051..0cbede9b26 100644 --- a/proof/invariant-abstract/AARCH64/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchCNodeInv_AI.thy @@ -410,10 +410,8 @@ lemma cap_swap_asid_map[wp, CNodeInv_AI_assms]: cte_wp_at (weak_derived c) a and cte_wp_at (weak_derived c') b\ cap_swap c a c' b \\rv. 
valid_asid_map\" - apply (simp add: cap_swap_def set_cdt_def valid_asid_map_def vspace_at_asid_def) - apply (rule hoare_pre) - apply (wp set_cap.vs_lookup|simp - |rule hoare_lift_Pf [where f=arch_state])+ + apply (simp add: cap_swap_def set_cdt_def vspace_at_asid_def) + apply (wp set_cap.vs_lookup|simp|rule hoare_lift_Pf [where f=arch_state])+ done @@ -542,7 +540,7 @@ context Arch begin global_naming AARCH64 lemma post_cap_delete_pre_is_final_cap': "\valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -619,7 +617,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) diff --git a/proof/invariant-abstract/AARCH64/ArchCSpacePre_AI.thy b/proof/invariant-abstract/AARCH64/ArchCSpacePre_AI.thy index 5f85806480..38a0a88e20 100644 --- a/proof/invariant-abstract/AARCH64/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchCSpacePre_AI.thy @@ -154,7 +154,7 @@ lemma arch_derived_is_device: lemma valid_arch_mdb_simple: "\ valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ - valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) (caps_of_state s(dest \ cap))" + valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) ((caps_of_state s)(dest \ cap))" by (auto simp: valid_arch_mdb_def is_cap_revocable_def arch_is_cap_revocable_def is_simple_cap_def safe_parent_for_def is_cap_simps) @@ -179,34 +179,34 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by (clarsimp simp: valid_arch_mdb_def) lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_weak_derived_update: "\s capa. 
\valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_tcb_cnode_update: "valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb ((is_original_cap s) ((t, tcb_cnode_index 2) := True)) - (caps_of_state s((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" + ((caps_of_state s)((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" by (clarsimp simp: valid_arch_mdb_def) lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -239,10 +239,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by (clarsimp simp: valid_arch_mdb_def)+ end diff --git a/proof/invariant-abstract/AARCH64/ArchCSpace_AI.thy b/proof/invariant-abstract/AARCH64/ArchCSpace_AI.thy index 8e74745c27..99901c650d 100644 --- a/proof/invariant-abstract/AARCH64/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchCSpace_AI.thy @@ -175,7 +175,7 @@ lemma is_derived_is_cap: lemma vs_lookup_pages_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ vs_lookup_pages (s\kheap := kheap s(p \ ko')\) = vs_lookup_pages s" + \ vs_lookup_pages (s\kheap := (kheap s)(p \ ko')\) = vs_lookup_pages s" unfolding vs_lookup_target_def vs_lookup_slot_def apply (frule aobjs_of_non_aobj_upd[where ko'=ko'], simp+) apply (rule ext)+ @@ -190,7 +190,7 @@ lemma vs_lookup_pages_non_aobj_upd: lemma vs_lookup_target_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(p \ ko')\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(p \ ko')\) = vs_lookup_target level asid vref s" by (drule vs_lookup_pages_non_aobj_upd[where ko'=ko'], auto dest: fun_cong) diff --git a/proof/invariant-abstract/AARCH64/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/AARCH64/ArchDetSchedAux_AI.thy index e2eff8d130..d500ad97dc 100644 --- a/proof/invariant-abstract/AARCH64/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchDetSchedAux_AI.thy @@ -92,7 +92,7 @@ crunches perform_asid_control_invocation and schedact[wp]: "\s. P (scheduler_action s)" and ready_queues[wp]: "\s. P (ready_queues s)" and cur_domain[wp]: "\s. 
P (cur_domain s)" - (wp: static_imp_wp) + (wp: hoare_weak_lift_imp) lemma perform_asid_control_invocation_valid_sched: "\ct_active and invs and valid_aci aci and valid_sched and valid_idle\ diff --git a/proof/invariant-abstract/AARCH64/ArchDetSchedSchedule_AI.thy b/proof/invariant-abstract/AARCH64/ArchDetSchedSchedule_AI.thy index 59c232c9b7..e45b48e3ba 100644 --- a/proof/invariant-abstract/AARCH64/ArchDetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchDetSchedSchedule_AI.thy @@ -345,7 +345,7 @@ crunch valid_etcbs [wp, DetSchedSchedule_AI_assms]: crunch simple_sched_action [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete simple_sched_action - (wp: hoare_drop_imps mapM_x_wp mapM_wp select_wp subset_refl + (wp: hoare_drop_imps mapM_x_wp mapM_wp subset_refl simp: unless_def if_fun_split) crunch valid_sched [wp, DetSchedSchedule_AI_assms]: diff --git a/proof/invariant-abstract/AARCH64/ArchDetype_AI.thy b/proof/invariant-abstract/AARCH64/ArchDetype_AI.thy index 570eeb0858..899b51d9d1 100644 --- a/proof/invariant-abstract/AARCH64/ArchDetype_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchDetype_AI.thy @@ -496,8 +496,15 @@ proof - thus ?thesis by (simp add: valid_kernel_mappings_def detype_def ball_ran_eq) qed -lemma valid_asid_map_detype[detype_invs_proofs]: "valid_asid_map (detype (untyped_range cap) s)" - by (simp add: valid_asid_map_def) +lemma valid_asid_map_detype[detype_invs_proofs]: + "valid_asid_map (detype (untyped_range cap) s)" +proof - + have "valid_asid_map s" + using invs by (simp add: invs_def valid_state_def) + thus ?thesis + by (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq pool_for_asid_def + entry_for_pool_def) +qed lemma equal_kernel_mappings_detype[detype_invs_proofs]: "equal_kernel_mappings (detype (untyped_range cap) s)" diff --git a/proof/invariant-abstract/AARCH64/ArchFinalise_AI.thy b/proof/invariant-abstract/AARCH64/ArchFinalise_AI.thy index 636df09577..5bd5f26a34 100644 --- a/proof/invariant-abstract/AARCH64/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchFinalise_AI.thy @@ -469,7 +469,7 @@ lemma arch_thread_set_cur_tcb[wp]: "\cur_tcb\ arch_thread_set p lemma cte_wp_at_update_some_tcb: "\kheap s v = Some (TCB tcb) ; tcb_cnode_map tcb = tcb_cnode_map (f tcb)\ - \ cte_wp_at P p (s\kheap := kheap s (v \ TCB (f tcb))\) = cte_wp_at P p s" + \ cte_wp_at P p (s\kheap := (kheap s)(v \ TCB (f tcb))\) = cte_wp_at P p s" apply (clarsimp simp: cte_wp_at_cases2 dest!: get_tcb_SomeD) done @@ -664,7 +664,7 @@ lemma arch_thread_set_valid_objs_vcpu_Some[wp]: lemma sym_refs_update_some_tcb: "\kheap s v = Some (TCB tcb) ; refs_of (TCB tcb) = refs_of (TCB (f tcb))\ - \ sym_refs (state_refs_of (s\kheap := kheap s (v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" + \ sym_refs (state_refs_of (s\kheap := (kheap s)(v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" apply (rule_tac f=sym_refs in arg_cong) apply (rule all_ext) apply (clarsimp simp: sym_refs_def state_refs_of_def) @@ -705,7 +705,7 @@ lemma vcpu_invalidate_tcbs_inv[wp]: lemma sym_refs_vcpu_None: assumes sym_refs: "sym_refs (state_hyp_refs_of s)" assumes tcb: "ko_at (TCB tcb) t s" "tcb_vcpu (tcb_arch tcb) = Some vr" - shows "sym_refs (state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_arch := tcb_vcpu_update Map.empty (tcb_arch tcb)\), + shows "sym_refs (state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_arch := tcb_vcpu_update Map.empty (tcb_arch tcb)\), vr \ ArchObj (VCPU (vcpu_tcb_update Map.empty v)))\))" (is "sym_refs (state_hyp_refs_of 
?s')") proof - @@ -739,7 +739,7 @@ proof - qed lemma arch_thread_set_wp: - "\\s. get_tcb p s \ None \ Q (s\kheap := kheap s(p \ TCB (the (get_tcb p s)\tcb_arch := f (tcb_arch (the (get_tcb p s)))\))\) \ + "\\s. get_tcb p s \ None \ Q (s\kheap := (kheap s)(p \ TCB (the (get_tcb p s)\tcb_arch := f (tcb_arch (the (get_tcb p s)))\))\) \ arch_thread_set f p \\_. Q\" apply (simp add: arch_thread_set_def) @@ -1598,7 +1598,7 @@ crunches (wp: crunch_wps subset_refl) crunch irq_node[Finalise_AI_asms,wp]: prepare_thread_delete "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_node[wp]: arch_finalise_cap "\s. P (interrupt_irq_node s)" (simp: crunch_simps wp: crunch_wps) @@ -1830,7 +1830,7 @@ lemma (* replace_cap_invs_arch_update *)[Finalise_AI_asms]: lemma dmo_pred_tcb_at[wp]: "do_machine_op mop \\s. P (pred_tcb_at f Q t s)\" apply (simp add: do_machine_op_def split_def) - apply (wp select_wp) + apply wp apply (clarsimp simp: pred_tcb_at_def obj_at_def) done @@ -1908,7 +1908,7 @@ lemma set_asid_pool_obj_at_ptr: locale_abbrev "asid_table_update asid ap s \ - s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\" + s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\" lemma valid_table_caps_table [simp]: "valid_table_caps (s\arch_state := arch_state s\arm_asid_table := table'\\) = valid_table_caps s" diff --git a/proof/invariant-abstract/AARCH64/ArchInvariants_AI.thy b/proof/invariant-abstract/AARCH64/ArchInvariants_AI.thy index b3bd2a1f89..9180267283 100644 --- a/proof/invariant-abstract/AARCH64/ArchInvariants_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchInvariants_AI.thy @@ -587,12 +587,15 @@ definition pspace_in_kernel_window :: "'z::state_ext state \ bool" w definition vspace_at_asid :: "asid \ obj_ref \ 'z::state_ext state \ bool" where "vspace_at_asid asid pt \ \s. vspace_for_asid asid s = Some pt" +definition kernel_window_range :: "obj_ref set" where + "kernel_window_range \ {pptr_base ..< pptrTop}" + definition valid_uses_2 :: "arm_vspace_region_uses \ bool" where "valid_uses_2 uses \ \p. (\canonical_address p \ uses p = ArmVSpaceInvalidRegion) - \ (p \ {pptr_base ..< pptrTop} + \ (p \ kernel_window_range \ uses p \ {ArmVSpaceKernelWindow, ArmVSpaceInvalidRegion}) - \ (uses p = ArmVSpaceKernelWindow \ p \ {pptr_base ..< pptrTop}) + \ (uses p = ArmVSpaceKernelWindow \ p \ kernel_window_range) \ \The kernel device window doesn't occupy the entire region above kdev_base\ \ (kdev_base \ p \ uses p \ {ArmVSpaceDeviceWindow, ArmVSpaceInvalidRegion}) \ \No user window in hyp kernel address space\ @@ -621,9 +624,21 @@ lemmas vmid_for_asid_def = vmid_for_asid_2_def abbreviation (input) asid_map :: "'z::state_ext state \ asid \ vmid" where "asid_map \ vmid_for_asid" +locale_abbrev + "vmid_table s \ arm_vmid_table (arch_state s)" + (* vmIDs stored in ASID pools form the inverse of the vmid_table *) definition vmid_inv :: "'z::state_ext state \ bool" where - "vmid_inv s \ is_inv (arm_vmid_table (arch_state s)) (vmid_for_asid s)" + "vmid_inv s \ is_inv (vmid_table s) (vmid_for_asid s)" + +(* The vmID table never stores ASID 0 *) +definition valid_vmid_table_2 :: "(vmid \ asid) \ bool" where + "valid_vmid_table_2 table \ \vmid. 
table vmid \ Some 0" + +locale_abbrev valid_vmid_table :: "'z::state_ext state \ bool" where + "valid_vmid_table s \ valid_vmid_table_2 (vmid_table s)" + +lemmas valid_vmid_table_def = valid_vmid_table_2_def definition valid_global_arch_objs where "valid_global_arch_objs \ \s. vspace_pt_at (global_pt s) s" @@ -641,8 +656,8 @@ locale_abbrev valid_global_tables :: "'z::state_ext state \ bool" wh lemmas valid_global_tables_def = valid_global_tables_2_def definition valid_arch_state :: "'z::state_ext state \ bool" where - "valid_arch_state \ valid_asid_table and valid_uses and vmid_inv and cur_vcpu and - valid_global_arch_objs and valid_global_tables" + "valid_arch_state \ valid_asid_table and valid_uses and vmid_inv and valid_vmid_table and + cur_vcpu and valid_global_arch_objs and valid_global_tables" (* ---------------------------------------------------------------------------------------------- *) @@ -698,10 +713,10 @@ definition state_hyp_refs_of :: "'z::state_ext state \ obj_ref \ \s p. case_option {} (hyp_refs_of) (kheap s p)" -(* covered by ASIDPool case of valid_vspace_obj, inv_vmid, and definition of - vspace_for_asid (asid 0 never mapped) *) +(* Mostly covered by ASIDPool case of valid_vspace_obj and vmid_inv, but we still need to make sure + that ASID 0 is never mapped. *) definition valid_asid_map :: "'z::state_ext state \ bool" where - "valid_asid_map \ \" + "valid_asid_map \ \s. entry_for_asid 0 s = None" definition valid_global_objs :: "'z::state_ext state \ bool" where "valid_global_objs \ \" @@ -791,6 +806,14 @@ lemma vcpuBits_bounded[simp,intro!]: "vcpuBits < word_bits" including machine_bit_simps by (simp add: word_bits_def) +lemma ptTranslationBits_le_machine_word[simplified, simp]: + "ptTranslationBits pt_t < LENGTH(machine_word_len)" + by (simp add: bit_simps) + +lemma pte_bits_leq_table_size[simp]: + "pte_bits \ table_size pt_t" + by (simp add: table_size_def) + (* with asid_pool_level normalised to -1, max_pt_level otherwise becomes -2 *) lemma max_pt_level_def2: "max_pt_level = (if config_ARM_PA_SIZE_BITS_40 then 2 else 3)" by (simp add: max_pt_level_def asid_pool_level_def Kernel_Config.config_ARM_PA_SIZE_BITS_40_def) @@ -1498,6 +1521,11 @@ lemma pptrTop_le_ipa_size: "pptrTop \ mask ipa_size" by (simp add: bit_simps pptrTop_def mask_def) +lemma below_pptrTop_ipa_size: + "p < pptrTop \ p \ mask ipa_size" + using pptrTop_le_ipa_size + by simp + lemma addrFromPPtr_mask_ipa: "\ pptr_base \ pt_ptr; pt_ptr < pptrTop \ \ addrFromPPtr pt_ptr && mask ipa_size = addrFromPPtr pt_ptr" @@ -1516,7 +1544,24 @@ lemmas window_defs = lemma valid_uses_kernel_window: "\ valid_uses s; p \ kernel_window s \ \ p \ {pptr_base ..< pptrTop} \ canonical_address p" unfolding valid_uses_def window_defs - by (erule_tac x=p in allE) auto + by (erule_tac x=p in allE) (auto simp: kernel_window_range_def) + +lemma kernel_window_bounded: + "\ p \ kernel_window s; valid_uses s \ \ p \ kernel_window_range" + by (fastforce dest: valid_uses_kernel_window simp: kernel_window_range_def) + +lemma pspace_in_kw_bounded: + "\ kheap s p = Some ko; pspace_in_kernel_window s; valid_uses s; pspace_aligned s \ \ + p \ kernel_window_range" + unfolding pspace_aligned_def + apply (drule bspec, fastforce) + apply (simp add: pspace_in_kernel_window_def) + apply (erule allE, erule allE, erule (1) impE) + apply (prop_tac "p \ kernel_window s") + apply (erule set_mp) + apply (clarsimp simp: is_aligned_no_overflow) + apply (fastforce dest: kernel_window_bounded) + done lemma pt_walk_max_level: "pt_walk top_level bot_level 
pt_ptr vptr ptes = Some (level, p) @@ -2261,6 +2306,15 @@ lemma valid_vspace_objsD: valid_vspace_obj level ao s" by (simp add: valid_vspace_objs_def) +lemma vspace_for_asid_not_normal_pt: + "\vspace_for_asid asid s = Some pt; normal_pt_at pt s; valid_vspace_objs s\ \ False" + apply (drule vspace_for_asid_vs_lookup) + apply (clarsimp simp: pt_at_eq) + apply (drule (1) valid_vspace_objsD, simp) + apply (fastforce simp: in_omonad) + apply clarsimp + done + (* A static bound on the size of pt_bits. For PA_40 configurations this is 40 (size of the PA/IPA address space). For PA_44 configurations, this is 48, because the page tables can theoretically translate 48 bits, even though the PA/IPA space is only 44 bits wide *) @@ -2344,6 +2398,29 @@ lemma is_aligned_addrFromPPtr[intro!]: "is_aligned p pageBits \ is_aligned (addrFromPPtr p) pageBits" by (simp add: is_aligned_addrFromPPtr_n pageBits_def pptrBaseOffset_alignment_def) +lemma pptrTop_ucast_ppn: + "\ p < pptrTop; is_aligned p pageBits \ \ + ucast (ucast (p >> pageBits)::ppn) = p >> pageBits" + apply (drule below_pptrTop_ipa_size) + apply word_eqI + using ppn_len_def'[unfolded ppn_len_def] + by (fastforce dest: bit_imp_le_length) + +lemma kernel_window_range_addrFromPPtr: + "p \ kernel_window_range \ addrFromPPtr p < pptrTop" + apply (simp add: kernel_window_range_def addrFromPPtr_def pptrBaseOffset_def + paddrBase_def pptr_base_def) + apply unat_arith + done + +lemma kernel_window_addrFromPPtr: + "\ p \ kernel_window_range; is_aligned p pageBits \ \ + ucast (ucast (addrFromPPtr p >> pageBits)::ppn) = addrFromPPtr p >> pageBits" + apply (rule pptrTop_ucast_ppn) + apply (erule kernel_window_range_addrFromPPtr) + apply (erule is_aligned_addrFromPPtr) + done + lemma is_aligned_ptrFromPAddr_n: "\is_aligned x sz; sz \ pptrBaseOffset_alignment\ \ is_aligned (ptrFromPAddr x) sz" @@ -2587,7 +2664,7 @@ lemma vspace_for_asid_lift: apply (simp add: obind_def pool_for_asid_def o_def split del: if_split) apply (rule hoare_lift_Pf[where f=asid_table]) apply (rule hoare_lift_Pf[where f=asid_pools_of]) - apply (wpsimp wp: assms entry_for_asid_lift split: option.splits)+ + apply (wpsimp wp: assms entry_for_asid_lift split: option.splits split_del: if_split)+ done lemma valid_global_arch_objs_lift: @@ -2819,7 +2896,7 @@ lemma vs_lookup_table_eq_lift: lemma aobjs_of_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ kheap s(p \ ko') |> aobj_of = aobjs_of s" + \ (kheap s)(p \ ko') |> aobj_of = aobjs_of s" by (rule ext) (auto simp: opt_map_def is_ArchObj_def aobj_of_def split: kernel_object.splits if_split_asm) @@ -2902,6 +2979,12 @@ lemma hyp_refs_of_rev: vcpu_tcb_refs_def refs_of_ao_def split: kernel_object.splits arch_kernel_obj.splits option.split) +lemma valid_asid_map_lift_strong: + assumes "\P. f \\s. P (asid_table s)\" + assumes "\P. f \\s. 
P (asid_pools_of s)\" + shows "f \valid_asid_map\" + by (wpsimp simp: valid_asid_map_def wp: entry_for_asid_lift assms) + end locale Arch_asid_table_update_eq = Arch + @@ -3043,6 +3126,14 @@ lemma valid_arch_caps_update [iff]: end +context Arch_arch_update_eq begin + +lemma valid_vmid_table_update[iff]: + "valid_vmid_table (f s) = valid_vmid_table s" + by (simp add: arch) + +end + context Arch_p_arch_update_eq begin sublocale Arch_p_asid_table_update_eq diff --git a/proof/invariant-abstract/AARCH64/ArchIpc_AI.thy b/proof/invariant-abstract/AARCH64/ArchIpc_AI.thy index c08556317a..1896b217a4 100644 --- a/proof/invariant-abstract/AARCH64/ArchIpc_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchIpc_AI.thy @@ -308,7 +308,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -483,7 +483,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (wpsimp simp: do_ipc_transfer_def do_normal_transfer_def transfer_caps_def bind_assoc wp: hoare_vcg_all_lift hoare_drop_imps)+ apply (simp only: ball_conj_distrib[where P="\x. real_cte_at x s" for s]) - apply (wpsimp wp: get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wpsimp wp: get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift simp: obj_at_def is_tcb_def)+ apply (simp split: kernel_object.split_asm) @@ -511,7 +511,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" by (auto simp: valid_arch_mdb_def) end diff --git a/proof/invariant-abstract/AARCH64/ArchKHeap_AI.thy b/proof/invariant-abstract/AARCH64/ArchKHeap_AI.thy index 316a11685e..dab8bd5aef 100644 --- a/proof/invariant-abstract/AARCH64/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchKHeap_AI.thy @@ -409,7 +409,7 @@ lemma valid_vspace_objs_lift_weak: by (intro valid_vspace_objs_lift vspace_obj_pred_vspace_objs assms) lemma set_pt_pts_of: - "\\s. pts_of s p \ None \ P (pts_of s (p \ pt)) \ set_pt p pt \\_ s. P (pts_of s)\" + "\\s. pts_of s p \ None \ P ((pts_of s)(p \ pt)) \ set_pt p pt \\_ s. P (pts_of s)\" unfolding set_pt_def by (wpsimp wp: set_object_wp) (auto elim!: rsubst[where P=P] simp: opt_map_def split: option.splits) @@ -458,7 +458,7 @@ lemma pt_apply_pt_upd_neq: lemma ptes_of_pts_of_upd: "\ is_aligned p pte_bits; pts_of s (table_base pt_t p) = Some pt; pt_t = pt_type pt \ \ (\pt_t' p'. level_pte_of pt_t' p' - (pts_of s (table_base pt_t p \ pt_upd pt (table_index pt_t p) pte))) = + ((pts_of s)(table_base pt_t p \ pt_upd pt (table_index pt_t p) pte))) = ptes_of s (pt_t, p \ pte)" apply (rule ext)+ apply (clarsimp simp: fun_upd2_def) @@ -479,7 +479,7 @@ lemma store_pte_ptes_of_full: done lemma store_pte_ptes_of: - "\\s. ptes_of s pt_t p \ None \ P (ptes_of s pt_t (p \ pte)) \ + "\\s. ptes_of s pt_t p \ None \ P ((ptes_of s pt_t)(p \ pte)) \ store_pte pt_t p pte \\_ s. P (ptes_of s pt_t)\" by (wpsimp wp: store_pte_ptes_of_full simp: fun_upd2_def simp_del: fun_upd_apply) @@ -670,7 +670,7 @@ lemma store_pte_non_PageTablePTE_vs_lookup: lemma store_pte_not_ao: "\\s. \pt. 
aobjs_of s (table_base pt_t p) = Some (PageTable pt) \ - P (aobjs_of s (table_base pt_t p \ PageTable (pt_upd pt (table_index pt_t p) pte)))\ + P ((aobjs_of s)(table_base pt_t p \ PageTable (pt_upd pt (table_index pt_t p) pte)))\ store_pte pt_t p pte \\_ s. P (aobjs_of s)\" unfolding store_pte_def set_pt_def @@ -754,6 +754,9 @@ private lemma pred_vspace_objs_of_lift: "f \ \s. P (vspace_objs_ private lemma pred_pts_of_lift: "f \ \s. P (pts_of s) \" by (intro vspace_objs_of_pts_lift pred_vspace_objs_of_lift) +private lemma pred_asid_pools_of_lift: "f \ \s. P (asid_pools_of s) \" + by (intro vspace_objs_of_aps_lift pred_vspace_objs_of_lift) + lemma valid_global_vspace_mappings_lift: "f \valid_global_vspace_mappings\" unfolding valid_global_vspace_mappings_def @@ -770,8 +773,8 @@ lemma valid_global_objs_lift_weak: unfolding valid_global_objs_def by wp lemma valid_asid_map_lift: - "\valid_asid_map\ f \\rv. valid_asid_map\" - by (wpsimp simp: valid_asid_map_def) + "f \valid_asid_map\" + by (wp valid_asid_map_lift_strong arch pred_asid_pools_of_lift) lemma valid_kernel_mappings_lift: "\valid_kernel_mappings\ f \\rv. valid_kernel_mappings\" @@ -953,20 +956,20 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_ntfn_update: "\s ep val. typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -974,7 +977,7 @@ lemma state_hyp_refs_of_tcb_bound_ntfn_update: lemma state_hyp_refs_of_tcb_state_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -998,12 +1001,12 @@ lemma default_tcb_not_live[simp]: "\ live (TCB default_tcb)" lemma valid_vcpu_same_type: "\ valid_vcpu v s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_vcpu v (s\kheap := kheap s(p \ k)\)" + \ valid_vcpu v (s\kheap := (kheap s)(p \ k)\)" by (cases v; case_tac vcpu_tcb; clarsimp simp: valid_vcpu_def typ_at_same_type) lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) @@ -1024,14 +1027,14 @@ lemma valid_arch_mdb_lift: (* interface lemma *) lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ 
arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" apply (cases ao; simp) apply (fastforce simp: valid_vcpu_def obj_at_def split: option.splits) done lemma valid_vspace_obj_same_type: "\valid_vspace_obj l ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj l ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj l ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) @@ -1039,5 +1042,9 @@ lemma invs_valid_uses[elim!]: "invs s \ valid_uses s" by (simp add: invs_def valid_state_def valid_arch_state_def) +crunches set_object + for vmid_table[wp]: "\s. P (vmid_table s)" + (simp: get_object_def) + end end diff --git a/proof/invariant-abstract/AARCH64/ArchKernelInit_AI.thy b/proof/invariant-abstract/AARCH64/ArchKernelInit_AI.thy index 2edbd30d28..5b0a5e6c44 100644 --- a/proof/invariant-abstract/AARCH64/ArchKernelInit_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchKernelInit_AI.thy @@ -216,7 +216,7 @@ proof - ultimately show ?thesis unfolding valid_uses_2_def init_vspace_uses_def window_defs - by auto + by (auto simp: kernel_window_range_def) qed lemma valid_global_arch_objs_init_A_st[simp]: @@ -368,7 +368,7 @@ lemma invs_A: apply (clarsimp simp: valid_asid_table_def state_defs) apply (simp add: valid_arch_state_def state_defs obj_at_def a_type_def cur_vcpu_2_def vmid_inv_def is_inv_def vmid_for_asid_2_def obind_def - valid_global_tables_2_def empty_pt_def) + valid_global_tables_2_def empty_pt_def valid_vmid_table_def) apply (rule conjI) apply (clarsimp simp: valid_irq_node_def obj_at_def state_defs is_cap_table_def wf_empty_bits @@ -390,9 +390,12 @@ lemma invs_A: apply (clarsimp simp: valid_arch_caps_def valid_asid_pool_caps_def unique_table_caps_def caps_of_state_init_A_st_Null valid_table_caps_def unique_table_refs_def) apply (clarsimp simp: state_defs) - apply (clarsimp simp: valid_global_objs_def valid_kernel_mappings_def valid_asid_map_def) + apply (clarsimp simp: valid_global_objs_def valid_kernel_mappings_def) apply (rule conjI) apply (clarsimp simp: equal_kernel_mappings_def) + apply (rule conjI) + apply (clarsimp simp: valid_asid_map_def entry_for_asid_def init_A_st_def init_arch_state_def + obind_def pool_for_asid_def) apply (simp add: pspace_in_kernel_window_init_A_st cap_refs_in_kernel_window_def caps_of_state_init_A_st_Null valid_refs_def[unfolded cte_wp_at_caps_of_state]) done diff --git a/proof/invariant-abstract/AARCH64/ArchRetype_AI.thy b/proof/invariant-abstract/AARCH64/ArchRetype_AI.thy index f1ec835624..12791169bb 100644 --- a/proof/invariant-abstract/AARCH64/ArchRetype_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchRetype_AI.thy @@ -32,14 +32,14 @@ lemma slot_bits_def2 [Retype_AI_assms]: "slot_bits = cte_level_bits" definition "no_gs_types \ UNIV - {CapTableObject, - ArchObject SmallPageObj, ArchObject LargePageObj, ArchObject HugePageObj}" + ArchObject SmallPageObj, ArchObject LargePageObj, ArchObject HugePageObj, + ArchObject PageTableObj, ArchObject VSpaceObj}" lemma no_gs_types_simps [simp, Retype_AI_assms]: "Untyped \ no_gs_types" "TCBObject \ no_gs_types" "EndpointObject \ no_gs_types" "NotificationObject \ no_gs_types" - "ArchObject PageTableObj \ no_gs_types" "ArchObject ASIDPoolObj \ no_gs_types" by (simp_all add: no_gs_types_def) @@ -309,6 +309,14 @@ lemma asid_pools: by (clarsimp simp: in_opt_map_eq s'_def ps_def) (erule pspace_no_overlapC [OF orth _ _ cover vp]) +lemma asid_pools_of': + "asid_pools_of s' p = Some ap 
\ + asid_pools_of s p = Some ap \ ap = Map.empty \ p \ set (retype_addrs ptr ty n us)" + apply (clarsimp simp: in_opt_map_eq s'_def ps_def split: if_split_asm) + apply (auto simp: default_object_def default_arch_object_def empty_pt_def tyunt + split: apiobject_type.splits aobject_type.splits) + done + lemma pts_of: "pts_of s p = Some pt \ pts_of s' p = Some pt" by (clarsimp simp: in_opt_map_eq s'_def ps_def) @@ -716,7 +724,10 @@ lemma valid_kernel_mappings: lemma valid_asid_map: "valid_asid_map s \ valid_asid_map s'" - by (clarsimp simp: valid_asid_map_def) + apply (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq pool_for_asid_def + entry_for_pool_def) + apply (fastforce dest!: asid_pools_of') + done lemma vspace_for_asid: "vspace_for_asid asid s' = Some pt \ vspace_for_asid asid s = Some pt" diff --git a/proof/invariant-abstract/AARCH64/ArchTcb_AI.thy b/proof/invariant-abstract/AARCH64/ArchTcb_AI.thy index 98b24a3eff..aa1043de05 100644 --- a/proof/invariant-abstract/AARCH64/ArchTcb_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchTcb_AI.thy @@ -258,7 +258,7 @@ lemma tc_invs[Tcb_AI_asms]: checked_insert_no_cap_to out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid - static_imp_wp static_imp_conj_wp)[1] + hoare_weak_lift_imp hoare_weak_lift_imp_conj)[1] | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def del: hoare_True_E_R diff --git a/proof/invariant-abstract/AARCH64/ArchVCPU_AI.thy b/proof/invariant-abstract/AARCH64/ArchVCPU_AI.thy index 343c4165c3..d64c744f1c 100644 --- a/proof/invariant-abstract/AARCH64/ArchVCPU_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchVCPU_AI.thy @@ -76,7 +76,7 @@ crunches do_machine_op (wp: valid_cur_vcpu_lift_cur_thread_update valid_cur_vcpu_lift crunch_wps) lemma valid_cur_vcpu_vcpu_update[simp]: - "vcpu_at v s \ valid_cur_vcpu (s\kheap := kheap s(v \ ArchObj (VCPU vcpu))\) = valid_cur_vcpu s" + "vcpu_at v s \ valid_cur_vcpu (s\kheap := (kheap s)(v \ ArchObj (VCPU vcpu))\) = valid_cur_vcpu s" by (clarsimp simp: valid_cur_vcpu_def active_cur_vcpu_of_def pred_tcb_at_def obj_at_def) crunches vcpu_save_reg, vcpu_write_reg, save_virt_timer, vgic_update, vcpu_disable @@ -252,7 +252,7 @@ lemma schedule_valid_cur_vcpu[wp]: (schedule :: (unit, unit) s_monad) \\_. valid_cur_vcpu\" unfolding schedule_def allActiveTCBs_def - by (wpsimp wp: alternative_wp select_wp) + by wpsimp crunches cancel_all_ipc, blocked_cancel_ipc, unbind_maybe_notification, cancel_all_signals, bind_notification, fast_finalise, deleted_irq_handler, post_cap_deletion, cap_delete_one, @@ -262,7 +262,7 @@ crunches cancel_all_ipc, blocked_cancel_ipc, unbind_maybe_notification, cancel_a restart, reschedule_required, possible_switch_to, thread_set_priority, reply_from_kernel for arch_state[wp]: "\s. P (arch_state s)" and cur_thread[wp]: "\s. 
P (cur_thread s)" - (wp: mapM_x_wp_inv thread_set.arch_state select_wp crunch_wps + (wp: mapM_x_wp_inv thread_set.arch_state crunch_wps simp: crunch_simps possible_switch_to_def reschedule_required_def) lemma do_unbind_notification_arch_tcb_at[wp]: @@ -294,7 +294,7 @@ crunches blocked_cancel_ipc, cap_delete_one, cancel_signal lemma reply_cancel_ipc_arch_tcb_at[wp]: "reply_cancel_ipc ntfnptr \arch_tcb_at P t\" unfolding reply_cancel_ipc_def thread_set_def - apply (wpsimp wp: set_object_wp select_wp) + apply (wpsimp wp: set_object_wp) by (clarsimp simp: pred_tcb_at_def obj_at_def get_tcb_def) crunches cancel_ipc, send_ipc, receive_ipc @@ -373,7 +373,7 @@ crunches cap_insert, cap_move crunches suspend, unbind_notification, cap_swap_for_delete for state_hyp_refs_of[wp]: "\s. P (state_hyp_refs_of s)" - (wp: crunch_wps thread_set_hyp_refs_trivial select_wp simp: crunch_simps) + (wp: crunch_wps thread_set_hyp_refs_trivial simp: crunch_simps) lemma prepare_thread_delete_valid_cur_vcpu[wp]: "\\s. valid_cur_vcpu s \ sym_refs (state_hyp_refs_of s)\ diff --git a/proof/invariant-abstract/AARCH64/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/AARCH64/ArchVSpaceEntries_AI.thy index 9fdb9ab43e..862bfb4c09 100644 --- a/proof/invariant-abstract/AARCH64/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchVSpaceEntries_AI.thy @@ -99,7 +99,7 @@ crunch valid_vspace_objs'[wp]: set_simple_ko "valid_vspace_objs'" (wp: crunch_wps) crunch valid_vspace_objs'[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_vspace_objs'" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma preemption_point_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ preemption_point \\rv. 
valid_vspace_objs'\" @@ -213,7 +213,7 @@ lemma perform_asid_pool_invocation_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_vspace_objs'" (ignore: delete_objects set_object - wp: static_imp_wp select_wp crunch_wps + wp: hoare_weak_lift_imp crunch_wps simp: crunch_simps unless_def) lemma perform_page_valid_vspace_objs'[wp]: @@ -226,11 +226,12 @@ lemma perform_page_valid_vspace_objs'[wp]: safe intro!: hoare_gen_asm hoare_gen_asm[unfolded K_def], simp_all add: mapM_x_Nil mapM_x_Cons mapM_x_map) apply (wp store_pte_valid_vspace_objs' hoare_vcg_imp_lift[OF set_cap_arch_obj_neg] - hoare_vcg_all_lift + hoare_vcg_all_lift hoare_vcg_const_imp_lift hoare_vcg_if_lift | clarsimp simp: cte_wp_at_weakenE[OF _ TrueI] obj_at_def swp_def valid_page_inv_def valid_slots_def perform_pg_inv_map_def perform_pg_inv_unmap_def perform_pg_inv_get_addr_def perform_flush_def split: pte.splits + split del: if_split | rule conjI | wpc | wp (once) hoare_drop_imps)+ @@ -277,7 +278,7 @@ lemma handle_invocation_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: activate_thread,switch_to_thread, handle_hypervisor_fault, switch_to_idle_thread, handle_call, handle_recv, handle_reply, handle_send, handle_yield, handle_interrupt "valid_vspace_objs'" - (simp: crunch_simps wp: crunch_wps alternative_wp select_wp OR_choice_weak_wp select_ext_weak_wp + (simp: crunch_simps wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp ignore: without_preemption getActiveIRQ resetTimer ackInterrupt OR_choice set_scheduler_action) @@ -288,7 +289,7 @@ lemma handle_event_valid_vspace_objs'[wp]: lemma schedule_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ schedule :: (unit,unit) s_monad \\_. valid_vspace_objs'\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp alternative_wp select_wp) + apply wp apply simp done diff --git a/proof/invariant-abstract/AARCH64/ArchVSpace_AI.thy b/proof/invariant-abstract/AARCH64/ArchVSpace_AI.thy index c426986294..76d2dad7f1 100644 --- a/proof/invariant-abstract/AARCH64/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/AARCH64/ArchVSpace_AI.thy @@ -13,6 +13,14 @@ theory ArchVSpace_AI imports VSpacePre_AI begin +context Arch_p_asid_table_update_eq begin (* FIXME AARCh64: move to ArchInvariants_AI *) + +lemma valid_asid_map_upd[simp]: + "valid_asid_map (f s) = valid_asid_map s" + by (simp add: valid_asid_map_def) + +end + context Arch begin global_naming AARCH64 sublocale @@ -182,7 +190,7 @@ lemma asid_high_bits_shl: lemma valid_asid_map_unmap: "valid_asid_map s \ is_aligned base asid_low_bits \ valid_asid_map(s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid_high_bits_of base := None)\\)" - by (clarsimp simp: valid_asid_map_def) + by (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq pool_for_asid_def) lemma asid_low_bits_word_bits: "asid_low_bits < word_bits" @@ -242,14 +250,14 @@ crunches vgic_update_lr, vcpu_write_reg, vcpu_save_reg, vcpu_disable, vcpu_resto (ignore: vcpu_update simp: vcpu_update_def valid_vcpu_def wp: crunch_wps) lemma set_vcpu_wp: - "\\s. vcpu_at p s \ Q (s\kheap := kheap s(p \ (ArchObj (VCPU vcpu))) \) \ set_vcpu p vcpu \\_. Q\" + "\\s. vcpu_at p s \ Q (s\kheap := (kheap s)(p \ (ArchObj (VCPU vcpu))) \) \ set_vcpu p vcpu \\_. Q\" unfolding set_vcpu_def apply (wp set_object_wp_strong) apply (clarsimp simp: obj_at_def split: kernel_object.splits arch_kernel_obj.splits) done lemma set_vcpu_vcpus_of[wp]: - "\\s. 
vcpus_of s p \ None \ P (vcpus_of s (p \ vcpu)) \ set_vcpu p vcpu \\_ s. P (vcpus_of s)\" + "\\s. vcpus_of s p \ None \ P ((vcpus_of s)(p \ vcpu)) \ set_vcpu p vcpu \\_ s. P (vcpus_of s)\" by (wp set_vcpu_wp) (clarsimp simp: in_omonad obj_at_def) lemma get_vcpu_wp: @@ -672,8 +680,8 @@ lemma vmid_for_asid_upd_eq: \ (\asid'. vmid_for_asid_2 asid' (asid_table s) - (asid_pools_of s(pool_ptr \ ap(asid_low_bits_of asid \ - ASIDPoolVSpace vmid vsp)))) + ((asid_pools_of s)(pool_ptr \ ap(asid_low_bits_of asid \ + ASIDPoolVSpace vmid vsp)))) = (vmid_for_asid s) (asid := vmid)" apply (rule ext) apply (clarsimp simp: vmid_for_asid_2_def entry_for_pool_def pool_for_asid_def obind_def @@ -701,15 +709,21 @@ lemma find_free_vmid_vmid_inv[wp]: dest: inj_on_domD) done +lemma invalidate_vmid_entry_valid_vmid_table[wp]: + "invalidate_vmid_entry vmid \valid_vmid_table\" + unfolding invalidate_vmid_entry_def + by (wpsimp simp: valid_vmid_table_def) + crunches find_free_vmid for valid_global_tables[wp]: "valid_global_tables" + and valid_vmid_table[wp]: valid_vmid_table lemma find_free_vmid_valid_arch [wp]: "find_free_vmid \valid_arch_state\" unfolding valid_arch_state_def by wpsimp lemma entry_for_asid_Some_vmidD: - "entry_for_asid asid s = Some entry \ ap_vmid entry = vmid_for_asid s asid \ 0 < asid" + "entry_for_asid asid s = Some entry \ ap_vmid entry = vmid_for_asid s asid" unfolding entry_for_asid_def vmid_for_asid_def entry_for_pool_def pool_for_asid_def by (auto simp: obind_def opt_map_def if_option split: option.splits) @@ -826,6 +840,26 @@ lemma update_asid_pool_entry_asid_pools[wp]: supply fun_upd_apply[simp del] by wpsimp +lemma valid_vmid_table_None_upd: + "valid_vmid_table_2 table \ valid_vmid_table_2 (table(vmid := None))" + by (simp add: valid_vmid_table_2_def) + +lemma valid_vmid_table_Some_upd: + "\ valid_vmid_table_2 table; asid \ 0 \ \ valid_vmid_table_2 (table (vmid \ asid))" + by (simp add: valid_vmid_table_2_def) + +crunches update_asid_pool_entry, set_asid_pool + for pool_for_asid[wp]: "\s. 
P (pool_for_asid as s)" + (simp: pool_for_asid_def) + +lemma update_asid_pool_entry_valid_asid_map[wp]: + "update_asid_pool_entry f asid \valid_asid_map\" + unfolding valid_asid_map_def entry_for_asid_def + apply (clarsimp simp: obind_None_eq) + apply (wpsimp wp: hoare_vcg_disj_lift hoare_vcg_ex_lift) + apply (clarsimp simp: pool_for_asid_def entry_for_pool_def obind_None_eq split: if_split_asm) + done + lemma invalidate_asid_entry_invs[wp]: "invalidate_asid_entry asid \invs\" unfolding invalidate_asid_entry_def invalidate_asid_def invalidate_vmid_entry_def invs_def @@ -833,40 +867,44 @@ lemma invalidate_asid_entry_invs[wp]: supply fun_upd_apply[simp del] apply (wpsimp wp: load_vmid_wp valid_irq_handlers_lift valid_irq_node_typ valid_irq_states_triv valid_arch_caps_lift pspace_in_kernel_window_atyp_lift_strong - simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_asid_map_def + simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_global_vspace_mappings_def | wps)+ apply (clarsimp simp: valid_irq_node_def valid_global_refs_def global_refs_def valid_arch_state_def valid_global_objs_def valid_global_arch_objs_def valid_machine_state_def - valid_vspace_objs_def vmid_for_asid_upd_eq comp_upd_simp is_inv_None_upd) + valid_vspace_objs_def vmid_for_asid_upd_eq comp_upd_simp is_inv_None_upd + valid_vmid_table_None_upd) done +crunches find_free_vmid, store_vmid + for valid_asid_map[wp]: valid_asid_map + lemma find_free_vmid_invs[wp]: "find_free_vmid \invs\" unfolding invs_def valid_state_def valid_pspace_def by (wpsimp wp: load_vmid_wp valid_irq_handlers_lift valid_irq_node_typ valid_arch_caps_lift pspace_in_kernel_window_atyp_lift_strong - simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_asid_map_def + simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_global_vspace_mappings_def) lemma store_hw_asid_valid_arch[wp]: - "\valid_arch_state and (\s. asid_map s asid = None \ arm_vmid_table (arch_state s) vmid = None)\ + "\valid_arch_state and (\s. asid_map s asid = None \ arm_vmid_table (arch_state s) vmid = None \ asid \ 0)\ store_vmid asid vmid \\_. valid_arch_state\" unfolding store_vmid_def valid_arch_state_def vmid_inv_def supply fun_upd_apply[simp del] apply (wpsimp simp: valid_global_arch_objs_upd_eq_lift | wps)+ - apply (fastforce simp: vmid_for_asid_upd_eq elim: is_inv_Some_upd) + apply (fastforce simp: vmid_for_asid_upd_eq elim: is_inv_Some_upd intro: valid_vmid_table_Some_upd) done lemma store_vmid_invs[wp]: - "\invs and (\s. asid_map s asid = None \ arm_vmid_table (arch_state s) vmid = None)\ + "\invs and (\s. asid_map s asid = None \ arm_vmid_table (arch_state s) vmid = None \ asid \ 0)\ store_vmid asid vmid \\_. invs\" unfolding invs_def valid_state_def valid_pspace_def by (wpsimp wp: valid_irq_node_typ valid_irq_handlers_lift valid_arch_caps_lift pspace_in_kernel_window_atyp_lift_strong - simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_asid_map_def + simp: valid_kernel_mappings_def equal_kernel_mappings_def valid_global_vspace_mappings_def) lemma invalidate_vmid_entry_None[wp]: @@ -902,12 +940,12 @@ lemma find_free_vmid_None_asid_map[wp]: by wpsimp lemma get_hw_asid_valid_arch[wp]: - "get_vmid asid \valid_arch_state\" + "\valid_arch_state and K (asid \ 0)\ get_vmid asid \\_. valid_arch_state\" unfolding get_vmid_def by wpsimp lemma get_hw_asid_invs[wp]: - "get_vmid asid \invs\" + "\invs and K (asid \ 0)\ get_vmid asid \\_. 
invs\" unfolding get_vmid_def by (wpsimp wp: store_vmid_invs load_vmid_wp simp: opt_map_def) @@ -924,7 +962,7 @@ crunches invalidate_tlb_by_asid, invalidate_tlb_by_asid_va (ignore: do_machine_op) lemma arm_context_switch_invs [wp]: - "arm_context_switch pt asid \invs\" + "\invs and K (asid \ 0)\ arm_context_switch pt asid \\_. invs\" unfolding arm_context_switch_def by wpsimp crunches set_vm_root @@ -936,6 +974,10 @@ lemma set_global_user_vspace_invs[wp]: unfolding set_global_user_vspace_def by wpsimp +lemma vspace_for_asid_0_None[simp]: + "vspace_for_asid 0 s = None" + by (simp add: vspace_for_asid_def entry_for_asid_def) + lemma set_vm_root_invs[wp]: "set_vm_root t \invs\" unfolding set_vm_root_def @@ -2046,7 +2088,7 @@ lemma perform_pg_inv_map_invs[wp]: unfolding perform_pg_inv_map_def supply if_split[split del] apply (wpsimp wp: store_pte_invs arch_update_cap_invs_map hoare_vcg_all_lift hoare_vcg_imp_lift' - invalidate_tlb_by_asid_va_invs + invalidate_tlb_by_asid_va_invs dmo_invs_lift | strengthen if_pair_imp_strengthen)+ apply (clarsimp simp: valid_page_inv_def cte_wp_at_caps_of_state is_arch_update_def is_cap_simps cap_master_cap_simps parent_for_refs_def valid_slots_def same_ref_def) @@ -2105,7 +2147,7 @@ end locale asid_pool_map = Arch + fixes s ap pool asid ptp pt and s' :: "'a::state_ext state" - defines "s' \ s\kheap := kheap s(ap \ ArchObj (ASIDPool (pool(asid_low_bits_of asid \ ptp))))\" + defines "s' \ s\kheap := (kheap s)(ap \ ArchObj (ASIDPool (pool(asid_low_bits_of asid \ ptp))))\" assumes ap: "asid_pools_of s ap = Some pool" assumes new: "pool (asid_low_bits_of asid) = None" assumes pt: "pts_of s (ap_vspace ptp) = Some pt" @@ -2344,7 +2386,7 @@ lemma vmid_for_asid_map_None: "\ asid_pools_of s ap = Some pool; pool_for_asid asid s = Some ap; pool (asid_low_bits_of asid) = None; ap_vmid ape = None \ \ (\asid'. vmid_for_asid_2 asid' (asid_table s) - (asid_pools_of s(ap \ pool(asid_low_bits_of asid \ ape)))) = + ((asid_pools_of s)(ap \ pool(asid_low_bits_of asid \ ape)))) = vmid_for_asid s" unfolding vmid_for_asid_def apply (rule ext) @@ -2375,6 +2417,20 @@ lemma set_asid_pool_valid_arch_state: unfolding valid_arch_state_def by (wpsimp wp: set_asid_pool_vmid_inv|wps)+ +lemma set_asid_pool_invs_valid_asid_map[wp]: + "\valid_asid_map and valid_asid_table and + (\s. asid_pools_of s ap = Some pool \ pool_for_asid asid s = Some ap \ asid \ 0)\ + set_asid_pool ap (pool(asid_low_bits_of asid \ ape)) + \\_. valid_asid_map\" + unfolding valid_asid_map_def entry_for_asid_def + apply (clarsimp simp: obind_None_eq) + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift) + apply (fastforce simp: asid_high_low_inj pool_for_asid_def valid_asid_table_def entry_for_pool_def + obind_None_eq + dest: inj_on_domD + split: if_split_asm) + done + lemma set_asid_pool_invs_map: "\invs and (\s. asid_pools_of s ap = Some pool \ pool_for_asid asid s = Some ap \ @@ -2384,10 +2440,11 @@ lemma set_asid_pool_invs_map: and K (pool (asid_low_bits_of asid) = None \ 0 < asid \ ap_vmid ape = None)\ set_asid_pool ap (pool(asid_low_bits_of asid \ ape)) \\rv. 
invs\" - apply (simp add: invs_def valid_state_def valid_pspace_def valid_asid_map_def) + apply (simp add: invs_def valid_state_def valid_pspace_def) apply (wpsimp wp: valid_irq_node_typ set_asid_pool_typ_at set_asid_pool_arch_objs_map valid_irq_handlers_lift set_asid_pool_valid_arch_caps_map set_asid_pool_valid_arch_state) + apply (clarsimp simp: valid_arch_state_def) done lemma ako_asid_pools_of: @@ -2708,12 +2765,12 @@ lemma set_vcpu_sym_refs[wp]: apply (clarsimp simp: obj_at_def) done -lemma state_hyp_refs_of_simp_neq: "\ a \ p \ \ state_hyp_refs_of (s\kheap := kheap s(p \ v) \) a = state_hyp_refs_of s a " +lemma state_hyp_refs_of_simp_neq: "\ a \ p \ \ state_hyp_refs_of (s\kheap := (kheap s)(p \ v) \) a = state_hyp_refs_of s a " by (simp add: state_hyp_refs_of_def) lemma state_hyp_refs_of_simp_eq: "obj_at (\ko'. hyp_refs_of ko' = hyp_refs_of v) p s - \ state_hyp_refs_of (s\kheap := kheap s(p \ v) \) p = state_hyp_refs_of s p" + \ state_hyp_refs_of (s\kheap := (kheap s)(p \ v) \) p = state_hyp_refs_of s p" by (clarsimp simp: state_hyp_refs_of_def obj_at_def) lemma set_object_vcpu_sym_refs_hyp: @@ -2752,11 +2809,11 @@ lemma set_vcpu_valid_pspace: done lemma vmid_inv_set_vcpu: - "vcpu_at p s \ vmid_inv (s\kheap := kheap s(p \ ArchObj (VCPU v))\) = vmid_inv s" + "vcpu_at p s \ vmid_inv (s\kheap := (kheap s)(p \ ArchObj (VCPU v))\) = vmid_inv s" by (simp add: vmid_inv_def asid_pools_of_vcpu_None_upd_idem) lemma pt_at_eq_set_vcpu: - "vcpu_at p s \ pt_at pt_t p' (s\kheap := kheap s(p \ ArchObj (VCPU v))\) = pt_at pt_t p' s" + "vcpu_at p s \ pt_at pt_t p' (s\kheap := (kheap s)(p \ ArchObj (VCPU v))\) = pt_at pt_t p' s" by (auto simp add: obj_at_def) lemma set_vcpu_valid_arch_eq_hyp: @@ -2973,7 +3030,7 @@ crunches save_virt_timer, vcpu_disable, vcpu_invalidate_active, vcpu_restore, vc lemma obj_at_hyp_live_vcpu_regs: "vcpus_of s vcpu_ptr = Some v \ - obj_at hyp_live p (s\kheap := kheap s(vcpu_ptr \ ArchObj (VCPU (v\vcpu_regs := x\)))\) = + obj_at hyp_live p (s\kheap := (kheap s)(vcpu_ptr \ ArchObj (VCPU (v\vcpu_regs := x\)))\) = obj_at hyp_live p s" by (clarsimp simp: in_omonad obj_at_def) diff --git a/proof/invariant-abstract/AARCH64/Machine_AI.thy b/proof/invariant-abstract/AARCH64/Machine_AI.thy index b34a001a7c..e6c2328ff7 100644 --- a/proof/invariant-abstract/AARCH64/Machine_AI.thy +++ b/proof/invariant-abstract/AARCH64/Machine_AI.thy @@ -19,7 +19,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. no_irq m" wpc_helper_no_irq @@ -58,7 +58,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -89,7 +89,7 @@ text \Failure on empty result\ crunches loadWord, storeWord, machine_op_lift, clearMemory for (empty_fail) empty_fail[intro!, wp, simp] - (ignore: NonDetMonad.bind mapM_x simp: machine_op_lift_def empty_fail_cond) + (ignore: Nondet_Monad.bind mapM_x simp: machine_op_lift_def empty_fail_cond) lemmas ef_machine_op_lift = machine_op_lift_empty_fail \ \required for generic interface\ @@ -100,7 +100,7 @@ definition "irq_state_independent P \ \f s. 
P s \ lemma getActiveIRQ_inv[wp]: "\irq_state_independent P\ \ getActiveIRQ in_kernel \P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -352,7 +352,7 @@ crunches and device_state_inv[wp]: "\ms. P (device_state ms)" and irq_masks[wp]: "\s. P (irq_masks s)" and underlying_memory_inv[wp]: "\s. P (underlying_memory s)" - (wp: no_irq_bind ignore: empty_fail NonDetMonad.bind) + (wp: no_irq_bind ignore: empty_fail Nondet_Monad.bind) crunches getFPUState, getRegister, getRestartPC, setNextPC, ackInterrupt, maskInterrupt for (no_fail) no_fail[intro!, wp, simp] @@ -392,7 +392,7 @@ lemma getActiveIRQ_le_maxIRQ': getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wpsimp wp: alternative_wp select_wp) + apply wpsimp apply (rule ccontr) apply (simp add: linorder_not_le) done diff --git a/proof/invariant-abstract/AInvs.thy b/proof/invariant-abstract/AInvs.thy index 99bee53f95..ba1379f642 100644 --- a/proof/invariant-abstract/AInvs.thy +++ b/proof/invariant-abstract/AInvs.thy @@ -14,7 +14,7 @@ begin lemma st_tcb_at_nostate_upd: "\ get_tcb t s = Some y; tcb_state y = tcb_state y' \ \ - st_tcb_at P t' (s \kheap := kheap s(t \ TCB y')\) = st_tcb_at P t' s" + st_tcb_at P t' (s \kheap := (kheap s)(t \ TCB y')\) = st_tcb_at P t' s" by (clarsimp simp add: pred_tcb_at_def obj_at_def dest!: get_tcb_SomeD) lemma pred_tcb_at_upd_apply: @@ -72,8 +72,8 @@ lemma kernel_entry_invs: (kernel_entry e us) :: (user_context, unit) s_monad \\_ s. invs s \ (ct_running s \ ct_idle s)\" apply (simp add: kernel_entry_def) - apply (wp akernel_invs thread_set_invs_trivial thread_set_ct_in_state select_wp - static_imp_wp hoare_vcg_disj_lift hoare_vcg_imp_lift' + apply (wp akernel_invs thread_set_invs_trivial thread_set_ct_in_state + hoare_weak_lift_imp hoare_vcg_disj_lift hoare_vcg_imp_lift' | clarsimp simp add: tcb_cap_cases_def)+ done @@ -119,7 +119,7 @@ lemma do_user_op_invs: \\_. invs and ct_running\" apply (simp add: do_user_op_def split_def) apply (wp device_update_invs) - apply (wp select_wp dmo_invs | simp add:dom_restrict_plus_eq)+ + apply (wp dmo_invs | simp add:dom_restrict_plus_eq)+ apply (clarsimp simp: user_memory_update_def simpler_modify_def restrict_map_def invs_def cur_tcb_def split: option.splits if_split_asm) diff --git a/proof/invariant-abstract/ARM/ArchAcc_AI.thy b/proof/invariant-abstract/ARM/ArchAcc_AI.thy index 55095465fb..091b63b4bb 100644 --- a/proof/invariant-abstract/ARM/ArchAcc_AI.thy +++ b/proof/invariant-abstract/ARM/ArchAcc_AI.thy @@ -1143,7 +1143,7 @@ lemma valid_objs_caps: lemma simpler_set_pt_def: "set_pt p pt = (\s. if \pt. kheap s p = Some (ArchObj (PageTable pt)) then - ({((), s\kheap := kheap s(p \ ArchObj (PageTable pt))\)}, False) + ({((), s\kheap := (kheap s)(p \ ArchObj (PageTable pt))\)}, False) else ({}, True))" apply (rule ext) apply (clarsimp simp: set_pt_def set_object_def get_object_def assert_def read_object_def assert_opt_def @@ -1161,7 +1161,7 @@ lemma simpler_set_pt_def: lemma valid_set_ptI: "(!!s opt. 
\P s; kheap s p = Some (ArchObj (PageTable opt))\ - \ Q () (s\kheap := kheap s(p \ ArchObj (PageTable pt))\)) + \ Q () (s\kheap := (kheap s)(p \ ArchObj (PageTable pt))\)) \ \P\ set_pt p pt \Q\" by (rule validI) (clarsimp simp: simpler_set_pt_def split: if_split_asm) @@ -1448,7 +1448,7 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1456,7 +1456,7 @@ lemma in_user_frame_same_type_upd: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1494,7 +1494,7 @@ lemma valid_machine_state_heap_updI: assumes vm : "valid_machine_state s" assumes tyat : "typ_at type p s" shows - " a_type obj = type \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + " a_type obj = type \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: valid_machine_state_def) subgoal for p apply (rule valid_machine_stateE[OF vm,where p = p]) @@ -1845,7 +1845,7 @@ lemma valid_pde_typ_at: lemma valid_vspace_obj_same_type: "\valid_vspace_obj ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) @@ -3190,7 +3190,7 @@ lemma cap_refs_respects_device_region_dmo: lemma machine_op_lift_device_state[wp]: "\\ms. P (device_state ms)\ machine_op_lift f \\_ ms. P (device_state ms)\" - by (clarsimp simp: machine_op_lift_def NonDetMonadVCG.valid_def bind_def + by (clarsimp simp: machine_op_lift_def Nondet_VCG.valid_def bind_def machine_rest_lift_def gets_def simpler_modify_def get_def return_def select_def ignore_failure_def select_f_def split: if_splits) diff --git a/proof/invariant-abstract/ARM/ArchArch_AI.thy b/proof/invariant-abstract/ARM/ArchArch_AI.thy index 1f377b226e..81d933f819 100644 --- a/proof/invariant-abstract/ARM/ArchArch_AI.thy +++ b/proof/invariant-abstract/ARM/ArchArch_AI.thy @@ -19,7 +19,7 @@ definition cte_wp_at (\cap. \idx. 
cap = cap.UntypedCap False frame pageBits idx ) parent s \ descendants_of parent (cdt s) = {} \ is_aligned base asid_low_bits \ base \ 2^asid_bits - 1 \ - arm_asid_table (arch_state s) (asid_high_bits_of base) = None" + asid_table s (asid_high_bits_of base) = None" lemma safe_parent_strg: @@ -270,8 +270,8 @@ end locale asid_update = Arch + fixes ap asid s s' assumes ko: "ko_at (ArchObj (ASIDPool Map.empty)) ap s" - assumes empty: "arm_asid_table (arch_state s) asid = None" - defines "s' \ s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\" + assumes empty: "asid_table s asid = None" + defines "s' \ s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\" begin lemma vs_lookup1' [simp]: @@ -286,7 +286,7 @@ lemma vs_lookup_pages1' [simp]: lemma vs_asid_refs' [simp]: "vs_asid_refs (arm_asid_table (arch_state s')) = - vs_asid_refs (arm_asid_table (arch_state s)) \ {([VSRef (ucast asid) None], ap)}" + vs_asid_refs (asid_table s) \ {([VSRef (ucast asid) None], ap)}" apply (simp add: s'_def) apply (rule set_eqI) apply (rule iffI) @@ -401,8 +401,8 @@ end context Arch begin global_naming ARM lemma valid_arch_state_strg: - "valid_arch_state s \ ap \ ran (arm_asid_table (arch_state s)) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + "valid_arch_state s \ ap \ ran (asid_table s) \ asid_pool_at ap s \ + valid_arch_state (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" apply (clarsimp simp: valid_arch_state_def) apply (clarsimp simp: valid_asid_table_def ran_def) apply (fastforce intro!: inj_on_fun_updI) @@ -412,11 +412,11 @@ lemma valid_arch_state_strg: lemma valid_vs_lookup_at_upd_strg: "valid_vs_lookup s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ - arm_asid_table (arch_state s) asid = None \ + asid_table s asid = None \ (\ptr cap. caps_of_state s ptr = Some cap \ ap \ obj_refs cap \ vs_cap_ref cap = Some [VSRef (ucast asid) None]) \ - valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -490,7 +490,7 @@ lemma valid_table_caps_asid_upd [iff]: lemma vs_asid_ref_upd: "([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') - (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\) + (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid_high_bits_of asid \ ap)\\) = (if asid_high_bits_of asid' = asid_high_bits_of asid then ap' = ap else ([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') s)" @@ -499,7 +499,7 @@ lemma vs_asid_ref_upd: lemma vs_asid_ref_eq: "([VSRef (ucast asid) None] \ ap) s - = (arm_asid_table (arch_state s) asid = Some ap)" + = (asid_table s asid = Some ap)" by (fastforce elim: vs_lookup_atE intro: vs_lookup_atI) @@ -511,12 +511,12 @@ lemma set_cap_reachable_pg_cap: lemma cap_insert_simple_arch_caps_ap: "\valid_arch_caps and (\s. cte_wp_at (safe_parent_for (cdt s) src cap) src s) and no_cap_to_obj_with_diff_ref cap {dest} - and (\s. arm_asid_table (arch_state s) (asid_high_bits_of asid) = None) + and (\s. asid_table s (asid_high_bits_of asid) = None) and ko_at (ArchObj (ASIDPool Map.empty)) ap and K (cap = ArchObjectCap (ASIDPoolCap ap asid)) \ cap_insert cap src dest \\rv s. 
valid_arch_caps (s\arch_state := arch_state s - \arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := (asid_table s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg) @@ -528,7 +528,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -548,8 +548,8 @@ lemma cap_insert_simple_arch_caps_ap: lemma valid_asid_map_asid_upd_strg: "valid_asid_map s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ - arm_asid_table (arch_state s) asid = None \ - valid_asid_map (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + asid_table s asid = None \ + valid_asid_map (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -561,8 +561,8 @@ lemma valid_asid_map_asid_upd_strg: lemma valid_vspace_objs_asid_upd_strg: "valid_vspace_objs s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ - arm_asid_table (arch_state s) asid = None \ - valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + asid_table s asid = None \ + valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -574,8 +574,8 @@ lemma valid_vspace_objs_asid_upd_strg: lemma valid_global_objs_asid_upd_strg: "valid_global_objs s \ ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap s \ - arm_asid_table (arch_state s) asid = None \ - valid_global_objs (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + asid_table s asid = None \ + valid_global_objs (s\arch_state := arch_state s\arm_asid_table := (asid_table s)(asid \ ap)\\)" by clarsimp @@ -592,11 +592,11 @@ lemma cap_insert_ap_invs: K (cap = cap.ArchObjectCap (arch_cap.ASIDPoolCap ap asid)) and (\s. \irq \ cap_irqs cap. irq_issued irq s) and ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap and - (\s. ap \ ran (arm_asid_table (arch_state s)) \ - arm_asid_table (arch_state s) (asid_high_bits_of asid) = None)\ + (\s. ap \ ran (asid_table s) \ + asid_table s (asid_high_bits_of asid) = None)\ cap_insert cap src dest \\rv s. invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := ((arm_asid_table \ arch_state) s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def) apply (strengthen valid_arch_state_strg valid_asid_map_asid_upd_strg valid_vspace_objs_asid_upd_strg ) @@ -748,17 +748,17 @@ proof - K (cap = ArchObjectCap (ASIDPoolCap ap asid)) and (\s. \irq\cap_irqs cap. irq_issued irq s) and ko_at (ArchObj (ASIDPool Map.empty)) ap and - (\s. ap \ ran (arm_asid_table (arch_state s)) \ - arm_asid_table (arch_state s) (asid_high_bits_of asid) = None))\ + (\s. ap \ ran (asid_table s) \ + asid_table s (asid_high_bits_of asid) = None))\ cap_insert cap src dest \\rv s. 
invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s + \arm_asid_table := ((arm_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\) \ Q (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s + \arm_asid_table := ((arm_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\)\" apply (wp cap_insert_ap_invs) apply simp @@ -951,7 +951,7 @@ lemma create_mapping_entries_inv [wp]: crunch_ignore (add: select_ext) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp simp: crunch_simps) lemma create_mappings_empty [wp]: @@ -1272,7 +1272,7 @@ lemma arch_decode_inv_wf[wp]: apply (rename_tac word1 word2) apply (simp add: arch_decode_invocation_def Let_def split_def cong: if_cong split del: if_split) apply (rule hoare_pre) - apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_wp select_ext_weak_wp| + apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_ext_weak_wp| wpc| simp add: valid_arch_inv_def valid_apinv_def)+)[1] apply (simp add: valid_arch_inv_def valid_apinv_def) @@ -1334,7 +1334,7 @@ lemma arch_decode_inv_wf[wp]: apply (simp add: asid_bits_def asid_low_bits_def) apply (simp add: asid_bits_def) apply (simp split del: if_split) - apply (wp ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp | wpc | simp)+ + apply (wp ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp | wpc | simp)+ apply clarsimp apply (rule conjI, fastforce) apply (cases excaps, simp) @@ -1392,7 +1392,7 @@ lemma arch_decode_inv_wf[wp]: apply (cases "isPageFlushLabel (invocation_type label)") apply (rule hoare_pre) apply simp - apply (wp whenE_throwError_wp static_imp_wp hoare_drop_imps) + apply (wp whenE_throwError_wp hoare_weak_lift_imp hoare_drop_imps) apply (simp add: valid_arch_inv_def valid_page_inv_def) apply (wp find_pd_for_asid_pd_at_asid | wpc)+ apply (clarsimp simp: valid_cap_def mask_def) @@ -1474,7 +1474,7 @@ lemma arch_decode_inv_wf[wp]: apply (cases "isPDFlushLabel (invocation_type label)") apply simp apply (rule hoare_pre) - apply (wpsimp wp: whenE_throwError_wp static_imp_wp hoare_drop_imp get_master_pte_wp + apply (wpsimp wp: whenE_throwError_wp hoare_weak_lift_imp hoare_drop_imp get_master_pte_wp get_master_pde_wp whenE_throwError_wp simp: resolve_vaddr_def valid_arch_inv_def valid_pdi_def Let_def) apply (rule_tac Q'="\pd' s. vspace_at_asid x2 pd' s \ x2 \ mask asid_bits \ x2 \ 0" diff --git a/proof/invariant-abstract/ARM/ArchCNodeInv_AI.thy b/proof/invariant-abstract/ARM/ArchCNodeInv_AI.thy index 72e76d2bf7..d5d6ea1f7a 100644 --- a/proof/invariant-abstract/ARM/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/ARM/ArchCNodeInv_AI.thy @@ -507,7 +507,7 @@ context Arch begin global_naming ARM lemma post_cap_delete_pre_is_final_cap': "\rv s'' rva s''a s. 
\valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def arch_cap_cleanup_opt_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -577,7 +577,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) diff --git a/proof/invariant-abstract/ARM/ArchCSpacePre_AI.thy b/proof/invariant-abstract/ARM/ArchCSpacePre_AI.thy index 6847e16f6a..6bba80ee84 100644 --- a/proof/invariant-abstract/ARM/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/ARM/ArchCSpacePre_AI.thy @@ -165,7 +165,7 @@ lemma valid_arch_mdb_simple: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by auto lemma valid_arch_mdb_free_index_update: @@ -189,28 +189,28 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by auto lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by auto lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by auto lemma valid_arch_mdb_weak_derived_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by auto lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -243,10 +243,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. 
valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by auto end diff --git a/proof/invariant-abstract/ARM/ArchCSpace_AI.thy b/proof/invariant-abstract/ARM/ArchCSpace_AI.thy index adebc0efc6..18e27dece6 100644 --- a/proof/invariant-abstract/ARM/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/ARM/ArchCSpace_AI.thy @@ -210,20 +210,20 @@ lemma is_derived_is_cap: (* FIXME: move to CSpace_I near lemma vs_lookup1_tcb_update *) lemma vs_lookup_pages1_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages1 s" + vs_lookup_pages1 (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) (* FIXME: move to CSpace_I near lemma vs_lookup_tcb_update *) lemma vs_lookup_pages_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages s" by (clarsimp simp add: vs_lookup_pages_def vs_lookup_pages1_tcb_update) (* FIXME: move to CSpace_I near lemma vs_lookup1_cnode_update *) lemma vs_lookup_pages1_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ CNode m cs')\) = + vs_lookup_pages1 (s\kheap := (kheap s)(p \ CNode m cs')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) @@ -231,7 +231,7 @@ lemma vs_lookup_pages1_cnode_update: (* FIXME: move to CSpace_I near lemma vs_lookup_cnode_update *) lemma vs_lookup_pages_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages (s\kheap := kheap s(p \ CNode n cs')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ CNode n cs')\) = vs_lookup_pages s" by (clarsimp simp: vs_lookup_pages_def dest!: vs_lookup_pages1_cnode_update[where m=n and cs'=cs']) diff --git a/proof/invariant-abstract/ARM/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/ARM/ArchDetSchedAux_AI.thy index fa592d22d4..ad033c1f71 100644 --- a/proof/invariant-abstract/ARM/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/ARM/ArchDetSchedAux_AI.thy @@ -177,7 +177,7 @@ lemma perform_asid_control_invocation_bound_sc_obj_tcb_at[wp]: crunches perform_asid_control_invocation for idle_thread[wp]: "\s. P (idle_thread s)" and valid_blocked[wp]: "valid_blocked" - (wp: static_imp_wp) + (wp: hoare_weak_lift_imp) crunches perform_asid_control_invocation for rqueues[wp]: "\s. P (ready_queues s)" diff --git a/proof/invariant-abstract/ARM/ArchFinalise_AI.thy b/proof/invariant-abstract/ARM/ArchFinalise_AI.thy index 02c651f8ff..d7c7440296 100644 --- a/proof/invariant-abstract/ARM/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/ARM/ArchFinalise_AI.thy @@ -680,7 +680,7 @@ interpretation Finalise_AI_2?: Finalise_AI_2 context Arch begin global_naming ARM crunch irq_node[wp]: arch_finalise_cap "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_node[wp,Finalise_AI_asms]: prepare_thread_delete "\s. 
P (interrupt_irq_node s)" @@ -1181,7 +1181,7 @@ lemma arch_finalise_case_no_lookup: | simp add: vs_cap_ref_simps vs_lookup_pages_eq_at[THEN fun_cong, symmetric] vs_lookup_pages_eq_ap[THEN fun_cong, symmetric])+ - apply (wp hoare_vcg_all_lift unmap_page_unmapped static_imp_wp) + apply (wp hoare_vcg_all_lift unmap_page_unmapped hoare_weak_lift_imp) apply (wpc|wp unmap_page_table_unmapped3 delete_asid_unmapped |simp add:vs_cap_ref_def vs_lookup_pages_eq_at[THEN fun_cong,symmetric] @@ -1408,7 +1408,7 @@ lemma set_asid_pool_obj_at_ptr: lemma valid_arch_state_table_strg: "valid_arch_state s \ asid_pool_at p s \ Some p \ arm_asid_table (arch_state s) ` (dom (arm_asid_table (arch_state s)) - {x}) \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\)" + valid_arch_state (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\)" apply (clarsimp simp: valid_arch_state_def valid_asid_table_def ran_def) apply (rule conjI, fastforce) apply (erule inj_on_fun_upd_strongerI) @@ -1441,8 +1441,8 @@ lemma vs_lookup1_arch [simp]: lemma vs_lookup_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (erule vs_lookupE) apply clarsimp @@ -1474,8 +1474,8 @@ lemma vs_lookup_empty_table: lemma vs_lookup_pages_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (subst (asm) vs_lookup_pages_def) apply (clarsimp simp: Image_def) @@ -1510,7 +1510,7 @@ lemma set_asid_pool_empty_table_objs: set_asid_pool p Map.empty \\rv s. valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of word2 \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of word2 \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vspace_objs_def @@ -1535,7 +1535,7 @@ lemma set_asid_pool_empty_table_lookup: set_asid_pool p Map.empty \\rv s. valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vs_lookup_def @@ -1557,7 +1557,7 @@ lemma set_asid_pool_empty_valid_asid_map: \ (\p'. \ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\rv s. valid_asid_map (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: valid_asid_map_def vspace_at_asid_def @@ -1589,7 +1589,7 @@ lemma set_asid_pool_invs_table: \ (\p'. \ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\x s. 
invs (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def valid_arch_caps_def) apply (rule hoare_pre) apply (wp valid_irq_node_typ set_asid_pool_typ_at diff --git a/proof/invariant-abstract/ARM/ArchIpc_AI.thy b/proof/invariant-abstract/ARM/ArchIpc_AI.thy index dbaffe5d0e..0cd9de0c6d 100644 --- a/proof/invariant-abstract/ARM/ArchIpc_AI.thy +++ b/proof/invariant-abstract/ARM/ArchIpc_AI.thy @@ -350,7 +350,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -459,7 +459,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (wpsimp simp: do_ipc_transfer_def do_normal_transfer_def transfer_caps_def bind_assoc wp: hoare_vcg_all_lift hoare_drop_imps)+ apply (subst ball_conj_distrib) - apply (wpsimp wp: get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wpsimp wp: get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift simp: obj_at_def is_tcb_def)+ done @@ -480,7 +480,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" by auto end diff --git a/proof/invariant-abstract/ARM/ArchKHeap_AI.thy b/proof/invariant-abstract/ARM/ArchKHeap_AI.thy index 91a80e1113..638090815c 100644 --- a/proof/invariant-abstract/ARM/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/ARM/ArchKHeap_AI.thy @@ -800,82 +800,86 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) -lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" +lemma state_hyp_refs_of_ep_update: + "typ_at AEndpoint ep s \ + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def ARM.hyp_refs_of_def) done -lemma state_hyp_refs_of_ntfn_update: "\s ep val. typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" +lemma state_hyp_refs_of_ntfn_update: + "typ_at ANTFN ep s \ + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def ARM.hyp_refs_of_def) done -lemma state_hyp_refs_of_sc_update: "\s sc val n. typ_at (ASchedContext n) sc s \ - state_hyp_refs_of (s\kheap := kheap s(sc \ SchedContext val n)\) = state_hyp_refs_of s" +lemma state_hyp_refs_of_sc_update: + "typ_at (ASchedContext n) sc s \ + state_hyp_refs_of (s\kheap := (kheap s)(sc \ SchedContext val n)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp: ARM.state_hyp_refs_of_def obj_at_def ARM.hyp_refs_of_def split: kernel_object.splits) done -lemma state_hyp_refs_of_reply_update: "\s r val. 
typ_at AReply r s \ - state_hyp_refs_of (s\kheap := kheap s(r \ Reply val)\) = state_hyp_refs_of s" +lemma state_hyp_refs_of_reply_update: + "typ_at AReply r s \ + state_hyp_refs_of (s\kheap := (kheap s)(r \ Reply val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def ARM.hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_sched_context_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_sched_context := sc\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_sched_context := sc\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_yield_to_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_yield_to := sc\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_yield_to := sc\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_state_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM.state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_domain_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_domain := d\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_domain := d\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_priority_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_priority := d\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_priority := d\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) done lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" by (induction ao rule: arch_kernel_obj.induct; clarsimp simp: typ_at_same_type) @@ -889,7 +893,7 @@ lemma default_tcb_not_live: "\ live (TCB (default_tcb d))" lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) 
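(* Aside, a minimal sketch rather than part of the patch above: the pervasive rewrite of
   "kheap s(p \<mapsto> k)" into "(kheap s)(p \<mapsto> k)" throughout these hunks appears to track
   newer Isabelle input syntax, where an update applied to a compound term such as "kheap s"
   must be parenthesised explicitly. The toy lemma below uses a hypothetical name and free
   variables s, p, k purely for illustration; it only shows the parenthesised map update
   behaving as expected and discharges by simp via fun_upd_same. *)
lemma kheap_fun_upd_sketch:
  "((kheap s)(p \<mapsto> k)) p = Some k"
  by simp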
lemma valid_ioports_lift: diff --git a/proof/invariant-abstract/ARM/ArchRetype_AI.thy b/proof/invariant-abstract/ARM/ArchRetype_AI.thy index 7e8b977b60..2f43538d88 100644 --- a/proof/invariant-abstract/ARM/ArchRetype_AI.thy +++ b/proof/invariant-abstract/ARM/ArchRetype_AI.thy @@ -448,7 +448,7 @@ lemma copy_global_invs_mappings_restricted: apply (simp add: valid_pspace_def pred_conj_def) apply (rule hoare_conjI, wp copy_global_equal_kernel_mappings_restricted) apply (clarsimp simp: global_refs_def) - apply (rule valid_prove_more, rule hoare_vcg_conj_lift, rule hoare_TrueI) + apply (rule hoare_post_add, rule hoare_vcg_conj_lift, rule hoare_TrueI) apply (simp add: copy_global_mappings_def valid_pspace_def) apply (rule hoare_seq_ext [OF _ gets_sp]) apply (rule hoare_strengthen_post) diff --git a/proof/invariant-abstract/ARM/ArchTcb_AI.thy b/proof/invariant-abstract/ARM/ArchTcb_AI.thy index dfcec291aa..2951bb2b43 100644 --- a/proof/invariant-abstract/ARM/ArchTcb_AI.thy +++ b/proof/invariant-abstract/ARM/ArchTcb_AI.thy @@ -308,13 +308,13 @@ lemma install_tcb_frame_cap_invs: \ \non-exception case\ apply wpsimp apply (wpsimp wp: checked_insert_tcb_invs[where ref="tcb_cnode_index 2"]) - apply (wpsimp wp: hoare_vcg_all_lift static_imp_wp + apply (wpsimp wp: hoare_vcg_all_lift hoare_weak_lift_imp thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial[where Q="\x. x", OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid) apply((wpsimp wp: cap_delete_deletes hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R hoare_vcg_all_lift - static_imp_wp static_imp_conj_wp + hoare_weak_lift_imp hoare_weak_lift_imp_conj | strengthen use_no_cap_to_obj_asid_strg | wp cap_delete_ep)+)[1] by (clarsimp simp: is_cap_simps' valid_fault_handler_def) diff --git a/proof/invariant-abstract/ARM/ArchUntyped_AI.thy b/proof/invariant-abstract/ARM/ArchUntyped_AI.thy index 6f54c527e4..7f13cead19 100644 --- a/proof/invariant-abstract/ARM/ArchUntyped_AI.thy +++ b/proof/invariant-abstract/ARM/ArchUntyped_AI.thy @@ -434,7 +434,7 @@ proof - Some (ArchObj (PageDirectory pd))" let ?ko' = "ArchObj (PageDirectory (pd(ucast (pde_ptr && mask pd_bits >> 2) := pde)))" - let ?s' = "s\kheap := kheap s(pde_ptr && ~~ mask pd_bits \ ?ko')\" + let ?s' = "s\kheap := (kheap s)(pde_ptr && ~~ mask pd_bits \ ?ko')\" have typ_at: "\T p. typ_at T p s \ typ_at T p ?s'" using pd by (clarsimp simp: obj_at_def a_type_def) diff --git a/proof/invariant-abstract/ARM/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/ARM/ArchVSpaceEntries_AI.thy index 5000e4b9ed..c9214c58e4 100644 --- a/proof/invariant-abstract/ARM/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/ARM/ArchVSpaceEntries_AI.thy @@ -463,7 +463,7 @@ crunches crunches delete_asid_pool, arch_finalise_cap for valid_pdpt_objs[wp]: "valid_pdpt_objs" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma finalise_cap_valid_pdpt_objs[wp]: "\valid_pdpt_objs\ finalise_cap c b \\rv. 
valid_pdpt_objs\" @@ -771,10 +771,10 @@ lemma invoke_untyped_valid_pdpt[wp]: done crunch valid_pdpt_objs[wp]: perform_asid_control_invocation "valid_pdpt_objs" - (ignore: delete_objects set_object wp: delete_objects_valid_pdpt static_imp_wp) + (ignore: delete_objects set_object wp: delete_objects_valid_pdpt hoare_weak_lift_imp) crunch valid_pdpt_objs[wp]: perform_asid_pool_invocation "valid_pdpt_objs" - (ignore: delete_objects set_object wp: delete_objects_valid_pdpt static_imp_wp get_object_wp) + (ignore: delete_objects set_object wp: delete_objects_valid_pdpt hoare_weak_lift_imp get_object_wp) abbreviation (input) "safe_pt_range \ \slots s. obj_at (\ko. \pt. ko = ArchObj (PageTable pt) @@ -1623,7 +1623,7 @@ crunch valid_pdpt[wp]: schedule_choose_new_thread "valid_pdpt_objs" crunch valid_pdpt[wp]: activate_thread, switch_to_thread, switch_to_idle_thread, awaken "valid_pdpt_objs" (simp: crunch_simps - wp: crunch_wps alternative_valid select_wp OR_choice_weak_wp select_ext_weak_wp) + wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp) crunch valid_pdpt[wp]: handle_call, handle_recv, handle_send, handle_yield, handle_interrupt, handle_vm_fault, handle_hypervisor_fault @@ -1634,7 +1634,7 @@ crunch valid_pdpt[wp]: handle_call, handle_recv, handle_send, handle_yield, lemma schedule_valid_pdpt[wp]: "\valid_pdpt_objs\ schedule :: (unit,det_ext) s_monad \\_. valid_pdpt_objs\" apply (simp add: schedule_def) - apply (wpsimp wp: alternative_wp select_wp hoare_drop_imps) + apply (wpsimp wp: hoare_drop_imps) done crunches check_domain_time diff --git a/proof/invariant-abstract/ARM/ArchVSpace_AI.thy b/proof/invariant-abstract/ARM/ArchVSpace_AI.thy index 01e4f0e5e8..22f9152135 100644 --- a/proof/invariant-abstract/ARM/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/ARM/ArchVSpace_AI.thy @@ -1688,7 +1688,7 @@ end locale vs_lookup_map_some_pdes = Arch + fixes pd pdp s s' S T pd' - defines "s' \ s\kheap := kheap s(pdp \ ArchObj (PageDirectory pd'))\" + defines "s' \ s\kheap := (kheap s)(pdp \ ArchObj (PageDirectory pd'))\" assumes refs: "vs_refs (ArchObj (PageDirectory pd')) = (vs_refs (ArchObj (PageDirectory pd)) - T) \ S" assumes old: "kheap s pdp = Some (ArchObj (PageDirectory pd))" @@ -1801,7 +1801,7 @@ lemma set_pd_vspace_objs_map: lemma simpler_set_pd_def: "set_pd p pd = (\s. if \pd. 
kheap s p = Some (ArchObj (PageDirectory pd)) - then ({((), s\kheap := kheap s(p \ ArchObj (PageDirectory pd))\)}, + then ({((), s\kheap := (kheap s)(p \ ArchObj (PageDirectory pd))\)}, False) else ({}, True))" apply (rule ext) @@ -1858,7 +1858,7 @@ lemma set_pd_valid_vs_lookup_map: apply (drule vs_lookup_pages_apI) apply (simp split: if_split_asm) apply (simp+)[2] - apply (frule_tac s="s\kheap := kheap s(p \ ArchObj (PageDirectory pd))\" + apply (frule_tac s="s\kheap := (kheap s)(p \ ArchObj (PageDirectory pd))\" in vs_lookup_pages_pdI[rotated -1]) apply (simp del: fun_upd_apply)+ apply (frule vs_lookup_pages_apI) @@ -2773,8 +2773,8 @@ lemma simpler_store_pde_def: "store_pde p pde s = (case kheap s (p && ~~ mask pd_bits) of Some (ArchObj (PageDirectory pd)) => - ({((), s\kheap := (kheap s((p && ~~ mask pd_bits) \ - (ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 2) := pde))))))\)}, False) + ({((), s\kheap := (kheap s)(p && ~~ mask pd_bits \ + ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 2) := pde))))\)}, False) | _ => ({}, True))" by (auto simp: store_pde_def simpler_set_pd_def get_object_def simpler_gets_def assert_def return_def fail_def set_object_def get_def put_def bind_def get_pd_def @@ -2784,7 +2784,7 @@ lemma simpler_store_pde_def: lemma pde_update_valid_vspace_objs: "[|valid_vspace_objs s; valid_pde pde s; pde_ref pde = None; kheap s (p && ~~ mask pd_bits) = Some (ArchObj (PageDirectory pd))|] ==> valid_vspace_objs - (s\kheap := kheap s(p && ~~ mask pd_bits \ ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 2) := pde))))\)" + (s\kheap := (kheap s)(p && ~~ mask pd_bits \ ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 2) := pde))))\)" apply (cut_tac pde=pde and p=p in store_pde_vspace_objs_unmap) apply (clarsimp simp: valid_def) apply (erule allE[where x=s]) @@ -4556,8 +4556,7 @@ end locale asid_pool_map = Arch + fixes s ap pool asid pdp pd s' defines "(s' :: ('a::state_ext) state) \ - s\kheap := kheap s(ap \ ArchObj (ASIDPool - (pool(asid \ pdp))))\" + s\kheap := (kheap s)(ap \ ArchObj (ASIDPool (pool(asid \ pdp))))\" assumes ap: "kheap s ap = Some (ArchObj (ASIDPool pool))" assumes new: "pool asid = None" assumes pd: "kheap s pdp = Some (ArchObj (PageDirectory pd))" diff --git a/proof/invariant-abstract/ARM/Machine_AI.thy b/proof/invariant-abstract/ARM/Machine_AI.thy index a629625c7e..738253e86d 100644 --- a/proof/invariant-abstract/ARM/Machine_AI.thy +++ b/proof/invariant-abstract/ARM/Machine_AI.thy @@ -17,7 +17,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. no_irq m" wpc_helper_no_irq @@ -56,7 +56,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -327,7 +327,7 @@ definition "irq_state_independent P \ \f s. P s \ lemma getActiveIRQ_inv [wp]: "\irq_state_independent P\ \ \P\ getActiveIRQ in_kernel \\rv. P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -620,7 +620,7 @@ lemma no_irq_clearMemory: "no_irq (clearMemory a b)" lemma getActiveIRQ_le_maxIRQ': "\\s. \irq > maxIRQ. irq_masks s irq\ getActiveIRQ in_kernel \\rv s. \x. 
rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply clarsimp apply (rule ccontr) apply (simp add: linorder_not_le) @@ -630,14 +630,14 @@ lemma getActiveIRQ_le_maxIRQ': lemma getActiveIRQ_neq_Some0xFF': "\\\ getActiveIRQ in_kernel \\rv s. rv \ Some 0x3FF\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply simp done lemma getActiveIRQ_neq_non_kernel: "\\\ getActiveIRQ True \\rv s. rv \ Some ` non_kernel_IRQs \" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply auto done diff --git a/proof/invariant-abstract/ARM_HYP/ArchAcc_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchAcc_AI.thy index 519979a92e..1bd1e8f52c 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchAcc_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchAcc_AI.thy @@ -1259,7 +1259,7 @@ lemma valid_objs_caps: lemma simpler_set_pt_def: "set_pt p pt = (\s. if \pt. kheap s p = Some (ArchObj (PageTable pt)) then - ({((), s\kheap := kheap s(p \ ArchObj (PageTable pt))\)}, False) + ({((), s\kheap := (kheap s)(p \ ArchObj (PageTable pt))\)}, False) else ({}, True))" apply (rule ext) apply (clarsimp simp: set_pt_def set_object_def get_object_def assert_def @@ -1275,7 +1275,7 @@ lemma simpler_set_pt_def: lemma valid_set_ptI: "(!!s opt. \P s; kheap s p = Some (ArchObj (PageTable opt))\ - \ Q () (s\kheap := kheap s(p \ ArchObj (PageTable pt))\)) + \ Q () (s\kheap := (kheap s)(p \ ArchObj (PageTable pt))\)) \ \P\ set_pt p pt \Q\" by (rule validI) (clarsimp simp: simpler_set_pt_def split: if_split_asm) @@ -1582,7 +1582,7 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1590,7 +1590,7 @@ lemma in_user_frame_same_type_upd: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1628,7 +1628,7 @@ lemma valid_machine_state_heap_updI: assumes vm : "valid_machine_state s" assumes tyat : "typ_at type p s" shows - " a_type obj = type \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + " a_type obj = type \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: valid_machine_state_def) subgoal for p apply (rule valid_machine_stateE[OF vm,where p = p]) @@ -1933,7 +1933,7 @@ lemma set_asid_pool_vspace_objs_unmap': lemma valid_vspace_obj_same_type: "\valid_vspace_obj ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) @@ -3134,7 +3134,7 @@ lemma cap_refs_respects_device_region_dmo: lemma machine_op_lift_device_state[wp]: "\\ms. P (device_state ms)\ machine_op_lift f \\_ ms. 
P (device_state ms)\" - by (clarsimp simp: machine_op_lift_def NonDetMonadVCG.valid_def bind_def + by (clarsimp simp: machine_op_lift_def Nondet_VCG.valid_def bind_def machine_rest_lift_def gets_def simpler_modify_def get_def return_def select_def ignore_failure_def select_f_def split: if_splits) diff --git a/proof/invariant-abstract/ARM_HYP/ArchArch_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchArch_AI.thy index e94651cdc5..fef2a435b3 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchArch_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchArch_AI.thy @@ -295,7 +295,7 @@ locale asid_update = Arch + fixes ap asid s s' assumes ko: "ko_at (ArchObj (ASIDPool Map.empty)) ap s" assumes empty: "arm_asid_table (arch_state s) asid = None" - defines "s' \ s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\" + defines "s' \ s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\" context asid_update begin @@ -419,7 +419,7 @@ context Arch begin global_naming ARM_HYP lemma valid_arch_state_strg: "valid_arch_state s \ ap \ ran (arm_asid_table (arch_state s)) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_arch_state (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\)" apply (clarsimp simp: valid_arch_state_def split: option.split) apply (clarsimp simp: valid_asid_table_def ran_def) apply (fastforce intro!: inj_on_fun_updI) @@ -433,7 +433,7 @@ lemma valid_vs_lookup_at_upd_strg: (\ptr cap. caps_of_state s ptr = Some cap \ ap \ obj_refs cap \ vs_cap_ref cap = Some [VSRef (ucast asid) None]) \ - valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -506,7 +506,7 @@ lemma valid_table_caps_asid_upd [iff]: lemma vs_asid_ref_upd: "([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') - (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\) + (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid_high_bits_of asid \ ap)\\) = (if asid_high_bits_of asid' = asid_high_bits_of asid then ap' = ap else ([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') s)" @@ -532,7 +532,7 @@ lemma cap_insert_simple_arch_caps_ap: and K (cap = ArchObjectCap (ASIDPoolCap ap asid)) \ cap_insert cap src dest \\rv s. 
valid_arch_caps (s\arch_state := arch_state s - \arm_asid_table := arm_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := (arm_asid_table (arch_state s))(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg) @@ -544,7 +544,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp set_cap_empty_tables[simplified second_level_tables_def, simplified])+ + get_cap_wp hoare_weak_lift_imp set_cap_empty_tables[simplified second_level_tables_def, simplified])+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -565,7 +565,7 @@ lemma valid_asid_map_asid_upd_strg: "valid_asid_map s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ arm_asid_table (arch_state s) asid = None \ - valid_asid_map (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_asid_map (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -578,7 +578,7 @@ lemma valid_vspace_objs_asid_upd_strg: "valid_vspace_objs s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ arm_asid_table (arch_state s) asid = None \ - valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(asid \ ap)\\)" + valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -604,7 +604,7 @@ lemma cap_insert_ap_invs: arm_asid_table (arch_state s) (asid_high_bits_of asid) = None)\ cap_insert cap src dest \\rv s. invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \arm_asid_table := ((arm_asid_table \ arch_state) s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def) apply (strengthen valid_arch_state_strg valid_vspace_objs_asid_upd_strg @@ -755,11 +755,11 @@ proof - \\rv s. 
invs (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s + \arm_asid_table := ((arm_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\) \ Q (s\arch_state := arch_state s - \arm_asid_table := (arm_asid_table \ arch_state) s + \arm_asid_table := ((arm_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\)\" apply (wp cap_insert_ap_invs) apply simp @@ -872,7 +872,8 @@ qed lemmas aci_invs[wp] = aci_invs'[where Q=\,simplified hoare_post_taut, OF refl refl refl TrueI TrueI TrueI,simplified] lemma obj_at_upd2: - "obj_at P t' (s\kheap := kheap s(t \ v, x \ v')\) = (if t' = x then P v' else obj_at P t' (s\kheap := kheap s(t \ v)\))" + "obj_at P t' (s\kheap := (kheap s)(t \ v, x \ v')\) = + (if t' = x then P v' else obj_at P t' (s\kheap := (kheap s)(t \ v)\))" by (simp add: obj_at_update obj_at_def) lemma vcpu_invalidate_active_hyp_refs_empty[wp]: @@ -942,7 +943,7 @@ lemma ex_nonz_cap_to_vcpu_udpate[simp]: by (simp add: ex_nonz_cap_to_def) lemma caps_of_state_VCPU_update: - "vcpu_at a s \ caps_of_state (s\kheap := kheap s(a \ ArchObj (VCPU b))\) = caps_of_state s" + "vcpu_at a s \ caps_of_state (s\kheap := (kheap s)(a \ ArchObj (VCPU b))\) = caps_of_state s" by (rule ext) (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases obj_at_def) lemma set_vcpu_ex_nonz_cap_to[wp]: @@ -952,7 +953,7 @@ lemma set_vcpu_ex_nonz_cap_to[wp]: done lemma caps_of_state_tcb_arch_update: - "ko_at (TCB y) t' s \ caps_of_state (s\kheap := kheap s(t' \ TCB (y\tcb_arch := f (tcb_arch y)\))\) = caps_of_state s" + "ko_at (TCB y) t' s \ caps_of_state (s\kheap := (kheap s)(t' \ TCB (y\tcb_arch := f (tcb_arch y)\))\) = caps_of_state s" by (rule ext) (auto simp: caps_of_state_cte_wp_at cte_wp_at_cases obj_at_def tcb_cap_cases_def) lemma arch_thread_set_ex_nonz_cap_to[wp]: @@ -1281,7 +1282,7 @@ crunch inv[wp]: ensure_safe_mapping, create_mapping_entries "P" crunch_ignore (add: select_ext) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp simp: crunch_simps) lemma create_mappings_empty [wp]: @@ -1596,7 +1597,7 @@ lemma arch_decode_inv_wf[wp]: apply (rename_tac word1 word2) apply (simp add: arch_decode_invocation_def Let_def decode_mmu_invocation_def split_def cong: if_cong) apply (rule hoare_pre) - apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_wp select_ext_weak_wp| + apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_ext_weak_wp| wpc| simp add: valid_arch_inv_def valid_apinv_def)+)[1] apply (simp add: if_apply_def2 valid_apinv_def) @@ -1657,7 +1658,7 @@ lemma arch_decode_inv_wf[wp]: apply (simp add: asid_bits_def asid_low_bits_def) apply (simp add: asid_bits_def) apply simp - apply (wp ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp|wpc | simp)+ + apply (wp ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp|wpc | simp)+ apply clarsimp apply (rule conjI, fastforce) apply (cases excaps, simp) @@ -1712,7 +1713,7 @@ lemma arch_decode_inv_wf[wp]: apply (cases "isPageFlushLabel (invocation_type label)") apply simp apply (rule hoare_pre) - apply (wp whenE_throwError_wp static_imp_wp hoare_drop_imps) + apply (wp whenE_throwError_wp hoare_weak_lift_imp hoare_drop_imps) apply (simp add: valid_arch_inv_def valid_page_inv_def) apply (wp find_pd_for_asid_pd_at_asid | wpc)+ apply (clarsimp simp: valid_cap_def mask_def) @@ -1795,7 +1796,7 @@ lemma arch_decode_inv_wf[wp]: apply (cases "isPDFlushLabel (invocation_type label)") 
apply simp apply (rule hoare_pre) - apply (wp whenE_throwError_wp static_imp_wp hoare_drop_imp | wpc | simp)+ + apply (wp whenE_throwError_wp hoare_weak_lift_imp hoare_drop_imp | wpc | simp)+ apply (simp add: resolve_vaddr_def) apply (wp get_master_pte_wp get_master_pde_wp whenE_throwError_wp | wpc | simp)+ apply (clarsimp simp: valid_arch_inv_def valid_pdi_def)+ diff --git a/proof/invariant-abstract/ARM_HYP/ArchCNodeInv_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchCNodeInv_AI.thy index 9a8c9d82b2..36a5a8f3dd 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchCNodeInv_AI.thy @@ -556,7 +556,7 @@ context Arch begin global_naming ARM_HYP lemma post_cap_delete_pre_is_final_cap': "\rv s'' rva s''a s. \valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def arch_cap_cleanup_opt_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -633,7 +633,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) diff --git a/proof/invariant-abstract/ARM_HYP/ArchCSpacePre_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchCSpacePre_AI.thy index d7eff35935..117d79537e 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchCSpacePre_AI.thy @@ -171,7 +171,7 @@ lemma valid_arch_mdb_simple: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by auto lemma valid_arch_mdb_free_index_update: @@ -195,34 +195,34 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by auto lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by auto lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by auto lemma valid_arch_mdb_weak_derived_update: "\s capa. 
\valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by auto lemma valid_arch_mdb_tcb_cnode_update: "valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb ((is_original_cap s) ((t, tcb_cnode_index 2) := True)) - (caps_of_state s((t, tcb_cnode_index 2) \ ReplyCap t True r))" + ((caps_of_state s)((t, tcb_cnode_index 2) \ ReplyCap t True r))" by auto lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -255,10 +255,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by auto diff --git a/proof/invariant-abstract/ARM_HYP/ArchCSpace_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchCSpace_AI.thy index 4fe010cdef..3e80ceac5e 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchCSpace_AI.thy @@ -183,20 +183,20 @@ lemma is_derived_is_cap: (* FIXME: move to CSpace_I near lemma vs_lookup1_tcb_update *) lemma vs_lookup_pages1_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages1 s" + vs_lookup_pages1 (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) (* FIXME: move to CSpace_I near lemma vs_lookup_tcb_update *) lemma vs_lookup_pages_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages s" by (clarsimp simp add: vs_lookup_pages_def vs_lookup_pages1_tcb_update) (* FIXME: move to CSpace_I near lemma vs_lookup1_cnode_update *) lemma vs_lookup_pages1_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ CNode m cs')\) = + vs_lookup_pages1 (s\kheap := (kheap s)(p \ CNode m cs')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) @@ -204,7 +204,7 @@ lemma vs_lookup_pages1_cnode_update: (* FIXME: move to CSpace_I near lemma vs_lookup_cnode_update *) lemma vs_lookup_pages_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages (s\kheap := kheap s(p \ CNode n cs')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ CNode n cs')\) = vs_lookup_pages s" by (clarsimp simp: vs_lookup_pages_def dest!: vs_lookup_pages1_cnode_update[where m=n and cs'=cs']) diff --git a/proof/invariant-abstract/ARM_HYP/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchDetSchedAux_AI.thy index 9829850062..35d493914d 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchDetSchedAux_AI.thy @@ -102,9 +102,9 @@ crunch ct[wp]: perform_asid_control_invocation "\s. P (cur_thread s)" crunch idle_thread[wp]: perform_asid_control_invocation "\s. 
P (idle_thread s)" -crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: static_imp_wp) +crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: hoare_weak_lift_imp) -crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: static_imp_wp) +crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: hoare_weak_lift_imp) crunch schedact[wp]: perform_asid_control_invocation "\s :: det_ext state. P (scheduler_action s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) diff --git a/proof/invariant-abstract/ARM_HYP/ArchDetSchedSchedule_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchDetSchedSchedule_AI.thy index e5314ba752..0c1a3665ac 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchDetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchDetSchedSchedule_AI.thy @@ -299,7 +299,7 @@ crunch valid_etcbs [wp, DetSchedSchedule_AI_assms]: crunch simple_sched_action [wp, DetSchedSchedule_AI_assms]: arch_finalise_cap, prepare_thread_delete simple_sched_action - (wp: hoare_drop_imps mapM_x_wp mapM_wp select_wp subset_refl + (wp: hoare_drop_imps mapM_x_wp mapM_wp subset_refl simp: unless_def if_fun_split) crunches arch_finalise_cap, prepare_thread_delete, arch_invoke_irq_handler diff --git a/proof/invariant-abstract/ARM_HYP/ArchFinalise_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchFinalise_AI.thy index 38e71b6722..b59af49645 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchFinalise_AI.thy @@ -463,7 +463,7 @@ lemma arch_thread_set_cur_tcb[wp]: "\cur_tcb\ arch_thread_set p lemma cte_wp_at_update_some_tcb: "\kheap s v = Some (TCB tcb) ; tcb_cnode_map tcb = tcb_cnode_map (f tcb)\ - \ cte_wp_at P p (s\kheap := kheap s (v \ TCB (f tcb))\) = cte_wp_at P p s" + \ cte_wp_at P p (s\kheap := (kheap s)(v \ TCB (f tcb))\) = cte_wp_at P p s" apply (clarsimp simp: cte_wp_at_cases2 dest!: get_tcb_SomeD) done @@ -658,7 +658,7 @@ lemma arch_thread_set_valid_objs_vcpu_Some[wp]: lemma sym_refs_update_some_tcb: "\kheap s v = Some (TCB tcb) ; refs_of (TCB tcb) = refs_of (TCB (f tcb))\ - \ sym_refs (state_refs_of (s\kheap := kheap s (v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" + \ sym_refs (state_refs_of (s\kheap := (kheap s)(v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" apply (rule_tac f=sym_refs in arg_cong) apply (rule all_ext) apply (clarsimp simp: sym_refs_def state_refs_of_def) @@ -706,7 +706,7 @@ lemma vcpu_invalidate_tcbs_inv[wp]: lemma sym_refs_vcpu_None: assumes sym_refs: "sym_refs (state_hyp_refs_of s)" assumes tcb: "ko_at (TCB tcb) t s" "tcb_vcpu (tcb_arch tcb) = Some vr" - shows "sym_refs (state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_arch := tcb_vcpu_update Map.empty (tcb_arch tcb)\), + shows "sym_refs (state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_arch := tcb_vcpu_update Map.empty (tcb_arch tcb)\), vr \ ArchObj (VCPU (vcpu_tcb_update Map.empty v)))\))" (is "sym_refs (state_hyp_refs_of ?s')") proof - @@ -1397,7 +1397,7 @@ crunches (wp: crunch_wps subset_refl) crunch irq_node[Finalise_AI_asms,wp]: prepare_thread_delete "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch irq_node[wp]: arch_finalise_cap "\s. 
P (interrupt_irq_node s)" (simp: crunch_simps wp: crunch_wps) @@ -1878,7 +1878,7 @@ lemma arch_finalise_case_no_lookup: | simp add: vs_cap_ref_simps vs_lookup_pages_eq_at[THEN fun_cong, symmetric] vs_lookup_pages_eq_ap[THEN fun_cong, symmetric])+ - apply (wp hoare_vcg_all_lift unmap_page_unmapped static_imp_wp) + apply (wp hoare_vcg_all_lift unmap_page_unmapped hoare_weak_lift_imp) apply (wpc|wp unmap_page_table_unmapped3 delete_asid_unmapped |simp add:vs_cap_ref_def vs_lookup_pages_eq_at[THEN fun_cong,symmetric] @@ -2099,7 +2099,7 @@ lemma set_asid_pool_obj_at_ptr: lemma valid_arch_state_table_strg: "valid_arch_state s \ asid_pool_at p s \ Some p \ arm_asid_table (arch_state s) ` (dom (arm_asid_table (arch_state s)) - {x}) \ - valid_arch_state (s\arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\)" + valid_arch_state (s\arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\)" apply (clarsimp simp: valid_arch_state_def valid_asid_table_def ran_def split: option.split) apply (rule conjI; clarsimp) apply (rule conjI, fastforce) @@ -2132,8 +2132,8 @@ lemma vs_lookup1_arch [simp]: lemma vs_lookup_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (erule vs_lookupE) apply clarsimp @@ -2165,8 +2165,8 @@ lemma vs_lookup_empty_table: lemma vs_lookup_pages_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\arm_asid_table := arm_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\arm_asid_table := (arm_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (subst (asm) vs_lookup_pages_def) apply (clarsimp simp: Image_def) @@ -2201,7 +2201,7 @@ lemma set_asid_pool_empty_table_objs: set_asid_pool p Map.empty \\rv s. valid_vspace_objs (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of word2 \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of word2 \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vspace_objs_def @@ -2226,7 +2226,7 @@ lemma set_asid_pool_empty_table_lookup: set_asid_pool p Map.empty \\rv s. valid_vs_lookup (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vs_lookup_def @@ -2248,7 +2248,7 @@ lemma set_asid_pool_empty_valid_asid_map: \ (\p'. \ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\rv s. valid_asid_map (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: valid_asid_map_def vspace_at_asid_def @@ -2280,7 +2280,7 @@ lemma set_asid_pool_invs_table: \ (\p'. 
\ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\x s. invs (s\arch_state := arch_state s\arm_asid_table := - arm_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (arm_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def valid_arch_caps_def) apply (rule hoare_pre) apply (wp valid_irq_node_typ set_asid_pool_typ_at diff --git a/proof/invariant-abstract/ARM_HYP/ArchIpc_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchIpc_AI.thy index da5902ddaf..c3206fe5a9 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchIpc_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchIpc_AI.thy @@ -321,7 +321,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -495,7 +495,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (wpsimp simp: do_ipc_transfer_def do_normal_transfer_def transfer_caps_def bind_assoc wp: hoare_vcg_all_lift hoare_drop_imps)+ apply (simp only: ball_conj_distrib[where P="\x. real_cte_at x s" for s]) - apply (wpsimp wp: get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wpsimp wp: get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift simp: obj_at_def is_tcb_def)+ apply (simp split: kernel_object.split_asm) @@ -521,7 +521,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" by auto end diff --git a/proof/invariant-abstract/ARM_HYP/ArchKHeap_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchKHeap_AI.thy index 587289b949..f320a301cc 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchKHeap_AI.thy @@ -674,20 +674,20 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def ARM_HYP.hyp_refs_of_def) done lemma state_hyp_refs_of_ntfn_update: "\s ep val. 
typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def ARM_HYP.hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def split: option.splits) @@ -695,7 +695,7 @@ lemma state_hyp_refs_of_tcb_bound_ntfn_update: lemma state_hyp_refs_of_tcb_state_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def split: option.splits) @@ -712,19 +712,19 @@ lemma valid_vcpu_lift: lemma valid_vcpu_update: "\s ep val. typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: ARM_HYP.state_hyp_refs_of_def obj_at_def ARM_HYP.hyp_refs_of_def) done lemma valid_vcpu_same_type: "\ valid_vcpu v s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_vcpu v (s\kheap := kheap s(p \ k)\)" + \ valid_vcpu v (s\kheap := (kheap s)(p \ k)\)" by (cases v; case_tac vcpu_tcb; clarsimp simp: valid_vcpu_def typ_at_same_type) lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" by (induction ao rule: arch_kernel_obj.induct; clarsimp simp: typ_at_same_type valid_vcpu_same_type) @@ -738,7 +738,7 @@ lemma default_tcb_not_live: "\ live (TCB default_tcb)" lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) lemma valid_ioports_lift: diff --git a/proof/invariant-abstract/ARM_HYP/ArchTcb_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchTcb_AI.thy index e1b6faa54b..74ff7dd51e 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchTcb_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchTcb_AI.thy @@ -263,7 +263,7 @@ lemma tc_invs[Tcb_AI_asms]: checked_insert_no_cap_to out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid - static_imp_wp static_imp_conj_wp)[1] + hoare_weak_lift_imp hoare_weak_lift_imp_conj)[1] | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def del: hoare_True_E_R diff --git a/proof/invariant-abstract/ARM_HYP/ArchVCPU_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchVCPU_AI.thy index 1076fd9885..c4dcb43011 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchVCPU_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchVCPU_AI.thy @@ -75,7 +75,7 @@ crunches do_machine_op (wp: valid_cur_vcpu_lift_cur_thread_update valid_cur_vcpu_lift crunch_wps) lemma valid_cur_vcpu_vcpu_update[simp]: - "vcpu_at v s \ valid_cur_vcpu (s\kheap := kheap s(v \ ArchObj (VCPU vcpu))\) = 
valid_cur_vcpu s" + "vcpu_at v s \ valid_cur_vcpu (s\kheap := (kheap s)(v \ ArchObj (VCPU vcpu))\) = valid_cur_vcpu s" by (clarsimp simp: valid_cur_vcpu_def active_cur_vcpu_of_def pred_tcb_at_def obj_at_def) crunches vcpu_save_reg, vcpu_write_reg, save_virt_timer, vgic_update, vcpu_disable @@ -255,7 +255,7 @@ lemma schedule_valid_cur_vcpu[wp]: (schedule :: (unit, unit) s_monad) \\_. valid_cur_vcpu\" unfolding schedule_def allActiveTCBs_def - by (wpsimp wp: alternative_wp select_wp) + by wpsimp crunches cancel_all_ipc, blocked_cancel_ipc, unbind_maybe_notification, cancel_all_signals, bind_notification, fast_finalise, deleted_irq_handler, post_cap_deletion, cap_delete_one, @@ -265,7 +265,7 @@ crunches cancel_all_ipc, blocked_cancel_ipc, unbind_maybe_notification, cancel_a restart, reschedule_required, possible_switch_to, thread_set_priority, reply_from_kernel for arch_state[wp]: "\s. P (arch_state s)" and cur_thread[wp]: "\s. P (cur_thread s)" - (wp: mapM_x_wp_inv thread_set.arch_state select_wp crunch_wps + (wp: mapM_x_wp_inv thread_set.arch_state crunch_wps simp: crunch_simps possible_switch_to_def reschedule_required_def) lemma do_unbind_notification_arch_tcb_at[wp]: @@ -297,7 +297,7 @@ crunches blocked_cancel_ipc, cap_delete_one, cancel_signal lemma reply_cancel_ipc_arch_tcb_at[wp]: "reply_cancel_ipc ntfnptr \arch_tcb_at P t\" unfolding reply_cancel_ipc_def thread_set_def - apply (wpsimp wp: set_object_wp select_wp) + apply (wpsimp wp: set_object_wp) by (clarsimp simp: pred_tcb_at_def obj_at_def get_tcb_def) crunches cancel_ipc, send_ipc, receive_ipc @@ -376,7 +376,7 @@ crunches cap_insert, cap_move crunches suspend, unbind_notification, cap_swap_for_delete for state_hyp_refs_of[wp]: "\s. P (state_hyp_refs_of s)" - (wp: crunch_wps thread_set_hyp_refs_trivial select_wp simp: crunch_simps) + (wp: crunch_wps thread_set_hyp_refs_trivial simp: crunch_simps) lemma prepare_thread_delete_valid_cur_vcpu[wp]: "\\s. valid_cur_vcpu s \ sym_refs (state_hyp_refs_of s)\ diff --git a/proof/invariant-abstract/ARM_HYP/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchVSpaceEntries_AI.thy index d4896e1949..28812e081c 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchVSpaceEntries_AI.thy @@ -410,7 +410,7 @@ lemma set_simple_ko_valid_pdpt_objs[wp]: done crunch valid_pdpt_objs[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_pdpt_objs" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma preemption_point_valid_pdpt_objs[wp]: "\valid_pdpt_objs\ preemption_point \\rv. valid_pdpt_objs\" @@ -631,7 +631,7 @@ lemma invoke_untyped_valid_pdpt[wp]: crunch valid_pdpt_objs[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_pdpt_objs" - (ignore: delete_objects wp: delete_objects_valid_pdpt static_imp_wp) + (ignore: delete_objects wp: delete_objects_valid_pdpt hoare_weak_lift_imp) abbreviation (input) "safe_pt_range \ \slots s. obj_at (\ko. \pt. 
ko = ArchObj (PageTable pt) @@ -1023,7 +1023,7 @@ lemma perform_page_directory_valid_pdpt[wp]: done crunch valid_pdpt_objs[wp]: perform_vcpu_invocation "valid_pdpt_objs" - (ignore: delete_objects wp: delete_objects_valid_pdpt static_imp_wp) + (ignore: delete_objects wp: delete_objects_valid_pdpt hoare_weak_lift_imp) lemma perform_invocation_valid_pdpt[wp]: @@ -1511,15 +1511,14 @@ lemma handle_invocation_valid_pdpt[wp]: crunch valid_pdpt[wp]: handle_event, activate_thread,switch_to_thread, switch_to_idle_thread "valid_pdpt_objs" - (simp: crunch_simps wp: crunch_wps alternative_wp select_wp OR_choice_weak_wp select_ext_weak_wp + (simp: crunch_simps wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp ignore: without_preemption getActiveIRQ resetTimer ackInterrupt getFAR getDFSR getIFSR OR_choice set_scheduler_action clearExMonitor) lemma schedule_valid_pdpt[wp]: "\valid_pdpt_objs\ schedule :: (unit,unit) s_monad \\_. valid_pdpt_objs\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma call_kernel_valid_pdpt[wp]: diff --git a/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy b/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy index c50601c6cd..4f124cc52a 100644 --- a/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/ArchVSpace_AI.thy @@ -1218,7 +1218,7 @@ lemma arch_thread_set_caps_of_state [wp]: by (wpsimp wp: thread_set_caps_of_state_trivial2 simp: arch_thread_set_is_thread_set) lemma arch_thread_set_wp: - "\\s. get_tcb p s \ None \ Q (s\kheap := kheap s(p \ TCB (the (get_tcb p s)\tcb_arch := f (tcb_arch (the (get_tcb p s)))\))\) \ + "\\s. get_tcb p s \ None \ Q (s\kheap := (kheap s)(p \ TCB (the (get_tcb p s)\tcb_arch := f (tcb_arch (the (get_tcb p s)))\))\) \ arch_thread_set f p \\_. Q\" apply (simp add: arch_thread_set_def) @@ -1231,7 +1231,7 @@ lemma a_type_VCPU [simp]: by (simp add: a_type_def) lemma set_vcpu_wp: - "\\s. vcpu_at p s \ Q (s\kheap := kheap s(p \ (ArchObj (VCPU vcpu))) \) \ set_vcpu p vcpu \\_. Q\" + "\\s. vcpu_at p s \ Q (s\kheap := (kheap s)(p \ (ArchObj (VCPU vcpu))) \) \ set_vcpu p vcpu \\_. Q\" unfolding set_vcpu_def apply (wp set_object_wp_strong) apply (clarsimp simp: obj_at_def split: kernel_object.splits arch_kernel_obj.splits) @@ -2337,7 +2337,7 @@ lemma set_vcpu_if_live_then_nonz_cap_Some[wp]: (* FIXME: kind of ugly but hey! it works!! *) -lemma state_refs_of_simp: "\ a \ p \ \ state_refs_of (s\kheap := kheap s(p \ v) \) a = state_refs_of s a " +lemma state_refs_of_simp: "\ a \ p \ \ state_refs_of (s\kheap := (kheap s)(p \ v) \) a = state_refs_of s a " by (simp add: state_refs_of_def) lemma state_refs_of_vcpu_simp: "typ_at (AArch AVCPU) p s \ state_refs_of s p = {}" @@ -2363,12 +2363,12 @@ lemma set_vcpu_sym_refs[wp]: apply (clarsimp simp: obj_at_def) done -lemma state_hyp_refs_of_simp_neq: "\ a \ p \ \ state_hyp_refs_of (s\kheap := kheap s(p \ v) \) a = state_hyp_refs_of s a " +lemma state_hyp_refs_of_simp_neq: "\ a \ p \ \ state_hyp_refs_of (s\kheap := (kheap s)(p \ v) \) a = state_hyp_refs_of s a " by (simp add: state_hyp_refs_of_def) lemma state_hyp_refs_of_simp_eq: "obj_at (\ko'. 
hyp_refs_of ko' = hyp_refs_of v) p s - \ state_hyp_refs_of (s\kheap := kheap s(p \ v) \) p = state_hyp_refs_of s p" + \ state_hyp_refs_of (s\kheap := (kheap s)(p \ v) \) p = state_hyp_refs_of s p" by (clarsimp simp: state_hyp_refs_of_def obj_at_def) lemma set_object_vcpu_sym_refs_hyp: @@ -2722,7 +2722,7 @@ end locale vs_lookup_map_some_pdes = Arch + fixes pd pdp s s' S T pd' - defines "s' \ s\kheap := kheap s(pdp \ ArchObj (PageDirectory pd'))\" + defines "s' \ s\kheap := (kheap s)(pdp \ ArchObj (PageDirectory pd'))\" assumes refs: "vs_refs (ArchObj (PageDirectory pd')) = (vs_refs (ArchObj (PageDirectory pd)) - T) \ S" assumes old: "kheap s pdp = Some (ArchObj (PageDirectory pd))" @@ -2836,7 +2836,7 @@ lemma set_pd_vspace_objs_map: (* ARMHYP *) lemma simpler_set_pd_def: "set_pd p pd = (\s. if \pd. kheap s p = Some (ArchObj (PageDirectory pd)) - then ({((), s\kheap := kheap s(p \ ArchObj (PageDirectory pd))\)}, + then ({((), s\kheap := (kheap s)(p \ ArchObj (PageDirectory pd))\)}, False) else ({}, True))" apply (rule ext) @@ -2892,7 +2892,7 @@ lemma set_pd_valid_vs_lookup_map: (* ARMHYP *) apply (drule vs_lookup_pages_apI) apply (simp split: if_split_asm) apply (simp+)[2] - apply (frule_tac s="s\kheap := kheap s(p \ ArchObj (PageDirectory pd))\" + apply (frule_tac s="s\kheap := (kheap s)(p \ ArchObj (PageDirectory pd))\" in vs_lookup_pages_pdI[rotated -1]) apply (simp del: fun_upd_apply)+ apply (frule vs_lookup_pages_apI) @@ -3773,8 +3773,8 @@ lemma simpler_store_pde_def: "store_pde p pde s = (case kheap s (p && ~~ mask pd_bits) of Some (ArchObj (PageDirectory pd)) => - ({((), s\kheap := (kheap s((p && ~~ mask pd_bits) \ - (ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 3) := pde))))))\)}, False) + ({((), s\kheap := (kheap s)(p && ~~ mask pd_bits \ + (ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 3) := pde)))))\)}, False) | _ => ({}, True))" by (auto simp: store_pde_def simpler_set_pd_def get_object_def simpler_gets_def assert_def return_def fail_def set_object_def get_def put_def bind_def get_pd_def vspace_bits_defs @@ -3784,7 +3784,7 @@ lemma pde_update_valid_vspace_objs: "[|valid_vspace_objs s; valid_pde pde s; pde_ref pde = None; kheap s (p && ~~ mask pd_bits) = Some (ArchObj (PageDirectory pd))|] ==> valid_vspace_objs - (s\kheap := kheap s(p && ~~ mask pd_bits \ ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 3) := pde))))\)" + (s\kheap := (kheap s)(p && ~~ mask pd_bits \ ArchObj (PageDirectory (pd(ucast (p && mask pd_bits >> 3) := pde))))\)" apply (cut_tac pde=pde and p=p in store_pde_arch_objs_unmap) apply (clarsimp simp: valid_def) apply (erule allE[where x=s]) @@ -5596,8 +5596,7 @@ end locale asid_pool_map = Arch + fixes s ap pool asid pdp pd s' defines "(s' :: ('a::state_ext) state) \ - s\kheap := kheap s(ap \ ArchObj (ASIDPool - (pool(asid \ pdp))))\" + s\kheap := (kheap s)(ap \ ArchObj (ASIDPool (pool(asid \ pdp))))\" assumes ap: "kheap s ap = Some (ArchObj (ASIDPool pool))" assumes new: "pool asid = None" assumes pd: "kheap s pdp = Some (ArchObj (PageDirectory pd))" diff --git a/proof/invariant-abstract/ARM_HYP/Machine_AI.thy b/proof/invariant-abstract/ARM_HYP/Machine_AI.thy index 13a1f68738..5a172f2603 100644 --- a/proof/invariant-abstract/ARM_HYP/Machine_AI.thy +++ b/proof/invariant-abstract/ARM_HYP/Machine_AI.thy @@ -17,7 +17,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. 
P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. no_irq m" wpc_helper_no_irq @@ -56,7 +56,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -336,7 +336,7 @@ definition "irq_state_independent P \ \f s. P s \ lemma getActiveIRQ_inv [wp]: "\irq_state_independent P\ \ \P\ getActiveIRQ in_kernel \\rv. P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -670,7 +670,7 @@ lemma no_irq_clearMemory: "no_irq (clearMemory a b)" lemma getActiveIRQ_le_maxIRQ': "\\s. \irq > maxIRQ. irq_masks s irq\ getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply clarsimp apply (rule ccontr) apply (simp add: linorder_not_le) @@ -680,14 +680,13 @@ lemma getActiveIRQ_le_maxIRQ': lemma getActiveIRQ_neq_Some0xFF': "\\\ getActiveIRQ in_kernel \\rv s. rv \ Some 0x3FF\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma getActiveIRQ_neq_non_kernel: "\\\ getActiveIRQ True \\rv s. rv \ Some ` non_kernel_IRQs \" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply auto done diff --git a/proof/invariant-abstract/BCorres_AI.thy b/proof/invariant-abstract/BCorres_AI.thy index db94a4e6df..98b62b2881 100644 --- a/proof/invariant-abstract/BCorres_AI.thy +++ b/proof/invariant-abstract/BCorres_AI.thy @@ -58,7 +58,7 @@ lemma OR_choiceE_bcorres[wp]: done crunch_ignore (bcorres) - (add: NonDetMonad.bind gets modify get put do_extended_op empty_slot_ext mapM_x "when" + (add: Nondet_Monad.bind gets modify get put do_extended_op empty_slot_ext mapM_x "when" select unless mapM catch bindE liftE whenE alternative cap_swap_ext cap_insert_ext cap_move_ext liftM create_cap_ext lookup_error_on_failure getActiveIRQ maybeM diff --git a/proof/invariant-abstract/Bits_AI.thy b/proof/invariant-abstract/Bits_AI.thy index f95be2e021..2ed6e0d726 100644 --- a/proof/invariant-abstract/Bits_AI.thy +++ b/proof/invariant-abstract/Bits_AI.thy @@ -14,7 +14,7 @@ lemmas crunch_simps = split_def whenE_def unlessE_def Let_def if_fun_split assertE_def zipWithM_mapM zipWithM_x_mapM lemma in_set_object: - "(rv, s') \ fst (set_object ptr obj s) \ s' = s \ kheap := kheap s (ptr \ obj) \" + "(rv, s') \ fst (set_object ptr obj s) \ s' = s \ kheap := (kheap s) (ptr \ obj) \" by (clarsimp simp: set_object_def get_object_def in_monad) lemma cap_fault_injection: diff --git a/proof/invariant-abstract/CNodeInv_AI.thy b/proof/invariant-abstract/CNodeInv_AI.thy index 86852746fe..c2104e99d3 100644 --- a/proof/invariant-abstract/CNodeInv_AI.thy +++ b/proof/invariant-abstract/CNodeInv_AI.thy @@ -875,7 +875,7 @@ crunches update_time_stamp crunches preemption_point for not_recursive_cspaces[wp]: "\s. P (not_recursive_cspaces s)" and caps_of_state[wp]: "\s. 
P (caps_of_state s)" - (wp: OR_choiceE_weak_wp alternative_valid hoare_drop_imp simp: preemption_point_def) + (wp: OR_choiceE_weak_wp hoare_drop_imp simp: preemption_point_def) lemma rec_del_termination: "All (rec_del_dom :: rec_del_call \ 'state_ext state \ bool)" @@ -2293,16 +2293,16 @@ lemmas empty_slot_rvk_prog' = empty_slot_rvk_prog[unfolded o_def] crunch rvk_prog: cancel_ipc "\s. revoke_progress_ord m (\x. option_map cap_to_rpo (caps_of_state s x))" (simp: crunch_simps o_def unless_def is_final_cap_def tcb_cap_cases_def - wp: hoare_drop_imps empty_slot_rvk_prog' select_wp + wp: hoare_drop_imps empty_slot_rvk_prog' thread_set_caps_of_state_trivial) crunch rvk_prog: suspend "\s. revoke_progress_ord m (\x. option_map cap_to_rpo (caps_of_state s x))" (simp: crunch_simps o_def unless_def is_final_cap_def - wp: crunch_wps empty_slot_rvk_prog' select_wp maybeM_inv ignore: set_tcb_obj_ref) + wp: crunch_wps empty_slot_rvk_prog' maybeM_inv ignore: set_tcb_obj_ref) crunch rvk_prog: deleting_irq_handler "\s. revoke_progress_ord m (\x. option_map cap_to_rpo (caps_of_state s x))" (simp: crunch_simps o_def unless_def is_final_cap_def - wp: crunch_wps empty_slot_rvk_prog' select_wp) + wp: crunch_wps empty_slot_rvk_prog') locale CNodeInv_AI_3 = CNodeInv_AI_2 state_ext_t for state_ext_t :: "'state_ext::state_ext itself" + @@ -2539,7 +2539,7 @@ proof (induct rule: cap_revoke_induct) show ?case apply (subst cap_revoke_simps) apply (wp "1.hyps") - apply (wp x p hoare_drop_imps select_wp)+ + apply (wp x p hoare_drop_imps)+ apply simp_all done qed @@ -2569,7 +2569,7 @@ proof (induct rule: cap_revoke_induct) show ?case apply (subst cap_revoke_simps) apply (wp "1.hyps") - apply (wp x p hoare_drop_imps select_wp)+ + apply (wp x p hoare_drop_imps)+ apply (simp_all add: y) done qed diff --git a/proof/invariant-abstract/CSpaceInv_AI.thy b/proof/invariant-abstract/CSpaceInv_AI.thy index ac994cd574..bb35d173d4 100644 --- a/proof/invariant-abstract/CSpaceInv_AI.thy +++ b/proof/invariant-abstract/CSpaceInv_AI.thy @@ -170,12 +170,12 @@ crunch inv [wp]: lookup_cap P lemma cte_at_tcb_update: - "tcb_at t s \ cte_at slot (s\kheap := kheap s(t \ TCB tcb)\) = cte_at slot s" + "tcb_at t s \ cte_at slot (s\kheap := (kheap s)(t \ TCB tcb)\) = cte_at slot s" by (clarsimp simp add: cte_at_cases obj_at_def is_tcb) lemma valid_cap_tcb_update [simp]: - "tcb_at t s \ (s\kheap := kheap s(t \ TCB tcb)\) \ cap = s \ cap" + "tcb_at t s \ (s\kheap := (kheap s)(t \ TCB tcb)\) \ cap = s \ cap" apply (clarsimp simp: is_tcb elim!: obj_atE) apply (subgoal_tac "a_type (TCB tcba) = a_type (TCB tcb)") apply (rule iffI) @@ -189,7 +189,7 @@ lemma valid_cap_tcb_update [simp]: lemma obj_at_tcb_update: "\ tcb_at t s; \x y. 
P (TCB x) = P (TCB y)\ \ - obj_at P t' (s\kheap := kheap s(t \ TCB tcb)\) = obj_at P t' s" + obj_at P t' (s\kheap := (kheap s)(t \ TCB tcb)\) = obj_at P t' s" apply (simp add: obj_at_def is_tcb_def) apply clarsimp apply (case_tac ko) @@ -199,7 +199,7 @@ lemma obj_at_tcb_update: lemma valid_thread_state_tcb_update: "\ tcb_at t s \ \ - valid_tcb_state ts (s\kheap := kheap s(t \ TCB tcb)\) = valid_tcb_state ts s" + valid_tcb_state ts (s\kheap := (kheap s)(t \ TCB tcb)\) = valid_tcb_state ts s" apply (unfold valid_tcb_state_def) apply (case_tac ts) apply (auto simp: obj_at_tcb_update is_ep_def is_tcb_def is_ntfn_def is_reply_def @@ -209,7 +209,7 @@ lemma valid_thread_state_tcb_update: lemma valid_objs_tcb_update: "\tcb_at t s; valid_tcb t tcb s; valid_objs s \ - \ valid_objs (s\kheap := kheap s(t \ TCB tcb)\)" + \ valid_objs (s\kheap := (kheap s)(t \ TCB tcb)\)" apply (clarsimp simp: valid_objs_def dom_def elim!: obj_atE) apply (intro conjI impI) @@ -227,7 +227,7 @@ lemma valid_objs_tcb_update: lemma iflive_tcb_update: "\ if_live_then_nonz_cap s; live (TCB tcb) \ ex_nonz_cap_to t s; obj_at (same_caps (TCB tcb)) t s \ - \ if_live_then_nonz_cap (s\kheap := kheap s(t \ TCB tcb)\)" + \ if_live_then_nonz_cap (s\kheap := (kheap s)(t \ TCB tcb)\)" unfolding fun_upd_def apply (simp add: if_live_then_nonz_cap_def, erule allEI) apply safe @@ -238,7 +238,7 @@ lemma iflive_tcb_update: lemma ifunsafe_tcb_update: "\ if_unsafe_then_cap s; obj_at (same_caps (TCB tcb)) t s \ - \ if_unsafe_then_cap (s\kheap := kheap s(t \ TCB tcb)\)" + \ if_unsafe_then_cap (s\kheap := (kheap s)(t \ TCB tcb)\)" apply (simp add: if_unsafe_then_cap_def, elim allEI) apply (clarsimp dest!: caps_of_state_cteD simp: cte_wp_at_after_update fun_upd_def) @@ -249,7 +249,7 @@ lemma ifunsafe_tcb_update: lemma zombies_tcb_update: "\ zombies_final s; obj_at (same_caps (TCB tcb)) t s \ - \ zombies_final (s\kheap := kheap s(t \ TCB tcb)\)" + \ zombies_final (s\kheap := (kheap s)(t \ TCB tcb)\)" apply (simp add: zombies_final_def is_final_cap'_def2, elim allEI) apply (clarsimp simp: cte_wp_at_after_update fun_upd_def) done @@ -263,13 +263,13 @@ lemma valid_idle_tcb_update: tcb_yield_to t = tcb_yield_to t'; tcb_iarch t = tcb_iarch t'; valid_tcb p t' s \ - \ valid_idle (s\kheap := kheap s(p \ TCB t')\)" + \ valid_idle (s\kheap := (kheap s)(p \ TCB t')\)" by (fastforce simp: valid_idle_def pred_tcb_at_def obj_at_def) lemma tcb_state_same_cte_wp_at: "\ ko_at (TCB t) p s; \(getF, v) \ ran tcb_cap_cases. getF t = getF t' \ - \ \P p'. cte_wp_at P p' (s\kheap := kheap s(p \ TCB t')\) + \ \P p'. cte_wp_at P p' (s\kheap := (kheap s)(p \ TCB t')\) = cte_wp_at P p' s" apply (clarsimp simp add: cte_wp_at_cases obj_at_def) apply (case_tac "tcb_cap_cases b") @@ -1443,7 +1443,7 @@ lemma thread_set_mdb: done lemma set_cap_caps_of_state2: - "\\s. P (caps_of_state s (p \ cap)) (cdt s) (is_original_cap s)\ + "\\s. P ((caps_of_state s)(p \ cap)) (cdt s) (is_original_cap s)\ set_cap cap p \\rv s. P (caps_of_state s) (cdt s) (is_original_cap s)\" apply (rule_tac Q="\rv s. \m mr. 
P (caps_of_state s) m mr @@ -2051,7 +2051,7 @@ lemma cap_insert_obj_at_other: lemma only_idle_tcb_update: "\only_idle s; ko_at (TCB t) p s; tcb_state t = tcb_state t' \ \idle (tcb_state t') \ - \ only_idle (s\kheap := kheap s(p \ TCB t')\)" + \ only_idle (s\kheap := (kheap s)(p \ TCB t')\)" by (clarsimp simp: only_idle_def pred_tcb_at_def obj_at_def) lemma as_user_only_idle : diff --git a/proof/invariant-abstract/CSpace_AI.thy b/proof/invariant-abstract/CSpace_AI.thy index a3dce4a2dc..b70b754f8e 100644 --- a/proof/invariant-abstract/CSpace_AI.thy +++ b/proof/invariant-abstract/CSpace_AI.thy @@ -233,7 +233,7 @@ lemma preemption_point_inv: apply (rule hoare_seq_ext_skipE, wpsimp) apply (rule valid_validE) apply (rule OR_choiceE_weak_wp) - apply (rule alternative_valid; (solves wpsimp)?) + apply (rule alternative_wp[where P=P and P'=P for P, simplified]; (solves wpsimp)?) apply (rule validE_valid) apply (rule hoare_seq_ext_skipE, solves \wpsimp wp: update_time_stamp_wp\)+ apply wpsimp @@ -687,9 +687,9 @@ lemma no_True_set_nth: done lemma set_cap_caps_of_state_monad: - "(v, s') \ fst (set_cap cap p s) \ caps_of_state s' = (caps_of_state s (p \ cap))" + "(v, s') \ fst (set_cap cap p s) \ caps_of_state s' = (caps_of_state s)(p \ cap)" apply (drule use_valid) - apply (rule set_cap_caps_of_state [where P="(=) (caps_of_state s (p\cap))"]) + apply (rule set_cap_caps_of_state [where P="(=) ((caps_of_state s)(p\cap))"]) apply (rule refl) apply simp done @@ -1920,15 +1920,15 @@ lemma set_free_index_valid_mdb: proof(intro conjI impI) fix s bits f r dev assume mdb:"untyped_mdb (cdt s) (caps_of_state s)" - assume cstate:"caps_of_state s cref = Some (cap.UntypedCap dev r bits f)" (is "?m cref = Some ?srccap") - show "untyped_mdb (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + assume cstate:"caps_of_state s cref = Some (UntypedCap dev r bits f)" (is "?m cref = Some ?srccap") + show "untyped_mdb (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply (rule untyped_mdb_update_free_index [where capa = ?srccap and m = "caps_of_state s" and src = cref, unfolded free_index_update_def,simplified,THEN iffD2]) apply (simp add:cstate mdb)+ done assume arch_mdb:"valid_arch_mdb (is_original_cap s) (caps_of_state s)" - show "valid_arch_mdb (is_original_cap s) (caps_of_state s(cref \ UntypedCap dev r bits idx))" + show "valid_arch_mdb (is_original_cap s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply (rule valid_arch_mdb_updates(1)[where capa = ?srccap and m="caps_of_state s" and src=cref, unfolded free_index_update_def, simplified, THEN iffD2]) @@ -1958,7 +1958,7 @@ lemma set_free_index_valid_mdb: done note blah[simp del] = untyped_range.simps usable_untyped_range.simps - show "untyped_inc (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + show "untyped_inc (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using inc cstate apply (unfold untyped_inc_def) apply (intro allI impI) @@ -1994,13 +1994,13 @@ lemma set_free_index_valid_mdb: apply clarsimp+ done assume "ut_revocable (is_original_cap s) (caps_of_state s)" - thus "ut_revocable (is_original_cap s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "ut_revocable (is_original_cap s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using cstate by (fastforce simp:ut_revocable_def) assume mdb:"mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" and desc_inc:"descendants_inc (cdt s) (caps_of_state s)" and cte:"caps_of_state s cref = Some (cap.UntypedCap dev r bits f)" - 
show "descendants_inc (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + show "descendants_inc (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using mdb cte apply (clarsimp simp:swp_def cte_wp_at_caps_of_state) apply (erule descendants_inc_minor[OF desc_inc]) @@ -2095,10 +2095,10 @@ lemma cap_insert_mdb [wp]: apply (rule conjI) apply (simp add: no_mloop_def mdb_insert_abs.parency) apply (intro allI impI conjI) - apply (rule_tac m1 = "caps_of_state s(dest\ cap)" + apply (rule_tac m1 = "(caps_of_state s)(dest\ cap)" and src1 = src in iffD2[OF untyped_mdb_update_free_index,rotated,rotated]) apply (simp add:fun_upd_twist)+ - apply (drule_tac cs' = "caps_of_state s(src \ max_free_index_update capa)" in descendants_inc_minor) + apply (drule_tac cs' = "(caps_of_state s)(src \ max_free_index_update capa)" in descendants_inc_minor) apply (clarsimp simp:cte_wp_at_caps_of_state swp_def) apply clarsimp apply (subst upd_commute) @@ -2149,11 +2149,11 @@ lemma cap_insert_mdb [wp]: apply clarsimp apply (erule (1) valid_arch_mdb_updates, clarsimp) apply (intro impI conjI allI) - apply (rule_tac m1 = "caps_of_state s(dest\ cap)" and src1 = src + apply (rule_tac m1 = "(caps_of_state s)(dest\ cap)" and src1 = src in iffD2[OF untyped_mdb_update_free_index, rotated, rotated]) apply (frule mdb_insert_abs_sib.untyped_mdb_sib) apply (simp add: fun_upd_twist)+ - apply (drule_tac cs' = "caps_of_state s(src \ max_free_index_update capa)" + apply (drule_tac cs' = "(caps_of_state s)(src \ max_free_index_update capa)" in descendants_inc_minor) apply (clarsimp simp: cte_wp_at_caps_of_state swp_def) apply clarsimp @@ -2165,7 +2165,7 @@ lemma cap_insert_mdb [wp]: apply (simp add: no_mloop_def) apply (simp add: mdb_insert_abs_sib.parent_n_eq) apply (simp add: mdb_insert_abs.dest_no_parent_trancl) - apply (rule_tac m = "caps_of_state s(dest\ cap)" and src = src + apply (rule_tac m = "(caps_of_state s)(dest\ cap)" and src = src in untyped_inc_update_free_index) apply (simp add: fun_upd_twist)+ apply (frule(3) mdb_insert_abs_sib.untyped_inc) diff --git a/proof/invariant-abstract/DetSchedDomainTime_AI.thy b/proof/invariant-abstract/DetSchedDomainTime_AI.thy index 34f046d77c..eb544f414b 100644 --- a/proof/invariant-abstract/DetSchedDomainTime_AI.thy +++ b/proof/invariant-abstract/DetSchedDomainTime_AI.thy @@ -253,7 +253,7 @@ crunch domain_list[wp]: maybe_donate_sc "\s :: det_state. P (domain_list (wp: crunch_wps) crunch domain_list_inv[wp]: send_signal "\s::det_state. P (domain_list s)" - (wp: hoare_drop_imps mapM_x_wp_inv select_wp maybeM_inv simp: crunch_simps unless_def) + (wp: hoare_drop_imps mapM_x_wp_inv maybeM_inv simp: crunch_simps unless_def) crunch domain_list_inv[wp]: lookup_reply,lookup_cap "\s::det_state. 
P (domain_list s)" diff --git a/proof/invariant-abstract/DetSchedInvs_AI.thy b/proof/invariant-abstract/DetSchedInvs_AI.thy index 3285ee7c2e..1f1ec7fd65 100644 --- a/proof/invariant-abstract/DetSchedInvs_AI.thy +++ b/proof/invariant-abstract/DetSchedInvs_AI.thy @@ -3292,17 +3292,17 @@ lemmas tcb_ready_times_of_kh_update_indep'[simp] lemma tcb_ready_time_ep_update: "\ ep_at ref s; a_type new = AEndpoint\ \ - tcb_ready_times_of_kh (kheap s(ref \ new)) = tcb_ready_times_of s" + tcb_ready_times_of_kh ((kheap s)(ref \ new)) = tcb_ready_times_of s" by (clarsimp simp: obj_at_def is_ep) lemma tcb_ready_time_reply_update: "\ reply_at ref s; a_type new = AReply\ \ - tcb_ready_times_of_kh (kheap s(ref \ new)) = tcb_ready_times_of s" + tcb_ready_times_of_kh ((kheap s)(ref \ new)) = tcb_ready_times_of s" by (clarsimp simp: obj_at_def is_reply) lemma tcb_ready_time_ntfn_update: "\ ntfn_at ref s; a_type new = ANTFN\ \ - tcb_ready_times_of_kh (kheap s(ref \ new)) = tcb_ready_times_of s" + tcb_ready_times_of_kh ((kheap s)(ref \ new)) = tcb_ready_times_of s" by (clarsimp simp: obj_at_def is_ntfn) lemmas tcb_ready_time_update_indeps[simp] @@ -3314,7 +3314,7 @@ lemmas tcb_ready_time_update_indeps'[simp] lemma tcb_ready_time_thread_state_update[simp]: assumes "kheap s tp = Some (TCB tcb)" assumes "tcb_sched_context tcb' = tcb_sched_context tcb" - shows "tcb_ready_times_of_kh (kheap s(tp \ TCB tcb')) = tcb_ready_times_of s" + shows "tcb_ready_times_of_kh ((kheap s)(tp \ TCB tcb')) = tcb_ready_times_of s" using assms by (simp add: fun_upd_def vs_all_heap_simps) lemmas tcb_ready_time_thread_state_update'[simp] @@ -3326,7 +3326,7 @@ lemma tcb_ready_time_kh_tcb_sc_update: scopt = Some scp'; kheap s scp' = Some (SchedContext sc' n'); r_time (refill_hd sc) = r_time (refill_hd sc') \ \ tcb_ready_times_of_kh - (kheap s(tp \ TCB (tcb\tcb_sched_context := scopt\))) + ((kheap s)(tp \ TCB (tcb\tcb_sched_context := scopt\))) = tcb_ready_times_of s" by (auto intro!: map_eqI simp: fun_upd_def vs_all_heap_simps tcb_ready_times_defs @@ -3335,12 +3335,12 @@ lemma tcb_ready_time_kh_tcb_sc_update: lemma tcb_at_simple_type_update[iff]: "\obj_at is_simple_type epptr s; is_simple_type ko\ \ - tcbs_of_kh (kheap s(epptr \ ko)) = tcbs_of s" + tcbs_of_kh ((kheap s)(epptr \ ko)) = tcbs_of s" by (rule map_eqI, auto simp add: vs_heap_simps obj_at_def) lemma sc_at_simple_type_update[iff]: "\obj_at is_simple_type epptr s; is_simple_type ko\ \ - scs_of_kh (kheap s(epptr \ ko)) = scs_of s" + scs_of_kh ((kheap s)(epptr \ ko)) = scs_of s" by (rule map_eqI, auto simp add: vs_heap_simps obj_at_def) (* lifting lemmas *) @@ -3507,7 +3507,7 @@ lemma switch_in_cur_domain_lift_pre_conj: apply wp_pre apply (rule hoare_lift_Pf_pre_conj[where f=scheduler_action, OF _ b]) apply (rule hoare_lift_Pf_pre_conj[where f=cur_domain, OF _ c]) - by (wpsimp simp: switch_in_cur_domain_def in_cur_domain_def wp: hoare_vcg_all_lift static_imp_wp a)+ + by (wpsimp simp: switch_in_cur_domain_def in_cur_domain_def wp: hoare_vcg_all_lift hoare_weak_lift_imp a)+ lemmas switch_in_cur_domain_lift = switch_in_cur_domain_lift_pre_conj[where R = \, simplified] diff --git a/proof/invariant-abstract/DetSchedSchedule_AI.thy b/proof/invariant-abstract/DetSchedSchedule_AI.thy index 191f706a1c..a8df5fc0b0 100644 --- a/proof/invariant-abstract/DetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/DetSchedSchedule_AI.thy @@ -1005,7 +1005,7 @@ lemma set_thread_state_only_wp: lemma set_thread_state_only_tcb_st_heap: "\\s. 
pred_map \ (tcbs_of s) t \ - P (tcb_sts_of s(t \ st)) + P ((tcb_sts_of s)(t \ st)) (tcb_scps_of s) (tcb_faults_of s) (scs_of s) (cur_thread s) (scheduler_action s) (release_queue s)\ set_thread_state_only t st @@ -1015,7 +1015,7 @@ lemma set_thread_state_only_tcb_st_heap: lemma set_thread_state_scheduler_action_tcb_st_heap: "\\s. pred_map \ (tcbs_of s) t \ - P (tcb_sts_of s(t \ st)) + P ((tcb_sts_of s)(t \ st)) (if t = cur_thread s \ scheduler_action s = resume_cur_thread \ (runnable st \ active_sc_tcb_at t s \ in_release_q t s) @@ -1037,7 +1037,7 @@ lemma set_thread_state_valid_sched_pred_strong': \ (runnable st \ active_sc_tcb_at t s \ in_release_q t s) then choose_new_thread else scheduler_action s) (etcbs_of s) - (tcb_sts_of s(t \ st)) + ((tcb_sts_of s)(t \ st)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s) (sc_replies_of s)\ set_thread_state t st \\rv. valid_sched_pred_strong P\" @@ -1458,7 +1458,7 @@ lemma set_tcb_sched_context_valid_sched_pred': \ P (consumed_time s) (cur_sc s) (ep_send_qs_of s) (ep_recv_qs_of s) (sc_tcbs_of s) (last_machine_time_of s) (time_state_of s) (cur_time s) (cur_domain s) (cur_thread s) (idle_thread s) (ready_queues s) (release_queue s) (scheduler_action s) (etcbs_of s) (tcb_sts_of s) - (tcb_scps_of s(ref \ scpo)) (tcb_faults_of s) (sc_refill_cfgs_of s) (sc_replies_of s)\ + ((tcb_scps_of s)(ref \ scpo)) (tcb_faults_of s) (sc_refill_cfgs_of s) (sc_replies_of s)\ set_tcb_obj_ref tcb_sched_context_update ref scpo \\rv. valid_sched_pred_strong P\" by (wpsimp wp: set_tcb_obj_ref_wp simp: fun_upd_def obj_at_kh_kheap_simps vs_all_heap_simps) @@ -1763,7 +1763,7 @@ lemma sc_heap_proj_known_sc: by (rule arg_cong[where f=P] arg_cong[where f=Q] arg_cong[where f=R], simp add: vs_all_heap_simps assms)+ lemma update_sched_context_sc_heap: - "\\s. \sc. scs_of s scp = Some sc \ P (scs_of s(scp \ f sc))\ + "\\s. \sc. scs_of s scp = Some sc \ P ((scs_of s)(scp \ f sc))\ update_sched_context scp f \\rv s. P (scs_of s)\" by (wpsimp wp: update_sched_context_wp @@ -3501,7 +3501,7 @@ lemma reply_unlink_tcb_valid_sched_pred[valid_sched_wp]: P (consumed_time s) (cur_sc s) (ep_send_qs_of s) (ep_recv_qs_of s) (sc_tcbs_of s) (last_machine_time_of s) (time_state_of s) (cur_time s) (cur_domain s) (cur_thread s) (idle_thread s) (ready_queues s) (release_queue s) (if t = cur_thread s \ scheduler_action s = resume_cur_thread then choose_new_thread else scheduler_action s) - (etcbs_of s) (tcb_sts_of s(t \ Inactive)) (tcb_scps_of s) (tcb_faults_of s) + (etcbs_of s) ((tcb_sts_of s)(t \ Inactive)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s) (sc_replies_of s)\ reply_unlink_tcb t r \\_. valid_sched_pred_strong P\" @@ -4693,7 +4693,7 @@ crunches cancel_all_ipc, cancel_all_signals lemma sc_at_tcb_update[iff]: "tcb_at epptr s \ - scs_of_kh (kheap s(epptr \ TCB ko)) = scs_of s" + scs_of_kh ((kheap s)(epptr \ TCB ko)) = scs_of s" by (rule map_eqI, auto simp add: vs_heap_simps obj_at_def is_tcb) lemma thread_set_valid_sched_misc[wp]: @@ -4944,7 +4944,7 @@ lemma set_thread_state_possible_switch_to_valid_sched_strong: and ct_in_cur_domain and valid_blocked_except t and valid_idle_etcb - and (\s. released_ipc_queues_2 (cur_time s) (tcb_sts_of s(t \ st)) (tcb_scps_of s) (tcb_faults_of s) + and (\s. 
released_ipc_queues_2 (cur_time s) ((tcb_sts_of s)(t \ st)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s)) and active_reply_scs and active_sc_valid_refills @@ -5367,7 +5367,7 @@ lemma released_ipc_queues_except_strengthen: lemma reply_remove_sched_context_donate_released_ipc_queues: "\released_ipc_queues\ sched_context_donate scp t - \\rv s. released_ipc_queues_2 (cur_time s) (tcb_sts_of s(t \ Inactive)) + \\rv s. released_ipc_queues_2 (cur_time s) ((tcb_sts_of s)(t \ Inactive)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s)\" unfolding sched_context_donate_def apply (wpsimp wp: set_tcb_sched_context_valid_sched_pred get_sc_obj_ref_wp) @@ -5647,7 +5647,7 @@ lemma valid_sched_not_schedulable_sc_not_queued: split: option.splits) lemma reply_unlink_tcb_released_ipc_queues: - "\\s. released_ipc_queues_2 (cur_time s) (tcb_sts_of s(t \ Inactive)) + "\\s. released_ipc_queues_2 (cur_time s) ((tcb_sts_of s)(t \ Inactive)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s)\ reply_unlink_tcb t rptr \\_. released_ipc_queues\" @@ -5711,7 +5711,7 @@ locale set_thread_state_Inactive_valid_sched_pred_equiv = P (consumed_time s) (cur_sc s) (sc_tcbs_of s) (cur_time s) (cur_domain s) (cur_thread s) (idle_thread s) (ready_queues s) (release_queue s) (if tptr = cur_thread s \ scheduler_action s = resume_cur_thread then choose_new_thread else scheduler_action s) - (etcbs_of s) (tcb_sts_of s(tptr \ Inactive)) (tcb_scps_of s) + (etcbs_of s) ((tcb_sts_of s)(tptr \ Inactive)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s)\ f tptr \\rv s. P (consumed_time s) (cur_sc s) (sc_tcbs_of s) @@ -5794,7 +5794,7 @@ lemma valid_sched: \ ct_in_cur_domain s \ valid_blocked_except tptr s \ valid_idle_etcb s - \ released_ipc_queues_2 (cur_time s) (tcb_sts_of s(tptr \ Inactive)) (tcb_scps_of s) + \ released_ipc_queues_2 (cur_time s) ((tcb_sts_of s)(tptr \ Inactive)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s) \ active_reply_scs s \ active_sc_valid_refills s @@ -5812,7 +5812,7 @@ lemma valid_sched_except_blocked: \ valid_sched_action s \ ct_in_cur_domain s \ valid_idle_etcb s - \ released_ipc_queues_2 (cur_time s) (tcb_sts_of s(tptr \ Inactive)) (tcb_scps_of s) + \ released_ipc_queues_2 (cur_time s) ((tcb_sts_of s)(tptr \ Inactive)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s) \ active_reply_scs s \ active_sc_valid_refills s @@ -5861,7 +5861,7 @@ global_interpretation cancel_signal: set_thread_state_Inactive_valid_sched_pred_ by (wpsimp wp: set_thread_state_Inactive.valid_sched_pred get_simple_ko_wp simp: fun_upd_def)+ crunch st_tcb_at_not_runnable[wp]: reply_remove_tcb "st_tcb_at (\st. 
\runnable st) t" - (wp: crunch_wps select_wp sts_st_tcb_at_cases thread_set_no_change_tcb_state maybeM_inv + (wp: crunch_wps sts_st_tcb_at_cases thread_set_no_change_tcb_state maybeM_inv simp: crunch_simps unless_def wp_del: reply_remove_st_tcb_at) lemma reply_remove_tcb_not_runnable[wp]: @@ -5987,7 +5987,7 @@ lemma cancel_ipc_valid_sched[wp]: \ ct_in_cur_domain s \ valid_blocked s \ valid_idle_etcb s - \ released_ipc_queues_2 (cur_time s) (tcb_sts_of s(tptr \ Inactive)) (tcb_scps_of s) + \ released_ipc_queues_2 (cur_time s) ((tcb_sts_of s)(tptr \ Inactive)) (tcb_scps_of s) (tcb_faults_of s) (sc_refill_cfgs_of s) \ active_reply_scs s \ active_sc_valid_refills s" in hoare_seq_ext) @@ -7720,7 +7720,7 @@ method invoke_tcb_install_tcb_cap_helper uses wp = (clarsimp cong: conj_cong)?, (rule hoare_vcg_E_elim, wp)?, ((wpsimp wp: hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R hoare_vcg_all_lift - install_tcb_cap_invs static_imp_wp static_imp_conj_wp wp + install_tcb_cap_invs hoare_weak_lift_imp hoare_weak_lift_imp_conj wp | strengthen tcb_cap_always_valid_strg | wp install_tcb_cap_cte_wp_at_ep)+)[1]) @@ -8123,7 +8123,7 @@ lemma valid_refills_release_queue_update[iff]: by simp lemma valid_refills_kheap_tcb_update[iff]: - "tcb_at t s \ valid_refills p (s\kheap := kheap s(t \ TCB tcb)\) = valid_refills p s" + "tcb_at t s \ valid_refills p (s\kheap := (kheap s)(t \ TCB tcb)\) = valid_refills p s" by (clarsimp simp: vs_all_heap_simps obj_at_kh_kheap_simps is_tcb) lemma valid_refills_exst [iff]: @@ -10621,7 +10621,7 @@ lemma refill_budget_check_valid_refills[wp]: done lemma valid_refills_sc_update: - "valid_refills p (s\kheap := kheap s(p \ SchedContext sc n)\) = sc_valid_refills sc" + "valid_refills p (s\kheap := (kheap s)(p \ SchedContext sc n)\) = sc_valid_refills sc" by (clarsimp simp: obj_at_def vs_all_heap_simps) lemma update_sc_consumed_valid_refills[wp]: @@ -10633,8 +10633,8 @@ lemma update_sc_consumed_valid_refills[wp]: wp: set_object_wp get_object_wp ) lemma valid_refills_sc_consumed_update[iff]: - "valid_refills p (s\kheap := kheap s(p' \ SchedContext (sc\sc_consumed:=x\) n)\) - = valid_refills p (s\kheap := kheap s(p' \ SchedContext sc n)\)" + "valid_refills p (s\kheap := (kheap s)(p' \ SchedContext (sc\sc_consumed:=x\) n)\) + = valid_refills p (s\kheap := (kheap s)(p' \ SchedContext sc n)\)" by (clarsimp simp: obj_at_def vs_all_heap_simps window_def) lemma valid_refills_domain_time_update[simp]: @@ -11425,7 +11425,7 @@ crunches head_insufficient_loop, handle_overrun_loop lemma sorted_release_q_sc_not_in_sc_update: "\sc_not_in_release_q scp s; \t\ set (release_queue s). tcb_at t s; kheap s scp = Some (SchedContext sc' n)\ \ - sorted_release_q (s\ kheap := (kheap s(scp \ SchedContext sc n)) \) = sorted_release_q s" + sorted_release_q (s\ kheap := ((kheap s)(scp \ SchedContext sc n)) \) = sorted_release_q s" apply (clarsimp simp: sorted_release_q_def not_in_release_q_def obj_at_def is_tcb) apply (rule sorted_wrt_img_ord_eq_lift; simp?) 
apply (rename_tac tp; drule_tac x=tp in bspec, simp) @@ -11436,7 +11436,7 @@ lemma sorted_release_q_sc_not_in_sc_update: lemma valid_release_q_sc_not_in_sc_update: "\valid_release_q (s::('a::state_ext state)); sc_not_in_release_q scp s; kheap s scp = Some (SchedContext sc' n)\ \ - valid_release_q (s\ kheap := (kheap s(scp \ SchedContext sc n)) \) " + valid_release_q (s\ kheap := ((kheap s)(scp \ SchedContext sc n)) \) " apply (clarsimp simp: valid_release_q_def sorted_release_q_sc_not_in_sc_update) apply (rule conjI) apply (fastforce simp: not_in_release_q_def vs_all_heap_simps) @@ -12628,7 +12628,7 @@ lemma schedule_valid_sched: crunches cancel_ipc for not_cur_thread[wp]: "not_cur_thread thread" - (wp: hoare_drop_imps select_wp mapM_x_wp simp: unless_def if_fun_split) + (wp: hoare_drop_imps mapM_x_wp simp: unless_def if_fun_split) lemma cancel_ipc_sc_tcb_sc_at_eq[wp]: "cancel_ipc thread \sc_tcb_sc_at ((=) tcb_opt) x\" @@ -13521,7 +13521,7 @@ lemma refill_unblock_check_released_if_bound[wp]: by (wpsimp wp: hoare_vcg_disj_lift) lemma set_simple_ko_pred_tcb_at_state: - "\ \s. P (pred_tcb_at proj (f s) t s) \ (\new. f s = f (s\kheap := kheap s(ep \ new)\))\ + "\ \s. P (pred_tcb_at proj (f s) t s) \ (\new. f s = f (s\kheap := (kheap s)(ep \ new)\))\ set_simple_ko g ep v \ \_ s. P (pred_tcb_at proj (f s) t s) \" unfolding set_simple_ko_def @@ -15092,7 +15092,7 @@ lemma valid_ep_remove1_RecvEP: done lemma valid_objs_ep_update: - "\ep_at epptr s; valid_ep ep s; valid_objs s\ \ valid_objs (s\kheap := kheap s(epptr \ Endpoint ep)\)" + "\ep_at epptr s; valid_ep ep s; valid_objs s\ \ valid_objs (s\kheap := (kheap s)(epptr \ Endpoint ep)\)" apply (clarsimp simp: valid_objs_def dom_def elim!: obj_atE) apply (intro conjI impI) @@ -20538,7 +20538,7 @@ crunches activate_thread, schedule_choose_new_thread, awaken lemma schedule_valid_list[wp]: "\valid_list\ Schedule_A.schedule \\_. valid_list\" apply (simp add: Schedule_A.schedule_def) - apply (wp add: tcb_sched_action_valid_list alternative_wp select_wp gts_wp hoare_drop_imps + apply (wp add: tcb_sched_action_valid_list gts_wp hoare_drop_imps is_schedulable_wp hoare_vcg_all_lift | wpc | simp)+ done @@ -21776,7 +21776,7 @@ lemma preemption_point_cur_sc_offset_ready[wp]: apply (rule hoare_seq_ext_skipE, wpsimp) apply (rule valid_validE) apply (rule OR_choiceE_weak_wp) - apply (rule alternative_valid; (solves wpsimp)?) + apply (rule alternative_wp[where P=P and P'=P for P, simplified]; (solves wpsimp)?) apply (rule validE_valid) apply (rule hoare_seq_ext_skipE, wpsimp)+ apply wpsimp @@ -24755,7 +24755,7 @@ lemma reply_push_cur_sc_in_release_q_imp_zero_consumed[wp]: lemma sorted_release_q_not_in_release_q_update: "\tcb_at t s; not_in_release_q t s; valid_release_q s\ \ - sorted_release_q (s\ kheap := (kheap s(t \ TCB tcb)) \) = sorted_release_q s" + sorted_release_q (s\ kheap := ((kheap s)(t \ TCB tcb)) \) = sorted_release_q s" apply (unfold not_in_release_q_def sorted_release_q_2_def) apply (prop_tac "release_queue (s\kheap := (\x. 
if x = t then Some (TCB tcb) else kheap s x)\) = release_queue s") apply simp @@ -24770,7 +24770,7 @@ lemma sorted_release_q_not_in_release_q_update: lemma valid_release_q_not_in_release_q_update: "\valid_release_q s; not_in_release_q t s; tcb_at t s\ \ - valid_release_q (s\ kheap := (kheap s(t \ TCB tcb)) \) " + valid_release_q (s\ kheap := ((kheap s)(t \ TCB tcb)) \) " apply (frule (2) sorted_release_q_not_in_release_q_update[where tcb=tcb]) apply (clarsimp simp add: valid_release_q_def) by (fastforce simp: not_in_release_q_def vs_all_heap_simps) @@ -26038,7 +26038,7 @@ lemma sched_context_donate_heap_refs_inv[wp]: apply (rule valid_sched_wp) apply (rule valid_sched_wp) apply (rule_tac Q="\_ s. heap_refs_inv (heap_upd (\a. Some tptr) scp (sc_tcbs_of s)) - (tcb_scps_of s(tptr \ Some scp))" in hoare_post_imp) + ((tcb_scps_of s)(tptr \ Some scp))" in hoare_post_imp) apply (clarsimp simp: fun_upd_def) apply (rule valid_sched_wp) apply (clarsimp simp: sc_at_kh_simps tcb_at_kh_simps pred_map_eq_normalise) diff --git a/proof/invariant-abstract/Deterministic_AI.thy b/proof/invariant-abstract/Deterministic_AI.thy index 5b0d0833e5..90fc76a4a2 100644 --- a/proof/invariant-abstract/Deterministic_AI.thy +++ b/proof/invariant-abstract/Deterministic_AI.thy @@ -1474,7 +1474,7 @@ end crunch exst[wp]: set_cap "(\s. P (exst s))" (wp: crunch_wps simp: crunch_simps) lemma set_cap_caps_of_state3: - "\\s. P (caps_of_state s (p \ cap)) (cdt s) (exst s) (is_original_cap s)\ + "\\s. P ((caps_of_state s) (p \ cap)) (cdt s) (exst s) (is_original_cap s)\ set_cap cap p \\rv s. P (caps_of_state s) (cdt s) (exst s) (is_original_cap s)\" apply (rule_tac Q="\rv s. \m mr t. P (caps_of_state s) m t mr @@ -3119,7 +3119,7 @@ lemma empty_slot_valid_list[wp]: apply (simp add: empty_slot_def) apply (simp add: set_cdt_def update_cdt_list_def set_cdt_list_def empty_slot_ext_def bind_assoc cong: if_cong) - apply (wp get_cap_wp static_imp_wp | wpc | wp (once) hoare_vcg_all_lift)+ + apply (wp get_cap_wp hoare_weak_lift_imp | wpc | wp (once) hoare_vcg_all_lift)+ apply (clarsimp simp del: fun_upd_apply) apply (frule mdb_empty_abs_simple.intro) apply(case_tac "cdt s sl") @@ -3975,7 +3975,7 @@ lemma preemption_point_inv': \ \P\ preemption_point \\_. 
P\" apply (clarsimp simp: preemption_point_def) apply (wpsimp simp: reset_work_units_def update_work_units_def - wp: OR_choiceE_weak_wp alternative_valid update_time_stamp_wp hoare_drop_imps + wp: OR_choiceE_weak_wp update_time_stamp_wp hoare_drop_imps hoare_vcg_all_lift) done diff --git a/proof/invariant-abstract/EmptyFail_AI.thy b/proof/invariant-abstract/EmptyFail_AI.thy index 640e34875d..d1a97a500c 100644 --- a/proof/invariant-abstract/EmptyFail_AI.thy +++ b/proof/invariant-abstract/EmptyFail_AI.thy @@ -58,7 +58,7 @@ lemma without_preemption_empty_fail[wp]: by simp crunch_ignore (empty_fail) - (add: NonDetMonad.bind bindE lift liftE liftM "when" whenE unless unlessE return fail + (add: Nondet_Monad.bind bindE lift liftE liftM "when" whenE unless unlessE return fail assert_opt mapM mapM_x sequence_x catch handleE do_extended_op returnOk throwError cap_insert_ext empty_slot_ext create_cap_ext cap_swap_ext cap_move_ext OR_choice OR_choiceE getRegister lookup_error_on_failure diff --git a/proof/invariant-abstract/Finalise_AI.thy b/proof/invariant-abstract/Finalise_AI.thy index 52b9934093..51ac34509a 100644 --- a/proof/invariant-abstract/Finalise_AI.thy +++ b/proof/invariant-abstract/Finalise_AI.thy @@ -1191,7 +1191,7 @@ crunches sched_context_unbind_all_tcbs, sched_context_unbind_yield_from, sched_context_unbind_reply, sched_context_unbind_ntfn, sched_context_zero_refill_max for irq_node[wp]: "\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp maybeM_inv simp: crunch_simps) + (wp: crunch_wps maybeM_inv simp: crunch_simps) lemmas cancel_all_ipc_cte_irq_node[wp] = hoare_use_eq_irq_node [OF cancel_all_ipc_irq_node cancel_all_ipc_cte_wp_at] @@ -1846,7 +1846,7 @@ crunches sched_context_update_consumed, set_message_info, store_word_offs lemma state_refs_of_tcb_arch_update[simp]: "kheap s thread = Some (TCB tcb) \ state_refs_of - (s\kheap := kheap s(thread \ + (s\kheap := (kheap s)(thread \ TCB (tcb_arch_update f tcb))\) = state_refs_of s" by (clarsimp simp: state_refs_of_def get_refs_def2 refs_of_def obj_at_def is_tcb split: option.split intro!: ext) diff --git a/proof/invariant-abstract/Include_AI.thy b/proof/invariant-abstract/Include_AI.thy index db548ebebb..adff087625 100644 --- a/proof/invariant-abstract/Include_AI.thy +++ b/proof/invariant-abstract/Include_AI.thy @@ -9,7 +9,7 @@ imports Lib.Lib ArchCrunchSetup_AI Monads.Eisbach_WP - Monads.Strengthen_Setup + Monads.Nondet_Strengthen_Setup ASpec.Syscall_A Lib.LemmaBucket Lib.ListLibLemmas @@ -25,7 +25,7 @@ unbundle l4v_word_context (* Clagged from Bits_R *) -crunch_ignore (add: NonDetMonad.bind return "when" get gets fail assert put modify +crunch_ignore (add: Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE liftE whenE unlessE throw_opt assertE liftM liftME sequence_x zipWithM_x mapM_x sequence mapM sequenceE_x sequenceE mapME mapME_x diff --git a/proof/invariant-abstract/Invariants_AI.thy b/proof/invariant-abstract/Invariants_AI.thy index 868ffac079..c106e62948 100644 --- a/proof/invariant-abstract/Invariants_AI.thy +++ b/proof/invariant-abstract/Invariants_AI.thy @@ -3619,7 +3619,7 @@ lemma valid_bound_tcb_typ_at: "\p. \\s. typ_at ATCB p s\ f \\_ s. typ_at ATCB p s\ \ \\s. valid_bound_tcb tcb s\ f \\_ s. 
valid_bound_tcb tcb s\" apply (clarsimp simp: valid_bound_obj_def split: option.splits) - apply (wpsimp wp: hoare_vcg_all_lift tcb_at_typ_at static_imp_wp) + apply (wpsimp wp: hoare_vcg_all_lift tcb_at_typ_at hoare_weak_lift_imp) done lemma valid_idle_lift: @@ -3741,8 +3741,7 @@ lemma real_cte_at_typ_valid: lemma dmo_aligned[wp]: "do_machine_op f \pspace_aligned\" apply (simp add: do_machine_op_def split_def) - apply (wp select_wp) - apply (clarsimp simp: pspace_aligned_def) + apply wpsimp done lemma cte_wp_at_eqD2: @@ -4671,6 +4670,10 @@ lemma cur_sc_tcb_sc_at_cur_sc: simp: cur_sc_tcb_def sc_at_pred_n_def obj_at_def is_sc_obj_def) done +lemma invs_pspace_in_kernel_window[elim!]: + "invs s \ pspace_in_kernel_window s" + by (simp add: invs_def valid_state_def) + lemmas invs_implies = invs_equal_kernel_mappings invs_arch_state @@ -4694,6 +4697,7 @@ lemmas invs_implies = invs_hyp_sym_refs invs_sym_refs tcb_at_invs + invs_pspace_in_kernel_window (* Pull invs out of a complex goal and prove it only once. Use as (strengthen invs_strengthen)+, best in combination with simp and potentially conj_cong. *) diff --git a/proof/invariant-abstract/IpcCancel_AI.thy b/proof/invariant-abstract/IpcCancel_AI.thy index 7d8f2dde33..e85a8336c1 100644 --- a/proof/invariant-abstract/IpcCancel_AI.thy +++ b/proof/invariant-abstract/IpcCancel_AI.thy @@ -171,7 +171,7 @@ lemma blocked_ipc_st_tcb_at_general: apply (rule hoare_seq_ext[OF _ get_simple_ko_inv]) apply (rule hoare_seq_ext[OF _ get_ep_queue_inv]) apply (wpsimp simp: blocked_cancel_ipc_def - wp: sts_st_tcb_at_cases_strong reply_unlink_tcb_st_tcb_at static_imp_wp + wp: sts_st_tcb_at_cases_strong reply_unlink_tcb_st_tcb_at hoare_weak_lift_imp hoare_vcg_all_lift get_ep_queue_inv get_simple_ko_wp set_simple_ko_wps hoare_drop_imp[where R="\rv. tcb_at t"]) apply (rule conjI; clarsimp simp: obj_at_def pred_tcb_at_def is_ep elim: bool_to_boolE) @@ -179,10 +179,10 @@ lemma blocked_ipc_st_tcb_at_general: lemma cancel_signal_st_tcb_at_general: "\\s. if t'=t then P (P' Inactive) else P (st_tcb_at P' t' s) \ - cancel_signal t ntfn + cancel_signal t ntfn \\rv s. P (st_tcb_at P' t' s)\" by (wpsimp simp: cancel_signal_def - wp: sts_st_tcb_at_cases_strong ntfn_cases_weak_wp static_imp_wp + wp: sts_st_tcb_at_cases_strong ntfn_cases_weak_wp hoare_weak_lift_imp set_simple_ko_pred_tcb_at hoare_drop_imp[where R="\rv. tcb_at t"]) lemma sched_context_maybe_unbind_ntfn_st_tcb_at[wp]: @@ -428,7 +428,7 @@ lemma tcb_at_no_sc_reply: lemma endpoint_state_refs_of_subset: "kheap s epptr = Some (Endpoint (RecvEP queue)) \ - (y, tp) \ state_refs_of (s\kheap := kheap s(epptr \ Endpoint ( + (y, tp) \ state_refs_of (s\kheap := (kheap s)(epptr \ Endpoint ( case remove1 t queue of [] \ IdleEP | a # list \ update_ep_queue (RecvEP queue) (remove1 t queue)))\) x \ (y, tp) \ state_refs_of s x \ False" @@ -654,9 +654,8 @@ lemma reply_unlink_sc_sc_tcb_sc_at [wp]: lemma reply_unlink_tcb_reply_at [wp]: "\reply_at rp'\ reply_unlink_tcb t rp \\_. reply_at rp'\" - by (wpsimp simp: reply_unlink_tcb_def update_sk_obj_ref_def get_thread_state_def - thread_get_def - wp: get_simple_ko_wp) + by (wpsimp simp: reply_unlink_tcb_def update_sk_obj_ref_def get_thread_state_def thread_get_def + wp: get_simple_ko_wp) lemma reply_unlink_sc_hyp_refs_of [wp]: "\\s. P (state_hyp_refs_of s)\ reply_unlink_sc scp rp \\_ s. P (state_hyp_refs_of s)\" @@ -1262,7 +1261,7 @@ lemma sc_replies_update_valid_replies_cons: lemma replies_with_sc_takeWhile_subset: "(sc_replies_sc_at (\rs. 
rs = replies) sc_ptr s) \ r' \ set replies \ replies_with_sc - (s\kheap := kheap s(sc_ptr \ + (s\kheap := (kheap s)(sc_ptr \ SchedContext (x\sc_replies := takeWhile (\r. r \ r') replies\) xa)\) \ replies_with_sc s" apply (clarsimp simp: image_def replies_with_sc_def sc_replies_sc_at_def obj_at_def replies_blocked_def st_tcb_at_def) @@ -1271,7 +1270,7 @@ lemma replies_with_sc_takeWhile_subset: lemma replies_blocked_takeWhile_eq: "(sc_replies_sc_at (\rs. rs = replies) sc_ptr s) \ r' \ set replies \ replies_blocked - (s\kheap := kheap s(sc_ptr \ + (s\kheap := (kheap s)(sc_ptr \ SchedContext (x\sc_replies := takeWhile (\r. r \ r') replies\) xa)\) = replies_blocked s" apply (clarsimp simp: image_def replies_blocked_def st_tcb_at_def obj_at_def) by (fastforce simp: sc_replies_sc_at_def obj_at_def) @@ -1279,7 +1278,7 @@ lemma replies_blocked_takeWhile_eq: lemma sc_replies_update_replies_blocked: "ko_at (SchedContext sc n) scp s \ {(r, t). st_tcb_at (\st. st = BlockedOnReply r) t - (s\kheap := kheap s(scp \ SchedContext (sc_replies_update f sc) n)\)} + (s\kheap := (kheap s)(scp \ SchedContext (sc_replies_update f sc) n)\)} = replies_blocked s" apply (clarsimp simp: replies_blocked_def pred_tcb_at_def obj_at_def) apply fastforce @@ -1288,7 +1287,7 @@ lemma sc_replies_update_replies_blocked: lemma replies_with_sc_subset: "\ko_at (SchedContext sc n) scp s; \list. set (f list) \ set list\ \ {(r, sc'). sc_replies_sc_at (\rs. r \ set rs) sc' - (s\kheap := kheap s(scp \ SchedContext (sc_replies_update f sc) n)\)} + (s\kheap := (kheap s)(scp \ SchedContext (sc_replies_update f sc) n)\)} \ replies_with_sc s" apply (clarsimp simp: sc_at_ppred_def obj_at_def replies_with_sc_def) apply blast @@ -1458,19 +1457,21 @@ lemma cancel_all_ipc_it[wp]: wp: mapM_x_wp' hoare_drop_imp) lemma cancel_signal_it[wp]: - "\\s. P (idle_thread s)\ cancel_signal tcb_ptr ntfnptr - \\_ s. P (idle_thread s)\" + "\\s. P (idle_thread s)\ + cancel_signal tcb_ptr ntfnptr + \\_ s. P (idle_thread s)\" by (wpsimp simp: cancel_signal_def set_thread_state_def set_simple_ko_def set_object_def get_object_def get_simple_ko_def) lemma cancel_all_signals_it[wp]: - "\\s. P (idle_thread s)\ cancel_all_signals tcb_ptr - \\_ s. P (idle_thread s)\" + "\\s. P (idle_thread s)\ + cancel_all_signals tcb_ptr + \\_ s. P (idle_thread s)\" by (wpsimp simp: cancel_all_signals_def set_thread_state_def get_simple_ko_def get_object_def wp: mapM_x_wp') crunch it[wp]: unbind_notification "\s. P (idle_thread s)" - (wp: crunch_wps select_wp maybeM_inv simp: unless_def crunch_simps) + (wp: crunch_wps maybeM_inv simp: unless_def crunch_simps) crunches unbind_notification, fast_finalise, sched_context_unbind_all_tcbs, diff --git a/proof/invariant-abstract/IpcDet_AI.thy b/proof/invariant-abstract/IpcDet_AI.thy index 06cf1efb22..b964ee9104 100644 --- a/proof/invariant-abstract/IpcDet_AI.thy +++ b/proof/invariant-abstract/IpcDet_AI.thy @@ -23,27 +23,27 @@ end lemma replies_with_sc_kh_update_sc: "sc_replies (f sc v) = sc_replies sc - \ replies_with_sc (s\kheap := kheap s(p \ SchedContext (f sc v) n)\) - = replies_with_sc (s\kheap := kheap s(p \ SchedContext sc n)\)" + \ replies_with_sc (s\kheap := (kheap s)(p \ SchedContext (f sc v) n)\) + = replies_with_sc (s\kheap := (kheap s)(p \ SchedContext sc n)\)" by (clarsimp simp: replies_with_sc_def sc_replies_sc_at_def obj_at_def, fastforce?) 
lemma replies_blocked_kh_update_sc: - "replies_blocked (s\kheap := kheap s(p \ SchedContext (f sc v) n)\) - = replies_blocked (s\kheap := kheap s(p \ SchedContext sc n)\)" + "replies_blocked (s\kheap := (kheap s)(p \ SchedContext (f sc v) n)\) + = replies_blocked (s\kheap := (kheap s)(p \ SchedContext sc n)\)" by (clarsimp simp: replies_blocked_def st_tcb_at_def obj_at_def, fastforce?) lemma replies_with_sc_kh_update_tcb: - "replies_with_sc (s\kheap := kheap s(p \ TCB (f tcb v))\) - = replies_with_sc (s\kheap := kheap s(p \ TCB tcb)\)" + "replies_with_sc (s\kheap := (kheap s)(p \ TCB (f tcb v))\) + = replies_with_sc (s\kheap := (kheap s)(p \ TCB tcb)\)" by (clarsimp simp: replies_with_sc_def sc_replies_sc_at_def obj_at_def, fastforce?) lemma replies_blocked_kh_update_tcb: "tcb_state (f tcb v) = tcb_state tcb - \ replies_blocked (s\kheap := kheap s(p \ TCB (f tcb v))\) - = replies_blocked (s\kheap := kheap s(p \ TCB tcb)\)" + \ replies_blocked (s\kheap := (kheap s)(p \ TCB (f tcb v))\) + = replies_blocked (s\kheap := (kheap s)(p \ TCB tcb)\)" by (clarsimp simp: replies_blocked_def st_tcb_at_def obj_at_def, fastforce?) @@ -68,7 +68,7 @@ lemmas replies_blocked_safe_kheap_updates[simp] = replies_blocked_kh_update_tcb[where f="\sc v. sc\tcb_arch := v\", simplified] lemma ko_at_kheap_upd_id[simp]: - "ko_at ko p s \ (s\kheap := kheap s(p \ ko)\ = s)" + "ko_at ko p s \ (s\kheap := (kheap s)(p \ ko)\ = s)" unfolding obj_at_def fun_upd_def by (rule abstract_state.equality, rule ext; simp) @@ -280,12 +280,12 @@ lemma get_blocking_object_wp: by (cases st; wpsimp simp: get_blocking_object_def ep_blocked_def) lemma reply_tcb_reply_at_kheap_update: - "reply_tcb_reply_at P r (s\kheap := kheap s(p \ v)\) = + "reply_tcb_reply_at P r (s\kheap := (kheap s)(p \ v)\) = (if p = r then \r. v = Reply r \ P (reply_tcb r) else reply_tcb_reply_at P r s)" by (simp add: reply_tcb_reply_at_def obj_at_update) lemma reply_tcb_reply_at_kheap_update': - "p \ q \ reply_tcb_reply_at P r (s\kheap := kheap s(p \ v, q \ w)\) = + "p \ q \ reply_tcb_reply_at P r (s\kheap := (kheap s)(p \ v, q \ w)\) = (if p = r then \r. v = Reply r \ P (reply_tcb r) else if q = r then \r. w = Reply r \ P (reply_tcb r) else reply_tcb_reply_at P r s)" @@ -2441,7 +2441,7 @@ lemma valid_bound_sc_typ_at: "\p. \\s. sc_at p s\ f \\_ s. sc_at p s\ \ \\s. valid_bound_sc sc s\ f \\_ s. 
valid_bound_sc sc s\" apply (clarsimp simp: valid_bound_obj_def split: option.splits) - apply (wpsimp wp: hoare_vcg_all_lift static_imp_wp) + apply (wpsimp wp: hoare_vcg_all_lift hoare_weak_lift_imp) defer apply assumption apply fastforce @@ -2622,7 +2622,7 @@ lemma rai_invs': apply (wpsimp simp: do_nbrecv_failed_transfer_def wp: valid_irq_node_typ) apply (wpsimp simp: if_cond_refill_unblock_check_def wp: hoare_vcg_all_lift hoare_drop_imp) apply (wpsimp simp: invs_def valid_state_def valid_pspace_def wp: valid_ioports_lift) - apply (wpsimp simp: valid_ntfn_def tcb_at_typ wp: static_imp_wp valid_irq_node_typ valid_ioports_lift) + apply (wpsimp simp: valid_ntfn_def tcb_at_typ wp: hoare_weak_lift_imp valid_irq_node_typ valid_ioports_lift) apply (fastforce simp: invs_def valid_state_def valid_pspace_def state_refs_of_def valid_obj_def valid_ntfn_def tcb_at_typ elim!: obj_at_valid_objsE delta_sym_refs diff --git a/proof/invariant-abstract/Ipc_AI.thy b/proof/invariant-abstract/Ipc_AI.thy index c4d8bd1e4d..54f1c65d59 100644 --- a/proof/invariant-abstract/Ipc_AI.thy +++ b/proof/invariant-abstract/Ipc_AI.thy @@ -475,7 +475,7 @@ lemma cap_insert_weak_cte_wp_at2: cap_insert cap src dest \\uu. cte_wp_at P p\" unfolding cap_insert_def - by (wp set_cap_cte_wp_at get_cap_wp static_imp_wp + by (wp set_cap_cte_wp_at get_cap_wp hoare_weak_lift_imp | simp add: cap_insert_def | unfold set_untyped_cap_as_full_def | auto simp: cte_wp_at_def dest!:imp)+ @@ -562,10 +562,10 @@ lemma transfer_caps_loop_presM: apply (clarsimp simp add: Let_def split_def whenE_def cong: if_cong list.case_cong split del: if_split) apply (rule hoare_pre) - apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift static_imp_wp + apply (wp eb hoare_vcg_const_imp_lift hoare_vcg_const_Ball_lift hoare_weak_lift_imp | assumption | simp split del: if_split)+ apply (rule cap_insert_assume_null) - apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at static_imp_wp)+ + apply (wp x hoare_vcg_const_Ball_lift cap_insert_cte_wp_at hoare_weak_lift_imp)+ apply (rule hoare_vcg_conj_liftE_R) apply (rule derive_cap_is_derived_foo) apply (rule_tac Q' ="\cap' s. 
(vo \ cap'\ cap.NullCap \ @@ -1410,9 +1410,7 @@ lemmas get_tcb_ko_atI = get_tcb_ko_at [THEN iffD1] crunch "distinct" [wp]: set_mrs pspace_distinct - (wp: select_wp hoare_vcg_split_case_option mapM_wp - hoare_drop_imps refl - simp: zipWithM_x_mapM) + (wp: mapM_wp simp: zipWithM_x_mapM) crunch "distinct" [wp]: copy_mrs pspace_distinct @@ -1766,7 +1764,7 @@ lemma set_mrs_valid_ioc[wp]: apply (simp add: set_mrs_def) apply (wp | wpc)+ apply (simp only: zipWithM_x_mapM_x split_def) - apply (wp mapM_x_wp' set_object_valid_ioc_caps static_imp_wp + apply (wp mapM_x_wp' set_object_valid_ioc_caps hoare_weak_lift_imp | simp)+ apply (clarsimp simp: obj_at_def get_tcb_def valid_ioc_def split: option.splits Structures_A.kernel_object.splits) @@ -2554,7 +2552,7 @@ lemma pred_tcb_clear: by (simp add: pred_tcb_at_def obj_at_def pspace_clear_def) lemma pred_tcb_upd_apply: - "pred_tcb_at proj P t (s\kheap := kheap s(r \ TCB v)\) = + "pred_tcb_at proj P t (s\kheap := (kheap s)(r \ TCB v)\) = (if t = r then P (proj (tcb_to_itcb v)) else pred_tcb_at proj P t s)" by (simp add: pred_tcb_at_def obj_at_def) @@ -2711,8 +2709,8 @@ lemma valid_bound_tcb_exst[iff]: by (auto simp: valid_bound_obj_def split:option.splits) crunch bound_tcb[wp]: set_message_info, set_mrs "valid_bound_tcb t" -(wp: valid_bound_tcb_typ_at set_object_typ_at mapM_wp ignore: set_object - simp: zipWithM_x_mapM) + (wp: valid_bound_tcb_typ_at set_object_typ_at mapM_wp ignore: set_object + simp: zipWithM_x_mapM) lemma pspace_clear_update1: "t \ t' \ @@ -2779,7 +2777,7 @@ crunch ex_nonz_cap_to[wp]: set_message_info "ex_nonz_cap_to p" lemmas is_derived_not_Null = derived_not_Null(1) crunch mdb[wp]: set_message_info valid_mdb - (wp: select_wp crunch_wps mapM_wp') + (wp: crunch_wps mapM_wp') lemma ep_queue_cap_to: "\ ko_at (Endpoint ep) p s; invs s; diff --git a/proof/invariant-abstract/KHeapPre_AI.thy b/proof/invariant-abstract/KHeapPre_AI.thy index 40acb7c55e..6294074638 100644 --- a/proof/invariant-abstract/KHeapPre_AI.thy +++ b/proof/invariant-abstract/KHeapPre_AI.thy @@ -151,7 +151,7 @@ lemma get_tcb_at: "tcb_at t s \ (\tcb. get_tcb t s = Som lemma typ_at_same_type: assumes "typ_at T p s" "a_type k = a_type ko" "kheap s p' = Some ko" - shows "typ_at T p (s\kheap := kheap s(p' \ k)\)" + shows "typ_at T p (s\kheap := (kheap s)(p' \ k)\)" using assms by (clarsimp simp: obj_at_def) @@ -163,12 +163,12 @@ lemma hoare_to_pure_kheap_upd: assumes typ_eq: "a_type k = a_type ko" assumes valid: "P (s :: ('z :: state_ext) state)" assumes at: "ko_at ko p s" - shows "P (s\kheap := kheap s(p \ k)\)" + shows "P (s\kheap := (kheap s)(p \ k)\)" apply (rule use_valid[where f=" do s' <- get; assert (s' = s); - (modify (\s. s\kheap := kheap s(p \ k)\)); + (modify (\s. s\kheap := (kheap s)(p \ k)\)); return undefined od", OF _ hoare valid]) apply (fastforce simp add: simpler_modify_def get_def bind_def @@ -180,7 +180,7 @@ lemma hoare_to_pure_kheap_upd: by (auto simp add: obj_at_def a_type_def split: kernel_object.splits if_splits) lemma set_object_wp: - "\\s. Q (s\ kheap := kheap s (p \ v)\) \ set_object p v \\_. Q\" + "\\s. Q (s\ kheap := (kheap s) (p \ v)\) \ set_object p v \\_. 
Q\" apply (simp add: set_object_def get_object_def) apply wp apply blast diff --git a/proof/invariant-abstract/KHeap_AI.thy b/proof/invariant-abstract/KHeap_AI.thy index a4099e0bc8..fbbddf2463 100644 --- a/proof/invariant-abstract/KHeap_AI.thy +++ b/proof/invariant-abstract/KHeap_AI.thy @@ -100,7 +100,7 @@ lemma pspace_aligned_obj_update: assumes obj: "obj_at P t s" assumes pa: "pspace_aligned s" assumes R: "\k. P k \ a_type k = a_type k'" - shows "pspace_aligned (s\kheap := kheap s(t \ k')\)" + shows "pspace_aligned (s\kheap := (kheap s)(t \ k')\)" using pa obj apply (simp add: pspace_aligned_def cong: conj_cong) apply (clarsimp simp: obj_at_def obj_bits_T dest!: R) @@ -110,7 +110,7 @@ lemma pspace_aligned_obj_update: lemma cte_at_same_type: "\cte_at t s; a_type k = a_type ko; kheap s p = Some ko\ - \ cte_at t (s\kheap := kheap s(p \ k)\)" + \ cte_at t (s\kheap := (kheap s)(p \ k)\)" apply (clarsimp simp: cte_at_cases del: disjCI) apply (elim exE disjE) apply (clarsimp simp: a_type_def well_formed_cnode_n_def length_set_helper @@ -122,13 +122,13 @@ lemma cte_at_same_type: lemma untyped_same_type: "\valid_untyped (cap.UntypedCap dev r n f) s; a_type k = a_type ko; kheap s p = Some ko\ - \ valid_untyped (cap.UntypedCap dev r n f) (s\kheap := kheap s(p \ k)\)" + \ valid_untyped (cap.UntypedCap dev r n f) (s\kheap := (kheap s)(p \ k)\)" unfolding valid_untyped_def by (clarsimp simp: obj_range_def obj_bits_T) lemma valid_cap_same_type: "\ s \ cap; a_type k = a_type ko; kheap s p = Some ko \ - \ s\kheap := kheap s(p \ k)\ \ cap" + \ s\kheap := (kheap s)(p \ k)\ \ cap" apply (simp add: valid_cap_def split: cap.split) apply (auto elim!: typ_at_same_type untyped_same_type simp: ntfn_at_typ ep_at_typ sc_obj_at_typ reply_at_typ tcb_at_typ cap_table_at_typ @@ -139,7 +139,7 @@ lemma valid_cap_same_type: lemma typ_at_foldl: "foldl (\) True (map (\r. typ_at T r s) xs) \ a_type k = a_type ko \ kheap s p = Some ko \ - foldl (\) True (map (\r. typ_at T r (s\kheap := kheap s(p \ k)\)) xs)" + foldl (\) True (map (\r. typ_at T r (s\kheap := (kheap s)(p \ k)\)) xs)" apply (induct xs) apply (auto simp: foldl_conj_Cons typ_at_same_type simp del: foldl_Cons) @@ -147,7 +147,7 @@ lemma typ_at_foldl: lemma valid_obj_same_type: "\ valid_obj p' obj s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_obj p' obj (s\kheap := kheap s(p \ k)\)" + \ valid_obj p' obj (s\kheap := (kheap s)(p \ k)\)" apply (cases obj; simp) apply (clarsimp simp add: valid_obj_def valid_cs_def) apply (drule (1) bspec) @@ -176,7 +176,7 @@ lemma valid_obj_same_type: lemma valid_objs_same_type: "\valid_objs s; obj_at (\ko'. a_type ko' = a_type ko) ptr s; valid_obj ptr ko s\ - \ valid_objs (s\kheap := kheap s(ptr \ ko)\)" + \ valid_objs (s\kheap := (kheap s)(ptr \ ko)\)" apply (clarsimp simp: valid_objs_def dom_def elim!: obj_atE) apply (intro conjI impI) apply (rule valid_obj_same_type) @@ -342,12 +342,12 @@ lemma set_object_cte_at: by (wpsimp wp: set_object_wp_strong simp: obj_at_def cte_at_same_type) lemma obj_at_update: - "obj_at P t' (s \kheap := kheap s (t \ v)\) = + "obj_at P t' (s \kheap := (kheap s)(t \ v)\) = (if t = t' then P v else obj_at P t' s)" by (simp add: obj_at_def) lemma obj_at_update': - "p \ q \ obj_at P t (s \kheap := kheap s (p \ v, q \ w)\) = + "p \ q \ obj_at P t (s \kheap := (kheap s)(p \ v, q \ w)\) = (if p = t then P v else if q = t then P w else obj_at P t s)" @@ -381,7 +381,7 @@ lemmas inj_simple_kos[simp, intro!] = inj_Endpoint inj_Notification inj_Reply lemma set_simple_ko_wp: - "\ \s. 
inj C \ (simple_obj_at C \ p s \ Q (s\kheap := kheap s(p \ C r)\)) \ + "\ \s. inj C \ (simple_obj_at C \ p s \ Q (s\kheap := (kheap s)(p \ C r)\)) \ set_simple_ko C p r \ \rv. Q \" apply (wpsimp simp: simple_obj_at_def set_simple_ko_def wp: set_object_wp get_object_wp) @@ -405,7 +405,7 @@ lemma get_sk_obj_ref_sp: by (wpsimp wp: get_sk_obj_ref_wp simp: obj_at_def sk_obj_at_pred_def) blast lemma update_sk_obj_ref_wp: - "\ \s. inj C \ (\obj. ko_at (C obj) p s \ Q (s\kheap := kheap s(p \ C (f (K v) obj))\)) \ + "\ \s. inj C \ (\obj. ko_at (C obj) p s \ Q (s\kheap := (kheap s)(p \ C (f (K v) obj))\)) \ update_sk_obj_ref C f p v \ \rv. Q \" by (wpsimp simp: update_sk_obj_ref_def wp: set_simple_ko_wp get_simple_ko_wp) @@ -679,7 +679,7 @@ lemma set_simple_ko_hyp_refs_of[wp]: lemma pspace_distinct_same_type: "\ kheap s t = Some ko; a_type ko = a_type ko'; pspace_distinct s\ - \ pspace_distinct (s\kheap := kheap s(t \ ko')\)" + \ pspace_distinct (s\kheap := (kheap s)(t \ ko')\)" apply (clarsimp simp add: pspace_distinct_def obj_bits_T) apply fastforce done @@ -814,7 +814,7 @@ lemma cte_wp_at_after_update: lemma cte_wp_at_after_update': "\ obj_at (same_caps val) p' s \ - \ cte_wp_at P p (s\kheap := kheap s(p' \ val)\) + \ cte_wp_at P p (s\kheap := (kheap s)(p' \ val)\) = cte_wp_at P p s" by (fastforce simp: obj_at_def cte_wp_at_cases split: if_split_asm dest: bspec [OF _ ranI]) @@ -825,7 +825,7 @@ lemma ex_cap_to_after_update: lemma ex_cap_to_after_update': "\ ex_nonz_cap_to p s; obj_at (same_caps val) p' s \ - \ ex_nonz_cap_to p (s\kheap := kheap s(p' \ val)\)" + \ ex_nonz_cap_to p (s\kheap := (kheap s)(p' \ val)\)" by (clarsimp simp: ex_nonz_cap_to_def cte_wp_at_after_update') lemma ex_cte_cap_to_after_update: @@ -1032,7 +1032,7 @@ lemma set_simple_ko_tcb[wp]: by (simp add: tcb_at_typ) wp lemma set_simple_ko_wp': - "\ \s. Q (s\kheap := kheap s(p \ C r)\) \ + "\ \s. Q (s\kheap := (kheap s)(p \ C r)\) \ set_simple_ko C p r \ \rv. Q \" by (wpsimp simp: simple_obj_at_def set_simple_ko_def wp: set_object_wp get_object_wp) @@ -2310,7 +2310,7 @@ lemma get_sc_obj_ref_wp: lemma update_sched_context_wp: "\ \s. \sc n. ko_at (SchedContext sc n) sc_ptr s - \ Q (s\kheap := kheap s(sc_ptr \ SchedContext (f sc) n)\) \ + \ Q (s\kheap := (kheap s)(sc_ptr \ SchedContext (f sc) n)\) \ update_sched_context sc_ptr f \ \rv. Q \" by (wpsimp simp: update_sched_context_def wp: set_object_wp get_object_wp) @@ -2324,7 +2324,7 @@ lemma update_sched_context_obj_at_trivial: lemma set_tcb_obj_ref_wp: "\\s. \tcb. ko_at (TCB tcb) t s - \ Q (s\kheap := kheap s(t \ TCB (f (K v) tcb))\)\ + \ Q (s\kheap := (kheap s)(t \ TCB (f (K v) tcb))\)\ set_tcb_obj_ref f t v \\rv. 
Q\" by (wpsimp simp: set_tcb_obj_ref_def wp: set_object_wp) @@ -2503,7 +2503,7 @@ lemma set_aobject_valid_idle[wp]: lemma state_refs_of_tcb_fault_update: "ko_at (TCB tcb) thread s \ state_refs_of - (s\kheap := kheap s(thread \ + (s\kheap := (kheap s)(thread \ TCB (tcb_fault_update Map.empty tcb))\) = state_refs_of s" by (clarsimp simp: state_refs_of_def get_refs_def2 refs_of_def obj_at_def is_tcb split: option.split intro!: ext) @@ -2511,21 +2511,21 @@ lemma state_refs_of_tcb_fault_update: lemma replies_with_sc_tcb_fault_update: "ko_at (TCB tcb) thread s \ replies_with_sc - (s\kheap := kheap s(thread \ + (s\kheap := (kheap s)(thread \ TCB (tcb_fault_update Map.empty tcb))\) = replies_with_sc s" by (auto simp: replies_with_sc_def get_refs_def2 refs_of_def obj_at_def sc_replies_sc_at_def) lemma replies_blocked_tcb_fault_update: "ko_at (TCB tcb) thread s \ replies_blocked - (s\kheap := kheap s(thread \ + (s\kheap := (kheap s)(thread \ TCB (tcb_fault_update Map.empty tcb))\) = replies_blocked s" by (auto simp: replies_blocked_def get_refs_def2 refs_of_def obj_at_def st_tcb_at_def) lemma valid_replies_tcb_fault_update: "ko_at (TCB tcb) thread s \ valid_replies - (s\kheap := kheap s(thread \ + (s\kheap := (kheap s)(thread \ TCB (tcb_fault_update Map.empty tcb))\) = valid_replies s" by (auto simp: get_refs_def2 refs_of_def obj_at_def sc_replies_sc_at_def replies_with_sc_tcb_fault_update diff --git a/proof/invariant-abstract/LevityCatch_AI.thy b/proof/invariant-abstract/LevityCatch_AI.thy index af51e4b1b0..f17cce99d3 100644 --- a/proof/invariant-abstract/LevityCatch_AI.thy +++ b/proof/invariant-abstract/LevityCatch_AI.thy @@ -70,12 +70,12 @@ lemma const_on_failure_wp: happen in goals that are stated by crunch. *) lemma select_ext_weak_wp[wp]: "\\s. \x\S. Q x s\ select_ext a S \Q\" - by (wpsimp simp: select_ext_def wp: select_wp) + by (wpsimp simp: select_ext_def) (* The "real" wp rule for select_ext, requires det_ext state: *) lemma select_ext_wp[wp]: "\\s. a s \ S \ Q (a s) s\ select_ext a S \Q\" unfolding select_ext_def unwrap_ext_det_ext_ext_def - by (wpsimp simp: select_switch_det_ext_ext_def wp: select_wp) + by (wpsimp simp: select_switch_det_ext_ext_def) end diff --git a/proof/invariant-abstract/README.md b/proof/invariant-abstract/README.md index 7ff8d820a0..ac02cb7f4d 100644 --- a/proof/invariant-abstract/README.md +++ b/proof/invariant-abstract/README.md @@ -9,7 +9,7 @@ Abstract Spec Invariant Proof This proof defines and proves the global invariants of seL4's [abstract specification](../../spec/abstract/). The invariants are -phrased and proved using a [monadic Hoare logic](../../lib/Monads/NonDetMonad.thy) +phrased and proved using a [monadic Hoare logic](../../lib/Monads/nondet/Nondet_Monad.thy) described in a TPHOLS '08 [paper][1]. [1]: https://trustworthy.systems/publications/nictaabstracts/Cock_KS_08.abstract "Secure Microkernels, State Monads and Scalable Refinement" diff --git a/proof/invariant-abstract/RISCV64/ArchAcc_AI.thy b/proof/invariant-abstract/RISCV64/ArchAcc_AI.thy index 20eb2f7e1a..8a25c52e67 100644 --- a/proof/invariant-abstract/RISCV64/ArchAcc_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchAcc_AI.thy @@ -1009,7 +1009,7 @@ lemma set_object_caps_of_state: done lemma set_pt_aobjs_of: - "\\s. aobjs_of s p \ None \ P (aobjs_of s(p \ PageTable pt)) \ set_pt p pt \\_ s. P (aobjs_of s)\" + "\\s. aobjs_of s p \ None \ P ((aobjs_of s)(p \ PageTable pt)) \ set_pt p pt \\_ s. 
P (aobjs_of s)\" unfolding set_pt_def supply fun_upd_apply[simp del] by (wpsimp wp: set_object_wp) @@ -1132,7 +1132,7 @@ lemma pt_walk_upd_idem: \ pt_walk top_level level' pt_ptr vptr (ptes_of s) = Some (level', pt_ptr') \ pt_ptr' \ obj_ref; is_aligned pt_ptr pt_bits \ - \ pt_walk top_level level pt_ptr vptr (ptes_of (s\kheap := kheap s(obj_ref \ ko)\)) + \ pt_walk top_level level pt_ptr vptr (ptes_of (s\kheap := (kheap s)(obj_ref \ ko)\)) = pt_walk top_level level pt_ptr vptr (ptes_of s)" by (rule pt_walk_eqI; simp split del: if_split) (clarsimp simp: opt_map_def split: option.splits) @@ -1198,7 +1198,7 @@ lemma vs_lookup_table_upd_idem: \ vs_lookup_table level' asid vref s = Some (level', p') \ p' \ obj_ref; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (rule vs_lookup_table_eqI; simp split del: if_split) (clarsimp simp: opt_map_def split: option.splits) @@ -1207,7 +1207,7 @@ lemma vs_lookup_table_Some_upd_idem: "\ vs_lookup_table level asid vref s = Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s; unique_table_refs s; valid_vs_lookup s; valid_caps (caps_of_state s) s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (subst vs_lookup_table_upd_idem; simp?) (fastforce dest: no_loop_vs_lookup_table) @@ -1216,7 +1216,7 @@ lemma ex_vs_lookup_upd_idem: "\ \\ (level, p) s; pspace_aligned s; valid_vspace_objs s; valid_asid_table s; unique_table_refs s; valid_vs_lookup s; valid_caps (caps_of_state s) s \ - \ \\ (level, p) (s\kheap := kheap s(p \ ko)\) = \\ (level, p) s" + \ \\ (level, p) (s\kheap := (kheap s)(p \ ko)\) = \\ (level, p) s" apply (rule iffI; clarsimp) apply (rule_tac x=asid in exI) apply (rule_tac x=vref in exI) @@ -1293,7 +1293,7 @@ lemma pt_lookup_target_pt_upd_eq: by (rule pt_lookup_target_pt_eqI; clarsimp) lemma kheap_pt_upd_simp[simp]: - "(kheap s(p \ ArchObj (PageTable pt)) |> aobj_of |> pt_of) + "((kheap s)(p \ ArchObj (PageTable pt)) |> aobj_of |> pt_of) = (kheap s |> aobj_of |> pt_of)(p \ pt)" unfolding aobj_of_def opt_map_def by (auto split: kernel_object.split) @@ -1453,7 +1453,7 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1461,7 +1461,7 @@ lemma in_user_frame_same_type_upd: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1499,7 +1499,7 @@ lemma valid_machine_state_heap_updI: assumes vm : "valid_machine_state s" assumes tyat : "typ_at type p s" shows - " a_type obj = type \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + " a_type obj = type \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: valid_machine_state_def) subgoal for p apply (rule valid_machine_stateE[OF vm,where p = p]) @@ 
-1642,7 +1642,7 @@ crunch interrupt_states[wp]: set_asid_pool "\s. P (interrupt_states s)" lemma vs_lookup_table_unreachable_upd_idem: "\ \level. vs_lookup_table level asid vref s \ Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" apply (subst vs_lookup_table_upd_idem; fastforce) done @@ -1650,14 +1650,14 @@ lemma vs_lookup_table_unreachable_upd_idem: lemma vs_lookup_table_unreachable_upd_idem': "\ \(\level. \\ (level, obj_ref) s); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_table level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_table level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_table level asid vref s" by (rule vs_lookup_table_unreachable_upd_idem; fastforce) lemma vs_lookup_target_unreachable_upd_idem: "\ \level. vs_lookup_table level asid vref s \ Some (level, obj_ref); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_target level asid vref s" supply fun_upd_apply[simp del] apply (clarsimp simp: vs_lookup_target_def vs_lookup_slot_def obind_assoc) @@ -1692,12 +1692,12 @@ lemma vs_lookup_target_unreachable_upd_idem: lemma vs_lookup_target_unreachable_upd_idem': "\ \(\level. \\ (level, obj_ref) s); vref \ user_region; pspace_aligned s; valid_vspace_objs s; valid_asid_table s \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(obj_ref \ ko)\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(obj_ref \ ko)\) = vs_lookup_target level asid vref s" by (rule vs_lookup_target_unreachable_upd_idem; fastforce) lemma vs_lookup_table_fun_upd_deep_idem: - "\ vs_lookup_table level asid vref (s\kheap := kheap s(p \ ko)\) = Some (level, p'); + "\ vs_lookup_table level asid vref (s\kheap := (kheap s)(p \ ko)\) = Some (level, p'); vs_lookup_table level' asid vref s = Some (level', p); level' \ level; vref \ user_region; unique_table_refs s; valid_vs_lookup s; valid_vspace_objs s; valid_asid_table s; pspace_aligned s; valid_caps (caps_of_state s) s \ @@ -1790,8 +1790,8 @@ lemma vs_lookup_target_pt_levelI: lemma vs_lookup_target_asid_pool_level_upd_helper: "\ graph_of ap \ graph_of ap'; kheap s p = Some (ArchObj (ASIDPool ap')); vref \ user_region; - vspace_for_pool pool_ptr asid (asid_pools_of s(p \ ap)) = Some pt_ptr; - pool_for_asid asid (s\kheap := kheap s(p \ ArchObj (ASIDPool ap))\) = Some pool_ptr\ + vspace_for_pool pool_ptr asid ((asid_pools_of s)(p \ ap)) = Some pt_ptr; + pool_for_asid asid (s\kheap := (kheap s)(p \ ArchObj (ASIDPool ap))\) = Some pool_ptr\ \ vs_lookup_target asid_pool_level asid vref s = Some (asid_pool_level, pt_ptr)" apply (clarsimp simp: pool_for_asid_vs_lookup vspace_for_pool_def in_omonad) apply (clarsimp split: if_splits) @@ -1802,7 +1802,7 @@ lemma vs_lookup_target_asid_pool_level_upd_helper: done lemma vs_lookup_target_None_upd_helper: - "\ vs_lookup_table level asid vref (s\kheap := kheap s(p \ ArchObj (ASIDPool ap))\) = + "\ vs_lookup_table level asid vref (s\kheap := (kheap s)(p \ ArchObj (ASIDPool ap))\) = Some (level, table_ptr); ((\pa. 
pte_of pa ((pts_of s)(p := None))) |> pte_ref) (pt_slot_offset level table_ptr vref) = Some target; @@ -1917,7 +1917,7 @@ lemma set_asid_pool_equal_mappings[wp]: lemma translate_address_asid_pool_upd: "pts_of s p = None \ translate_address pt_ptr vref - (\pa. pte_of pa (kheap s(p \ ArchObj (ASIDPool ap)) |> aobj_of |> pt_of)) + (\pa. pte_of pa ((kheap s)(p \ ArchObj (ASIDPool ap)) |> aobj_of |> pt_of)) = translate_address pt_ptr vref (ptes_of s)" by simp @@ -2920,7 +2920,7 @@ lemma cap_refs_respects_device_region_dmo: lemma machine_op_lift_device_state[wp]: "machine_op_lift f \\ms. P (device_state ms)\" - by (clarsimp simp: machine_op_lift_def NonDetMonadVCG.valid_def bind_def + by (clarsimp simp: machine_op_lift_def Nondet_VCG.valid_def bind_def machine_rest_lift_def gets_def simpler_modify_def get_def return_def select_def ignore_failure_def select_f_def split: if_splits) diff --git a/proof/invariant-abstract/RISCV64/ArchArch_AI.thy b/proof/invariant-abstract/RISCV64/ArchArch_AI.thy index 8488a68b28..db7dc71a29 100644 --- a/proof/invariant-abstract/RISCV64/ArchArch_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchArch_AI.thy @@ -442,7 +442,7 @@ context Arch begin global_naming RISCV64 lemma valid_arch_state_strg: "valid_arch_state s \ ap \ ran (asid_table s) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\riscv_asid_table := riscv_asid_table (arch_state s)(asid \ ap)\\)" + valid_arch_state (s\arch_state := arch_state s\riscv_asid_table := (asid_table s)(asid \ ap)\\)" apply (clarsimp simp: valid_arch_state_def) apply (clarsimp simp: valid_asid_table_def ran_def) apply (fastforce intro!: inj_on_fun_updI simp: asid_pools_at_eq) @@ -467,7 +467,7 @@ lemma valid_asid_pool_caps_upd_strg: (\ptr cap. caps_of_state s ptr = Some cap \ obj_refs cap = {ap} \ vs_cap_ref cap = Some (ucast asid << asid_low_bits, 0)) \ - valid_asid_pool_caps_2 (caps_of_state s) (asid_table s(asid \ ap))" + valid_asid_pool_caps_2 (caps_of_state s) ((asid_table s)(asid \ ap))" apply clarsimp apply (prop_tac "asid_update ap asid s", (unfold_locales; assumption)) apply (fastforce dest: asid_update.valid_asid_pool_caps') @@ -561,7 +561,7 @@ lemma cap_insert_simple_arch_caps_ap: and K (cap = ArchObjectCap (ASIDPoolCap ap asid) \ is_aligned asid asid_low_bits) \ cap_insert cap src dest \\rv s. valid_arch_caps (s\arch_state := arch_state s - \riscv_asid_table := riscv_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \riscv_asid_table := (asid_table s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg valid_asid_pool_caps_upd_strg) @@ -574,7 +574,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -653,7 +653,7 @@ lemma cap_insert_ap_invs: asid_table s (asid_high_bits_of asid) = None)\ cap_insert cap src dest \\rv s. 
invs (s\arch_state := arch_state s - \riscv_asid_table := (riscv_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \riscv_asid_table := ((riscv_asid_table \ arch_state) s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def) apply (strengthen valid_arch_state_strg valid_vspace_objs_asid_upd_strg equal_kernel_mappings_asid_upd_strg valid_asid_map_asid_upd_strg @@ -813,11 +813,11 @@ proof - \\rv s. invs (s\arch_state := arch_state s - \riscv_asid_table := (riscv_asid_table \ arch_state) s + \riscv_asid_table := ((riscv_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\) \ Q (s\arch_state := arch_state s - \riscv_asid_table := (riscv_asid_table \ arch_state) s + \riscv_asid_table := ((riscv_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\)\" apply (wp cap_insert_ap_invs) apply simp @@ -1012,7 +1012,7 @@ crunch_ignore (add: select_ext find_vspace_for_asid) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp simp: crunch_simps) declare lookup_slot_for_cnode_op_cap_to [wp] @@ -1285,7 +1285,7 @@ lemma decode_asid_control_invocation_wf[wp]: apply (simp add: lookup_target_slot_def) apply wp apply (clarsimp simp: cte_wp_at_def) - apply (wpsimp wp: ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp)+ + apply (wpsimp wp: ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp)+ apply (rule conjI, fastforce) apply (cases excaps, simp) apply (case_tac list, simp) diff --git a/proof/invariant-abstract/RISCV64/ArchCNodeInv_AI.thy b/proof/invariant-abstract/RISCV64/ArchCNodeInv_AI.thy index bcce25d1cc..5f9a0e3115 100644 --- a/proof/invariant-abstract/RISCV64/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchCNodeInv_AI.thy @@ -518,7 +518,7 @@ context Arch begin global_naming RISCV64 lemma post_cap_delete_pre_is_final_cap': "\s. 
\valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -588,7 +588,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) diff --git a/proof/invariant-abstract/RISCV64/ArchCSpacePre_AI.thy b/proof/invariant-abstract/RISCV64/ArchCSpacePre_AI.thy index 2404a2d73e..4b5bd203f0 100644 --- a/proof/invariant-abstract/RISCV64/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchCSpacePre_AI.thy @@ -152,7 +152,7 @@ lemma arch_derived_is_device: lemma valid_arch_mdb_simple: "\ valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ - valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) (caps_of_state s(dest \ cap))" + valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) ((caps_of_state s)(dest \ cap))" by (auto simp: valid_arch_mdb_def is_cap_revocable_def arch_is_cap_revocable_def is_simple_cap_def safe_parent_for_def is_cap_simps) @@ -177,28 +177,28 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by (clarsimp simp: valid_arch_mdb_def) lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by (auto simp: valid_arch_mdb_def) lemma valid_arch_mdb_weak_derived_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by (auto simp: valid_arch_mdb_def) lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -231,10 +231,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. 
valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by (clarsimp simp: valid_arch_mdb_def)+ end diff --git a/proof/invariant-abstract/RISCV64/ArchCSpace_AI.thy b/proof/invariant-abstract/RISCV64/ArchCSpace_AI.thy index 428c29a027..51210bec3c 100644 --- a/proof/invariant-abstract/RISCV64/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchCSpace_AI.thy @@ -201,7 +201,7 @@ lemma is_derived_is_cap: lemma vs_lookup_pages_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ vs_lookup_pages (s\kheap := kheap s(p \ ko')\) = vs_lookup_pages s" + \ vs_lookup_pages (s\kheap := (kheap s)(p \ ko')\) = vs_lookup_pages s" unfolding vs_lookup_target_def vs_lookup_slot_def apply (frule aobjs_of_non_aobj_upd[where ko'=ko'], simp+) apply (rule ext)+ @@ -216,7 +216,7 @@ lemma vs_lookup_pages_non_aobj_upd: lemma vs_lookup_target_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ vs_lookup_target level asid vref (s\kheap := kheap s(p \ ko')\) + \ vs_lookup_target level asid vref (s\kheap := (kheap s)(p \ ko')\) = vs_lookup_target level asid vref s" by (drule vs_lookup_pages_non_aobj_upd[where ko'=ko'], auto dest: fun_cong) diff --git a/proof/invariant-abstract/RISCV64/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/RISCV64/ArchDetSchedAux_AI.thy index 798b1d319e..32a99f7c32 100644 --- a/proof/invariant-abstract/RISCV64/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchDetSchedAux_AI.thy @@ -169,7 +169,7 @@ lemma perform_asid_control_invocation_bound_sc_obj_tcb_at[wp]: crunches perform_asid_control_invocation for idle_thread[wp]: "\s. P (idle_thread s)" and valid_blocked[wp]: "valid_blocked" - (wp: static_imp_wp) + (wp: hoare_weak_lift_imp) crunches perform_asid_control_invocation for rqueues[wp]: "\s. 
P (ready_queues s)" diff --git a/proof/invariant-abstract/RISCV64/ArchFinalise_AI.thy b/proof/invariant-abstract/RISCV64/ArchFinalise_AI.thy index 4435e430d6..fd28bb2c88 100644 --- a/proof/invariant-abstract/RISCV64/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchFinalise_AI.thy @@ -375,7 +375,7 @@ lemma arch_thread_set_cur_tcb[wp]: "\cur_tcb\ arch_thread_set p lemma cte_wp_at_update_some_tcb: "\kheap s v = Some (TCB tcb) ; tcb_cnode_map tcb = tcb_cnode_map (f tcb)\ - \ cte_wp_at P p (s\kheap := kheap s (v \ TCB (f tcb))\) = cte_wp_at P p s" + \ cte_wp_at P p (s\kheap := (kheap s)(v \ TCB (f tcb))\) = cte_wp_at P p s" apply (clarsimp simp: cte_wp_at_cases2 dest!: get_tcb_SomeD) done @@ -516,7 +516,7 @@ lemma arch_thread_set_valid_objs_context[wp]: lemma sym_refs_update_some_tcb: "\kheap s v = Some (TCB tcb) ; refs_of (TCB tcb) = refs_of (TCB (f tcb))\ - \ sym_refs (state_refs_of (s\kheap := kheap s (v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" + \ sym_refs (state_refs_of (s\kheap := (kheap s)(v \ TCB (f tcb))\)) = sym_refs (state_refs_of s)" apply (rule_tac f=sym_refs in arg_cong) apply (rule all_ext) apply (clarsimp simp: sym_refs_def state_refs_of_def) @@ -665,7 +665,7 @@ lemmas reachable_frame_cap_simps = reachable_frame_cap_def[unfolded is_frame_cap_def arch_cap_fun_lift_def, split_simps cap.split] lemma vs_lookup_slot_non_PageTablePTE: - "\ ptes_of s p \ None; ptes_of s' = ptes_of s(p \ pte); \ is_PageTablePTE pte; + "\ ptes_of s p \ None; ptes_of s' = (ptes_of s)(p \ pte); \ is_PageTablePTE pte; asid_pools_of s' = asid_pools_of s; asid_table s' = asid_table s; valid_asid_table s; pspace_aligned s\ \ vs_lookup_slot level asid vref s' = @@ -1269,7 +1269,7 @@ lemma (* replace_cap_invs_arch_update *)[Finalise_AI_asms]: lemma dmo_pred_tcb_at[wp]: "do_machine_op mop \\s. 
P (pred_tcb_at f Q t s)\" apply (simp add: do_machine_op_def split_def) - apply (wp select_wp) + apply wp apply (clarsimp simp: pred_tcb_at_def obj_at_def) done @@ -1348,7 +1348,7 @@ lemma set_asid_pool_obj_at_ptr: locale_abbrev "asid_table_update asid ap s \ - s\arch_state := arch_state s\riscv_asid_table := riscv_asid_table (arch_state s)(asid \ ap)\\" + s\arch_state := arch_state s\riscv_asid_table := (asid_table s)(asid \ ap)\\" lemma valid_table_caps_table [simp]: "valid_table_caps (s\arch_state := arch_state s\riscv_asid_table := table'\\) = valid_table_caps s" diff --git a/proof/invariant-abstract/RISCV64/ArchInvariants_AI.thy b/proof/invariant-abstract/RISCV64/ArchInvariants_AI.thy index f1dbabfbed..8715744f84 100644 --- a/proof/invariant-abstract/RISCV64/ArchInvariants_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchInvariants_AI.thy @@ -2623,7 +2623,7 @@ lemma vs_lookup_table_eq_lift: lemma aobjs_of_non_aobj_upd: "\ kheap s p = Some ko; \ is_ArchObj ko; \ is_ArchObj ko' \ - \ kheap s(p \ ko') |> aobj_of = aobjs_of s" + \ (kheap s)(p \ ko') |> aobj_of = aobjs_of s" by (rule ext) (auto simp: opt_map_def is_ArchObj_def aobj_of_def split: kernel_object.splits if_split_asm) diff --git a/proof/invariant-abstract/RISCV64/ArchIpc_AI.thy b/proof/invariant-abstract/RISCV64/ArchIpc_AI.thy index fd681edefb..e914bc21ab 100644 --- a/proof/invariant-abstract/RISCV64/ArchIpc_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchIpc_AI.thy @@ -338,7 +338,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -486,7 +486,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (wpsimp simp: do_ipc_transfer_def do_normal_transfer_def transfer_caps_def bind_assoc wp: hoare_vcg_all_lift hoare_drop_imps)+ apply (subst ball_conj_distrib) - apply (wpsimp wp: get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wpsimp wp: get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift simp: obj_at_def is_tcb_def)+ done @@ -509,7 +509,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" by (auto simp: valid_arch_mdb_def) end diff --git a/proof/invariant-abstract/RISCV64/ArchKHeap_AI.thy b/proof/invariant-abstract/RISCV64/ArchKHeap_AI.thy index e03c83d6da..b1062393eb 100644 --- a/proof/invariant-abstract/RISCV64/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchKHeap_AI.thy @@ -290,7 +290,7 @@ lemma translate_address_lift_weak: done lemma set_pt_pts_of: - "\\s. pts_of s p \ None \ P (pts_of s (p \ pt)) \ set_pt p pt \\_ s. P (pts_of s)\" + "\\s. pts_of s p \ None \ P ((pts_of s)(p \ pt)) \ set_pt p pt \\_ s. P (pts_of s)\" unfolding set_pt_def by (wpsimp wp: set_object_wp) (auto elim!: rsubst[where P=P] simp: opt_map_def split: option.splits) @@ -312,7 +312,7 @@ lemma pte_ptr_eq: by (fastforce simp: not_le bit_simps) lemma store_pte_ptes_of: - "\\s. ptes_of s p \ None \ P (ptes_of s (p \ pte)) \ store_pte p pte \\_ s. P (ptes_of s)\" + "\\s. ptes_of s p \ None \ P ((ptes_of s)(p \ pte)) \ store_pte p pte \\_ s. 
P (ptes_of s)\" unfolding store_pte_def pte_of_def apply (wpsimp wp: set_pt_pts_of simp: in_omonad) by (auto simp: obind_def opt_map_def split: option.splits dest!: pte_ptr_eq elim!: rsubst[where P=P]) @@ -373,7 +373,7 @@ lemma vs_lookup_slot_no_asid: If performing a shallower lookup than the one requested results in p, then any deeper lookup in the updated state will return a higher level result along the original path. *) lemma vs_lookup_non_PageTablePTE: - "\ ptes_of s p \ None; ptes_of s' = ptes_of s (p \ pte); + "\ ptes_of s p \ None; ptes_of s' = (ptes_of s)(p \ pte); \ is_PageTablePTE pte; asid_pools_of s' = asid_pools_of s; asid_table s' = asid_table s; @@ -416,7 +416,7 @@ lemma vs_lookup_non_PageTablePTE: apply (subst pt_walk.simps) apply (subst (2) pt_walk.simps) apply (simp add: less_imp_le cong: if_cong) - apply (subgoal_tac "(ptes_of s(p \ pte)) (pt_slot_offset (x + 1) b vref) + apply (subgoal_tac "((ptes_of s)(p \ pte)) (pt_slot_offset (x + 1) b vref) = ptes_of s (pt_slot_offset (x + 1) b vref)") apply (simp add: obind_def split: option.splits) apply clarsimp @@ -455,7 +455,7 @@ lemma store_pte_non_PageTablePTE_vs_lookup: lemma store_pte_not_ao[wp]: "\\s. \pt. aobjs_of s (p && ~~mask pt_bits) = Some (PageTable pt) \ - P (aobjs_of s (p && ~~mask pt_bits \ + P ((aobjs_of s)(p && ~~mask pt_bits \ PageTable (pt (ucast (p && mask pt_bits >> pte_bits) := pte))))\ store_pte p pte \\_ s. P (aobjs_of s)\" @@ -726,75 +726,79 @@ crunch device_state_inv: storeWord "\ms. P (device_state ms)" (* some hyp_ref invariants *) -lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" +lemma state_hyp_refs_of_ep_update: + "typ_at AEndpoint ep s \ + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done -lemma state_hyp_refs_of_ntfn_update: "\s ep val. typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" +lemma state_hyp_refs_of_ntfn_update: + "typ_at ANTFN ep s \ + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done -lemma state_hyp_refs_of_sc_update: "\s sc val n. typ_at (ASchedContext n) sc s \ - state_hyp_refs_of (s\kheap := kheap s(sc \ SchedContext val n)\) = state_hyp_refs_of s" +lemma state_hyp_refs_of_sc_update: + "typ_at (ASchedContext n) sc s \ + state_hyp_refs_of (s\kheap := (kheap s)(sc \ SchedContext val n)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp: RISCV64.state_hyp_refs_of_def obj_at_def RISCV64.hyp_refs_of_def split: kernel_object.splits) done -lemma state_hyp_refs_of_reply_update: "\s r val. 
typ_at AReply r s \ - state_hyp_refs_of (s\kheap := kheap s(r \ Reply val)\) = state_hyp_refs_of s" +lemma state_hyp_refs_of_reply_update: + "typ_at AReply r s \ + state_hyp_refs_of (s\kheap := (kheap s)(r \ Reply val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: RISCV64.state_hyp_refs_of_def obj_at_def RISCV64.hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_sched_context_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_sched_context := sc\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_sched_context := sc\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: RISCV64.state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_yield_to_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_yield_to := sc\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_yield_to := sc\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: RISCV64.state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_state_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_domain_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_domain := d\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_domain := d\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) done lemma state_hyp_refs_of_tcb_priority_update: - "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_priority := d\))\) - = state_hyp_refs_of s" + "kheap s t = Some (TCB tcb) \ + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_priority := d\))\) + = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) done @@ -808,7 +812,7 @@ lemma default_tcb_not_live[simp]: "\ live (TCB (default_tcb d))" lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) @@ -829,12 +833,12 @@ lemma valid_arch_mdb_lift: (* interface lemma *) lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ 
k)\)" by simp lemma valid_vspace_obj_same_type: "\valid_vspace_obj l ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj l ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj l ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) diff --git a/proof/invariant-abstract/RISCV64/ArchTcb_AI.thy b/proof/invariant-abstract/RISCV64/ArchTcb_AI.thy index 44222f083e..d8eff067d1 100644 --- a/proof/invariant-abstract/RISCV64/ArchTcb_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchTcb_AI.thy @@ -298,13 +298,13 @@ lemma install_tcb_frame_cap_invs: \ \non-exception case\ apply wpsimp apply (wpsimp wp: checked_insert_tcb_invs[where ref="tcb_cnode_index 2"]) - apply (wpsimp wp: hoare_vcg_all_lift static_imp_wp + apply (wpsimp wp: hoare_vcg_all_lift hoare_weak_lift_imp thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial[where Q="\x. x", OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid) apply((wpsimp wp: cap_delete_deletes hoare_vcg_const_imp_lift_R hoare_vcg_all_lift_R hoare_vcg_all_lift - static_imp_wp static_imp_conj_wp + hoare_weak_lift_imp hoare_weak_lift_imp_conj | strengthen use_no_cap_to_obj_asid_strg | wp cap_delete_ep)+)[1] by (clarsimp simp: is_cap_simps' valid_fault_handler_def is_cnode_or_valid_arch_def) diff --git a/proof/invariant-abstract/RISCV64/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/RISCV64/ArchVSpaceEntries_AI.thy index d4192b762e..1ec1ca0830 100644 --- a/proof/invariant-abstract/RISCV64/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchVSpaceEntries_AI.thy @@ -114,7 +114,7 @@ lemma set_ntfn_obj_ref_valid_vspace_objs'[wp]: by (wpsimp simp: update_sk_obj_ref_def) crunch valid_vspace_objs'[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_vspace_objs'" - (wp: crunch_wps select_wp preemption_point_inv hoare_vcg_all_lift + (wp: crunch_wps preemption_point_inv hoare_vcg_all_lift simp: crunch_simps unless_def ignore:set_object set_thread_state_act update_sk_obj_ref) lemma preemption_point_valid_vspace_objs'[wp]: @@ -241,7 +241,7 @@ lemma perform_asid_pool_invocation_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_vspace_objs'" (ignore: delete_objects set_object - wp: static_imp_wp select_wp crunch_wps + wp: hoare_weak_lift_imp crunch_wps simp: crunch_simps unless_def) lemma pte_range_interD: @@ -341,7 +341,7 @@ crunches awaken, sc_and_timer lemma schedule_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ schedule :: (unit,unit) s_monad \\_. valid_vspace_objs'\" - unfolding schedule_def by (wpsimp wp: alternative_wp select_wp hoare_drop_imps) + unfolding schedule_def by (wpsimp wp: hoare_drop_imps) (* FIXME RT: clean up the duplication here (also in ARM); factor out handle_event? 
*) lemma call_kernel_valid_vspace_objs'[wp]: diff --git a/proof/invariant-abstract/RISCV64/ArchVSpace_AI.thy b/proof/invariant-abstract/RISCV64/ArchVSpace_AI.thy index 0d9a9c8d53..51be95e4d6 100644 --- a/proof/invariant-abstract/RISCV64/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/RISCV64/ArchVSpace_AI.thy @@ -1441,7 +1441,7 @@ end locale asid_pool_map = Arch + fixes s ap pool asid ptp pt and s' :: "'a::state_ext state" - defines "s' \ s\kheap := kheap s(ap \ ArchObj (ASIDPool (pool(asid_low_bits_of asid \ ptp))))\" + defines "s' \ s\kheap := (kheap s)(ap \ ArchObj (ASIDPool (pool(asid_low_bits_of asid \ ptp))))\" assumes ap: "asid_pools_of s ap = Some pool" assumes new: "pool (asid_low_bits_of asid) = None" assumes pt: "pts_of s ptp = Some pt" diff --git a/proof/invariant-abstract/RISCV64/Machine_AI.thy b/proof/invariant-abstract/RISCV64/Machine_AI.thy index f389c824dd..ae8669cba3 100644 --- a/proof/invariant-abstract/RISCV64/Machine_AI.thy +++ b/proof/invariant-abstract/RISCV64/Machine_AI.thy @@ -17,7 +17,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. no_irq m" wpc_helper_no_irq @@ -56,7 +56,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -184,7 +184,7 @@ definition "irq_state_independent P \ \f s. P s \ lemma getActiveIRQ_inv [wp]: "\irq_state_independent P\ \ \P\ getActiveIRQ in_kernel \\rv. P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -336,7 +336,7 @@ lemma getActiveIRQ_le_maxIRQ': getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply clarsimp apply (rule ccontr) apply (simp add: linorder_not_le) @@ -345,7 +345,7 @@ lemma getActiveIRQ_le_maxIRQ': lemma getActiveIRQ_neq_non_kernel: "\\\ getActiveIRQ True \\rv s. rv \ Some ` non_kernel_IRQs \" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply auto done diff --git a/proof/invariant-abstract/Retype_AI.thy b/proof/invariant-abstract/Retype_AI.thy index 27ca6c5012..6fab5eaf66 100644 --- a/proof/invariant-abstract/Retype_AI.thy +++ b/proof/invariant-abstract/Retype_AI.thy @@ -1004,7 +1004,7 @@ lemma non_disjoing_subset: "\A \ B; A \ C \ {}\< lemma pspace_no_overlap_same_type: "\pspace_no_overlap S s; ko_at k p s; a_type ko = a_type k\ - \ pspace_no_overlap S (kheap_update (\_. (kheap s(p \ ko))) s)" + \ pspace_no_overlap S (kheap_update (\_. 
(kheap s)(p \ ko)) s)" unfolding pspace_no_overlap_def by (clarsimp simp: obj_at_def obj_bits_T) diff --git a/proof/invariant-abstract/SchedContextInv_AI.thy b/proof/invariant-abstract/SchedContextInv_AI.thy index 21fb8ac576..dca0a01029 100644 --- a/proof/invariant-abstract/SchedContextInv_AI.thy +++ b/proof/invariant-abstract/SchedContextInv_AI.thy @@ -148,7 +148,7 @@ lemma valid_idle_sc_update: sc_badge sc = sc_badge sc'; sc_yield_from sc = sc_yield_from sc'; sc_replies sc = sc_replies sc' \ - \ valid_idle (s\kheap := kheap s(p \ SchedContext sc' n)\)" + \ valid_idle (s\kheap := (kheap s)(p \ SchedContext sc' n)\)" by (fastforce simp: valid_idle_def pred_tcb_at_def obj_at_def) lemma sched_context_cancel_yield_to_valid_idle[wp]: diff --git a/proof/invariant-abstract/SchedContext_AI.thy b/proof/invariant-abstract/SchedContext_AI.thy index b016fa1880..ef2fc42553 100644 --- a/proof/invariant-abstract/SchedContext_AI.thy +++ b/proof/invariant-abstract/SchedContext_AI.thy @@ -195,8 +195,8 @@ lemma schedule_used_non_nil: lemma set_refills_wp: "\\s. \sc n. obj_at ((=) (SchedContext sc n)) sc_ptr s - \ P (s\kheap := kheap s(sc_ptr \ SchedContext (sc\sc_refills := refills\) n)\)\ - set_refills sc_ptr refills + \ P (s\kheap := (kheap s)(sc_ptr \ SchedContext (sc\sc_refills := refills\) n)\)\ + set_refills sc_ptr refills \\r. P\" unfolding set_refills_def by (wpsimp wp: update_sched_context_wp) @@ -405,7 +405,7 @@ definition replies_with_sc_upd_replies :: {p. if snd p = sc then fst p \ set rs else p \ rs_with_sc}" lemma replies_with_sc_replies_upd: - "replies_with_sc (s\kheap := kheap s(sc_ptr \ SchedContext sc n)\) + "replies_with_sc (s\kheap := (kheap s)(sc_ptr \ SchedContext sc n)\) = replies_with_sc_upd_replies (sc_replies sc) sc_ptr (replies_with_sc s)" by (auto simp: replies_with_sc_upd_replies_def replies_with_sc_def sc_replies_sc_at_def obj_at_def) @@ -622,7 +622,7 @@ lemma update_sched_context_valid_irq_node [wp]: lemma valid_sc_kheap_update': "sc_at p s \ a_type ko = ASchedContext n \ - valid_sched_context sc (s\kheap := kheap s(p \ ko)\) + valid_sched_context sc (s\kheap := (kheap s)(p \ ko)\) = valid_sched_context sc s" apply (clarsimp simp: valid_sched_context_def valid_bound_obj_def obj_at_def is_obj_defs split: if_split_asm option.splits kernel_object.splits) @@ -632,7 +632,7 @@ lemma valid_sc_kheap_update': lemma valid_sc_kheap_update[simp]: "sc_at p s \ - valid_sched_context sc (s\kheap := kheap s(p \ SchedContext sc' n)\) + valid_sched_context sc (s\kheap := (kheap s)(p \ SchedContext sc' n)\) = valid_sched_context sc s" apply (clarsimp simp: valid_sched_context_def valid_bound_obj_def obj_at_def is_obj_defs split: if_split_asm option.splits kernel_object.splits) @@ -1127,7 +1127,7 @@ lemma ssc_refs_of_None[wp]: lemma zombies_kheap_update: "\ zombies_final s; obj_at (same_caps ko) t s \ - \ zombies_final (s\kheap := kheap s(t \ ko)\)" + \ zombies_final (s\kheap := (kheap s)(t \ ko)\)" apply (simp add: zombies_final_def is_final_cap'_def2, elim allEI) apply (clarsimp simp: cte_wp_at_after_update fun_upd_def) done diff --git a/proof/invariant-abstract/Schedule_AI.thy b/proof/invariant-abstract/Schedule_AI.thy index e0954550ec..c158aa5aeb 100644 --- a/proof/invariant-abstract/Schedule_AI.thy +++ b/proof/invariant-abstract/Schedule_AI.thy @@ -155,8 +155,8 @@ locale Schedule_AI_U = Schedule_AI "TYPE(unit)" lemma (in Schedule_AI_U) schedule_invs[wp]: "\invs\ (Schedule_A.schedule :: (unit,unit) s_monad) \\rv. 
invs\" apply (simp add: Schedule_A.schedule_def allActiveTCBs_def) - apply (wp OR_choice_weak_wp alternative_wp dmo_invs thread_get_inv sc_and_timer_invs - do_machine_op_tcb select_ext_weak_wp select_wp when_def + apply (wp OR_choice_weak_wp dmo_invs thread_get_inv sc_and_timer_invs + do_machine_op_tcb select_ext_weak_wp when_def | clarsimp simp: getActiveTCB_def get_tcb_def)+ done @@ -171,9 +171,8 @@ lemma (in Schedule_AI_U) schedule_ct_activateable[wp]: done show ?thesis apply (simp add: Schedule_A.schedule_def allActiveTCBs_def) - apply (wp alternative_wp sc_and_timer_activatable - select_ext_weak_wp select_wp stt_activatable stit_activatable - | simp add: P Q)+ + apply (wp sc_and_timer_activatable select_ext_weak_wp stt_activatable stit_activatable + | simp add: P Q)+ apply (clarsimp simp: getActiveTCB_def ct_in_state_def) apply (rule conjI) apply clarsimp diff --git a/proof/invariant-abstract/Syscall_AI.thy b/proof/invariant-abstract/Syscall_AI.thy index aa7f62e97e..6800b165c0 100644 --- a/proof/invariant-abstract/Syscall_AI.thy +++ b/proof/invariant-abstract/Syscall_AI.thy @@ -418,7 +418,7 @@ lemma (in Systemcall_AI_Pre) handle_fault_reply_cte_wp_at: done have NC: "\p' s tcb P nc. get_tcb p' s = Some tcb - \ cte_wp_at P p (s\kheap := kheap s(p' \ TCB (tcb\tcb_arch := arch_tcb_context_set nc (tcb_arch tcb)\))\) + \ cte_wp_at P p (s\kheap := (kheap s)(p' \ TCB (tcb\tcb_arch := arch_tcb_context_set nc (tcb_arch tcb)\))\) = cte_wp_at P p s" apply (drule_tac nc=nc in SC) apply (drule_tac P=P and p=p in cte_wp_at_after_update) @@ -984,7 +984,7 @@ lemma lookup_extra_caps_eq [wp]: "\\\ lookup_extra_caps thread xb info \\rv s. \x\set rv. cte_wp_at ((=) (fst x)) (snd x) s\,-" by (wpsimp simp: lookup_extra_caps_def wp: mapME_set) -(*FIXME: move to NonDetMonadVCG.valid_validE_R *) +(*FIXME: move to Nondet_VCG.valid_validE_R *) lemma valid_validE_R_gen: "\\rv s. Q' (Inr rv) s \ Q rv s; \P\ f \Q'\\ \ \P\ f \Q\, -" by (fastforce simp: validE_R_def validE_def valid_def split_def) @@ -1024,11 +1024,8 @@ lemma lcs_ex_cap_to2[wp]: apply (wp lsft_ex_cte_cap_to | simp)+ done -lemma hoare_vcg_const_imp_lift_E[wp]: - "\P\ f -, \Q\ \ \\s. F \ P s\ f -, \\rv s. F \ Q rv s\" - apply (cases F) apply auto - apply wp - done +(* FIXME AARCH64: this should really not be wp *) +declare hoare_vcg_const_imp_lift_E[wp] context Syscall_AI begin diff --git a/proof/invariant-abstract/TcbAcc_AI.thy b/proof/invariant-abstract/TcbAcc_AI.thy index c2570d6d4b..3c96913beb 100644 --- a/proof/invariant-abstract/TcbAcc_AI.thy +++ b/proof/invariant-abstract/TcbAcc_AI.thy @@ -116,7 +116,7 @@ lemma (in TcbAcc_AI_arch_tcb_context_set_eq) thread_get_as_user: gets_def put_def bind_def get_def return_def select_f_def gets_the_def assert_opt_def get_tcb_def split: option.split_asm kernel_object.split_asm) - apply (rename_tac v s; subgoal_tac "kheap s(t \ TCB v) = kheap s", simp) + apply (rename_tac v s; subgoal_tac "(kheap s)(t \ TCB v) = kheap s", simp) apply fastforce done @@ -328,7 +328,7 @@ lemma thread_set_obj_at_impossible: done lemma thread_set_wp: - "\ \s. \tcb. get_tcb t s = Some tcb \ Q (s\kheap := kheap s(t \ TCB (f tcb))\) \ + "\ \s. \tcb. get_tcb t s = Some tcb \ Q (s\kheap := (kheap s)(t \ TCB (f tcb))\) \ thread_set f t \ \_. Q \" by (wpsimp simp: thread_set_def wp: set_object_wp) @@ -1896,7 +1896,7 @@ definition replies_blocked_upd_tcb_st :: {(r,t'). 
if t' = t then st = BlockedOnReply r else (r,t') \ rs_blocked}" lemma replies_blocked_upd_tcb_st: - "replies_blocked (s\kheap := kheap s(t \ TCB (tcb\tcb_state := st\))\) + "replies_blocked (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := st\))\) = replies_blocked_upd_tcb_st st t (replies_blocked s)" by (fastforce simp: replies_blocked_upd_tcb_st_def replies_blocked_def st_tcb_at_def obj_at_def) diff --git a/proof/invariant-abstract/Tcb_AI.thy b/proof/invariant-abstract/Tcb_AI.thy index e30fde3f7e..1ec8de5b59 100644 --- a/proof/invariant-abstract/Tcb_AI.thy +++ b/proof/invariant-abstract/Tcb_AI.thy @@ -177,7 +177,7 @@ lemma (in Tcb_AI_1) copyreg_invs: invoke_tcb (tcb_invocation.CopyRegisters dest src susp resume frames ints arch) \\rv. invs\" apply (wpsimp simp: if_apply_def2 - wp: mapM_x_wp' suspend_invs suspend_nonz_cap_to_tcb static_imp_wp) + wp: mapM_x_wp' suspend_invs suspend_nonz_cap_to_tcb hoare_weak_lift_imp) apply (clarsimp simp: invs_def valid_state_def valid_pspace_def valid_idle_def suspend_def dest!: idle_no_ex_cap) done @@ -1159,9 +1159,6 @@ lemma (in Tcb_AI) decode_set_tls_base_wf: apply wpsimp done -declare alternativeE_wp[wp] -declare alternativeE_R_wp[wp] - (*FIXME Move up*) lemma OR_choice_E_weak_wp: "\P\ f \ g \Q\,- \ \P\ OR_choice b f g \Q\,-" apply (simp add: validE_R_def validE_def OR_choice_weak_wp) diff --git a/proof/invariant-abstract/Untyped_AI.thy b/proof/invariant-abstract/Untyped_AI.thy index 54aec406c0..de448079ed 100644 --- a/proof/invariant-abstract/Untyped_AI.thy +++ b/proof/invariant-abstract/Untyped_AI.thy @@ -36,7 +36,7 @@ primrec \ 'z::state_ext state \ bool" where "valid_untyped_inv_wcap (Retype slot reset ptr_base ptr ty us slots dev) - = (\co s. \sz idx. (cte_wp_at (\c. c = (cap.UntypedCap dev ptr_base sz idx) + = (\co s. \sz idx. (cte_wp_at (\c. c = (UntypedCap dev ptr_base sz idx) \ (co = None \ co = Some c)) slot s \ range_cover ptr sz (obj_bits_api ty us) (length slots) \ (idx \ unat (ptr - ptr_base) \ (reset \ ptr = ptr_base)) @@ -162,7 +162,7 @@ lemma compute_free_index_wp: lemma dui_inv[wp]: - "\P\ decode_untyped_invocation label args slot (cap.UntypedCap dev w n idx) cs \\rv. P\" + "\P\ decode_untyped_invocation label args slot (UntypedCap dev w n idx) cs \\rv. P\" apply (simp add: decode_untyped_invocation_def whenE_def split_def data_to_obj_type_def unlessE_def split del: if_split cong: if_cong) @@ -261,11 +261,11 @@ locale Untyped_AI_arch = assumes data_to_obj_type_sp: "\P x. \P\ data_to_obj_type x \\ts (s::'state_ext state). ts \ ArchObject ASIDPoolObj \ P s\, -" assumes dui_inv_wf[wp]: - "\w sz idx slot cs label args dev.\invs and cte_wp_at ((=) (cap.UntypedCap dev w sz idx)) slot + "\w sz idx slot cs label args dev.\invs and cte_wp_at ((=) (UntypedCap dev w sz idx)) slot and (\(s::'state_ext state). \cap \ set cs. is_cnode_cap cap \ (\r\cte_refs cap (interrupt_irq_node s). ex_cte_cap_wp_to is_cnode_cap r s)) and (\s. \x \ set cs. 
s \ x)\ - decode_untyped_invocation label args slot (cap.UntypedCap dev w sz idx) cs + decode_untyped_invocation label args slot (UntypedCap dev w sz idx) cs \valid_untyped_inv\,-" assumes retype_ret_valid_caps_captable: "\ptr sz dev us n s.\pspace_no_overlap_range_cover ptr sz (s::'state_ext state) \ 0 < us \ range_cover ptr sz (obj_bits_api CapTableObject us) n \ ptr \ 0 @@ -542,7 +542,7 @@ end lemma cte_wp_at_range_cover: "\bits < word_bits; rv\ 2^ sz; invs s; - cte_wp_at ((=) (cap.UntypedCap dev w sz idx)) p s; + cte_wp_at ((=) (UntypedCap dev w sz idx)) p s; 0 < n; n \ unat ((2::machine_word) ^ sz - of_nat rv >> bits)\ \ range_cover (alignUp (w + of_nat rv) bits) sz bits n" apply (clarsimp simp: cte_wp_at_caps_of_state) @@ -573,7 +573,7 @@ lemma diff_neg_mask[simp]: lemma cte_wp_at_caps_descendants_range_inI: - "\ invs s;cte_wp_at (\c. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; + "\ invs s;cte_wp_at (\c. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; idx \ unat (ptr && mask sz);sz < word_bits \ \ descendants_range_in {ptr .. (ptr && ~~mask sz) + 2^sz - 1} cref s" apply (frule invs_mdb) apply (frule(1) le_mask_le_2p) @@ -723,15 +723,15 @@ lemma of_nat_shiftR: lemma valid_untypedD: - "\ s \ cap.UntypedCap dev ptr bits idx; kheap s p = Some ko; pspace_aligned s\ \ - obj_range p ko \ cap_range (cap.UntypedCap dev ptr bits idx) \ {} \ - obj_range p ko \ cap_range (cap.UntypedCap dev ptr bits idx) - \ obj_range p ko \ usable_untyped_range (cap.UntypedCap dev ptr bits idx) = {}" + "\ s \ UntypedCap dev ptr bits idx; kheap s p = Some ko; pspace_aligned s\ \ + obj_range p ko \ cap_range (UntypedCap dev ptr bits idx) \ {} \ + obj_range p ko \ cap_range (UntypedCap dev ptr bits idx) + \ obj_range p ko \ usable_untyped_range (UntypedCap dev ptr bits idx) = {}" by (clarsimp simp: valid_untyped_def valid_cap_def cap_range_def obj_range_def) (meson order_trans) lemma pspace_no_overlap_detype': - "\ s \ cap.UntypedCap dev ptr bits idx; pspace_aligned s; valid_objs s \ + "\ s \ UntypedCap dev ptr bits idx; pspace_aligned s; valid_objs s \ \ pspace_no_overlap {ptr .. ptr + 2 ^ bits - 1} (detype {ptr .. ptr + 2 ^ bits - 1} s)" apply (clarsimp simp: obj_range_def add_diff_eq[symmetric] pspace_no_overlap_def) apply (frule(2) valid_untypedD) @@ -740,7 +740,7 @@ lemma pspace_no_overlap_detype': done lemma pspace_no_overlap_detype: - "\ s \ cap.UntypedCap dev ptr bits idx; pspace_aligned s; valid_objs s \ + "\ s \ UntypedCap dev ptr bits idx; pspace_aligned s; valid_objs s \ \ pspace_no_overlap_range_cover ptr bits (detype {ptr .. ptr + 2 ^ bits - 1} s)" apply (drule(2) pspace_no_overlap_detype'[rotated]) apply (drule valid_cap_aligned) @@ -1382,8 +1382,8 @@ lemma set_zip_helper: lemma ex_cte_cap_protects: - "\ ex_cte_cap_wp_to P p s; cte_wp_at ((=) (cap.UntypedCap dev ptr bits idx)) p' s; - descendants_range_in S p' s; untyped_children_in_mdb s; S\ untyped_range (cap.UntypedCap dev ptr bits idx); + "\ ex_cte_cap_wp_to P p s; cte_wp_at ((=) (UntypedCap dev ptr bits idx)) p' s; + descendants_range_in S p' s; untyped_children_in_mdb s; S\ untyped_range (UntypedCap dev ptr bits idx); valid_global_refs s \ \ fst p \ S" apply (drule ex_cte_cap_to_obj_ref_disj, erule disjE) @@ -1574,7 +1574,7 @@ crunch mdb[wp]: do_machine_op "\s. P (cdt s)" lemmas dmo_valid_cap[wp] = valid_cap_typ [OF do_machine_op_obj_at] lemma delete_objects_pspace_no_overlap[wp]: - "\\s. (\dev idx. s \ (cap.UntypedCap dev ptr bits idx)) + "\\s. (\dev idx. 
s \ (UntypedCap dev ptr bits idx)) \ pspace_aligned s \ valid_objs s \ (S = {ptr .. ptr + 2 ^ bits - 1})\ delete_objects ptr bits \\_. pspace_no_overlap S\" @@ -1659,7 +1659,7 @@ lemma caps_overlap_reserved_def2: lemma set_cap_valid_mdb_simple: "\\s. valid_objs s \ valid_mdb s \ descendants_range_in {ptr .. ptr+2^sz - 1} cref s \ cte_wp_at (\c. is_untyped_cap c \ cap_bits c = sz \ obj_ref_of c = ptr \ cap_is_device c = dev) cref s\ - set_cap (cap.UntypedCap dev ptr sz idx) cref + set_cap (UntypedCap dev ptr sz idx) cref \\rv s'. valid_mdb s'\" apply (simp add: valid_mdb_def) apply (rule hoare_pre) @@ -1674,8 +1674,8 @@ lemma set_cap_valid_mdb_simple: fix s f r bits dev assume obj:"valid_objs s" assume mdb:"untyped_mdb (cdt s) (caps_of_state s)" - assume cstate:"caps_of_state s cref = Some (cap.UntypedCap dev r bits f)" (is "?m cref = Some ?srccap") - show "untyped_mdb (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + assume cstate:"caps_of_state s cref = Some (UntypedCap dev r bits f)" (is "?m cref = Some ?srccap") + show "untyped_mdb (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply (rule untyped_mdb_update_free_index [where capa = ?srccap and m = "caps_of_state s" and src = cref, unfolded free_index_update_def,simplified,THEN iffD2]) @@ -1683,11 +1683,11 @@ lemma set_cap_valid_mdb_simple: done assume inc: "untyped_inc (cdt s) (caps_of_state s)" assume drange: "descendants_range_in {r..r + 2 ^ bits - 1} cref s" - have untyped_range_simp: "untyped_range (cap.UntypedCap dev r bits f) = untyped_range (cap.UntypedCap dev r bits idx)" + have untyped_range_simp: "untyped_range (UntypedCap dev r bits f) = untyped_range (UntypedCap dev r bits idx)" by simp note blah[simp del] = untyped_range.simps usable_untyped_range.simps - show "untyped_inc (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + show "untyped_inc (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using inc cstate drange apply (unfold untyped_inc_def) apply (intro allI impI) @@ -1754,18 +1754,18 @@ lemma set_cap_valid_mdb_simple: apply simp+ done assume "ut_revocable (is_original_cap s) (caps_of_state s)" - thus "ut_revocable (is_original_cap s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "ut_revocable (is_original_cap s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using cstate by (fastforce simp: ut_revocable_def) assume "valid_arch_mdb (is_original_cap s) (caps_of_state s)" - thus "valid_arch_mdb (is_original_cap s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + thus "valid_arch_mdb (is_original_cap s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" using cstate by (fastforce elim!: valid_arch_mdb_untypeds) assume misc: "mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" "descendants_inc (cdt s) (caps_of_state s)" - "caps_of_state s cref = Some (cap.UntypedCap dev r bits f)" - thus "descendants_inc (cdt s) (caps_of_state s(cref \ cap.UntypedCap dev r bits idx))" + "caps_of_state s cref = Some (UntypedCap dev r bits f)" + thus "descendants_inc (cdt s) ((caps_of_state s)(cref \ UntypedCap dev r bits idx))" apply - apply (erule descendants_inc_minor) apply (clarsimp simp: swp_def cte_wp_at_caps_of_state) @@ -1780,7 +1780,7 @@ lemma set_free_index_valid_pspace_simple: \ descendants_range_in {ptr .. ptr+2^sz - 1} cref s \ cte_wp_at (\c. is_untyped_cap c \ cap_bits c = sz \ obj_ref_of c = ptr) cref s \ idx \ 2^ sz\ - set_cap (cap.UntypedCap dev ptr sz idx) cref + set_cap (UntypedCap dev ptr sz idx) cref \\rv s'. 
valid_pspace s'\" apply (clarsimp simp: valid_pspace_def) apply (wp set_cap_valid_objs update_cap_iflive set_cap_zombies') @@ -1813,9 +1813,9 @@ lemma set_untyped_cap_refs_respects_device_simple: lemma set_untyped_cap_caps_overlap_reserved: "\\s. invs s \ S \ {ptr..ptr + 2 ^ sz - 1} \ - usable_untyped_range (cap.UntypedCap dev ptr sz idx') \ S = {} \ - descendants_range_in S cref s \ cte_wp_at ((=) (cap.UntypedCap dev ptr sz idx)) cref s\ - set_cap (cap.UntypedCap dev ptr sz idx') cref + usable_untyped_range (UntypedCap dev ptr sz idx') \ S = {} \ + descendants_range_in S cref s \ cte_wp_at ((=) (UntypedCap dev ptr sz idx)) cref s\ + set_cap (UntypedCap dev ptr sz idx') cref \\rv s. caps_overlap_reserved S s\" apply (unfold caps_overlap_reserved_def) apply wp @@ -1940,7 +1940,7 @@ lemma descendants_range_in_subseteq: lemma cte_wp_at_pspace_no_overlapI: "\invs s; - cte_wp_at (\c. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; + cte_wp_at (\c. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; idx \ unat (ptr && mask sz); sz < word_bits\ \ pspace_no_overlap_range_cover ptr sz s" apply (clarsimp simp: cte_wp_at_caps_of_state) @@ -1974,7 +1974,7 @@ lemma cte_wp_at_pspace_no_overlapI: lemma descendants_range_caps_no_overlapI: - "\invs s; cte_wp_at ((=) (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) cref s; + "\invs s; cte_wp_at ((=) (UntypedCap dev (ptr && ~~ mask sz) sz idx)) cref s; descendants_range_in {ptr .. (ptr && ~~ mask sz) +2^sz - 1} cref s\ \ caps_no_overlap ptr sz s" apply (frule invs_mdb) apply (clarsimp simp: valid_mdb_def cte_wp_at_caps_of_state) @@ -2013,7 +2013,7 @@ lemma shiftr_then_mask_commute: lemma cte_wp_at_caps_no_overlapI: - "\ invs s;cte_wp_at (\c. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; + "\ invs s;cte_wp_at (\c. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s; idx \ unat (ptr && mask sz);sz < word_bits \ \ caps_no_overlap ptr sz s" apply (frule invs_mdb) apply (frule(1) le_mask_le_2p) @@ -2134,7 +2134,7 @@ lemma subset_stuff[simp]: done lemma cte_wp_at: - "cte_wp_at ((=) (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) cref s" + "cte_wp_at ((=) (UntypedCap dev (ptr && ~~ mask sz) sz idx)) cref s" using vui by (clarsimp simp: cte_wp_at_caps_of_state) @@ -2167,7 +2167,7 @@ proof - by (rule descendants_range_in_subseteq[OF _ subset_stuff]) qed -lemma vc[simp] : "s \cap.UntypedCap dev (ptr && ~~ mask sz) sz idx" +lemma vc[simp] : "s \UntypedCap dev (ptr && ~~ mask sz) sz idx" using misc cte_wp_at apply (clarsimp simp: cte_wp_at_caps_of_state) apply (erule caps_of_state_valid) @@ -2248,7 +2248,7 @@ lemma slots_invD: "\x. 
x \ set slots \ done lemma usable_range_disjoint: - "usable_untyped_range (cap.UntypedCap dev (ptr && ~~ mask sz) sz + "usable_untyped_range (UntypedCap dev (ptr && ~~ mask sz) sz (unat ((ptr && mask sz) + of_nat (length slots) * 2 ^ obj_bits_api tp us))) \ {ptr..ptr + of_nat (length slots) * 2 ^ obj_bits_api tp us - 1} = {}" proof - @@ -2270,7 +2270,7 @@ lemma usable_range_disjoint: qed lemma detype_locale:"ptr && ~~ mask sz = ptr - \ detype_locale (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s" + \ detype_locale (UntypedCap dev (ptr && ~~ mask sz) sz idx) cref s" using cte_wp_at descendants_range misc by (simp add:detype_locale_def descendants_range_def2 blah invs_untyped_children) @@ -2345,9 +2345,9 @@ crunch tcb[wp]: create_cap "tcb_at t" lemma valid_untyped_cap_inc: - "\s \ cap.UntypedCap dev (ptr&&~~ mask sz) sz idx; + "\s \ UntypedCap dev (ptr&&~~ mask sz) sz idx; idx \ unat (ptr && mask sz); range_cover ptr sz sb n\ - \ s \ cap.UntypedCap dev (ptr && ~~ mask sz) sz + \ s \ UntypedCap dev (ptr && ~~ mask sz) sz (unat ((ptr && mask sz) + of_nat n * 2 ^ sb))" apply (clarsimp simp: valid_cap_def cap_aligned_def valid_untyped_def simp del: usable_untyped_range.simps) apply (intro conjI allI impI) @@ -2370,8 +2370,8 @@ lemma valid_untyped_cap_inc: (* FIXME: move maybe *) lemma tcb_cap_valid_untyped_cong: - "tcb_cap_valid (cap.UntypedCap dev1 a1 b1 c) = - tcb_cap_valid (cap.UntypedCap dev2 a2 b2 c2)" + "tcb_cap_valid (UntypedCap dev1 a1 b1 c) = + tcb_cap_valid (UntypedCap dev2 a2 b2 c2)" apply (rule ext)+ apply (clarsimp simp:tcb_cap_valid_def valid_ipc_buffer_cap_def split:option.splits) apply (simp add: tcb_cap_cases_def is_reply_cap_def @@ -2380,7 +2380,7 @@ lemma tcb_cap_valid_untyped_cong: done lemma tcb_cap_valid_untyped_to_thread: - "tcb_cap_valid (cap.UntypedCap dev a1 b1 c) = + "tcb_cap_valid (UntypedCap dev a1 b1 c) = tcb_cap_valid (cap.ThreadCap 0)" apply (rule ext)+ apply (clarsimp simp:tcb_cap_valid_def valid_ipc_buffer_cap_def split:option.splits) @@ -2410,9 +2410,9 @@ lemma ex_nonz_cap_to_overlap: lemma detype_valid_untyped: - "\invs s; detype S s \ cap.UntypedCap dev ptr sz idx1; + "\invs s; detype S s \ UntypedCap dev ptr sz idx1; {ptr .. ptr + 2 ^ sz - 1} \ S; idx2 \ 2 ^ sz\ - \ detype S s \ cap.UntypedCap dev ptr sz idx2" + \ detype S s \ UntypedCap dev ptr sz idx2" apply (clarsimp simp: detype_def valid_cap_def valid_untyped_def cap_aligned_def) apply (drule_tac x = p in spec) apply clarsimp @@ -2626,7 +2626,7 @@ lemmas unat_of_nat_word_bits = unat_of_nat_eq[where 'a = machine_word_len, unfolded word_bits_len_of, simplified] lemma caps_of_state_pspace_no_overlapD: - "\ caps_of_state s cref = Some (cap.UntypedCap dev ptr sz idx); invs s; + "\ caps_of_state s cref = Some (UntypedCap dev ptr sz idx); invs s; idx < 2 ^ sz \ \ pspace_no_overlap_range_cover (ptr + of_nat idx) sz s" apply (frule(1) caps_of_state_valid) @@ -2647,7 +2647,7 @@ lemma set_untyped_cap_invs_simple: \ pspace_no_overlap_range_cover ptr sz s \ invs s \ cte_wp_at (\c. is_untyped_cap c \ cap_bits c = sz \ cap_is_device c = dev\ obj_ref_of c = ptr) cref s \ idx \ 2^ sz\ - set_cap (cap.UntypedCap dev ptr sz idx) cref + set_cap (UntypedCap dev ptr sz idx) cref \\rv s. 
invs s\" apply (rule hoare_name_pre_state) apply (clarsimp simp:cte_wp_at_caps_of_state invs_def valid_state_def) @@ -3068,11 +3068,6 @@ lemma create_cap_ex_cap_to[wp]: apply (clarsimp elim!: cte_wp_at_weakenE) done -(* FIXME: move *) -lemma hoare_vcg_split_lift[wp]: - "\P\ f x y \Q\ \ \P\ case (x, y) of (a, b) \ f a b \Q\" - by simp - lemma create_cap_no_cap[wp]: "\\s. (\p'. \ cte_wp_at P p' s) \ \ P (default_cap tp oref sz dev)\ create_cap tp sz p dev (cref, oref) @@ -3315,8 +3310,8 @@ lemma retype_region_refs_distinct[wp]: lemma unsafe_protected: - "\ cte_wp_at P p s; cte_wp_at ((=) (cap.UntypedCap dev ptr bits idx)) p' s; - descendants_range_in S p' s; invs s; S \ untyped_range (cap.UntypedCap dev ptr bits idx); + "\ cte_wp_at P p s; cte_wp_at ((=) (UntypedCap dev ptr bits idx)) p' s; + descendants_range_in S p' s; invs s; S \ untyped_range (UntypedCap dev ptr bits idx); \cap. P cap \ cap \ cap.NullCap \ \ fst p \ S" apply (rule ex_cte_cap_protects) @@ -3328,8 +3323,8 @@ lemma unsafe_protected: done lemma cap_to_protected: - "\ ex_cte_cap_wp_to P p s; cte_wp_at ((=) (cap.UntypedCap dev ptr bits idx)) p' s; - descendants_range (cap.UntypedCap dev ptr bits idx) p' s; invs s \ + "\ ex_cte_cap_wp_to P p s; cte_wp_at ((=) (UntypedCap dev ptr bits idx)) p' s; + descendants_range (UntypedCap dev ptr bits idx) p' s; invs s \ \ ex_cte_cap_wp_to P p (detype {ptr .. ptr + 2 ^ bits - 1} s)" apply (clarsimp simp: ex_cte_cap_wp_to_def, simp add: detype_def descendants_range_def2) apply (intro exI conjI, assumption) @@ -3559,14 +3554,14 @@ lemma invoke_untyp_invs': assumes init_arch_Q: "\tp slot reset sz slots ptr us refs dev. ui = Invocations_A.Retype slot reset (ptr && ~~ mask sz) ptr tp us slots dev \ \Q and post_retype_invs tp refs - and cte_wp_at (\c. \idx. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) slot + and cte_wp_at (\c. \idx. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) slot and K (refs = retype_addrs ptr tp (length slots) us \ range_cover ptr sz (obj_bits_api tp us) (length slots))\ init_arch_objects tp ptr (length slots) us refs \\_. Q\" assumes retype_region_Q: "\ptr us tp slot reset sz slots dev. ui = Invocations_A.Retype slot reset (ptr && ~~ mask sz) ptr tp us slots dev \ \\s. invs s \ Q s - \ cte_wp_at (\c. \idx. c = cap.UntypedCap dev (ptr && ~~ mask sz) sz idx) slot s + \ cte_wp_at (\c. \idx. c = UntypedCap dev (ptr && ~~ mask sz) sz idx) slot s \ pspace_no_overlap {ptr..(ptr && ~~ mask sz) + (2 ^ sz - 1)} s \ range_cover ptr sz (obj_bits_api tp us) (length slots) \ (tp = CapTableObject \ 0 < us) @@ -3580,7 +3575,7 @@ lemma invoke_untyp_invs': \ (case ui of Invocations_A.Retype slot reset ptr' ptr tp us slots dev' \ cref = slot \ dev' = dev) \ idx \ 2^ sz\ - set_cap (cap.UntypedCap dev ptr sz idx) cref + set_cap (UntypedCap dev ptr sz idx) cref \\rv. Q\" assumes reset_Q: "\Q'\ reset_untyped_cap (case ui of Retype src_slot _ _ _ _ _ _ _ \ src_slot) \\_. Q\" shows @@ -3623,7 +3618,7 @@ lemma invoke_untyp_invs': note neg_mask_add_mask = word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr,symmetric] note set_cap_free_index_invs_spec = set_free_index_invs[where - cap = "cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx)", + cap = "UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx)", unfolded free_index_update_def free_index_of_def,simplified] have slot_not_in: "(cref, oref) \ set slots" @@ -3853,7 +3848,7 @@ lemma update_untyped_cap_valid_objs: lemma valid_untyped_pspace_no_overlap: "pspace_no_overlap {ptr .. 
ptr + 2 ^ sz - 1} s - \ valid_untyped (cap.UntypedCap dev ptr sz idx) s" + \ valid_untyped (UntypedCap dev ptr sz idx) s" apply (clarsimp simp: valid_untyped_def split del: if_split) apply (drule(1) pspace_no_overlap_obj_range) apply simp diff --git a/proof/invariant-abstract/VSpaceEntries_AI.thy b/proof/invariant-abstract/VSpaceEntries_AI.thy index 785ad0e407..8b38b532ff 100644 --- a/proof/invariant-abstract/VSpaceEntries_AI.thy +++ b/proof/invariant-abstract/VSpaceEntries_AI.thy @@ -164,25 +164,8 @@ lemma mapME_x_wp2: lemmas mapME_x_wp_inv = mapME_x_wp2[where S=UNIV, simplified] -lemma hoare_vcg_all_liftE: - "\ \x. \P x\ f \Q x\,\E\ \ \ \\s. \x. P x s\ f \\rv s. \x. Q x rv s\,\E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - -lemma hoare_vcg_const_Ball_liftE: - "\ \x. x \ S \ \P x\ f \Q x\,\E\; \\s. True\ f \\r s. True\, \E\ \ \ \\s. \x\S. P x s\ f \\rv s. \x\S. Q x rv s\,\E\" - by (fastforce simp: validE_def valid_def split: sum.splits) - lemmas hoare_post_conjE = hoare_validE_pred_conj (* FIXME: eliminate *) -lemma hoare_vcg_conj_liftE: (* FIXME: move *) - assumes x: "\P\ f \Q\,\E\" - assumes y: "\P'\ f \Q'\,\E\" - shows "\\s. P s \ P' s\ f \\rv s. Q rv s \ Q' rv s\,\E\" - apply (subst pred_conj_def[symmetric], subst pred_conj_def[symmetric], rule hoare_post_conjE) - apply (rule hoare_vcg_precond_impE [OF x], simp) - apply (rule hoare_vcg_precond_impE [OF y], simp) - done - lemma mapME_x_accumulate_checks: assumes P: "\x. x \ set xs \ \Q\ f x \\rv. P x\, \E\" and Q : "\x. x \ set xs \ \Q\ f x \\rv. Q\, \E\" @@ -199,7 +182,7 @@ lemma mapME_x_accumulate_checks: show ?case apply (simp add: mapME_x_Cons) apply wp - apply (rule hoare_vcg_conj_liftE) + apply (rule hoare_vcg_conj_liftE_weaker) apply (wp mapME_x_wp' P P' hoare_vcg_const_Ball_liftE | simp add:Q @@ -213,7 +196,7 @@ lemma mapME_x_accumulate_checks: using Cons.prems apply fastforce apply (rule hoare_pre) - apply (rule hoare_vcg_conj_liftE) + apply (rule hoare_vcg_conj_liftE_weaker) apply (wp Cons.prems| simp)+ done qed diff --git a/proof/invariant-abstract/X64/ArchAcc_AI.thy b/proof/invariant-abstract/X64/ArchAcc_AI.thy index 26b6e3d504..1a1745e2f2 100644 --- a/proof/invariant-abstract/X64/ArchAcc_AI.thy +++ b/proof/invariant-abstract/X64/ArchAcc_AI.thy @@ -1210,17 +1210,16 @@ lemma valid_machine_stateE: lemma in_user_frame_same_type_upd: "\typ_at type p s; type = a_type obj; in_user_frame q s\ - \ in_user_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_user_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_user_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) done lemma valid_machine_state_heap_updI: -assumes vm : "valid_machine_state s" -assumes tyat : "typ_at type p s" -shows - " a_type obj = type \ valid_machine_state (s\kheap := kheap s(p \ obj)\)" + assumes vm : "valid_machine_state s" + assumes tyat : "typ_at type p s" + shows "a_type obj = type \ valid_machine_state (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: valid_machine_state_def) subgoal for p apply (rule valid_machine_stateE[OF vm,where p = p]) @@ -1355,7 +1354,7 @@ lemma vs_ref_lvl_obj_same_type: lemma valid_vspace_obj_kheap_upd: "\typ_at (a_type (ArchObj obj)) ptr s; valid_vspace_obj ao s\ - \ valid_vspace_obj ao (s\kheap := kheap s(ptr \ ArchObj obj)\)" + \ valid_vspace_obj ao (s\kheap := (kheap s)(ptr \ ArchObj obj)\)" apply (cases ao, simp_all) apply (fastforce simp: a_type_simps obj_at_def valid_pte_def)+ apply (clarsimp) @@ -1421,7 +1420,7 @@ lemma set_object_valid_vspace_objs[wp]: 
apply simp apply simp apply (rule vs_lookup1_wellformed.wellformed_lookup_axioms - [where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s,simplified]) + [where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s,simplified]) apply (clarsimp simp: obj_at_def cong:vs_ref_lvl_obj_same_type) apply clarsimp apply (rule valid_vspace_obj_kheap_upd) @@ -1486,7 +1485,7 @@ lemma set_object_valid_vs_lookup[wp]: apply simp apply simp apply (rule vs_lookup_pages1_wellformed.wellformed_lookup_axioms - [where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s, simplified]) + [where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s, simplified]) apply (clarsimp simp: obj_at_def cong:vs_ref_lvl_obj_same_type) apply (clarsimp simp: fun_upd_def) apply (subst caps_of_state_after_update) @@ -1591,7 +1590,7 @@ lemma valid_global_refsD: lemma in_device_frame_same_type_upd: "\typ_at type p s; type = a_type obj ; in_device_frame q s\ - \ in_device_frame q (s\kheap := kheap s(p \ obj)\)" + \ in_device_frame q (s\kheap := (kheap s)(p \ obj)\)" apply (clarsimp simp: in_device_frame_def obj_at_def) apply (rule_tac x=sz in exI) apply (auto simp: a_type_simps) @@ -1642,7 +1641,7 @@ lemma vs_lookup_pages_pt_eq: lemma valid_vspace_obj_same_type: "\valid_vspace_obj ao s; kheap s p = Some ko; a_type ko' = a_type ko\ - \ valid_vspace_obj ao (s\kheap := kheap s(p \ ko')\)" + \ valid_vspace_obj ao (s\kheap := (kheap s)(p \ ko')\)" apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ]) by (auto simp: obj_at_def) @@ -2800,7 +2799,7 @@ lemma cap_refs_respects_device_region_dmo: lemma machine_op_lift_device_state[wp]: "\\ms. P (device_state ms)\ machine_op_lift f \\_ ms. P (device_state ms)\" - by (clarsimp simp: machine_op_lift_def NonDetMonadVCG.valid_def bind_def + by (clarsimp simp: machine_op_lift_def Nondet_VCG.valid_def bind_def machine_rest_lift_def gets_def simpler_modify_def get_def return_def select_def ignore_failure_def select_f_def split: if_splits) diff --git a/proof/invariant-abstract/X64/ArchArch_AI.thy b/proof/invariant-abstract/X64/ArchArch_AI.thy index 92c9e31146..1a19b99356 100644 --- a/proof/invariant-abstract/X64/ArchArch_AI.thy +++ b/proof/invariant-abstract/X64/ArchArch_AI.thy @@ -286,7 +286,7 @@ locale asid_update = Arch + fixes ap asid s s' assumes ko: "ko_at (ArchObj (ASIDPool Map.empty)) ap s" assumes empty: "x64_asid_table (arch_state s) asid = None" - defines "s' \ s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\" + defines "s' \ s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\" context asid_update begin @@ -402,7 +402,7 @@ context Arch begin global_naming X64 lemma valid_arch_state_strg: "valid_arch_state s \ ap \ ran (x64_asid_table (arch_state s)) \ asid_pool_at ap s \ - valid_arch_state (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_arch_state (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" apply (clarsimp simp: valid_arch_state_def) apply (clarsimp simp: valid_asid_table_def ran_def) apply (fastforce intro!: inj_on_fun_updI) @@ -416,7 +416,7 @@ lemma valid_vs_lookup_at_upd_strg: (\ptr cap. 
caps_of_state s ptr = Some cap \ ap \ obj_refs cap \ vs_cap_ref cap = Some [VSRef (ucast asid) None]) \ - valid_vs_lookup (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_vs_lookup (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -489,7 +489,7 @@ lemma valid_table_caps_asid_upd [iff]: lemma vs_asid_ref_upd: "([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') - (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\) + (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid_high_bits_of asid \ ap)\\) = (if asid_high_bits_of asid' = asid_high_bits_of asid then ap' = ap else ([VSRef (ucast (asid_high_bits_of asid')) None] \ ap') s)" @@ -514,7 +514,7 @@ lemma cap_insert_simple_arch_caps_ap: and K (cap = ArchObjectCap (ASIDPoolCap ap asid)) \ cap_insert cap src dest \\rv s. valid_arch_caps (s\arch_state := arch_state s - \x64_asid_table := x64_asid_table (arch_state s)(asid_high_bits_of asid \ ap)\\)\" + \x64_asid_table := (x64_asid_table (arch_state s))(asid_high_bits_of asid \ ap)\\)\" apply (simp add: cap_insert_def update_cdt_def set_cdt_def valid_arch_caps_def set_untyped_cap_as_full_def bind_assoc) apply (strengthen valid_vs_lookup_at_upd_strg) @@ -526,7 +526,7 @@ lemma cap_insert_simple_arch_caps_ap: hoare_vcg_disj_lift set_cap_reachable_pg_cap set_cap.vs_lookup_pages | clarsimp)+ apply (wp set_cap_arch_obj set_cap_valid_table_caps hoare_vcg_ball_lift - get_cap_wp static_imp_wp)+ + get_cap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) apply (rule conjI) apply (clarsimp simp: vs_cap_ref_def) @@ -547,7 +547,7 @@ lemma valid_asid_map_asid_upd_strg: "valid_asid_map s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ x64_asid_table (arch_state s) asid = None \ - valid_asid_map (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_asid_map (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -560,7 +560,7 @@ lemma valid_vspace_objs_asid_upd_strg: "valid_vspace_objs s \ ko_at (ArchObj (ASIDPool Map.empty)) ap s \ x64_asid_table (arch_state s) asid = None \ - valid_vspace_objs (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_vspace_objs (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" apply clarsimp apply (subgoal_tac "asid_update ap asid s") prefer 2 @@ -573,7 +573,7 @@ lemma valid_global_objs_asid_upd_strg: "valid_global_objs s \ ko_at (ArchObj (arch_kernel_obj.ASIDPool Map.empty)) ap s \ x64_asid_table (arch_state s) asid = None \ - valid_global_objs (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(asid \ ap)\\)" + valid_global_objs (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(asid \ ap)\\)" by clarsimp lemma safe_parent_cap_is_device: @@ -604,7 +604,7 @@ lemma cap_insert_ap_invs: x64_asid_table (arch_state s) (asid_high_bits_of asid) = None)\ cap_insert cap src dest \\rv s. 
invs (s\arch_state := arch_state s - \x64_asid_table := (x64_asid_table \ arch_state) s(asid_high_bits_of asid \ ap)\\)\" + \x64_asid_table := ((x64_asid_table \ arch_state) s)(asid_high_bits_of asid \ ap)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def) apply (strengthen valid_arch_state_strg valid_vspace_objs_asid_upd_strg valid_asid_map_asid_upd_strg ) @@ -758,11 +758,11 @@ proof - \\rv s. invs (s\arch_state := arch_state s - \x64_asid_table := (x64_asid_table \ arch_state) s + \x64_asid_table := ((x64_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\) \ Q (s\arch_state := arch_state s - \x64_asid_table := (x64_asid_table \ arch_state) s + \x64_asid_table := ((x64_asid_table \ arch_state) s) (asid_high_bits_of asid \ ap)\\)\" apply (wp cap_insert_ap_invs) apply simp @@ -1011,7 +1011,7 @@ lemma create_mapping_entries_inv [wp]: crunch_ignore (add: select_ext) crunch inv [wp]: arch_decode_invocation "P" - (wp: crunch_wps select_wp select_ext_weak_wp simp: crunch_simps) + (wp: crunch_wps select_ext_weak_wp simp: crunch_simps) lemma create_mappings_empty [wp]: @@ -1576,7 +1576,7 @@ lemma arch_decode_inv_wf[wp]: apply (simp add: arch_decode_invocation_def Let_def split_def cong: if_cong split del: if_split) apply (rule hoare_pre) - apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_wp select_ext_weak_wp + apply ((wp whenE_throwError_wp check_vp_wpR ensure_empty_stronger select_ext_weak_wp | wpc | simp add: valid_arch_inv_def valid_apinv_def)+)[1] apply (simp add: valid_arch_inv_def valid_apinv_def) apply (intro allI impI ballI) @@ -1620,7 +1620,7 @@ lemma arch_decode_inv_wf[wp]: apply (simp add: lookup_target_slot_def) apply wp apply (clarsimp simp: cte_wp_at_def asid_wf_high) - apply (wp ensure_no_children_sp select_ext_weak_wp select_wp whenE_throwError_wp | wpc | simp)+ + apply (wp ensure_no_children_sp select_ext_weak_wp whenE_throwError_wp | wpc | simp)+ apply clarsimp apply (rule conjI, fastforce) apply (cases excaps, simp) diff --git a/proof/invariant-abstract/X64/ArchCNodeInv_AI.thy b/proof/invariant-abstract/X64/ArchCNodeInv_AI.thy index f9b005b6cc..7dd7439a5a 100644 --- a/proof/invariant-abstract/X64/ArchCNodeInv_AI.thy +++ b/proof/invariant-abstract/X64/ArchCNodeInv_AI.thy @@ -556,7 +556,7 @@ context Arch begin global_naming X64 lemma post_cap_delete_pre_is_final_cap': "\rv s'' rva s''a s. 
\valid_ioports s; caps_of_state s slot = Some cap; is_final_cap' cap s; cap_cleanup_opt cap \ NullCap\ - \ post_cap_delete_pre (cap_cleanup_opt cap) (caps_of_state s(slot \ NullCap))" + \ post_cap_delete_pre (cap_cleanup_opt cap) ((caps_of_state s)(slot \ NullCap))" apply (clarsimp simp: cap_cleanup_opt_def cte_wp_at_def post_cap_delete_pre_def split: cap.split_asm if_split_asm elim!: ranE dest!: caps_of_state_cteD) @@ -646,7 +646,7 @@ next apply (rule "2.hyps"[simplified rec_del_call.simps slot_rdcall.simps conj_assoc], assumption+) apply (simp add: cte_wp_at_eq_simp | wp replace_cap_invs set_cap_sets final_cap_same_objrefs - set_cap_cte_cap_wp_to static_imp_wp + set_cap_cte_cap_wp_to hoare_weak_lift_imp | erule finalise_cap_not_reply_master)+ apply (wp hoare_vcg_const_Ball_lift)+ apply (rule hoare_strengthen_post) diff --git a/proof/invariant-abstract/X64/ArchCSpacePre_AI.thy b/proof/invariant-abstract/X64/ArchCSpacePre_AI.thy index bee60b2a66..2353cdb6d8 100644 --- a/proof/invariant-abstract/X64/ArchCSpacePre_AI.thy +++ b/proof/invariant-abstract/X64/ArchCSpacePre_AI.thy @@ -127,7 +127,7 @@ lemma masked_as_full_test_function_stuff[simp]: lemma same_aobject_as_commute: "same_aobject_as x y \ same_aobject_as y x" - by (cases x; cases y; clarsimp simp: same_aobject_as_def) + by (cases x; cases y; clarsimp) lemmas wellformed_cap_simps = wellformed_cap_def [simplified wellformed_acap_def, split_simps cap.split arch_cap.split] @@ -175,7 +175,7 @@ lemma valid_arch_mdb_simple: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_simple_cap cap; caps_of_state s src = Some capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" by (auto simp: valid_arch_mdb_def ioport_revocable_def is_cap_revocable_def arch_is_cap_revocable_def is_simple_cap_def safe_parent_for_def is_cap_simps) @@ -217,14 +217,14 @@ lemma set_untyped_cap_as_full_valid_arch_mdb: lemma valid_arch_mdb_not_arch_cap_update: "\s cap capa. \\is_arch_cap cap; valid_arch_mdb (is_original_cap s) (caps_of_state s)\ \ valid_arch_mdb ((is_original_cap s)(dest := True)) - (caps_of_state s(src \ cap, dest\capa))" + ((caps_of_state s)(src \ cap, dest\capa))" by (auto simp: valid_arch_mdb_def ioport_revocable_def is_cap_simps) lemma valid_arch_mdb_derived_cap_update: "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); is_derived (cdt s) src cap capa\ \ valid_arch_mdb ((is_original_cap s)(dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap))" + ((caps_of_state s)(dest \ cap))" apply (clarsimp simp: valid_arch_mdb_def ioport_revocable_def is_cap_simps is_cap_revocable_def arch_is_cap_revocable_def) by (clarsimp simp: is_derived_def is_cap_simps is_derived_arch_def split: if_split_asm) @@ -233,7 +233,7 @@ lemma valid_arch_mdb_free_index_update': "\s capa. \valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; is_untyped_cap cap\ \ valid_arch_mdb ((is_original_cap s) (dest := is_cap_revocable cap capa)) - (caps_of_state s(dest \ cap, src \ max_free_index_update capa))" + ((caps_of_state s)(dest \ cap, src \ max_free_index_update capa))" by (auto simp: valid_arch_mdb_def ioport_revocable_def is_cap_simps is_cap_revocable_def arch_is_cap_revocable_def free_index_update_def split: cap.splits) @@ -247,7 +247,7 @@ lemma valid_arch_mdb_weak_derived_update: "\s capa. 
\valid_arch_mdb (is_original_cap s) (caps_of_state s); caps_of_state s src = Some capa; weak_derived cap capa\ \ valid_arch_mdb ((is_original_cap s) (dest := is_original_cap s src, src := False)) - (caps_of_state s(dest \ cap, src \ NullCap))" + ((caps_of_state s)(dest \ cap, src \ NullCap))" by (auto simp: valid_arch_mdb_def ioport_revocable_def split: if_split_asm simp del: split_paired_All) @@ -255,7 +255,7 @@ lemma valid_arch_mdb_weak_derived_update: lemma valid_arch_mdb_tcb_cnode_update: "valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb ((is_original_cap s) ((t, tcb_cnode_index 2) := True)) - (caps_of_state s((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" + ((caps_of_state s)((t, tcb_cnode_index 2) \ ReplyCap t True canReplyGrant))" by (clarsimp simp: valid_arch_mdb_def ioport_revocable_def) lemmas valid_arch_mdb_updates = valid_arch_mdb_free_index_update valid_arch_mdb_not_arch_cap_update @@ -295,10 +295,10 @@ lemma valid_arch_mdb_null_filter: lemma valid_arch_mdb_untypeds: "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (\x. x \ cref \ is_original_cap s x) - (caps_of_state s(cref \ default_cap tp oref sz dev))" + ((caps_of_state s)(cref \ default_cap tp oref sz dev))" "\s. valid_arch_mdb (is_original_cap s) (caps_of_state s) \ valid_arch_mdb (is_original_cap s) - (caps_of_state s(cref \ UntypedCap dev ptr sz idx))" + ((caps_of_state s)(cref \ UntypedCap dev ptr sz idx))" by (clarsimp simp: valid_arch_mdb_def ioport_revocable_def)+ lemma same_object_as_ioports: diff --git a/proof/invariant-abstract/X64/ArchCSpace_AI.thy b/proof/invariant-abstract/X64/ArchCSpace_AI.thy index 2e0a2921cf..06e159f149 100644 --- a/proof/invariant-abstract/X64/ArchCSpace_AI.thy +++ b/proof/invariant-abstract/X64/ArchCSpace_AI.thy @@ -186,20 +186,20 @@ lemma is_derived_is_cap: (* FIXME: move to CSpace_I near lemma vs_lookup1_tcb_update *) lemma vs_lookup_pages1_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages1 s" + vs_lookup_pages1 (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) (* FIXME: move to CSpace_I near lemma vs_lookup_tcb_update *) lemma vs_lookup_pages_tcb_update: "kheap s p = Some (TCB t) \ - vs_lookup_pages (s\kheap := kheap s(p \ TCB t')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ TCB t')\) = vs_lookup_pages s" by (clarsimp simp add: vs_lookup_pages_def vs_lookup_pages1_tcb_update) (* FIXME: move to CSpace_I near lemma vs_lookup1_cnode_update *) lemma vs_lookup_pages1_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ CNode m cs')\) = + vs_lookup_pages1 (s\kheap := (kheap s)(p \ CNode m cs')\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) @@ -207,7 +207,7 @@ lemma vs_lookup_pages1_cnode_update: (* FIXME: move to CSpace_I near lemma vs_lookup_cnode_update *) lemma vs_lookup_pages_cnode_update: "kheap s p = Some (CNode n cs) \ - vs_lookup_pages (s\kheap := kheap s(p \ CNode n cs')\) = vs_lookup_pages s" + vs_lookup_pages (s\kheap := (kheap s)(p \ CNode n cs')\) = vs_lookup_pages s" by (clarsimp simp: vs_lookup_pages_def dest!: vs_lookup_pages1_cnode_update[where m=n and cs'=cs']) diff --git a/proof/invariant-abstract/X64/ArchDetSchedAux_AI.thy b/proof/invariant-abstract/X64/ArchDetSchedAux_AI.thy index 30f448c6a4..760b927c57 100644 --- 
a/proof/invariant-abstract/X64/ArchDetSchedAux_AI.thy +++ b/proof/invariant-abstract/X64/ArchDetSchedAux_AI.thy @@ -99,9 +99,9 @@ crunch ct[wp]: perform_asid_control_invocation "\s. P (cur_thread s)" crunch idle_thread[wp]: perform_asid_control_invocation "\s. P (idle_thread s)" -crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: static_imp_wp) +crunch valid_etcbs[wp]: perform_asid_control_invocation valid_etcbs (wp: hoare_weak_lift_imp) -crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: static_imp_wp) +crunch valid_blocked[wp]: perform_asid_control_invocation valid_blocked (wp: hoare_weak_lift_imp) crunch schedact[wp]: perform_asid_control_invocation "\s :: det_ext state. P (scheduler_action s)" (wp: crunch_wps simp: detype_def detype_ext_def wrap_ext_det_ext_ext_def cap_insert_ext_def ignore: freeMemory) diff --git a/proof/invariant-abstract/X64/ArchDetSchedSchedule_AI.thy b/proof/invariant-abstract/X64/ArchDetSchedSchedule_AI.thy index 5f8793a5b8..579035e2a1 100644 --- a/proof/invariant-abstract/X64/ArchDetSchedSchedule_AI.thy +++ b/proof/invariant-abstract/X64/ArchDetSchedSchedule_AI.thy @@ -243,7 +243,7 @@ lemma flush_table_simple_sched_action[wp]: "\simple_sched_action\valid_sched\ flush_table a b c d \\rv. valid_sched\" diff --git a/proof/invariant-abstract/X64/ArchFinalise_AI.thy b/proof/invariant-abstract/X64/ArchFinalise_AI.thy index b8799f4e05..13c7f7874a 100644 --- a/proof/invariant-abstract/X64/ArchFinalise_AI.thy +++ b/proof/invariant-abstract/X64/ArchFinalise_AI.thy @@ -665,7 +665,7 @@ lemma flush_table_pred_tcb_at: "\\s. pred_tcb_at proj P t s\s. P (interrupt_irq_node s)" - (wp: crunch_wps select_wp simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch pred_tcb_at[wp]: arch_finalise_cap "pred_tcb_at proj P t" (simp: crunch_simps set_arch_obj_simps wp: crunch_wps set_aobject_pred_tcb_at @@ -1369,7 +1369,7 @@ lemma set_asid_pool_obj_at_ptr: lemma valid_arch_state_table_strg: "valid_arch_state s \ asid_pool_at p s \ Some p \ x64_asid_table (arch_state s) ` (dom (x64_asid_table (arch_state s)) - {x}) \ - valid_arch_state (s\arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(x \ p)\\)" + valid_arch_state (s\arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(x \ p)\\)" apply (clarsimp simp: valid_arch_state_def valid_asid_table_def ran_def) apply (rule conjI, fastforce) apply (erule inj_on_fun_upd_strongerI) @@ -1402,8 +1402,8 @@ lemma vs_lookup1_arch [simp]: lemma vs_lookup_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (erule vs_lookupE) apply clarsimp @@ -1435,8 +1435,8 @@ lemma vs_lookup_empty_table: lemma vs_lookup_pages_empty_table: "(rs \ q) - (s\kheap := kheap s(p \ ArchObj (ASIDPool Map.empty)), - arch_state := arch_state s\x64_asid_table := x64_asid_table (arch_state s)(x \ p)\\) \ + (s\kheap := (kheap s)(p \ ArchObj (ASIDPool Map.empty)), + arch_state := arch_state s\x64_asid_table := (x64_asid_table (arch_state s))(x \ p)\\) \ (rs \ q) s \ (rs = [VSRef (ucast x) None] \ q = p)" apply (subst (asm) vs_lookup_pages_def) apply (clarsimp simp: Image_def) @@ -1471,7 +1471,7 @@ lemma set_asid_pool_empty_table_objs: set_asid_pool p Map.empty 
\\rv s. valid_vspace_objs (s\arch_state := arch_state s\x64_asid_table := - x64_asid_table (arch_state s)(asid_high_bits_of word2 \ p)\\)\" + (x64_asid_table (arch_state s))(asid_high_bits_of word2 \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vspace_objs_def @@ -1496,7 +1496,7 @@ lemma set_asid_pool_empty_table_lookup: set_asid_pool p Map.empty \\rv s. valid_vs_lookup (s\arch_state := arch_state s\x64_asid_table := - x64_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (x64_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: set_asid_pool_def set_object_def) apply (wp get_object_wp) apply (clarsimp simp: obj_at_def valid_vs_lookup_def @@ -1515,7 +1515,7 @@ lemma set_asid_pool_empty_table_lookup: lemma valid_ioports_asid_table_upd[iff]: "valid_ioports (s\arch_state := arch_state s - \x64_asid_table := x64_asid_table (arch_state s) + \x64_asid_table := (x64_asid_table (arch_state s)) (asid_high_bits_of base \ p)\\) = valid_ioports s" by (clarsimp simp: valid_ioports_def all_ioports_issued_def issued_ioports_def) @@ -1526,7 +1526,7 @@ lemma set_asid_pool_invs_table: \ (\p'. \ ([VSRef (ucast (asid_high_bits_of base)) None] \ p') s)\ set_asid_pool p Map.empty \\x s. invs (s\arch_state := arch_state s\x64_asid_table := - x64_asid_table (arch_state s)(asid_high_bits_of base \ p)\\)\" + (x64_asid_table (arch_state s))(asid_high_bits_of base \ p)\\)\" apply (simp add: invs_def valid_state_def valid_pspace_def valid_arch_caps_def valid_asid_map_def) apply (wp valid_irq_node_typ set_asid_pool_typ_at set_asid_pool_empty_table_objs valid_ioports_lift diff --git a/proof/invariant-abstract/X64/ArchIpc_AI.thy b/proof/invariant-abstract/X64/ArchIpc_AI.thy index ea5bd40c0d..cbbebda211 100644 --- a/proof/invariant-abstract/X64/ArchIpc_AI.thy +++ b/proof/invariant-abstract/X64/ArchIpc_AI.thy @@ -319,7 +319,7 @@ lemma transfer_caps_non_null_cte_wp_at: unfolding transfer_caps_def apply simp apply (rule hoare_pre) - apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at static_imp_wp + apply (wp hoare_vcg_ball_lift transfer_caps_loop_cte_wp_at hoare_weak_lift_imp | wpc | clarsimp simp:imp)+ apply (rule hoare_strengthen_post [where Q="\rv s'. (cte_wp_at ((\) cap.NullCap) ptr) s' @@ -435,7 +435,7 @@ lemma do_ipc_transfer_respects_device_region[Ipc_AI_cont_assms]: apply (rule hoare_drop_imps) apply wp apply (subst ball_conj_distrib) - apply (wp get_rs_cte_at2 thread_get_wp static_imp_wp grs_distinct + apply (wp get_rs_cte_at2 thread_get_wp hoare_weak_lift_imp grs_distinct hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_conj_lift | simp)+ apply (rule hoare_strengthen_post[where Q = "\r s. cap_refs_respects_device_region s \ valid_objs s \ valid_mdb s \ obj_at (\ko. \tcb. ko = TCB tcb) t s"]) @@ -465,7 +465,7 @@ lemma valid_arch_mdb_cap_swap: \ valid_arch_mdb ((is_original_cap s) (a := is_original_cap s b, b := is_original_cap s a)) - (caps_of_state s(a \ c', b \ c))" + ((caps_of_state s)(a \ c', b \ c))" apply (clarsimp simp: valid_arch_mdb_def ioport_revocable_def simp del: split_paired_All) apply (intro conjI impI allI) apply (simp del: split_paired_All) diff --git a/proof/invariant-abstract/X64/ArchKHeap_AI.thy b/proof/invariant-abstract/X64/ArchKHeap_AI.thy index 80aabdb69c..8536e57d88 100644 --- a/proof/invariant-abstract/X64/ArchKHeap_AI.thy +++ b/proof/invariant-abstract/X64/ArchKHeap_AI.thy @@ -835,20 +835,20 @@ crunch device_state_inv: storeWord "\ms. 
P (device_state ms)" (* some hyp_ref invariants *) lemma state_hyp_refs_of_ep_update: "\s ep val. typ_at AEndpoint ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Endpoint val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Endpoint val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_ntfn_update: "\s ep val. typ_at ANTFN ep s \ - state_hyp_refs_of (s\kheap := kheap s(ep \ Notification val)\) = state_hyp_refs_of s" + state_hyp_refs_of (s\kheap := (kheap s)(ep \ Notification val)\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def hyp_refs_of_def) done lemma state_hyp_refs_of_tcb_bound_ntfn_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_bound_notification := ntfn\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -856,7 +856,7 @@ lemma state_hyp_refs_of_tcb_bound_ntfn_update: lemma state_hyp_refs_of_tcb_state_update: "kheap s t = Some (TCB tcb) \ - state_hyp_refs_of (s\kheap := kheap s(t \ TCB (tcb\tcb_state := ts\))\) + state_hyp_refs_of (s\kheap := (kheap s)(t \ TCB (tcb\tcb_state := ts\))\) = state_hyp_refs_of s" apply (rule all_ext) apply (clarsimp simp add: state_hyp_refs_of_def obj_at_def split: option.splits) @@ -864,7 +864,7 @@ lemma state_hyp_refs_of_tcb_state_update: lemma arch_valid_obj_same_type: "\ arch_valid_obj ao s; kheap s p = Some ko; a_type k = a_type ko \ - \ arch_valid_obj ao (s\kheap := kheap s(p \ k)\)" + \ arch_valid_obj ao (s\kheap := (kheap s)(p \ k)\)" by (induction ao rule: arch_kernel_obj.induct; clarsimp simp: typ_at_same_type) @@ -878,7 +878,7 @@ lemma default_tcb_not_live: "\ live (TCB default_tcb)" lemma valid_arch_tcb_same_type: "\ valid_arch_tcb t s; valid_obj p k s; kheap s p = Some ko; a_type k = a_type ko \ - \ valid_arch_tcb t (s\kheap := kheap s(p \ k)\)" + \ valid_arch_tcb t (s\kheap := (kheap s)(p \ k)\)" by (auto simp: valid_arch_tcb_def obj_at_def) lemma valid_ioports_lift: diff --git a/proof/invariant-abstract/X64/ArchRetype_AI.thy b/proof/invariant-abstract/X64/ArchRetype_AI.thy index d6a94df8c8..1ae5748c44 100644 --- a/proof/invariant-abstract/X64/ArchRetype_AI.thy +++ b/proof/invariant-abstract/X64/ArchRetype_AI.thy @@ -389,7 +389,7 @@ lemma copy_global_invs_mappings_restricted: apply (simp add: valid_pspace_def pred_conj_def) apply (rule hoare_conjI, wp copy_global_equal_kernel_mappings_restricted) apply (clarsimp simp: global_refs_def) - apply (rule valid_prove_more, rule hoare_vcg_conj_lift, rule hoare_TrueI) + apply (rule hoare_post_add, rule hoare_vcg_conj_lift, rule hoare_TrueI) apply (simp add: copy_global_mappings_def valid_pspace_def) apply (rule hoare_seq_ext [OF _ gets_sp]) apply (rule hoare_strengthen_post) diff --git a/proof/invariant-abstract/X64/ArchTcb_AI.thy b/proof/invariant-abstract/X64/ArchTcb_AI.thy index 460e642ebf..4c651ca8f7 100644 --- a/proof/invariant-abstract/X64/ArchTcb_AI.thy +++ b/proof/invariant-abstract/X64/ArchTcb_AI.thy @@ -257,7 +257,7 @@ lemma tc_invs[Tcb_AI_asms]: checked_insert_no_cap_to out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid - static_imp_wp static_imp_conj_wp)[1] + hoare_weak_lift_imp hoare_weak_lift_imp_conj)[1] | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] 
emptyable_def del: hoare_True_E_R diff --git a/proof/invariant-abstract/X64/ArchUntyped_AI.thy b/proof/invariant-abstract/X64/ArchUntyped_AI.thy index 0493fa22ae..361eddabc0 100644 --- a/proof/invariant-abstract/X64/ArchUntyped_AI.thy +++ b/proof/invariant-abstract/X64/ArchUntyped_AI.thy @@ -389,10 +389,10 @@ lemma create_cap_ioports[wp, Untyped_AI_assms]: (* FIXME: move *) lemma simpler_store_pml4e_def: "store_pml4e p pde s = - (case kheap s (p && ~~ mask pml4_bits) of + (case (kheap s)(p && ~~ mask pml4_bits) of Some (ArchObj (PageMapL4 pml4)) => - ({((), s\kheap := (kheap s((p && ~~ mask pml4_bits) \ - (ArchObj (PageMapL4 (pml4(ucast (p && mask pml4_bits >> word_size_bits) := pde))))))\)}, False) + ({((), s\kheap := (kheap s)(p && ~~ mask pml4_bits \ + (ArchObj (PageMapL4 (pml4(ucast (p && mask pml4_bits >> word_size_bits) := pde)))))\)}, False) | _ => ({}, True))" apply (auto simp: store_pml4e_def set_object_def get_object_def simpler_gets_def assert_def a_type_simps return_def fail_def set_object_def get_def put_def bind_def get_pml4_def aa_type_simps diff --git a/proof/invariant-abstract/X64/ArchVSpaceEntries_AI.thy b/proof/invariant-abstract/X64/ArchVSpaceEntries_AI.thy index e71b9742e9..77720c0a6a 100644 --- a/proof/invariant-abstract/X64/ArchVSpaceEntries_AI.thy +++ b/proof/invariant-abstract/X64/ArchVSpaceEntries_AI.thy @@ -318,7 +318,7 @@ crunch valid_vspace_objs'[wp]: set_simple_ko "valid_vspace_objs'" (wp: crunch_wps) crunch valid_vspace_objs'[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_vspace_objs'" - (wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object) + (wp: crunch_wps preemption_point_inv simp: crunch_simps unless_def ignore:set_object) lemma preemption_point_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ preemption_point \\rv. valid_vspace_objs'\" @@ -556,7 +556,7 @@ lemma invoke_untyped_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: perform_asid_pool_invocation, perform_asid_control_invocation "valid_vspace_objs'" (ignore: delete_objects set_object - wp: static_imp_wp select_wp crunch_wps + wp: hoare_weak_lift_imp crunch_wps simp: crunch_simps unless_def) lemma pte_range_interD: @@ -703,7 +703,7 @@ lemma handle_invocation_valid_vspace_objs'[wp]: crunch valid_vspace_objs'[wp]: activate_thread,switch_to_thread, handle_hypervisor_fault, switch_to_idle_thread, handle_call, handle_recv, handle_reply, handle_send, handle_yield, handle_interrupt "valid_vspace_objs'" - (simp: crunch_simps wp: crunch_wps alternative_wp select_wp OR_choice_weak_wp select_ext_weak_wp + (simp: crunch_simps wp: crunch_wps OR_choice_weak_wp select_ext_weak_wp ignore: without_preemption getActiveIRQ resetTimer ackInterrupt getFaultAddress OR_choice set_scheduler_action) @@ -714,8 +714,7 @@ lemma handle_event_valid_vspace_objs'[wp]: lemma schedule_valid_vspace_objs'[wp]: "\valid_vspace_objs'\ schedule :: (unit,unit) s_monad \\_. 
valid_vspace_objs'\" apply (simp add: schedule_def allActiveTCBs_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma call_kernel_valid_vspace_objs'[wp]: diff --git a/proof/invariant-abstract/X64/ArchVSpace_AI.thy b/proof/invariant-abstract/X64/ArchVSpace_AI.thy index 1b359002d0..29ae65de00 100644 --- a/proof/invariant-abstract/X64/ArchVSpace_AI.thy +++ b/proof/invariant-abstract/X64/ArchVSpace_AI.thy @@ -1606,7 +1606,7 @@ lemma update_aobj_not_reachable: apply (rule_tac x = "(aa, baa)" in bexI[rotated]) apply assumption apply (simp add: fun_upd_def[symmetric]) - apply (rule_tac s4 = s in vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := kheap s(p \ ArchObj aobj)\" for s + apply (rule_tac s4 = s in vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := (kheap s)(p \ ArchObj aobj)\" for s ,simplified]) apply (clarsimp simp: lookup_refs_def vs_lookup_pages1_on_heap_obj_def vs_refs_pages_def image_def obj_at_def graph_of_def pde_ref_pages_def Image_def split: if_split_asm pde.split_asm) @@ -2817,7 +2817,7 @@ lemma lookup_pages_shrink_store_pdpte: apply (simp add: vs_lookup_pages_def) apply (drule_tac s1 = s in lookup_bound_estimate[OF vs_lookup_pages1_is_wellformed_lookup, rotated -1]) apply (simp add: fun_upd_def[symmetric]) - apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s ptr obj + apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s ptr obj ,simplified]) apply (clarsimp simp: lookup_refs_def vs_lookup_pages1_on_heap_obj_def vs_refs_pages_def image_def obj_at_def graph_of_def pdpte_ref_pages_def split: if_split_asm pde.split_asm) @@ -2831,7 +2831,7 @@ lemma lookup_pages_shrink_store_pde: apply (simp add: vs_lookup_pages_def) apply (drule_tac s1 = s in lookup_bound_estimate[OF vs_lookup_pages1_is_wellformed_lookup, rotated -1]) apply (simp add: fun_upd_def[symmetric]) - apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s ptr obj + apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s ptr obj ,simplified]) apply (clarsimp simp: lookup_refs_def vs_lookup_pages1_on_heap_obj_def vs_refs_pages_def image_def obj_at_def graph_of_def pde_ref_pages_def split: if_split_asm pde.split_asm) @@ -2845,7 +2845,7 @@ lemma lookup_pages_shrink_store_pte: apply (simp add: vs_lookup_pages_def) apply (drule_tac s1 = s in lookup_bound_estimate[OF vs_lookup_pages1_is_wellformed_lookup, rotated -1]) apply (simp add: fun_upd_def[symmetric]) - apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := kheap s(ptr \ ArchObj obj)\" for s ptr obj + apply (rule vs_lookup_pages1_is_wellformed_lookup[where s = "s\kheap := (kheap s)(ptr \ ArchObj obj)\" for s ptr obj ,simplified]) apply (clarsimp simp: lookup_refs_def vs_lookup_pages1_on_heap_obj_def vs_refs_pages_def image_def obj_at_def graph_of_def pde_ref_pages_def split: if_split_asm pde.split_asm) diff --git a/proof/invariant-abstract/X64/Machine_AI.thy b/proof/invariant-abstract/X64/Machine_AI.thy index 4e37700a4e..efd31cf7d0 100644 --- a/proof/invariant-abstract/X64/Machine_AI.thy +++ b/proof/invariant-abstract/X64/Machine_AI.thy @@ -17,7 +17,7 @@ definition "no_irq f \ \P. \\s. P (irq_masks s)\ f \\_ s. 
P (irq_masks s)\" lemma wpc_helper_no_irq: - "no_irq f \ wpc_helper (P, P') (Q, Q') (no_irq f)" + "no_irq f \ wpc_helper (P, P', P'') (Q, Q', Q'') (no_irq f)" by (simp add: wpc_helper_def) wpc_setup "\m. no_irq m" wpc_helper_no_irq @@ -56,7 +56,7 @@ setup \ \ crunch_ignore (no_irq) (add: - NonDetMonad.bind return "when" get gets fail + Nondet_Monad.bind return "when" get gets fail assert put modify unless select alternative assert_opt gets_the returnOk throwError lift bindE @@ -184,7 +184,7 @@ definition "irq_state_independent P \ \f s. P s \ lemma getActiveIRQ_inv [wp]: "\irq_state_independent P\ \ \P\ getActiveIRQ in_kernel \\rv. P\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply (simp add: irq_state_independent_def) done @@ -369,7 +369,7 @@ lemma getActiveIRQ_le_maxIRQ': getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply clarsimp apply (rule ccontr) apply (simp add: linorder_not_le) @@ -379,14 +379,13 @@ lemma getActiveIRQ_le_maxIRQ': lemma getActiveIRQ_neq_Some0xFF': "\\\ getActiveIRQ in_kernel \\rv s. rv \ Some 0x3FF\" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) - apply simp + apply wpsimp done lemma getActiveIRQ_neq_non_kernel: "\\\ getActiveIRQ True \\rv s. rv \ Some ` non_kernel_IRQs \" apply (simp add: getActiveIRQ_def) - apply (wp alternative_wp select_wp) + apply wp apply auto done diff --git a/proof/refine/AARCH64/ADT_H.thy b/proof/refine/AARCH64/ADT_H.thy new file mode 100644 index 0000000000..4f93f4044c --- /dev/null +++ b/proof/refine/AARCH64/ADT_H.thy @@ -0,0 +1,1754 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +chapter \Abstract datatype for the executable specification\ + +theory ADT_H + imports Syscall_R +begin + +text \ + The general refinement calculus (see theory Simulation) requires + the definition of a so-called ``abstract datatype'' for each refinement layer. + This theory defines this datatype for the executable specification. + It is based on the abstract specification because we chose + to base the refinement's observable state on the abstract state. +\ + +consts + initEntry :: machine_word + initFrames :: "machine_word list" + initOffset :: machine_word + initKernelFrames :: "machine_word list" + initBootFrames :: "machine_word list" + initDataStart :: machine_word + +context begin interpretation Arch . (*FIXME: arch_split*) + +text \ + The construction of the abstract data type + for the executable specification largely follows + the one for the abstract specification. +\ +definition Init_H :: "kernel_state global_state set" where + "Init_H \ + ({empty_context} \ snd ` + fst (initKernel (VPtr initEntry) (PPtr initOffset) (map PPtr initFrames) + (map PPtr initKernelFrames) initBootFrames + (newKernelState initDataStart))) \ + {UserMode} \ {None}" + +definition + "user_mem' s \ \p. + if pointerInUserData p s then Some (underlying_memory (ksMachineState s) p) else None" + +definition + "device_mem' s \ \p. 
+ if pointerInDeviceData p s then Some p else None" + +definition vm_rights_of :: "vmrights \ rights set" where + "vm_rights_of x \ case x of VMKernelOnly \ vm_kernel_only + | VMReadOnly \ vm_read_only + | VMReadWrite \ vm_read_write" + +lemma vm_rights_of_vmrights_map_id[simp]: + "rs \ valid_vm_rights \ vm_rights_of (vmrights_map rs) = rs" + by (auto simp: vm_rights_of_def vmrights_map_def valid_vm_rights_def + vm_read_write_def vm_read_only_def vm_kernel_only_def) + +(* We expect 'a to be one of {pt_index, vs_index} *) +definition absPageTable0 :: + "(obj_ref \ Structures_H.kernel_object) \ obj_ref \ 'a::len word \ AARCH64_A.pte" where + "absPageTable0 h a \ \offs. + case h (a + (ucast offs << pte_bits)) of + Some (KOArch (KOPTE (InvalidPTE))) \ Some AARCH64_A.InvalidPTE + | Some (KOArch (KOPTE (PagePTE p small global execNever dev rights))) \ + Some (AARCH64_A.PagePTE p small + {x. global \ x=Global \ \execNever \ x = Execute \ + dev \ x = Device} + (vm_rights_of rights)) + | Some (KOArch (KOPTE (PageTablePTE p))) \ + if p \ mask ppn_len + then Some (AARCH64_A.PageTablePTE (ucast p)) + else None + | _ \ None" + +definition absPageTable :: + "(obj_ref \ Structures_H.kernel_object) \ pt_type option \ obj_ref \ pt" where + "absPageTable h pt_t a \ + case pt_t of + Some NormalPT_T \ + if is_aligned a (pt_bits NormalPT_T) \ (\off::pt_index. absPageTable0 h a off \ None) + then Some (NormalPT (\off. the (absPageTable0 h a off))) + else None + | Some VSRootPT_T \ + if is_aligned a (pt_bits VSRootPT_T) \ (\off::vs_index. absPageTable0 h a off \ None) + then Some (VSRootPT (\off. the (absPageTable0 h a off))) + else None + | None \ None" + +definition absVGIC :: "gicvcpuinterface \ gic_vcpu_interface" where + "absVGIC v \ case v of + VGICInterface hcr vmcr apr lr \ gic_vcpu_interface.make hcr vmcr apr lr" + +lemma absVGIC_eq[simp]: + "absVGIC (vgic_map vgic) = vgic" + by (simp add: vgic_map_def absVGIC_def gic_vcpu_interface.make_def) + +(* Can't pull the whole heap off at once, start with arch specific stuff.*) +definition absHeapArch :: + "(machine_word \ kernel_object) \ (machine_word \ pt_type) \ + machine_word \ arch_kernel_object \ arch_kernel_obj" where + "absHeapArch h pt_types a \ \ako. + case ako of + KOASIDPool (AARCH64_H.ASIDPool ap) \ + Some (AARCH64_A.ASIDPool (\w. map_option abs_asid_entry (ap (ucast w)))) + | KOPTE _ \ + map_option PageTable (absPageTable h (pt_types a) a) + | KOVCPU (VCPUObj tcb vgic regs vppimask vtimer) \ + Some (VCPU \ vcpu_tcb = tcb, + vcpu_vgic = absVGIC vgic, + vcpu_regs = regs, + vcpu_vppi_masked = vppimask, + vcpu_vtimer = vtimer \)" + +definition + "EndpointMap ep \ case ep of + Structures_H.IdleEP \ Structures_A.IdleEP + | Structures_H.SendEP q \ Structures_A.SendEP q + | Structures_H.RecvEP q \ Structures_A.RecvEP q" + +definition + "AEndpointMap ntfn \ + \ ntfn_obj = case ntfnObj ntfn of + Structures_H.IdleNtfn \ Structures_A.IdleNtfn + | Structures_H.WaitingNtfn q \ Structures_A.WaitingNtfn q + | Structures_H.ActiveNtfn b \ Structures_A.ActiveNtfn b + , ntfn_bound_tcb = ntfnBoundTCB ntfn \" + +definition mdata_map' :: + "(asid \ vspace_ref) option \ (Machine_A.AARCH64_A.asid \ vspace_ref) option" where + "mdata_map' = map_option (\(asid, ref). 
(ucast asid, ref))" + +lemma mdata_map'_inv[simp]: + "mdata_map' (mdata_map m) = m" + by (cases m; simp add: mdata_map_def mdata_map'_def split_def ucast_down_ucast_id is_down) + +fun CapabilityMap :: "capability \ cap" where + "CapabilityMap capability.NullCap = cap.NullCap" +| "CapabilityMap (capability.UntypedCap d ref n idx) = cap.UntypedCap d ref n idx" +| "CapabilityMap (capability.EndpointCap ref b sr rr gr grr) = + cap.EndpointCap ref b {x. sr \ x = AllowSend \ rr \ x = AllowRecv \ + gr \ x = AllowGrant \ grr \ x = AllowGrantReply}" +| "CapabilityMap (capability.NotificationCap ref b sr rr) = + cap.NotificationCap ref b {x. sr \ x = AllowSend \ rr \ x = AllowRecv}" +| "CapabilityMap (capability.CNodeCap ref n L l) = + cap.CNodeCap ref n (bin_to_bl l (uint L))" +| "CapabilityMap (capability.ThreadCap ref) = cap.ThreadCap ref" +| "CapabilityMap capability.DomainCap = cap.DomainCap" +| "CapabilityMap (capability.ReplyCap ref master gr) = + cap.ReplyCap ref master {x. gr \ x = AllowGrant \ x = AllowWrite}" +| "CapabilityMap capability.IRQControlCap = cap.IRQControlCap" +| "CapabilityMap (capability.IRQHandlerCap irq) = cap.IRQHandlerCap irq" +| "CapabilityMap (capability.Zombie p b n) = + cap.Zombie p (case b of ZombieTCB \ None | ZombieCNode n \ Some n) n" +| "CapabilityMap (capability.ArchObjectCap (arch_capability.ASIDPoolCap x y)) = + cap.ArchObjectCap (arch_cap.ASIDPoolCap x (ucast y))" +| "CapabilityMap (capability.ArchObjectCap (arch_capability.ASIDControlCap)) = + cap.ArchObjectCap (arch_cap.ASIDControlCap)" +| "CapabilityMap (capability.ArchObjectCap + (arch_capability.FrameCap word rghts sz d data)) = + cap.ArchObjectCap (arch_cap.FrameCap word (vm_rights_of rghts) sz d (mdata_map' data))" +| "CapabilityMap (capability.ArchObjectCap + (arch_capability.PageTableCap word pt_t data)) = + cap.ArchObjectCap (arch_cap.PageTableCap word pt_t (mdata_map' data))" +| "CapabilityMap (capability.ArchObjectCap + (arch_capability.VCPUCap v)) = + cap.ArchObjectCap (arch_cap.VCPUCap v)" + +(* FIXME: wellformed_cap_simps has lots of duplicates. 
*) +lemma cap_relation_imp_CapabilityMap: + "\wellformed_cap c; cap_relation c c'\ \ CapabilityMap c' = c" + apply (case_tac c; simp add: wellformed_cap_simps) + apply (rule set_eqI, clarsimp) + apply (case_tac "x", simp_all) + apply (rule set_eqI, clarsimp) + apply (case_tac "x", simp_all add: word_bits_def) + apply clarsimp + apply (simp add: set_eq_iff, rule allI) + apply (case_tac x; clarsimp) + apply (simp add: uint_of_bl_is_bl_to_bin bl_bin_bl[simplified]) + apply (simp add: zbits_map_def split: option.splits) + apply (rename_tac arch_cap) + apply clarsimp + apply (case_tac arch_cap, simp_all add: wellformed_cap_simps) + apply (simp add: ucast_down_ucast_id is_down) + done + +primrec ThStateMap :: "Structures_H.thread_state \ Structures_A.thread_state" where + "ThStateMap Structures_H.thread_state.Running = + Structures_A.thread_state.Running" +| "ThStateMap Structures_H.thread_state.Restart = + Structures_A.thread_state.Restart" +| "ThStateMap Structures_H.thread_state.Inactive = + Structures_A.thread_state.Inactive" +| "ThStateMap Structures_H.thread_state.IdleThreadState = + Structures_A.thread_state.IdleThreadState" +| "ThStateMap Structures_H.thread_state.BlockedOnReply = + Structures_A.thread_state.BlockedOnReply" +| "ThStateMap (Structures_H.thread_state.BlockedOnReceive oref grant) = + Structures_A.thread_state.BlockedOnReceive oref \ receiver_can_grant = grant \" +| "ThStateMap (Structures_H.thread_state.BlockedOnSend oref badge grant grant_reply call) = + Structures_A.thread_state.BlockedOnSend oref + \ sender_badge = badge, + sender_can_grant = grant, + sender_can_grant_reply = grant_reply, + sender_is_call = call \" +| "ThStateMap (Structures_H.thread_state.BlockedOnNotification oref) = + Structures_A.thread_state.BlockedOnNotification oref" + +lemma thread_state_relation_imp_ThStateMap: + "thread_state_relation ts ts' \ ThStateMap ts' = ts" + by (cases ts) simp_all + +definition + "LookupFailureMap \ \lf. case lf of + Fault_H.lookup_failure.InvalidRoot \ + ExceptionTypes_A.lookup_failure.InvalidRoot + | Fault_H.lookup_failure.MissingCapability n \ + ExceptionTypes_A.lookup_failure.MissingCapability n + | Fault_H.lookup_failure.DepthMismatch n m \ + ExceptionTypes_A.lookup_failure.DepthMismatch n m + | Fault_H.lookup_failure.GuardMismatch n g l \ + ExceptionTypes_A.lookup_failure.GuardMismatch n (bin_to_bl l (uint g))" + +lemma LookupFailureMap_lookup_failure_map: + "(\n g. 
lf = ExceptionTypes_A.GuardMismatch n g \ length g \ word_bits) + \ LookupFailureMap (lookup_failure_map lf) = lf" + by (clarsimp simp add: LookupFailureMap_def lookup_failure_map_def + uint_of_bl_is_bl_to_bin word_bits_def + simp del: bin_to_bl_def + split: ExceptionTypes_A.lookup_failure.splits) + +primrec ArchFaultMap :: "Fault_H.arch_fault \ ExceptionTypes_A.arch_fault" where + "ArchFaultMap (AARCH64_H.VMFault p m) = AARCH64_A.VMFault p m" +| "ArchFaultMap (AARCH64_H.VCPUFault w) = AARCH64_A.VCPUFault w" +| "ArchFaultMap (AARCH64_H.VGICMaintenance m) = AARCH64_A.VGICMaintenance m" +| "ArchFaultMap (AARCH64_H.VPPIEvent irq) = AARCH64_A.VPPIEvent irq" + +primrec FaultMap :: "Fault_H.fault \ ExceptionTypes_A.fault" where + "FaultMap (Fault_H.fault.CapFault ref b failure) = + ExceptionTypes_A.fault.CapFault ref b (LookupFailureMap failure)" +| "FaultMap (Fault_H.fault.ArchFault fault) = + ExceptionTypes_A.fault.ArchFault (ArchFaultMap fault)" +| "FaultMap (Fault_H.fault.UnknownSyscallException n) = + ExceptionTypes_A.fault.UnknownSyscallException n" +| "FaultMap (Fault_H.fault.UserException x y) = + ExceptionTypes_A.fault.UserException x y" + +lemma ArchFaultMap_arch_fault_map: "ArchFaultMap (arch_fault_map f) = f" + by (cases f; simp add: ArchFaultMap_def arch_fault_map_def) + +lemma FaultMap_fault_map[simp]: + "valid_fault ft \ FaultMap (fault_map ft) = ft" + apply (case_tac ft, simp_all) + apply (simp add: valid_fault_def LookupFailureMap_lookup_failure_map) + apply (rule ArchFaultMap_arch_fault_map) + done + +definition + "ArchTcbMap atcb \ + \ tcb_context = atcbContext atcb, tcb_vcpu = atcbVCPUPtr atcb \" + +lemma arch_tcb_relation_imp_ArchTcnMap: + "\ arch_tcb_relation atcb atcb'\ \ ArchTcbMap atcb' = atcb" + by (clarsimp simp: arch_tcb_relation_def ArchTcbMap_def) + +definition + "TcbMap tcb \ + \tcb_ctable = CapabilityMap (cteCap (tcbCTable tcb)), + tcb_vtable = CapabilityMap (cteCap (tcbVTable tcb)), + tcb_reply = CapabilityMap (cteCap (tcbReply tcb)), + tcb_caller = CapabilityMap (cteCap (tcbCaller tcb)), + tcb_ipcframe = CapabilityMap (cteCap (tcbIPCBufferFrame tcb)), + tcb_state = ThStateMap (tcbState tcb), + tcb_fault_handler = to_bl (tcbFaultHandler tcb), + tcb_ipc_buffer = tcbIPCBuffer tcb, + tcb_fault = map_option FaultMap (tcbFault tcb), + tcb_bound_notification = tcbBoundNotification tcb, + tcb_mcpriority = tcbMCP tcb, + tcb_arch = ArchTcbMap (tcbArch tcb)\" + +definition + "absCNode sz h a \ CNode sz (\bl. + if length bl = sz + then Some (CapabilityMap (case (h (a + of_bl bl * 2^cteSizeBits)) of + Some (KOCTE cte) \ cteCap cte)) + else None)" + +definition absHeap :: + "(machine_word \ vmpage_size) \ (machine_word \ nat) \ (machine_word \ pt_type) \ + (machine_word \ Structures_H.kernel_object) \ Structures_A.kheap" where + "absHeap ups cns pt_types h \ \x. + case h x of + Some (KOEndpoint ep) \ Some (Endpoint (EndpointMap ep)) + | Some (KONotification ntfn) \ Some (Notification (AEndpointMap ntfn)) + | Some KOKernelData \ undefined \ \forbidden by pspace_relation\ + | Some KOUserData \ map_option (ArchObj \ DataPage False) (ups x) + | Some KOUserDataDevice \ map_option (ArchObj \ DataPage True) (ups x) + | Some (KOTCB tcb) \ Some (TCB (TcbMap tcb)) + | Some (KOCTE cte) \ map_option (\sz. 
absCNode sz h x) (cns x) + | Some (KOArch ako) \ map_option ArchObj (absHeapArch h pt_types x ako) + | None \ None" + +lemma unaligned_page_offsets_helper: + "\is_aligned y (pageBitsForSize vmpage_size); n\0; + n < 2 ^ (pageBitsForSize vmpage_size - pageBits)\ + \ \ is_aligned (y + n * 2 ^ pageBits :: machine_word) (pageBitsForSize vmpage_size)" + apply (simp (no_asm_simp) add: is_aligned_mask) + apply (simp add: mask_add_aligned) + apply (cut_tac mask_eq_iff_w2p [of "pageBitsForSize vmpage_size" "n * 2 ^ pageBits"]) + prefer 2 + apply (case_tac vmpage_size, simp_all add: word_size bit_simps) + apply (cut_tac word_power_nonzero_64[of n pageBits]; + simp add: word_bits_conv pageBits_def) + prefer 2 + apply (case_tac vmpage_size, simp_all add: bit_simps word_size) + apply (frule less_trans[of n _ "0x10000000000000"], simp+)+ + apply clarsimp + apply (case_tac vmpage_size, simp_all add: bit_simps) + apply (frule_tac i=n and k="0x1000" in word_mult_less_mono1, simp+)+ + done + +lemma pspace_aligned_distinct_None: + (* NOTE: life would be easier if pspace_aligned and pspace_distinct were defined on PSpace instead of the whole kernel state. *) + assumes pspace_aligned: "\x\dom ha. is_aligned (x :: machine_word) (obj_bits (the (ha x)))" + assumes pspace_distinct: + "\x y ko ko'. + ha x = Some ko \ ha y = Some ko' \ x \ y \ + {x..x + (2 ^ obj_bits ko - 1)} \ {y..y + (2 ^ obj_bits ko' - 1)} = {}" + shows "\ha x = Some ko; y \ {0<..<2^(obj_bits ko)}\ \ ha (x+y) = None" + using pspace_aligned[simplified dom_def, simplified] + apply (erule_tac x=x in allE) + apply (rule ccontr) + apply clarsimp + apply (rename_tac ko') + using pspace_distinct pspace_aligned[simplified dom_def, simplified] + apply (erule_tac x=x in allE) + apply (erule_tac x="x+y" in allE)+ + apply (clarsimp simp add: word_gt_0) + apply (clarsimp simp add: ucast_of_nat_small is_aligned_mask mask_2pm1[symmetric]) + apply (frule (1) is_aligned_AND_less_0) + apply (clarsimp simp add: word_plus_and_or_coroll le_word_or2) + apply (simp add: or.assoc le_word_or2) + apply (simp add: word_plus_and_or_coroll[symmetric]) + apply (subgoal_tac "x + y \ x + mask (obj_bits ko)", simp) + apply (rule word_add_le_mono2) + apply (simp add: mask_def plus_one_helper) + apply (thin_tac "~ P" for P)+ + apply (thin_tac "(x::'a::len word) < y" for x y)+ + apply (thin_tac "x = Some y" for x y)+ + apply (thin_tac "x && mask (obj_bits ko') = 0" for x) + apply (thin_tac "x && y = 0") + apply (clarsimp simp add: dvd_def word_bits_len_of word_bits_conv + and_mask_dvd_nat[symmetric]) + apply (cut_tac x=x in unat_lt2p) + apply (cut_tac x="mask (obj_bits ko)::machine_word" in unat_lt2p) + apply (simp add: mult.commute + add.commute[of "unat (mask (obj_bits ko))"]) + apply (case_tac "k=0", simp+) + apply (subgoal_tac "obj_bits ko\64") + prefer 2 + apply (rule ccontr) + apply (simp add: not_le) + apply (frule_tac a="2::nat" and n=64 in power_strict_increasing, simp+) + apply (case_tac "k=1", simp) + apply (cut_tac m=k and n="2 ^ obj_bits ko" in n_less_n_mult_m, + (simp(no_asm_simp))+) + apply (simp only: mult.commute) + apply (thin_tac "x = y" for x y)+ + apply (clarsimp simp add: le_less) + apply (erule disjE) + prefer 2 + apply (simp add: mask_def) + apply (subgoal_tac "obj_bits ko <= (63::nat)", simp_all) + apply (simp add: mask_def unat_minus_one word_bits_conv) + apply (cut_tac w=k and c="2 ^ obj_bits ko" and b="2^(64-obj_bits ko)" + in less_le_mult_nat) + apply (simp_all add: power_add[symmetric]) + apply (rule ccontr) + apply (simp add: not_less) + apply (simp 
add: le_less[of "2 ^ (64 - obj_bits ko)"]) + apply (erule disjE) + prefer 2 + apply (clarsimp simp add: power_add[symmetric]) + apply clarsimp + apply (drule mult_less_mono1[of "2 ^ (64 - obj_bits ko)" _ "2 ^ obj_bits ko"]) + apply (simp add: power_add[symmetric])+ + done + +lemma pspace_aligned_distinct_None': + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + shows "\kheap s x = Some ko; y \ {0<..<2^(obj_bits ko)}\ \ kheap s (x+y) = None" + apply (rule pspace_aligned_distinct_None) + apply (rule pspace_aligned[simplified pspace_aligned_def]) + apply (rule pspace_distinct[simplified pspace_distinct_def]) + apply assumption+ + done + +lemma n_less_2p_pageBitsForSize: + "n < 2 ^ (pageBitsForSize sz - pageBits) \ n * 2 ^ pageBits < 2 ^ pageBitsForSize sz" + for n::machine_word + apply (subst mult_ac) + apply (subst shiftl_t2n[symmetric]) + apply (erule shiftl_less_t2n) + using pbfs_less_wb' by (simp add: word_bits_def) + +lemma pte_offset_in_datapage: + "\ n < 2 ^ (pageBitsForSize sz - pageBits); n \ 0 \ \ + (n << pageBits) - (ucast off << pte_bits) < 2 ^ pageBitsForSize sz" + for n::machine_word and off::pt_index + apply (frule n_less_2p_pageBitsForSize) + apply (simp only: bit_simps) + apply (subst shiftl_t2n) + apply (rule order_le_less_trans[rotated], assumption) + apply (rule word_le_imp_diff_le) + prefer 2 + apply (simp add: mult_ac) + apply (subst shiftl_t2n[symmetric]) + apply (subst (asm) mult_ac) + apply (subst (asm) shiftl_t2n[symmetric])+ + apply (rule order_trans[where y="mask pageBits"]) + apply (simp add: le_mask_shiftl_le_mask[where n=9] ucast_leq_mask pageBits_def) + apply word_bitwise + apply (clarsimp simp: nth_w2p pageBits_def rev_bl_order_simps) + apply (cases sz; simp add: pageBits_def ptTranslationBits_def) + done + +lemma absHeap_correct: + fixes s' :: kernel_state + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + assumes valid_objs: "valid_objs s" + assumes pspace_relation: "pspace_relation (kheap s) (ksPSpace s')" + assumes ghost_relation: "ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') + (gsPTTypes (ksArchState s'))" + shows "absHeap (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s')) (ksPSpace s') = kheap s" +proof - + from ghost_relation + have gsUserPages: + "\a sz. (\dev. kheap s a = Some (ArchObj (DataPage dev sz))) \ + gsUserPages s' a = Some sz" + and gsCNodes: + "\a n. (\cs. kheap s a = Some (CNode n cs) \ well_formed_cnode_n n cs) \ + gsCNodes s' a = Some n" + and gsPTs: + "\a pt_t. (\pt. 
kheap s a = Some (ArchObj (PageTable pt)) \ pt_t = pt_type pt) \ + gsPTTypes (ksArchState s') a = Some pt_t" + by (fastforce simp add: ghost_relation_def)+ + + show "?thesis" + supply image_cong_simp [cong del] + apply (rule ext) + apply (simp add: absHeap_def split: option.splits) + apply (rule conjI) + using pspace_relation + apply (clarsimp simp: pspace_relation_def pspace_dom_def UNION_eq dom_def Collect_eq) + apply (erule_tac x=x in allE) + apply clarsimp + apply (case_tac "kheap s x", simp) + apply (erule_tac x=x in allE, clarsimp) + apply (erule_tac x=x in allE, simp add: Ball_def) + apply (erule_tac x=x in allE, clarsimp) + apply (rename_tac a) + apply (case_tac a; simp add: other_obj_relation_def + split: if_split_asm Structures_H.kernel_object.splits) + apply (rename_tac sz cs) + apply (clarsimp simp: image_def cte_map_def well_formed_cnode_n_def Collect_eq dom_def) + apply (erule_tac x="replicate sz False" in allE)+ + apply simp + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: image_def) + apply (erule allE, drule_tac x=0 in bspec, simp, fastforce) + apply (erule_tac x=0 in allE, simp add: not_less) + apply (rename_tac vmpage_size) + apply (case_tac vmpage_size; simp add: bit_simps) + + apply (clarsimp split: kernel_object.splits) + apply (intro conjI impI allI) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply clarsimp + apply (case_tac ko; simp add: other_obj_relation_def) + apply (clarsimp simp: cte_relation_def split: if_split_asm) + apply (clarsimp simp: ep_relation_def EndpointMap_def + split: Structures_A.endpoint.splits) + apply (clarsimp simp: EndpointMap_def split: Structures_A.endpoint.splits) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm)+ + + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko; simp add: other_obj_relation_def) + apply (clarsimp simp: cte_relation_def split: if_split_asm) + apply (clarsimp simp: ntfn_relation_def AEndpointMap_def + split: Structures_A.ntfn.splits) + apply (clarsimp simp: AEndpointMap_def split: Structures_A.ntfn.splits) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm)+ + + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko; simp add: other_obj_relation_def) + apply (clarsimp simp: cte_relation_def split: if_split_asm) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm)+ + + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (rename_tac vmpage_size) + apply (cut_tac a=y and sz=vmpage_size in gsUserPages, clarsimp split: if_split_asm) + apply (case_tac "n=0", simp) + apply (case_tac "kheap s (y + n * 2 ^ pageBits)") + apply (rule ccontr) + apply (clarsimp simp: shiftl_t2n mult_ac dest!: gsUserPages[symmetric, THEN iffD1] ) + using pspace_aligned + apply (simp add: pspace_aligned_def dom_def) + apply (erule_tac x=y in allE) + apply 
(case_tac "n=0",(simp split: if_split_asm)+) + apply (frule (2) unaligned_page_offsets_helper) + apply (frule_tac y="n*2^pageBits" in pspace_aligned_distinct_None' + [OF pspace_aligned pspace_distinct]) + apply simp + apply (rule conjI, clarsimp simp add: word_gt_0) + apply (erule n_less_2p_pageBitsForSize) + apply (clarsimp simp: shiftl_t2n mult_ac) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (rename_tac vmpage_size) + apply (cut_tac a=y and sz=vmpage_size in gsUserPages, clarsimp split: if_split_asm) + apply (case_tac "n=0", simp) + apply (case_tac "kheap s (y + n * 2 ^ pageBits)") + apply (rule ccontr) + apply (clarsimp simp: shiftl_t2n mult_ac dest!: gsUserPages[symmetric, THEN iffD1]) + using pspace_aligned + apply (simp add: pspace_aligned_def dom_def) + apply (erule_tac x=y in allE) + apply (case_tac "n=0",simp+) + apply (frule (2) unaligned_page_offsets_helper) + apply (frule_tac y="n*2^pageBits" in pspace_aligned_distinct_None' + [OF pspace_aligned pspace_distinct]) + apply simp + apply (rule conjI, clarsimp simp add: word_gt_0) + apply (erule n_less_2p_pageBitsForSize) + apply (clarsimp simp: shiftl_t2n mult_ac) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + prefer 2 + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp add: TcbMap_def tcb_relation_def valid_obj_def) + apply (rename_tac tcb y tcb') + apply (case_tac tcb) + apply (case_tac tcb') + apply (simp add: thread_state_relation_imp_ThStateMap) + apply (subgoal_tac "map_option FaultMap (tcbFault tcb) = tcb_fault") + prefer 2 + apply (simp add: fault_rel_optionation_def) + using valid_objs[simplified valid_objs_def dom_def fun_app_def, simplified] + apply (erule_tac x=y in allE) + apply (clarsimp simp: valid_obj_def valid_tcb_def + split: option.splits) + using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: cap_relation_imp_CapabilityMap valid_obj_def + valid_tcb_def ran_tcb_cap_cases valid_cap_def2 + arch_tcb_relation_imp_ArchTcnMap) + apply (simp add: absCNode_def cte_map_def) + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: other_obj_relation_def + split: if_split_asm) + prefer 2 + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm) + apply (simp add: cte_map_def) + apply (clarsimp simp add: cte_relation_def) + apply (cut_tac a=y and n=sz in gsCNodes, clarsimp) + using pspace_aligned[simplified pspace_aligned_def] + apply (drule_tac x=y in bspec, clarsimp) + apply clarsimp + apply (case_tac "(of_bl ya::machine_word) << cte_level_bits = 0", simp) + apply (rule ext) + apply simp + apply (rule conjI) + prefer 2 + using valid_objs[simplified valid_objs_def Ball_def dom_def + fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: valid_obj_def 
valid_cs_def valid_cs_size_def + well_formed_cnode_n_def dom_def Collect_eq) + apply (frule_tac x=ya in spec, simp) + apply (erule_tac x=bl in allE) + apply clarsimp+ + apply (frule pspace_relation_absD[OF _ pspace_relation]) + apply (simp add: cte_map_def) + apply (drule_tac x="y + of_bl bl * 2^cte_level_bits" in spec) + apply (clarsimp simp: shiftl_t2n mult_ac) + apply (erule_tac x="cte_relation bl" in allE) + apply (erule impE) + apply (fastforce simp add: well_formed_cnode_n_def) + apply clarsimp + apply (clarsimp simp add: cte_relation_def) + apply (rule cap_relation_imp_CapabilityMap) + using valid_objs[simplified valid_objs_def Ball_def dom_def + fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp: valid_obj_def valid_cs_def valid_cap_def2 ran_def) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + apply (subgoal_tac "kheap s (y + of_bl ya * 2^cte_level_bits) = None") + prefer 2 + using valid_objs[simplified valid_objs_def Ball_def dom_def fun_app_def] + apply (erule_tac x=y in allE) + apply (clarsimp simp add: valid_obj_def valid_cs_def valid_cs_size_def) + apply (rule pspace_aligned_distinct_None'[OF + pspace_aligned pspace_distinct], assumption) + apply (clarsimp simp: word_neq_0_conv power_add cte_index_repair) + apply (simp add: well_formed_cnode_n_def dom_def Collect_eq shiftl_t2n mult_ac) + apply (erule_tac x=ya in allE)+ + apply (rule word_mult_less_mono1) + apply (subgoal_tac "sz = length ya") + apply simp + apply (rule of_bl_length, (simp add: word_bits_def)+)[1] + apply fastforce + apply (simp add: cte_level_bits_def) + apply (simp add: word_bits_conv cte_level_bits_def) + apply (drule_tac a="2::nat" in power_strict_increasing, simp+) + apply (simp add: shiftl_t2n mult_ac) + apply (rule ccontr, clarsimp) + apply (cut_tac a="y + of_bl ya * 2^cte_level_bits" and n=yc in gsCNodes) + apply clarsimp + + (* mapping architecture-specific objects *) + apply clarsimp + apply (erule pspace_dom_relatedE[OF _ pspace_relation]) + apply (case_tac ko, simp_all add: other_obj_relation_def) + apply (clarsimp simp add: cte_relation_def split: if_split_asm) + apply (rename_tac arch_kernel_object y ko P arch_kernel_obj) + apply (case_tac arch_kernel_object, simp_all add: absHeapArch_def + split: asidpool.splits) + + apply (in_case "KOASIDPool ?pool") + apply clarsimp + apply (case_tac arch_kernel_obj) + apply (simp add: other_obj_relation_def asid_pool_relation_def + inv_def o_def) + apply (clarsimp simp add: pte_relation_def) + apply (clarsimp split: if_split_asm)+ + apply (simp add: other_obj_relation_def) + + apply (in_case "KOPTE ?pte") + apply (case_tac arch_kernel_obj; + simp add: other_obj_relation_def asid_pool_relation_def inv_def o_def) + apply clarsimp + apply (rename_tac p pte pt idx) + apply (frule pspace_alignedD, rule pspace_aligned) + apply (clarsimp simp add: pte_relation_def) + apply (prop_tac "pt_at (pt_type pt) p s", simp add: obj_at_def) + apply (drule page_table_at_cross[OF _ pspace_aligned pspace_distinct pspace_relation]) + apply (clarsimp simp: page_table_at'_def typ_at'_def ko_wp_at'_def) + apply (cut_tac a=p and pt_t="pt_type pt" in gsPTs, clarsimp) + apply (case_tac "pt_type pt"; clarsimp) + apply (in_case "VSRootPT_T") + apply (clarsimp simp: absPageTable_def split del: if_split split: option.splits) + apply (rule conjI, clarsimp) + apply (rule sym) + apply (rule pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct], assumption) + apply simp + apply (case_tac "idx << pte_bits = 0", simp) + apply (clarsimp simp: 
word_neq_0_conv) + apply (rule shiftl_less_t2n) + apply (simp add: table_size_def le_mask_iff_lt_2n[THEN iffD1]) + apply (simp add: table_size_bounded[unfolded word_bits_def, simplified]) + apply (clarsimp split del: if_split) + apply (prop_tac "idx << pte_bits = 0") + apply (rename_tac pt_t') + apply (cut_tac a="p + (idx << pte_bits)" and pt_t=pt_t' in gsPTs) + apply clarsimp + apply (rule ccontr) + apply (drule_tac y="idx << pte_bits" in pspace_aligned_distinct_None' + [OF pspace_aligned pspace_distinct]) + apply (clarsimp simp: word_neq_0_conv table_size_def) + apply (rule shiftl_less_t2n, simp) + apply (erule order_le_less_trans) + apply (simp add: mask_def bit_simps) + apply (simp add: bit_simps) + apply simp + apply (thin_tac "pte_relation' pte pte'" for pte pte') + apply (clarsimp simp: pt_bits_def) + apply (case_tac pt; clarsimp) + apply (rename_tac vs) + apply (clarsimp simp: absPageTable0_def) + apply (rule conjI, clarsimp) + apply (rule ext, rename_tac offs) + apply (erule_tac x="ucast offs" in allE, erule impE, rule ucast_leq_mask) + apply (simp add: bit_simps) + apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps) + apply (erule_tac x="ucast offs" in allE) + apply clarsimp + apply (rename_tac pte y) + apply (frule pspace_relation_absD, rule pspace_relation) + apply clarsimp + apply (drule_tac x="ucast offs" in bspec) + apply clarsimp + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: pte_relation_def ucast_ucast_mask ge_mask_eq vs_index_bits_def) + apply (erule pspace_valid_objsE, rule valid_objs) + apply (clarsimp simp: valid_obj_def) + apply (erule_tac x=offs in allE) + apply (clarsimp simp: wellformed_pte_def) + apply (case_tac "vs offs"; clarsimp split: if_split_asm) + apply (rule set_eqI, simp) + apply (rename_tac x, case_tac x; simp) + apply (simp add: ucast_ucast_mask ge_mask_eq) + apply clarsimp + apply (erule_tac x="ucast off" in allE) + apply (erule impE) + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps) + apply (frule pspace_relation_absD, rule pspace_relation) + apply clarsimp + apply (drule_tac x="ucast off" in bspec) + apply clarsimp + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: pte_relation_def ucast_ucast_mask ge_mask_eq vs_index_bits_def) + apply (case_tac "vs off"; simp add: ucast_leq_mask ppn_len_def) + + (* NormalPT_T is an exact duplicate of the VSRootPT_T case, but I don't see any good way + to factor out the commonality *) + apply (in_case "NormalPT_T") + apply (clarsimp simp: absPageTable_def split del: if_split split: option.splits) + apply (rule conjI, clarsimp) + apply (rule sym) + apply (rule pspace_aligned_distinct_None'[OF pspace_aligned pspace_distinct], assumption) + apply simp + apply (case_tac "idx << pte_bits = 0", simp) + apply (clarsimp simp: word_neq_0_conv) + apply (rule shiftl_less_t2n) + apply (simp add: table_size_def le_mask_iff_lt_2n[THEN iffD1]) + apply (simp add: table_size_bounded[unfolded word_bits_def, simplified]) + apply (clarsimp split del: if_split) + apply (prop_tac "idx << pte_bits = 0") + apply (rename_tac pt_t') + apply (cut_tac a="p + (idx << pte_bits)" and pt_t=pt_t' in gsPTs) + apply clarsimp + apply (rule ccontr) + apply (drule_tac y="idx << pte_bits" in pspace_aligned_distinct_None' + [OF pspace_aligned pspace_distinct]) + apply (clarsimp simp: word_neq_0_conv table_size_def) + apply (rule shiftl_less_t2n, simp) + apply (erule order_le_less_trans) + apply 
(simp add: mask_def bit_simps) + apply (simp add: bit_simps) + apply simp + apply (thin_tac "pte_relation' pte pte'" for pte pte') + apply (clarsimp simp: pt_bits_def) + apply (case_tac pt; clarsimp) + apply (rename_tac vs) + apply (clarsimp simp: absPageTable0_def) + apply (rule conjI, clarsimp) + apply (rule ext, rename_tac offs) + apply (erule_tac x="ucast offs" in allE, erule impE, rule ucast_leq_mask) + apply (simp add: bit_simps) + apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps) + apply (erule_tac x="ucast offs" in allE) + apply clarsimp + apply (rename_tac pte y) + apply (frule pspace_relation_absD, rule pspace_relation) + apply clarsimp + apply (drule_tac x="ucast offs" in bspec) + apply clarsimp + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: pte_relation_def ucast_ucast_mask ge_mask_eq vs_index_bits_def) + apply (erule pspace_valid_objsE, rule valid_objs) + apply (clarsimp simp: valid_obj_def) + apply (erule_tac x=offs in allE) + apply (clarsimp simp: wellformed_pte_def) + apply (case_tac "vs offs"; clarsimp split: if_split_asm) + apply (rule set_eqI, simp) + apply (rename_tac x, case_tac x; simp) + apply (simp add: ucast_ucast_mask ge_mask_eq) + apply clarsimp + apply (erule_tac x="ucast off" in allE) + apply (erule impE) + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp dest!: koTypeOf_pte simp: objBits_simps) + apply (frule pspace_relation_absD, rule pspace_relation) + apply clarsimp + apply (drule_tac x="ucast off" in bspec) + apply clarsimp + apply (rule ucast_leq_mask) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: pte_relation_def ucast_ucast_mask ge_mask_eq vs_index_bits_def) + apply (case_tac "vs off"; simp add: ucast_leq_mask ppn_len_def) + + apply (in_case "DataPage ?p ?sz") + apply (clarsimp split: if_splits) + + apply (in_case "KOVCPU ?vcpu") + apply clarsimp + apply (rename_tac arch_kernel_obj vcpu) + apply (case_tac arch_kernel_obj; + clarsimp simp: other_obj_relation_def pte_relation_def split: if_splits) + apply (rename_tac vcpu') + apply (case_tac vcpu') + apply (clarsimp simp: vcpu_relation_def split: vcpu.splits) + done +qed + +definition + "EtcbMap tcb \ + \tcb_priority = tcbPriority tcb, + time_slice = tcbTimeSlice tcb, + tcb_domain = tcbDomain tcb\" + +definition absEkheap :: + "(machine_word \ Structures_H.kernel_object) \ obj_ref \ etcb option" where + "absEkheap h \ \x. + case h x of + Some (KOTCB tcb) \ Some (EtcbMap tcb) + | _ \ None" + +lemma absEkheap_correct: + assumes pspace_relation: "pspace_relation (kheap s) (ksPSpace s')" + assumes ekheap_relation: "ekheap_relation (ekheap s) (ksPSpace s')" + assumes vetcbs: "valid_etcbs s" + shows "absEkheap (ksPSpace s') = ekheap s" + apply (rule ext) + apply (clarsimp simp: absEkheap_def split: option.splits Structures_H.kernel_object.splits) + apply (subgoal_tac "\x. (\tcb. kheap s x = Some (TCB tcb)) = + (\tcb'. 
ksPSpace s' x = Some (KOTCB tcb'))") + using vetcbs ekheap_relation + apply (clarsimp simp: valid_etcbs_def is_etcb_at_def dom_def ekheap_relation_def st_tcb_at_def obj_at_def) + apply (erule_tac x=x in allE)+ + apply (rule conjI, force) + apply clarsimp + apply (rule conjI, clarsimp simp: EtcbMap_def etcb_relation_def)+ + apply clarsimp + using pspace_relation + apply (clarsimp simp add: pspace_relation_def pspace_dom_def UNION_eq + dom_def Collect_eq) + apply (rule iffI) + apply (erule_tac x=x in allE)+ + apply (case_tac "ksPSpace s' x", clarsimp) + apply (erule_tac x=x in allE, clarsimp) + apply clarsimp + apply (case_tac a, simp_all add: other_obj_relation_def) + apply (insert pspace_relation) + apply (clarsimp simp: obj_at'_def) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE) + apply (clarsimp simp: other_obj_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + AARCH64_A.arch_kernel_obj.split_asm)+ + done + +text \The following function can be used to reverse cte_map.\ +definition + "cteMap cns \ \p. + let P = (\(a,bl). cte_map (a,bl) = p \ cns a = Some (length bl)) + in if \x. P x + then (SOME x. P x) + else (p && ~~ mask tcbBlockSizeBits, bin_to_bl 3 (uint (p >> cte_level_bits)))" + +lemma tcb_cap_cases_length: + "tcb_cap_cases b = Some x \ length b = 3" + by (simp add: tcb_cap_cases_def tcb_cnode_index_def split: if_split_asm) + +lemma TCB_implies_KOTCB: + "\pspace_relation (kheap s) (ksPSpace s'); kheap s a = Some (TCB tcb)\ + \ \tcb'. ksPSpace s' a = Some (KOTCB tcb') \ tcb_relation tcb tcb'" + apply (clarsimp simp add: pspace_relation_def pspace_dom_def + dom_def UNION_eq Collect_eq) + apply (erule_tac x=a in allE)+ + apply (clarsimp simp add: other_obj_relation_def + split: Structures_H.kernel_object.splits) + apply (drule iffD1) + apply (fastforce simp add: dom_def image_def) + apply clarsimp + done + +lemma cte_at_CNodeI: + "\kheap s a = Some (CNode (length b) cs); well_formed_cnode_n (length b) cs\ + \ cte_at (a,b) s" + apply (subgoal_tac "\y. cs b = Some y") + apply clarsimp + apply (rule_tac cte=y in cte_wp_at_cteI[of s _ "length b" cs]; simp) + apply (simp add: well_formed_cnode_n_def dom_def Collect_eq) + done + +lemma cteMap_correct: + assumes rel: "(s,s') \ state_relation" + assumes valid_objs: "valid_objs s" + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + assumes pspace_aligned': "pspace_aligned' s'" + assumes pspace_distinct': "pspace_distinct' s'" + shows "p \ dom (caps_of_state s) \ cteMap (gsCNodes s') (cte_map p) = p" +proof - + from rel have gsCNodes: + "\a n. (\cs. 
kheap s a = Some (CNode n cs) \ well_formed_cnode_n n cs) \ + gsCNodes s' a = Some n" + by (simp add: state_relation_def ghost_relation_def) + show ?thesis + apply (simp add: dom_def cteMap_def split: if_split_asm) + apply (clarsimp simp: caps_of_state_cte_wp_at split: if_split_asm) + apply (drule cte_wp_cte_at) + apply (intro conjI impI) + apply (rule some_equality) + apply (clarsimp simp add: split_def) + apply (frule gsCNodes[rule_format,THEN iffD2]) + apply clarsimp + apply (frule (1) cte_at_CNodeI) + apply (frule (2) cte_map_inj_eq[OF _ _ _ valid_objs pspace_aligned pspace_distinct]) + apply clarsimp + apply (clarsimp simp add: split_def) + apply (frule gsCNodes[rule_format,THEN iffD2]) + apply clarsimp + apply (frule (1) cte_at_CNodeI) + apply (frule (2) cte_map_inj_eq[OF _ _ _ valid_objs pspace_aligned pspace_distinct]) + apply clarsimp + apply (case_tac p) + apply (clarsimp simp add: cte_wp_at_cases) + apply (erule disjE) + apply clarsimp + apply (drule_tac x=a in spec, drule_tac x=b in spec, simp) + apply (cut_tac a=a and n=sz in gsCNodes[rule_format]) + apply clarsimp + apply (simp add: well_formed_cnode_n_def dom_def Collect_eq) + apply (erule_tac x=b in allE) + apply simp + apply (thin_tac "ALL x. P x" for P) + apply clarsimp + apply (frule TCB_implies_KOTCB[OF state_relation_pspace_relation[OF rel]]) + apply clarsimp + using pspace_aligned'[simplified pspace_aligned'_def] + apply (drule_tac x=a in bspec, simp add: dom_def) + apply (simp add: objBitsKO_def cte_map_def) + apply (rule conjI[rotated]) + apply (drule tcb_cap_cases_length) + apply (frule_tac b=b and c=cte_level_bits in bin_to_bl_of_bl_eq) + apply (fastforce simp: cte_level_bits_def objBits_defs shiftl_t2n mult_ac)+ + apply (case_tac "b = [False, False, False]") + apply simp + apply (frule_tac b=b and c=cte_level_bits in bin_to_bl_of_bl_eq) + apply (fastforce simp: tcb_cap_cases_length cte_level_bits_def objBits_defs)+ + apply (subgoal_tac "ksPSpace s' (cte_map (a, b)) = None") + prefer 2 + apply (rule ccontr) + apply clarsimp + using pspace_distinct'[simplified pspace_distinct'_def] + apply (drule_tac x=a in bspec, simp add: dom_def) + apply (simp add: ps_clear_def dom_def mask_2pm1[symmetric] x_power_minus_1) + apply (simp add: objBitsKO_def) + apply (drule_tac a="cte_map (a, b)" in equals0D) + apply (clarsimp simp add: cte_map_def) + apply (drule tcb_cap_cases_length) + apply (erule impE) + apply (rule word_plus_mono_right) + apply (cut_tac 'a=machine_word_len and xs=b in of_bl_length, fastforce simp: word_bits_conv) + apply (drule_tac k="2^cte_level_bits" in word_mult_less_mono1) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + apply (simp add: mask_def) + apply (rule ccontr) + apply (simp add: not_le shiftl_t2n mult_ac) + apply (drule (1) less_trans, fastforce simp: cte_level_bits_def objBits_defs) + apply (drule is_aligned_no_overflow'[simplified mask_2pm1[symmetric]]) + apply (simp add: word_bits_conv) + apply simp + apply (erule impE) + apply (drule is_aligned_no_overflow'[simplified mask_2pm1[symmetric]]) + apply (cut_tac 'a=machine_word_len and xs=b in of_bl_length, simp add: word_bits_conv) + apply (drule_tac k="2^cte_level_bits" in word_mult_less_mono1) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + apply (erule word_random) + apply (rule order.strict_implies_order) + apply (simp add: shiftl_t2n mult_ac) + apply (erule less_trans) + apply (fastforce simp: cte_level_bits_def objBits_defs mask_def) + apply (simp add: mult.commute[of _ "2^cte_level_bits"] + shiftl_t2n[of _ 
cte_level_bits, simplified, symmetric]) + apply word_bitwise + apply simp + apply (case_tac b, simp) + apply (rename_tac b, case_tac b, simp) + apply (rename_tac b, case_tac b, simp) + apply (clarsimp simp add: test_bit_of_bl eval_nat_numeral cte_level_bits_def) + apply (simp add: cte_map_def shiftl_t2n mult_ac split: option.splits) + apply (drule tcb_cap_cases_length) + apply (rule of_bl_mult_and_not_mask_eq[where m=cte_level_bits, simplified]) + apply (fastforce simp: cte_level_bits_def objBits_defs)+ + done +qed + +definition (* NOTE: cnp maps addresses to CNode, offset pairs *) + "absIsOriginalCap cnp h \ \(oref,cref). + cnp (cte_map (oref, cref)) = (oref, cref) \ + cte_map (oref,cref) : dom (map_to_ctes h) \ + (\cte. map_to_ctes h (cte_map (oref,cref)) = Some cte \ + (cteCap cte \ capability.NullCap) \ mdbRevocable (cteMDBNode cte))" + +lemma absIsOriginalCap_correct: + assumes valid_ioc: "valid_ioc s" + assumes valid_objs: "valid_objs s" + assumes rel: "(s,s') \ state_relation" + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + assumes pspace_aligned': "pspace_aligned' s'" + assumes pspace_distinct': "pspace_distinct' s'" + shows "absIsOriginalCap (cteMap (gsCNodes s')) (ksPSpace s') = is_original_cap s" +proof - + from valid_ioc + have no_cap_not_orig: + "\p. caps_of_state s p = None \ is_original_cap s p = False" + and null_cap_not_orig: + "\p. caps_of_state s p = Some cap.NullCap \ is_original_cap s p = False" + by (fastforce simp: valid_ioc_def2 null_filter_def)+ + + have cnp: + "\a b. caps_of_state s (a, b) \ None \ + (cteMap (gsCNodes s')) (cte_map (a, b)) = (a, b)" + using cteMap_correct[OF rel valid_objs pspace_aligned pspace_distinct + pspace_aligned' pspace_distinct'] + by (clarsimp simp: dom_def) + + show ?thesis + apply (subgoal_tac "revokable_relation (is_original_cap s) + (null_filter (caps_of_state s)) (ctes_of s') \ + pspace_relation (kheap s) (ksPSpace s')") + prefer 2 + using rel + apply (clarsimp simp add: state_relation_def) + apply (rule ext) + apply (clarsimp simp add: revokable_relation_def + null_filter_def absIsOriginalCap_def + split: if_split_asm) + apply (erule_tac x=a in allE) + apply (erule_tac x=b in allE) + apply (case_tac "caps_of_state s (a, b)") + apply (clarsimp simp: no_cap_not_orig) + apply (frule (1) pspace_relation_cte_wp_atI[OF _ _ valid_objs]) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (subgoal_tac "(a,b) = (aa,ba)", simp) + apply (cut_tac a=aa and b=ba in cnp[rule_format], simp) + apply (simp add: cte_map_def) + apply simp + apply (case_tac "aa = cap.NullCap") + apply (clarsimp simp add: null_cap_not_orig) + apply (frule (1) pspace_relation_ctes_ofI + [OF _ caps_of_state_cteD pspace_aligned' pspace_distinct']) + apply clarsimp + apply (frule (1) pspace_relation_ctes_ofI + [OF _ caps_of_state_cteD pspace_aligned' pspace_distinct']) + apply (clarsimp simp add: dom_def) + apply (cut_tac a=a and b=b in cnp[rule_format], simp+) + apply (case_tac cte, clarsimp) + apply (case_tac aa, simp_all) + apply (rename_tac arch_cap) + apply (case_tac arch_cap, simp_all) + done +qed + +text \ + In the executable specification, + a linked list connects all children of a certain node. + More specifically, the predicate @{term "subtree h c c'"} holds iff + the map @{term h} from addresses to CTEs contains capabilities + at the addresses @{term c} and @{term c'} and + the latter is a child of the former. 
+ + In the abstract specification, the capability-derivation tree @{term "cdt s"} + maps the address of each capability to the address of its immediate parent. + + The definition below takes a binary predicate @{term ds} as parameter, + which represents a childhood relation like @{term "subtree h"}, + and converts this into an optional function to the immediate parent + in the same format as @{term "cdt s"}. +\ +definition + "parent_of' ds \ \x. + if \p. \ ds p x + then None + else Some (THE p. ds p x \ (\q. ds p q \ ds q x \ p = q))" + +definition + "absCDT cnp h \ \(oref,cref). + if cnp (cte_map (oref, cref)) = (oref, cref) + then map_option cnp (parent_of' (subtree h) (cte_map (oref, cref))) + else None" + +lemma valid_mdb_mdb_cte_at: + "valid_mdb s \ mdb_cte_at (\p. \c. caps_of_state s p = Some c \ cap.NullCap \ c) (cdt s)" + by (simp add: valid_mdb_def2) + +lemma absCDT_correct': + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_distinct: "pspace_distinct s" + assumes pspace_aligned': "pspace_aligned' s'" + assumes pspace_distinct': "pspace_distinct' s'" + assumes valid_objs: "valid_objs s" + assumes valid_mdb: "valid_mdb s" + assumes rel: "(s,s') \ state_relation" + shows + "absCDT (cteMap (gsCNodes s')) (ctes_of s') = cdt s" (is ?P) + "(case (cdt s x) of None \ caps_of_state s x \ None \ (\q. \(ctes_of s' \ q \ cte_map x)) | + Some p \ + ctes_of s' \ cte_map p \ cte_map x \ + (\q. ctes_of s' \ cte_map p \ q \ + ctes_of s' \ q \ cte_map x \ + cte_map p = q))" (is ?Q) +proof - + have cnp: + "\a b. caps_of_state s (a, b) \ None \ + (cteMap (gsCNodes s')) (cte_map (a, b)) = (a, b)" + using cteMap_correct[OF rel valid_objs pspace_aligned pspace_distinct + pspace_aligned' pspace_distinct'] + by (clarsimp simp: dom_def) + + from rel + have descs_eq: + "\a b. cte_wp_at (\_. True) (a, b) s \ + {y. \x\descendants_of (a, b) (cdt s). y = cte_map x} = + descendants_of' (cte_map (a, b)) (ctes_of s')" + apply (clarsimp simp add: state_relation_def) + apply (clarsimp simp add: swp_def cdt_relation_def image_def) + done + + from rel + have pspace_relation: "pspace_relation (kheap s) (ksPSpace s')" + by (clarsimp simp add: state_relation_def) + + note cdt_has_caps = mdb_cte_atD[OF _ valid_mdb_mdb_cte_at[OF valid_mdb]] + note descendants_of_simps = descendants_of_def cdt_parent_rel_def is_cdt_parent_def + + have descendants_implies: + "\p p'. p' \ descendants_of p (cdt s) \ + \cap cap'. caps_of_state s p = Some cap \ caps_of_state s p' = Some cap'" + apply (clarsimp simp: descendants_of_simps) + apply (frule tranclD2, drule tranclD) + apply (auto dest: cdt_has_caps) + done + + let ?cnp = "cteMap (gsCNodes s')" + have subtree_implies: + "\p p'. subtree (ctes_of s') p p' \ + \cap cap'. ?cnp p' \ descendants_of (?cnp p) (cdt s) \ + caps_of_state s (?cnp p) = Some cap \ + caps_of_state s (?cnp p') = Some cap' \ + (\cte cte'. ctes_of s' p = Some cte \ ctes_of s' p' = Some cte')" + apply (subgoal_tac "(ctes_of s') \ p parentOf p'") + prefer 2 + apply (erule subtree.cases, simp+) + apply (clarsimp simp add: parentOf_def) + apply (frule_tac x=p in pspace_relation_cte_wp_atI[OF pspace_relation _ valid_objs]) + apply clarsimp + apply (frule descs_eq[rule_format, OF cte_wp_at_weakenE], simp) + apply (simp add: descendants_of'_def Collect_eq) + apply (drule spec, drule(1) iffD2) + apply (clarsimp simp: cnp cte_wp_at_caps_of_state) + apply (frule descendants_implies) + apply (clarsimp simp: cnp) + done + have is_parent: + "\a b p cap cap' a' b' c. 
+ \cdt s (a, b) = Some (a', b')\ + \ ctes_of s' \ cte_map (a', b') \ cte_map (a, b) \ + (\q. ctes_of s' \ cte_map (a', b') \ q \ + ctes_of s' \ q \ cte_map (a, b) \ + cte_map (a', b') = q)" + apply (frule cdt_has_caps) + using descs_eq pspace_relation + apply (frule_tac x=a' in spec, erule_tac x=b' in allE) + apply (simp add: cte_wp_at_caps_of_state Collect_eq descendants_of_simps + descendants_of'_def) + apply (rule conjI) + apply fastforce + apply clarsimp + apply (drule subtree_implies)+ + apply (clarsimp simp: cnp) + using valid_mdb + apply (clarsimp simp: cnp descendants_of_simps valid_mdb_def no_mloop_def) + apply (drule_tac x="?cnp q" and y="(a, b)" in tranclD2) + apply clarsimp + apply (fastforce intro: trancl_rtrancl_trancl) + done + + + show ?P + apply (rule ext) + using descs_eq pspace_relation + apply (simp add: absCDT_def) + apply (rule conjI[rotated]) + apply clarsimp + apply (rule sym, rule ccontr, clarsimp) + apply (frule cdt_has_caps) + using cnp + apply fastforce + apply clarsimp + + apply (clarsimp simp: parent_of'_def) + apply (rule conjI) + apply clarsimp + apply (rule sym, rule ccontr, clarsimp) + apply (simp add: descendants_of_simps descendants_of'_def) + apply (rename_tac a' b') + apply (erule_tac x=a' in allE, erule_tac x=b' in allE) + apply (erule_tac x="cte_map (a', b')" in allE, erule notE) + apply (frule cdt_has_caps) + apply (clarsimp simp: cte_wp_at_caps_of_state Collect_eq) + apply fastforce + apply clarsimp + apply (drule subtree_implies) + apply clarsimp + apply (case_tac "cdt s (a, b)") + apply (simp add: descendants_of_simps descendants_of'_def) + apply (drule tranclD2) + apply clarsimp + apply clarsimp + apply (rename_tac a' b') + apply (frule cdt_has_caps) + apply clarsimp + apply (rule trans[rotated]) + apply (rule cnp[rule_format], simp) + apply (rule arg_cong[where f="?cnp"]) + apply (rule the_equality) + apply (rule is_parent,assumption) + apply clarsimp + apply (rule ccontr) + apply (drule_tac x="cte_map (a', b')" in spec, drule mp) + apply simp_all + apply (drule subtree_implies) + apply clarsimp + apply (drule_tac p=pa in ctes_of_cte_wpD) + apply (drule pspace_relation_cte_wp_atI'[OF pspace_relation _ valid_objs]) + apply (clarsimp simp add: cte_wp_at_caps_of_state cnp) + apply (thin_tac "(a, b) \ descendants_of (?cnp p) (cdt s)", + thin_tac "caps_of_state s (?cnp p) = Some cap") + apply (unfold descendants_of'_def) + apply (erule_tac x=a' in allE) + apply (erule_tac x=b' in allE) + apply (simp add: Collect_eq) + apply (erule_tac x="cte_map (a, b)" in allE) + apply (drule iffD1) + apply (rule_tac x="(a, b)" in bexI, simp) + apply (clarsimp simp: cnp descendants_of_simps) + apply (rule trancl.intros(1)) + apply simp_all + apply (rule descs_eq[simplified descendants_of'_def Collect_eq, + rule_format, THEN iffD1]) + apply (clarsimp simp add: cte_wp_at_caps_of_state) + apply (rule_tac x="(a', b')" in bexI, simp) + apply (clarsimp simp: descendants_of_simps) + apply (drule_tac x="(aa,ba)" and y="(a, b)" in tranclD2) + apply clarsimp + apply (drule rtranclD, erule disjE, simp_all)[1] + done + thus ?Q + apply (case_tac x) + apply (case_tac "cdt s (a, b)") + apply (drule sym) + apply (simp add: mdb_cte_at_def) + apply (simp add: absCDT_def split_def) + apply (simp add: parent_of'_def split: if_split_asm) + apply (intro impI) + apply (frule_tac a=a and b=b in cnp[simplified,rule_format]) + apply simp + apply simp + apply (clarsimp simp: is_parent) + done +qed + +lemmas absCDT_correct = absCDT_correct'(1) +lemmas cdt_simple_rel = absCDT_correct'(2) + + 
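+(* Editor's note, not part of the proofs: a small worked example of how parent_of'
+   recovers immediate parents from a transitive childhood relation such as subtree.
+   Take three hypothetical CTE addresses a, b, c and let ds hold exactly for the
+   pairs (a,b), (a,c) and (b,c), i.e. the strict-descendant relation of the chain
+   a -> b -> c. Unfolding parent_of' gives
+     parent_of' ds a = None        (there is no p with ds p a)
+     parent_of' ds b = Some a
+     parent_of' ds c = Some b      (b is the unique p with ds p c such that no q
+                                     lies strictly between p and c)
+   so only the immediate parent survives, matching the format of the abstract cdt.
+   absCDT then uses cteMap, the reverse of cte_map constructed above, to express
+   this parent map in abstract (oref, cref) coordinates. *)
+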
+(* Produce a cdt_list from a cdt by sorting the children + sets by reachability via mdbNext. We then demonstrate + that a list satisfying the state relation must + already be sorted in the same way and therefore is + equivalent. *) + +definition sort_cdt_list where + "sort_cdt_list cd m = + (\p. THE xs. set xs = {c. cd c = Some p} \ + partial_sort.psorted (\x y. m \ cte_map x \\<^sup>* cte_map y) xs \ distinct xs)" + +end + +locale partial_sort_cdt = + partial_sort "\ x y. m' \ cte_map x \\<^sup>* cte_map y" + "\ x y. cte_at x (s::det_state) \ cte_at y s \ + (\p. m' \ p \ cte_map x \ m' \ p \ cte_map y)" for m' s + + fixes s'::"kernel_state" + fixes m t + defines "m \ (cdt s)" + defines "t \ (cdt_list s)" + assumes m'_def : "m' = (ctes_of s')" + assumes rel:"(s,s') \ state_relation" + assumes valid_mdb: "valid_mdb s" + assumes assms' : "pspace_aligned s" "pspace_distinct s" "pspace_aligned' s'" + "pspace_distinct' s'" "valid_objs s" "valid_mdb s" "valid_list s" +begin + +interpretation Arch . (*FIXME: arch_split*) + +lemma valid_list_2 : "valid_list_2 t m" + apply (insert assms') + apply (simp add: t_def m_def) + done + +lemma has_next_not_child_is_descendant: + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "next_not_child slot t m = Some slot2 \ (\p. slot \ descendants_of p m)" + apply (drule next_not_childD) + apply (simp add: m_def finite_depth assms')+ + using assms' + apply (simp add: valid_mdb_def) + apply (elim disjE) + apply (drule next_sib_same_parent[OF valid_list_2]) + apply (elim exE) + apply (rule_tac x=p in exI) + apply (rule child_descendant) + apply simp + apply (elim conjE exE) + apply force + done + +lemma has_next_slot_is_descendant : + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "next_slot slot t m = Some slot2 \ m slot2 = Some slot \ (\p. slot \ descendants_of p m)" + apply (insert valid_list_2) + apply (simp add: next_slot_def next_child_def split: if_split_asm) + apply (case_tac "t slot",simp+) + apply (simp add: valid_list_2_def) + apply (rule disjI1) + apply force + apply (rule disjI2) + apply (erule has_next_not_child_is_descendant) + done + +lemma descendant_has_parent: + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "slot \ descendants_of p m \ \q. m slot = Some q" + apply (simp add: descendants_of_def) + apply (drule tranclD2) + apply (simp add: cdt_parent_of_def) + apply force + done + +lemma next_slot_cte_at: + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "next_slot slot t m = Some slot2 \ cte_at slot s" + apply (cut_tac valid_mdb_mdb_cte_at) + prefer 2 + apply (cut_tac assms') + apply simp + apply (fold m_def) + apply (simp add: mdb_cte_at_def) + apply (simp add: cte_wp_at_caps_of_state) + apply (drule has_next_slot_is_descendant) + apply (elim disjE) + apply force + apply (elim exE) + apply (drule descendant_has_parent) + apply force + done + +lemma cte_at_has_cap: + "cte_at slot s \ \c. 
cte_wp_at ((=) c) slot s" + apply (drule cte_at_get_cap_wp) + apply force + done + +lemma next_slot_mdb_next: + notes split_paired_All[simp del] + shows "next_slot slot t m = Some slot2 \ m' \ (cte_map slot) \ (cte_map slot2)" + apply (frule cte_at_has_cap[OF next_slot_cte_at]) + apply (elim exE) + apply (cut_tac s=s and s'=s' in pspace_relation_ctes_ofI) + apply (fold m'_def) + using rel + apply (simp add: state_relation_def) + apply simp + using assms' + apply simp + using assms' + apply simp + apply (subgoal_tac "cdt_list_relation t m m'") + apply (simp add: cdt_list_relation_def) + apply (elim exE) + apply (case_tac cte) + apply (simp add: mdb_next_rel_def mdb_next_def) + apply force + using rel + apply (simp add: state_relation_def m_def t_def m'_def) + done + +lemma next_sib_2_reachable: + "next_sib_2 slot p s = Some slot2 \ m' \ (cte_map slot) \\<^sup>* (cte_map slot2)" + apply (induct slot rule: next_sib_2_pinduct[where s=s and p=p]) + apply (cut_tac slot=slot and s=s and p=p in next_sib_2.psimps[OF next_sib_2_termination]; + simp add: assms') + apply (fold m_def t_def) + apply (simp split: if_split_asm) + apply (case_tac "next_slot slot t m") + apply simp + apply (simp split: if_split_asm) + apply (rule r_into_rtrancl) + apply (erule next_slot_mdb_next) + apply (rule trans) + apply (rule r_into_rtrancl) + apply (rule next_slot_mdb_next) + apply (simp add: assms' valid_list_2)+ + done + +lemma next_sib_reachable: + "next_sib slot t m = Some slot2 \ m slot = Some p \ m' \ (cte_map slot) \\<^sup>* (cte_map slot2)" + apply (rule next_sib_2_reachable) + apply (insert assms') + apply (simp add: t_def m_def) + apply (subst next_sib_def2,simp+) + done + +lemma after_in_list_next_reachable: + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "after_in_list (t p) slot = Some slot2 \ m' \ (cte_map slot) \\<^sup>* (cte_map slot2)" + apply (subgoal_tac "m slot = Some p") + apply (rule next_sib_reachable) + apply (simp add: next_sib_def)+ + apply (drule after_in_list_in_list') + apply (insert valid_list_2) + apply (simp add: valid_list_2_def) + done + +lemma sorted_lists: + "psorted (t p)" + apply (rule after_order_sorted) + apply (rule after_in_list_next_reachable) + apply simp + apply (insert assms') + apply (simp add: valid_list_def t_def del: split_paired_All) + done + +lemma finite_children: + notes split_paired_All[simp del] + shows "finite {c. m c = Some p}" + apply (insert assms') + apply(subgoal_tac "{x. x \ descendants_of p (cdt s)} \ {x. cte_wp_at (\_. True) x s}") + prefer 2 + apply(fastforce simp: descendants_of_cte_at) + apply(drule finite_subset) + apply(simp add: cte_wp_at_set_finite) + apply(subgoal_tac "{c. m c = Some p} \ {c. c \ descendants_of p (cdt s)}") + apply (drule finite_subset) + apply simp + apply simp + apply clarsimp + apply (simp add: m_def child_descendant) + done + +lemma ex1_sorted_cdt: + "\!xs. set xs = {c. 
m c = Some p} \ psorted xs \ distinct xs" + apply (rule psorted_set[OF finite_children]) + apply (simp add: R_set_def) + apply (intro impI conjI allI) + apply (simp add: has_parent_cte_at[OF valid_mdb] m_def) + apply (simp add: has_parent_cte_at[OF valid_mdb] m_def) + + apply (cut_tac s=s and s'=s' and x="(a,b)" in cdt_simple_rel, simp_all add: assms' rel) + apply (simp add: m_def) + apply (cut_tac s=s and s'=s' and x="(aa,ba)" in cdt_simple_rel, simp_all add: assms' rel) + apply (rule_tac x="cte_map p" in exI) + apply (simp add: m'_def) + done + +lemma sort_cdt_list_correct: + "sort_cdt_list m m' = t" + apply (rule ext) + apply (simp add: sort_cdt_list_def) + apply (rule the1_equality) + apply (rule ex1_sorted_cdt) + apply (simp add: sorted_lists) + apply (insert assms') + apply (simp add: valid_list_def t_def m_def del: split_paired_All) + done + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition absCDTList where + "absCDTList cnp h \ sort_cdt_list (absCDT cnp h) h" + +lemma no_loops_sym_eq: "no_loops m \ m \ a \\<^sup>* b \ m \ b \\<^sup>* a \ a = b" + apply (rule ccontr) + apply (subgoal_tac "m \ a \\<^sup>+ a") + apply (simp add: no_loops_def) + apply (simp add: rtrancl_eq_or_trancl) + done + +lemma mdb_next_single_valued: "single_valued (mdb_next_rel m)" + apply (simp add: single_valued_def mdb_next_rel_def) + done + +lemma substring_next: "m \ a \\<^sup>* b \ m \ a \\<^sup>* c \ m \ b \\<^sup>* c \ m \ c \\<^sup>* b" + apply (rule single_valued_confluent) + apply (rule mdb_next_single_valued) + apply simp+ + done + +lemma ancestor_comparable: "\m \ a \ x; m \ a \ y\ \ m \ x \\<^sup>* y \ m \ y \\<^sup>* x" + apply (rule substring_next) + apply (erule subtree_mdb_next[THEN trancl_into_rtrancl])+ + done + +lemma valid_mdb'_no_loops: "valid_mdb' s \ no_loops (ctes_of s)" + apply (rule mdb_chain_0_no_loops) + apply (simp add: valid_mdb'_def valid_mdb_ctes_def)+ + done + +lemma absCDTList_correct: + notes split_paired_All[simp del] split_paired_Ex[simp del] + assumes valid_mdb: "valid_mdb s" + assumes valid_mdb': "valid_mdb' s'" + assumes valid_list: "valid_list s" + assumes valid_objs: "valid_objs s" + assumes pspace_aligned: "pspace_aligned s" + assumes pspace_aligned': "pspace_aligned' s'" + assumes pspace_distinct: "pspace_distinct s" + assumes pspace_distinct': "pspace_distinct' s'" + assumes rel: "(s,s') \ state_relation" + shows "absCDTList (cteMap (gsCNodes s')) (ctes_of s') = cdt_list s" + apply (simp add: absCDTList_def) + apply (subst absCDT_correct[where s=s]) + apply (simp add: assms)+ + apply (rule partial_sort_cdt.sort_cdt_list_correct[where s'=s']) + apply (simp add: partial_sort_cdt_def) + apply (rule context_conjI') + apply unfold_locales + apply (simp add: assms)+ + apply (simp add: partial_sort_cdt_axioms_def) + apply (elim conjE exE) + apply (rule ancestor_comparable,assumption+) + apply (elim conjE) + apply (rule cte_map_inj_eq) + apply (rule no_loops_sym_eq[where m="ctes_of s'"]) + apply (rule valid_mdb'_no_loops[OF valid_mdb']) + apply (simp add: assms)+ + done + +definition + "absInterruptIRQNode is' \ \irq. 
+ case is' of InterruptState node irqs' \ + node + (ucast irq << cte_level_bits)" + +definition + "irq_state_map s \ case s of + irq_state.IRQInactive \ irqstate.IRQInactive + | irq_state.IRQSignal \ irqstate.IRQSignal + | irq_state.IRQTimer \ irqstate.IRQTimer + | irq_state.IRQReserved \ irqstate.IRQReserved" + +definition + "IRQStateMap s \ case s of + irqstate.IRQInactive \ irq_state.IRQInactive + | irqstate.IRQSignal \ irq_state.IRQSignal + | irqstate.IRQTimer \ irq_state.IRQTimer + | irqstate.IRQReserved \ irq_state.IRQReserved" + +definition + "absInterruptStates is' \ case is' of InterruptState node m \ IRQStateMap \ m" + +lemma absInterruptIRQNode_correct: + "interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s') \ + absInterruptIRQNode (ksInterruptState s') = interrupt_irq_node s" + by (rule ext) (clarsimp simp add: absInterruptIRQNode_def interrupt_state_relation_def) + +lemma absInterruptStates_correct: + "interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s') \ + absInterruptStates (ksInterruptState s') = interrupt_states s" + apply (rule ext) + apply (clarsimp simp : absInterruptStates_def IRQStateMap_def interrupt_state_relation_def + irq_state_relation_def) + apply (erule_tac x=x in allE)+ + apply (clarsimp split: irq_state.splits irqstate.splits) + done + +definition + "absArchState s' \ + case s' of + ARMKernelState asid_tbl kvspace vmid_tab next_vmid global_us_vspace current_vcpu + num_list_regs gs_pt_types \ + \ arm_asid_table = asid_tbl \ ucast, + arm_kernel_vspace = kvspace, + arm_vmid_table = map_option ucast \ vmid_tab, + arm_next_vmid = next_vmid, + arm_us_global_vspace = global_us_vspace, + arm_current_vcpu = current_vcpu, + arm_gicvcpu_numlistregs = num_list_regs \" + +lemma absArchState_correct: + "(s,s') \ state_relation \ absArchState (ksArchState s') = arch_state s" + apply (prop_tac "(arch_state s, ksArchState s') \ arch_state_relation") + apply (simp add: state_relation_def) + apply (clarsimp simp: arch_state_relation_def absArchState_def + split: AARCH64_H.kernel_state.splits) + apply (simp add: o_assoc flip: map_option_comp2) + apply (simp add: o_def ucast_up_ucast_id is_up map_option.identity) + done + +definition absSchedulerAction where + "absSchedulerAction action \ + case action of ResumeCurrentThread \ resume_cur_thread + | SwitchToThread t \ switch_thread t + | ChooseNewThread \ choose_new_thread" + +lemma absSchedulerAction_correct: + "sched_act_relation action action' \ absSchedulerAction action' = action" + by (cases action; simp add: absSchedulerAction_def) + +definition + "absExst s \ + \work_units_completed_internal = ksWorkUnitsCompleted s, + scheduler_action_internal = absSchedulerAction (ksSchedulerAction s), + ekheap_internal = absEkheap (ksPSpace s), + domain_list_internal = ksDomSchedule s, + domain_index_internal = ksDomScheduleIdx s, + cur_domain_internal = ksCurDomain s, + domain_time_internal = ksDomainTime s, + ready_queues_internal = curry (ksReadyQueues s), + cdt_list_internal = absCDTList (cteMap (gsCNodes s)) (ctes_of s)\" + +lemma absExst_correct: + assumes invs: "einvs s" and invs': "invs' s'" + assumes rel: "(s, s') \ state_relation" + shows "absExst s' = exst s" + apply (rule det_ext.equality) + using rel invs invs' + apply (simp_all add: absExst_def absSchedulerAction_correct absEkheap_correct + absCDTList_correct[THEN fun_cong] state_relation_def invs_def valid_state_def + ready_queues_relation_def invs'_def valid_state'_def + valid_pspace_def 
valid_sched_def valid_pspace'_def curry_def fun_eq_iff) + apply (fastforce simp: absEkheap_correct) + done + + +definition + "absKState s \ + \kheap = absHeap (gsUserPages s) (gsCNodes s) (gsPTTypes (ksArchState s)) (ksPSpace s), + cdt = absCDT (cteMap (gsCNodes s)) (ctes_of s), + is_original_cap = absIsOriginalCap (cteMap (gsCNodes s)) (ksPSpace s), + cur_thread = ksCurThread s, idle_thread = ksIdleThread s, + machine_state = observable_memory (ksMachineState s) (user_mem' s), + interrupt_irq_node = absInterruptIRQNode (ksInterruptState s), + interrupt_states = absInterruptStates (ksInterruptState s), + arch_state = absArchState (ksArchState s), + exst = absExst s\" + + +definition checkActiveIRQ :: "(kernel_state, bool) nondet_monad" where + "checkActiveIRQ \ + do irq \ doMachineOp (getActiveIRQ False); + return (irq \ None) + od" + +definition check_active_irq_H :: + "((user_context \ kernel_state) \ bool \ (user_context \ kernel_state)) set" where + "check_active_irq_H \ {((tc, s), irq, (tc, s')). (irq, s') \ fst (checkActiveIRQ s)}" + +definition doUserOp :: + "user_transition \ user_context \ (kernel_state, event option \ user_context) nondet_monad" + where + "doUserOp uop tc \ + do t \ getCurThread; + trans \ gets (ptable_lift t \ absKState); + perms \ gets (ptable_rights t \ absKState); + + um \ gets (\s. user_mem' s \ ptrFromPAddr); + dm \ gets (\s. device_mem' s \ ptrFromPAddr); + + ds \ gets (device_state \ ksMachineState); + assert (dom (um \ addrFromPPtr) \ - dom ds); + assert (dom (dm \ addrFromPPtr) \ dom ds); + + (e, tc',um',ds') \ select (fst (uop t (restrict_map trans {pa. perms pa \ {}}) perms + (tc, restrict_map um + {pa. \va. trans va = Some pa \ AllowRead \ perms va} + ,(ds \ ptrFromPAddr) |` {pa. \va. trans va = Some pa \ AllowRead \ perms va} ) + )); + doMachineOp (user_memory_update + ((um' |` {pa. \va. trans va = Some pa \ AllowWrite \ perms va} + \ addrFromPPtr) |` (- dom ds))); + doMachineOp (device_memory_update + ((ds' |` {pa. \va. trans va = Some pa \ AllowWrite \ perms va} + \ addrFromPPtr )|` (dom ds))); + return (e, tc') + od" + +definition do_user_op_H :: + "user_transition \ + ((user_context \ kernel_state) \ (event option \ user_context \ kernel_state)) set" where + "do_user_op_H uop \ monad_to_transition (doUserOp uop)" + +definition + "kernelEntry e tc \ do + t \ getCurThread; + threadSet (\tcb. tcb \ tcbArch := atcbContextSet tc (tcbArch tcb) \) t; + callKernel e; + t' \ getCurThread; + threadGet (atcbContextGet o tcbArch) t' + od" + +definition kernel_call_H :: + "event \ ((user_context \ kernel_state) \ mode \ (user_context \ kernel_state)) set" + where + "kernel_call_H e \ + {(s, m, s'). s' \ fst (split (kernelEntry e) s) \ + m = (if ct_running' (snd s') then UserMode else IdleMode)}" + +definition ADT_H :: + "user_transition \ (kernel_state global_state, det_ext observable, unit) data_type" + where + "ADT_H uop \ + \Init = \s. Init_H, + Fin = \((tc,s),m,e). ((tc, absKState s),m,e), + Step = (\u. 
global_automaton check_active_irq_H (do_user_op_H uop) kernel_call_H)\" + +end + +end diff --git a/proof/refine/AARCH64/ArchAcc_R.thy b/proof/refine/AARCH64/ArchAcc_R.thy new file mode 100644 index 0000000000..0980b9b05b --- /dev/null +++ b/proof/refine/AARCH64/ArchAcc_R.thy @@ -0,0 +1,1222 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Lemmas on arch get/set object etc +*) + +theory ArchAcc_R +imports SubMonad_R +begin + +unbundle l4v_word_context + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare if_cong[cong] (* FIXME: if_cong *) + +lemma asid_pool_at_ko: + "asid_pool_at p s \ \pool. ko_at (ArchObj (AARCH64_A.ASIDPool pool)) p s" + by (clarsimp simp: asid_pools_at_eq obj_at_def elim!: opt_mapE) + +lemma corres_gets_asid: + "corres (\a c. a = c o ucast) \ \ (gets asid_table) (gets (armKSASIDTable \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma asid_low_bits [simp]: + "asidLowBits = asid_low_bits" + by (simp add: asid_low_bits_def asidLowBits_def) + +lemma pteBits_pte_bits[simp]: + "pteBits = pte_bits" + by (simp add: bit_simps pteBits_def) + +lemma cte_map_in_cnode1: + "\ x \ x + 2 ^ (cte_level_bits + length y) - 1 \ \ x \ cte_map (x, y)" + apply (simp add: cte_map_def) + apply (rule word_plus_mono_right2[where b="mask (cte_level_bits + length y)"]) + apply (simp add: mask_def add_diff_eq) + apply (rule leq_high_bits_shiftr_low_bits_leq_bits) + apply (rule of_bl_max) + done + +lemma pspace_aligned_cross: + "\ pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ pspace_aligned' s'" + apply (clarsimp simp: pspace_aligned'_def pspace_aligned_def pspace_relation_def) + apply (rename_tac p' ko') + apply (prop_tac "p' \ pspace_dom (kheap s)", fastforce) + apply (thin_tac "pspace_dom k = p" for k p) + apply (clarsimp simp: pspace_dom_def) + apply (drule bspec, fastforce)+ + apply clarsimp + apply (erule (1) obj_relation_cutsE; clarsimp simp: objBits_simps) + apply (clarsimp simp: cte_map_def) + apply (simp add: cteSizeBits_def cte_level_bits_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply simp + apply (rule is_aligned_shift) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (simp add: bit_simps) + apply (rule is_aligned_shift) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (rule pbfs_atleast_pageBits) + apply (rule is_aligned_shift) + apply (simp add: other_obj_relation_def) + apply (clarsimp simp: bit_simps' tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def + split: kernel_object.splits Structures_A.kernel_object.splits) + apply (clarsimp simp: archObjSize_def bit_simps + split: arch_kernel_object.splits arch_kernel_obj.splits) + apply (erule is_aligned_weaken, simp add: bit_simps)+ + done + +lemma of_bl_shift_cte_level_bits: + "(of_bl z :: machine_word) << cte_level_bits \ mask (cte_level_bits + length z)" + by word_bitwise + (simp add: test_bit_of_bl bit_simps word_size cte_level_bits_def rev_bl_order_simps) + +lemma obj_relation_cuts_range_limit: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ + \ \x n. 
p' = p + x \ is_aligned x n \ n \ obj_bits ko \ x \ mask (obj_bits ko)" + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (drule (1) wf_cs_nD) + apply (clarsimp simp: cte_map_def) + apply (rule_tac x=cte_level_bits in exI) + apply (simp add: is_aligned_shift of_bl_shift_cte_level_bits) + apply (rule_tac x=pte_bits in exI) + apply (simp add: is_aligned_shift mask_def) + apply (rule shiftl_less_t2n) + apply (simp add: table_size_def) + apply (simp add: bit_simps) + apply (rule_tac x=pageBits in exI) + apply (simp add: is_aligned_shift pbfs_atleast_pageBits) + apply (simp add: mask_def shiftl_t2n mult_ac) + apply (erule word_less_power_trans2, rule pbfs_atleast_pageBits) + apply (simp add: pbfs_less_wb'[unfolded word_bits_def, simplified]) + apply fastforce + done + +lemma obj_relation_cuts_range_mask_range: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko'; is_aligned p (obj_bits ko) \ + \ p' \ mask_range p (obj_bits ko)" + apply (drule (1) obj_relation_cuts_range_limit, clarsimp) + apply (rule conjI) + apply (rule word_plus_mono_right2; assumption?) + apply (simp add: is_aligned_no_overflow_mask) + apply (erule word_plus_mono_right) + apply (simp add: is_aligned_no_overflow_mask) + done + +lemma obj_relation_cuts_obj_bits: + "\ (p', P) \ obj_relation_cuts ko p; P ko ko' \ \ objBitsKO ko' \ obj_bits ko" + apply (erule (1) obj_relation_cutsE; + clarsimp simp: objBits_simps objBits_defs bit_simps cte_level_bits_def + pbfs_atleast_pageBits[simplified bit_simps]) + apply (cases ko; simp add: other_obj_relation_def objBits_defs split: kernel_object.splits) + apply (rename_tac ako, case_tac ako; clarsimp; + rename_tac ako', case_tac ako'; clarsimp simp: archObjSize_def) + done + +lemmas is_aligned_add_step_le' = is_aligned_add_step_le[simplified mask_2pm1 add_diff_eq] + +lemma pspace_distinct_cross: + "\ pspace_distinct s; pspace_aligned s; pspace_relation (kheap s) (ksPSpace s') \ \ + pspace_distinct' s'" + apply (frule (1) pspace_aligned_cross) + apply (clarsimp simp: pspace_distinct'_def) + apply (rename_tac p' ko') + apply (rule pspace_dom_relatedE; assumption?) + apply (rename_tac p ko P) + apply (frule (1) pspace_alignedD') + apply (frule (1) pspace_alignedD) + apply (rule ps_clearI, assumption) + apply (case_tac ko'; simp add: objBits_simps objBits_defs bit_simps') + apply (simp split: arch_kernel_object.splits add: bit_simps') + apply (rule ccontr, clarsimp) + apply (rename_tac x' ko_x') + apply (frule_tac x=x' in pspace_alignedD', assumption) + apply (rule_tac x=x' in pspace_dom_relatedE; assumption?) + apply (rename_tac x ko_x P') + apply (frule_tac p=x in pspace_alignedD, assumption) + apply (case_tac "p = x") + apply clarsimp + apply (erule (1) obj_relation_cutsE; clarsimp) + apply (clarsimp simp: cte_relation_def cte_map_def objBits_simps) + apply (rule_tac n=cte_level_bits in is_aligned_add_step_le'; assumption?) + apply (rule is_aligned_add; (rule is_aligned_shift)?) + apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (rule is_aligned_add; (rule is_aligned_shift)?) + apply (erule is_aligned_weaken, simp add: cte_level_bits_def) + apply (simp add: cte_level_bits_def cteSizeBits_def) + apply (clarsimp simp: pte_relation_def objBits_simps) + apply (rule_tac n=pte_bits in is_aligned_add_step_le'; assumption?) + apply (simp add: objBitsKO_Data) + apply (rule_tac n=pageBits in is_aligned_add_step_le'; assumption?) 
+ apply (case_tac ko; simp split: if_split_asm add: is_other_obj_relation_type_CapTable) + apply (rename_tac ako, case_tac ako; simp add: is_other_obj_relation_type_def split: if_split_asm) + apply (frule (1) obj_relation_cuts_obj_bits) + apply (drule (2) obj_relation_cuts_range_mask_range)+ + apply (prop_tac "x' \ mask_range p' (objBitsKO ko')", simp add: mask_def add_diff_eq) + apply (frule_tac x=p and y=x in pspace_distinctD; assumption?) + apply (drule (4) mask_range_subsetD) + apply (erule (2) in_empty_interE) + done + +lemma asid_pool_at_cross: + "\ asid_pool_at p s; pspace_relation (kheap s) (ksPSpace s'); + pspace_aligned s; pspace_distinct s \ + \ asid_pool_at' p s'" + apply (drule (2) pspace_distinct_cross) + apply (clarsimp simp: obj_at_def typ_at'_def ko_wp_at'_def) + apply (prop_tac "p \ pspace_dom (kheap s)") + apply (clarsimp simp: pspace_dom_def) + apply (rule bexI) + prefer 2 + apply fastforce + apply clarsimp + apply (clarsimp simp: pspace_relation_def) + apply (drule bspec, fastforce) + apply (clarsimp simp: other_obj_relation_def split: kernel_object.splits arch_kernel_object.splits) + apply (clarsimp simp: objBits_simps) + apply (frule (1) pspace_alignedD) + apply (rule conjI, simp add: bit_simps) + apply (clarsimp simp: pspace_distinct'_def) + apply (drule bspec, fastforce) + apply (simp add: objBits_simps) + done + +lemma corres_cross_over_asid_pool_at: + "\ \s. P s \ asid_pool_at p s \ pspace_distinct s \ pspace_aligned s; + corres r P (Q and asid_pool_at' p) f g \ \ + corres r P Q f g" + apply (rule corres_cross_over_guard[where Q="Q and asid_pool_at' p"]) + apply (drule meta_spec, drule (1) meta_mp, clarsimp) + apply (erule asid_pool_at_cross, clarsimp simp: state_relation_def; assumption) + apply assumption + done + +lemma getObject_ASIDPool_corres: + "p' = p \ + corres asid_pool_relation + (asid_pool_at p and pspace_aligned and pspace_distinct) \ + (get_asid_pool p) (getObject p')" + apply (rule corres_cross_over_asid_pool_at, fastforce) + apply (simp add: getObject_def gets_map_def split_def) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, simp_all)[1] + apply (clarsimp simp: lookupAround2_known1) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule (1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_no_overflow) + apply simp + apply (clarsimp simp add: objBits_simps + split: option.split) + apply (clarsimp simp: in_monad loadObject_default_def) + apply (simp add: bind_assoc exec_gets) + apply (drule asid_pool_at_ko) + apply (clarsimp simp: obj_at_def assert_opt_def fail_def return_def in_omonad + split: option.split) + apply (simp add: in_magnitude_check objBits_simps pageBits_def) + apply (clarsimp simp: state_relation_def pspace_relation_def) + apply (drule bspec, blast) + apply (clarsimp simp: other_obj_relation_def) + done + +lemma aligned_distinct_obj_atI': + "\ ksPSpace s x = Some ko; pspace_aligned' s; pspace_distinct' s; ko = injectKO v \ + \ ko_at' v x s" + apply (simp add: obj_at'_def project_inject pspace_distinct'_def pspace_aligned'_def) + apply (drule bspec, erule domI)+ + apply simp + done + +lemma storePTE_cte_wp_at'[wp]: + "storePTE ptr val \\s. 
P (cte_wp_at' P' p s)\" + apply (simp add: storePTE_def) + apply (wp setObject_cte_wp_at2'[where Q="\"]) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply (rule equals0I) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply simp + done + +lemma storePTE_state_refs_of[wp]: + "storePTE ptr val \\s. P (state_refs_of' s)\" + unfolding storePTE_def + apply (wp setObject_state_refs_of_eq; + clarsimp simp: updateObject_default_def in_monad) + done + +lemma storePTE_state_hyp_refs_of[wp]: + "\\s. P (state_hyp_refs_of' s)\ + storePTE ptr val + \\rv s. P (state_hyp_refs_of' s)\" + by (wpsimp wp: hoare_drop_imps setObject_state_hyp_refs_of_eq + simp: storePTE_def updateObject_default_def in_monad) + +crunch cte_wp_at'[wp]: setIRQState "\s. P (cte_wp_at' P' p s)" +crunch inv[wp]: getIRQSlot "P" + +lemma setObject_ASIDPool_corres[corres]: + "\ p = p'; a = map_option abs_asid_entry o inv ASIDPool a' o ucast \ \ + corres dc (asid_pool_at p and pspace_aligned and pspace_distinct) \ + (set_asid_pool p a) (setObject p' a')" + apply (simp add: set_asid_pool_def) + apply (rule corres_underlying_symb_exec_l[where P=P and Q="\_. P" for P]) + apply (rule corres_no_failI; clarsimp) + apply (clarsimp simp: gets_map_def bind_def simpler_gets_def assert_opt_def fail_def return_def + obj_at_def in_omonad + split: option.splits) + prefer 2 + apply wpsimp + apply (rule corres_cross_over_asid_pool_at, fastforce) + apply (rule corres_guard_imp) + apply (rule setObject_other_corres [where P="\ko::asidpool. True"]) + apply simp + apply (clarsimp simp: obj_at'_def) + apply (erule map_to_ctes_upd_other, simp, simp) + apply (simp add: a_type_def is_other_obj_relation_type_def) + apply (simp add: objBits_simps) + apply simp + apply (simp add: objBits_simps pageBits_def) + apply (simp add: other_obj_relation_def asid_pool_relation_def) + apply (simp add: typ_at'_def obj_at'_def ko_wp_at'_def) + apply clarsimp + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp) + apply (clarsimp simp: obj_at_def exs_valid_def assert_def a_type_def return_def fail_def) + apply (auto split: Structures_A.kernel_object.split_asm arch_kernel_obj.split_asm if_split_asm)[1] + apply (simp add: typ_at_to_obj_at_arches) + done + +lemma p_le_table_base: + "is_aligned p pte_bits \ p + mask pte_bits \ table_base pt_t p + mask (table_size pt_t)" + apply (simp add: is_aligned_mask word_plus_and_or_coroll table_size_def pt_bits_def) + apply (subst word_plus_and_or_coroll, word_eqI_solve) + apply word_bitwise + apply (simp add: word_size bit_simps) + done + +lemma table_index_in_table: + "table_index pt_t p \ mask (ptTranslationBits pt_t)" + by (simp add: pt_bits_def table_size_def word_bool_le_funs flip: shiftr_then_mask_commute) + +lemma pte_at_cross: + "\ pte_at pt_t p s; pspace_relation (kheap s) (ksPSpace s'); pspace_aligned s; pspace_distinct s \ + \ pte_at' p s'" + apply (drule (2) pspace_distinct_cross) + apply (clarsimp simp: pte_at_def ptes_of_def in_omonad obj_at_def typ_at'_def ko_wp_at'_def) + apply (simp split: if_split_asm) + apply (prop_tac "p \ pspace_dom (kheap s)") + apply (clarsimp simp: pspace_dom_def) + apply (rule bexI) + prefer 2 + apply fastforce + apply (clarsimp simp: ran_def image_iff) + apply (rule_tac x="table_index pt_t p" in bexI) + apply (simp add: table_base_index_eq) + apply (simp add: table_index_in_table) + apply (clarsimp simp: pspace_relation_def) + apply (drule bspec, fastforce) + apply clarsimp + apply (drule_tac 
x="table_index pt_t p" in bspec) + apply (simp add: table_index_in_table) + apply (simp add: table_base_index_eq) + apply (clarsimp simp: pte_relation_def) + apply (clarsimp simp: objBits_simps) + apply (clarsimp simp: pspace_distinct'_def) + apply (drule bspec, fastforce) + apply (simp add: objBits_simps) + done + +lemma corres_cross_over_pte_at: + "\ \s. P s \ pte_at pt_t p s \ pspace_distinct s \ pspace_aligned s; + corres r P (P' and pte_at' p) f g\ \ + corres r P P' f g" + apply (rule corres_cross_over_guard[where Q="P' and pte_at' p"]) + apply (drule meta_spec, drule (1) meta_mp, clarsimp) + apply (erule pte_at_cross; assumption?) + apply (simp add: state_relation_def) + apply assumption + done + +lemma getObject_PTE_corres: + "corres pte_relation' (pte_at pt_t p and pspace_aligned and pspace_distinct) \ + (get_pte pt_t p) (getObject p)" + apply (rule corres_cross_over_pte_at, fastforce) + apply (simp add: getObject_def gets_map_def split_def bind_assoc) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: ko_wp_at'_def typ_at'_def lookupAround2_known1) + apply (case_tac ko, simp_all)[1] + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp) + apply (clarsimp simp: objBits_def cong: option.case_cong) + apply (erule (1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_no_overflow) + apply (simp add: objBits_simps word_bits_def) + apply simp + apply (clarsimp simp: in_monad loadObject_default_def) + apply (simp add: bind_assoc exec_gets fst_assert_opt) + apply (clarsimp simp: pte_at_eq) + apply (clarsimp simp: ptes_of_def) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def in_magnitude_check objBits_simps pte_bits_def word_size_bits_def) + apply (clarsimp simp: state_relation_def pspace_relation_def elim!: opt_mapE) + apply (drule bspec, blast) + apply (clarsimp simp: other_obj_relation_def pte_relation_def) + apply (drule_tac x="table_index pt_t p" in bspec) + apply (simp add: table_index_in_table) + apply (clarsimp simp: table_base_index_eq[simplified bit_simps] bit_simps) + done + +lemmas aligned_distinct_pte_atI' + = aligned_distinct_obj_atI'[where 'a=pte, + simplified, OF _ _ _ refl] + +lemma one_less_2p_pte_bits[simp]: + "(1::machine_word) < 2 ^ pte_bits" + by (simp add: bit_simps) + +lemma pt_apply_upd_eq': + "idx \ mask (ptTranslationBits (pt_type pt)) \ + pt_apply (pt_upd pt (table_index (pt_type pt) p) pte) idx = + (if table_index (pt_type pt) p = idx then pte else pt_apply pt idx)" + unfolding pt_apply_def pt_upd_def + using table_index_mask_eq[where pt_t=NormalPT_T] table_index_mask_eq[where pt_t=VSRootPT_T] + by (cases pt; clarsimp simp: ucast_eq_mask vs_index_ptTranslationBits pt_index_ptTranslationBits + word_le_mask_eq) + +\ \setObject_other_corres unfortunately doesn't work here\ +lemma setObject_PT_corres: + "pte_relation' pte pte' \ + corres dc ((\s. 
pts_of s (table_base pt_t p) = Some pt) and K (is_aligned p pte_bits \ pt_type pt = pt_t) and + pspace_aligned and pspace_distinct) \ + (set_pt (table_base pt_t p) (pt_upd pt (table_index pt_t p) pte)) + (setObject p pte')" + apply (rule corres_cross_over_pte_at[where p=p]) + apply (fastforce simp: pte_at_eq ptes_of_def in_omonad) + apply (simp add: set_pt_def get_object_def bind_assoc set_object_def gets_map_def) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply simp + apply (clarsimp simp: obj_at'_def ko_wp_at'_def typ_at'_def lookupAround2_known1) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp) + apply (simp add: objBits_simps word_bits_def) + apply (clarsimp simp: setObject_def in_monad split_def updateObject_default_def) + apply (simp add: in_magnitude_check objBits_simps a_type_simps) + apply (clarsimp simp: obj_at_def exec_gets) + apply (clarsimp simp: exec_get put_def elim!: opt_mapE) + apply (clarsimp simp: state_relation_def) + apply (rule conjI) + apply (clarsimp simp: pspace_relation_def split del: if_split) + apply (rule conjI) + apply (subst pspace_dom_update, assumption) + apply (simp add: a_type_def) + apply (auto simp: dom_def)[1] + apply (rule conjI) + apply (drule bspec, blast) + apply clarsimp + apply (drule_tac x = x in bspec) + apply simp + apply (rule conjI; clarsimp) + apply (clarsimp simp: pte_relation_def pt_apply_upd_eq') + apply (metis more_pt_inner_beauty) + apply (clarsimp simp: pte_relation_def table_base_index_eq pt_apply_upd_eq' + dest!: more_pt_inner_beauty) + apply (rule ballI) + apply (drule (1) bspec) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: pte_relation_def pt_apply_upd_eq') + apply (metis more_pt_inner_beauty table_base_index_eq) + apply clarsimp + apply (drule bspec, assumption) + apply clarsimp + apply (erule (1) obj_relation_cutsE) + apply simp + apply clarsimp + apply (smt (verit, best) pspace_aligned_pts_ofD pts_of_Some pts_of_type_unique aobjs_of_Some + table_base_plus) + apply ((simp split: if_split_asm)+)[2] + apply (simp add: other_obj_relation_def + split: Structures_A.kernel_object.splits arch_kernel_obj.splits) + apply (rule conjI) + apply (clarsimp simp: ekheap_relation_def pspace_relation_def) + apply (drule_tac x=p in bspec, erule domI) + apply (simp add: other_obj_relation_def + split: Structures_A.kernel_object.splits) + apply (rule conjI) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x="p && ~~ mask (pt_bits (pt_type pt))" in allE)+ + apply fastforce + apply (simp add: map_to_ctes_upd_other) + apply (simp add: fun_upd_def) + apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) + done + +lemma storePTE_corres: + "pte_relation' pte pte' \ + corres dc (pte_at pt_t p and pspace_aligned and pspace_distinct) \ (store_pte pt_t p pte) (storePTE p pte')" + apply (simp add: store_pte_def storePTE_def) + apply (rule corres_assume_pre) + apply (rule corres_symb_exec_l) + apply (rule corres_symb_exec_l[where P="pte_at pt_t p and pspace_aligned and pspace_distinct"]) + apply (rule corres_symb_exec_l) + apply (erule setObject_PT_corres) + prefer 2 + apply (rule assert_inv) + apply wpsimp + apply wpsimp + prefer 2 + apply (wpsimp simp: ptes_of_def in_omonad obj_at_def pte_at_def split: if_split_asm) + apply (clarsimp simp: exs_valid_def gets_map_def fst_assert_opt in_omonad ptes_of_def + exec_gets pte_at_def) + apply (wpsimp simp: pte_at_def ptes_of_def in_omonad) + apply (wpsimp simp: pte_at_def2) + 
apply wpsimp + apply (wpsimp simp: pte_at_def2) + done + +lemmas tableBitSimps[simplified bit_simps pteBits_pte_bits, simplified] = ptBits_def +lemmas bitSimps = tableBitSimps + +lemma bit_simps_corres[simp]: + "ptBits pt_t = pt_bits pt_t" + by (simp add: bit_simps bitSimps) + +defs checkPTAt_def: + "checkPTAt p \ stateAssert (\s. \pt. page_table_at' pt p s) []" + +lemma pte_relation_must_pte: + "pte_relation m (ArchObj (PageTable pt)) ko \ \pte. ko = (KOArch (KOPTE pte))" + apply (case_tac ko) + apply (simp_all add:pte_relation_def) + apply clarsimp + done + +lemma page_table_at_cross: + "\ pt_at pt_t p s; pspace_aligned s; pspace_distinct s; pspace_relation (kheap s) (ksPSpace s') \ \ + page_table_at' pt_t p s'" + apply (clarsimp simp: page_table_at'_def) + apply (rule context_conjI) + apply (clarsimp simp: obj_at_def) + apply (frule (1) pspace_alignedD) + apply (clarsimp simp: bit_simps split: if_splits) + apply clarsimp + apply (rule pte_at_cross; assumption?) + apply (erule (2) page_table_pte_atI_nicer) + done + +lemma getPTE_wp: + "\\s. \ko. ko_at' (ko::pte) p s \ Q ko s\ getObject p \Q\" + by (clarsimp simp: getObject_def split_def loadObject_default_def in_magnitude_check + in_monad valid_def obj_at'_def objBits_simps) + +lemma pt_at_lift: + "corres_inst_eq ptr ptr' \ \s s'. (s, s') \ state_relation \ True \ + (pspace_aligned s \ pspace_distinct s \ pt_at pt_t ptr s \ ptr = ptr') \ + \ s' \ page_table_at' pt_t ptr' s'" + by ( fastforce intro!: page_table_at_cross) + +lemmas checkPTAt_corres[corresK] = + corres_stateAssert_implied_frame[OF pt_at_lift, folded checkPTAt_def] + +lemma lookupPTSlotFromLevel_inv: + "lookupPTSlotFromLevel level pt_ptr vptr \P\" + apply (induct level arbitrary: pt_ptr) + apply (subst lookupPTSlotFromLevel.simps) + apply (wpsimp simp: pteAtIndex_def wp: getPTE_wp) + apply (subst lookupPTSlotFromLevel.simps) + apply (wpsimp simp: pteAtIndex_def checkPTAt_def wp: getPTE_wp|assumption)+ + done + +declare lookupPTSlotFromLevel_inv[wp] + +lemma lookupPTFromLevel_inv[wp]: + "lookupPTFromLevel level pt vptr target_pt \P\" +proof (induct level arbitrary: pt) + case 0 show ?case + by (subst lookupPTFromLevel.simps, simp add: checkPTAt_def, wpsimp) +next + case (Suc level) + show ?case + by (subst lookupPTFromLevel.simps, simp add: checkPTAt_def) + (wpsimp wp: Suc getPTE_wp simp: pteAtIndex_def) +qed + +lemma size_maxPTLevel[simp]: + "size max_pt_level = maxPTLevel" + by (simp add: maxPTLevel_def level_defs) + +lemma ptBitsLeft_0[simp]: + "ptBitsLeft 0 = pageBits" + by (simp add: ptBitsLeft_def) + +lemma ptBitsLeft_eq[simp]: + "ptBitsLeft (size level) = pt_bits_left level" + unfolding ptBitsLeft_def pt_bits_left_def + by (clarsimp simp flip: vm_level.size_less_eq + simp: asid_pool_level_size ptTranslationBits_def maxPTLevel_def + split: if_splits) + +lemma ptIndex_eq[simp]: + "ptIndex (size level) p = pt_index level p" + by (clarsimp simp: ptIndex_def pt_index_def levelType_def + simp flip: size_maxPTLevel level_type_eq(1)) + +lemma ptSlotIndex_eq[simp]: + "ptSlotIndex (size level) = pt_slot_offset level" + by (clarsimp intro!: ext simp: ptSlotIndex_def pt_slot_offset_def) + +lemmas ptSlotIndex_0[simp] = ptSlotIndex_eq[where level=0, simplified] + +lemma pteAtIndex_corres: + "level' = size level \ + corres pte_relation' + (pte_at pt_t (pt_slot_offset level pt vptr) and pspace_aligned and pspace_distinct) + \ + (get_pte pt_t (pt_slot_offset level pt vptr)) + (pteAtIndex level' pt vptr)" + by (simp add: pteAtIndex_def) (rule getObject_PTE_corres) + +lemma 
user_region_or: + "\ vref \ user_region; vref' \ user_region \ \ vref || vref' \ user_region" + by (simp add: user_region_def canonical_user_def le_mask_high_bits word_size) + +lemma lookupPTSlotFromLevel_corres: + "\ level' = size level; pt' = pt; level \ max_pt_level \ \ + corres (\(level, p) (bits, p'). bits = pt_bits_left level \ p' = p) + (pspace_aligned and pspace_distinct and valid_vspace_objs and valid_asid_table and + \\ (level, pt) and K (vptr \ user_region \ level \ max_pt_level)) + \ + (gets_the (pt_lookup_slot_from_level level 0 pt vptr \ ptes_of)) + (lookupPTSlotFromLevel level' pt' vptr)" +proof (induct level arbitrary: pt pt' level') + case 0 + thus ?case by (simp add: lookupPTSlotFromLevel.simps pt_bits_left_def) +next + case (minus level) + from `0 < level` + obtain nlevel where nlevel: "level = nlevel + 1" by (auto intro: that[of "level-1"]) + with `0 < level` + have nlevel1: "nlevel < nlevel + 1" using bit1.pred by fastforce + with nlevel + have level: "size level = Suc (size nlevel)" by simp + + from `0 < level` `level \ max_pt_level` + have level_m1: "level - 1 \ max_pt_level" + by blast + + from level + have levelType[simp]: + "levelType (Suc (size nlevel)) = level_type level" + unfolding levelType_def using vm_level.size_inj + by fastforce + + define vref_step where + "vref_step vref \ vref_for_level vref (level+1) || (pt_index level vptr << pt_bits_left level)" + for vref + + from `level \ max_pt_level` + have vref_for_level_step[simp]: + "vref_for_level (vref_step vref) (level + 1) = vref_for_level vref (level + 1)" + for vref + unfolding vref_step_def + using vref_for_level_pt_index_idem[of level level level vref vptr] by simp + + from `level \ max_pt_level` + have pt_walk_vref[simp]: + "pt_walk max_pt_level level pt (vref_step vref) = + pt_walk max_pt_level level pt vref" for pt vref + by - (rule pt_walk_vref_for_level_eq; simp) + + from `level \ max_pt_level` + have vref_step_user_region[simp]: + "\ vref \ user_region; vptr \ user_region \ \ vref_step vref \ user_region" + for vref + unfolding vref_step_def + using nlevel1 nlevel + by (auto intro!: user_region_or vref_for_level_user_region + simp: pt_bits_left_def bit_simps user_region_def + pt_index_def canonical_user_def word_eqI_simps + dest!: max_pt_level_enum) + + have pt_slot_offset_step[simp]: + "\ is_aligned pt (pt_bits level); vref \ user_region \ \ + pt_slot_offset level pt (vref_step vref) = pt_slot_offset level pt vptr" for vref + unfolding vref_step_def using nlevel1 nlevel + apply simp + apply (clarsimp simp: pt_slot_offset_or_def user_region_def canonical_user_def) + apply (simp add: pt_index_def pt_bits_left_def) + apply (rule conjI; clarsimp) + apply (simp add: plus_one_eq_asid_pool vref_for_level_def pt_bits_left_def) + apply (rule conjI, simp add: max_pt_level_def) + apply (clarsimp simp: level_defs bit_simps maxPTLevel_def) + apply word_eqI_solve + apply (clarsimp simp: vref_for_level_def pt_bits_left_def) + apply (rule conjI; clarsimp) + apply (subgoal_tac "nlevel = max_pt_level - 1") + apply (clarsimp simp: level_defs bit_simps maxPTLevel_def split: if_split_asm) + apply word_eqI_solve + apply (subst (asm) add.commute[where a=2]) + apply (drule add_implies_diff) + apply (simp add: max_pt_level_def) + apply (simp add: pt_bits_def) + apply (prop_tac "level_type (nlevel + 1) = NormalPT_T") + apply (drule max_pt_level_enum) + apply (auto simp: level_defs split: if_split_asm)[1] + apply (simp add: bit_simps) + apply word_eqI + apply (drule max_pt_level_enum) + by (auto split: if_split_asm) + + 
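+  (* The vref_step facts above set up the recursive case: the proof below unfolds one level of
+     pt_lookup_slot_from_level / lookupPTSlotFromLevel, relates the pte reads via
+     pteAtIndex_corres, and closes the recursion with the induction hypothesis one level down. *)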
from `0 < level` `level' = size level` `pt' = pt` level `level \ max_pt_level` level_m1 + show ?case + apply (subst pt_lookup_slot_from_level_rec) + apply (simp add: lookupPTSlotFromLevel.simps Let_def obind_comp_dist if_comp_dist + gets_the_if_distrib checkPTAt_def gets_the_oapply2_comp) + apply (rule corres_guard_imp, rule corres_split[where r'=pte_relation']) + apply (rule pteAtIndex_corres, simp) + apply (rule corres_if3) + apply (rename_tac pte pte', case_tac pte; (simp add: isPageTablePTE_def)) + apply (rule corres_stateAssert_implied) + apply (rule minus(1)) + apply (simp add: nlevel) + apply (clarsimp simp: AARCH64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromPTE_def + paddr_from_ppn_def isPagePTE_def) + apply simp + apply clarsimp + apply (rule_tac x=NormalPT_T in exI) + apply (rule page_table_at_cross; assumption?) + apply (drule (2) valid_vspace_objs_strongD; assumption?) + apply simp + apply (clarsimp simp: pt_at_eq in_omonad AARCH64_A.is_PageTablePTE_def pptr_from_pte_def + getPPtrFromPTE_def isPagePTE_def paddr_from_ppn_def) + apply (simp add: state_relation_def) + apply (rule corres_inst[where P=\ and P'=\]) + apply (clarsimp simp: ptSlotIndex_def pt_slot_offset_def pt_index_def pt_bits_left_def + ptIndex_def ptBitsLeft_def) + apply (rule conjI; clarsimp) + apply (metis vm_level.size_less_eq size_maxPTLevel) + apply wpsimp+ + apply (frule (5) vs_lookup_table_is_aligned) + apply (rule conjI) + apply (drule (5) valid_vspace_objs_strongD) + apply (clarsimp simp: pte_at_def obj_at_def ptes_of_def in_omonad) + apply (simp add: pt_slot_offset_def) + apply (rule conjI, fastforce) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (simp add: bit_simps) + apply (rule is_aligned_shiftl, simp) + apply clarsimp + apply (rule_tac x=asid in exI) + apply (rule_tac x="vref_step vref" in exI) + apply (clarsimp simp: vs_lookup_table_def in_omonad split: if_split_asm) + apply (rule conjI) + apply (clarsimp simp: level_defs) + apply (subst pt_walk_split_Some[where level'=level]; simp?) + apply (drule vm_level.pred) + apply simp + apply (subst pt_walk.simps) + apply (simp add: in_omonad) + apply simp + done +qed + +lemma lookupPTSlot_corres: + "corres (\(level, p) (bits, p'). 
bits = pt_bits_left level \ p' = p) + (pspace_aligned and pspace_distinct and valid_vspace_objs + and valid_asid_table and \\(max_pt_level,pt) + and K (vptr \ user_region)) + \ + (gets_the (pt_lookup_slot pt vptr \ ptes_of)) (lookupPTSlot pt vptr)" + unfolding lookupPTSlot_def pt_lookup_slot_def + by (corresKsimp corres: lookupPTSlotFromLevel_corres) + +lemma lookupPTFromLevel_corres: + "\ level' = size level; pt' = pt \ \ + corres (lfr \ ((=) \ fst)) + (pspace_aligned and pspace_distinct and valid_vspace_objs + and valid_asid_table and \\(level,pt) + and K (vptr \ user_region \ level \ max_pt_level \ pt \ target)) + \ + (pt_lookup_from_level level pt vptr target) + (lookupPTFromLevel level' pt' vptr target)" +proof (induct level arbitrary: level' pt pt') + case 0 + then show ?case + apply (subst lookupPTFromLevel.simps, subst pt_lookup_from_level_simps) + apply simp + apply (rule corres_gen_asm) + apply (simp add: lookup_failure_map_def) + done +next + case (minus level) + + (* FIXME: unfortunate duplication from lookupPTSlotFromLevel_corres *) + from `0 < level` + obtain nlevel where nlevel: "level = nlevel + 1" by (auto intro: that[of "level-1"]) + with `0 < level` + have nlevel1: "nlevel < nlevel + 1" using vm_level.pred by fastforce + with nlevel + have level: "size level = Suc (size nlevel)" by simp + + define vref_step where + "vref_step vref \ + vref_for_level vref (level+1) || (pt_index level vptr << pt_bits_left level)" + for vref + + have vref_for_level_step[simp]: + "level \ max_pt_level \ + vref_for_level (vref_step vref) (level + 1) = vref_for_level vref (level + 1)" + for vref + unfolding vref_step_def + using vref_for_level_pt_index_idem[of level level level vref vptr] by simp + + have pt_walk_vref[simp]: + "level \ max_pt_level \ + pt_walk max_pt_level level pt (vref_step vref) = + pt_walk max_pt_level level pt vref" for pt vref + by (rule pt_walk_vref_for_level_eq; simp) + + have vref_step_user_region[simp]: + "\ vref \ user_region; vptr \ user_region; level \ max_pt_level \ + \ vref_step vref \ user_region" + for vref + unfolding vref_step_def + using nlevel1 nlevel + by (auto intro!: user_region_or vref_for_level_user_region + simp: pt_bits_left_def bit_simps user_region_def + pt_index_def canonical_user_def word_eqI_simps + dest!: max_pt_level_enum) + + have pt_slot_offset_step[simp]: + "\ is_aligned pt (pt_bits level); vref \ user_region \ \ + pt_slot_offset level pt (vref_step vref) = pt_slot_offset level pt vptr" for vref + unfolding vref_step_def using nlevel1 nlevel + apply simp + apply (clarsimp simp: pt_slot_offset_or_def user_region_def canonical_user_def) + apply (simp add: pt_index_def pt_bits_left_def) + apply (rule conjI; clarsimp) + apply (simp add: plus_one_eq_asid_pool vref_for_level_def pt_bits_left_def) + apply (rule conjI, simp add: max_pt_level_def) + apply (clarsimp simp: level_defs bit_simps maxPTLevel_def) + apply word_eqI_solve + apply (clarsimp simp: vref_for_level_def pt_bits_left_def) + apply (rule conjI; clarsimp) + apply (subgoal_tac "nlevel = max_pt_level - 1") + apply (clarsimp simp: level_defs bit_simps maxPTLevel_def split: if_split_asm) + apply word_eqI_solve + apply (subst (asm) add.commute[where a=2]) + apply (drule add_implies_diff) + apply (simp add: max_pt_level_def) + apply (simp add: pt_bits_def) + apply (prop_tac "level_type (nlevel + 1) = NormalPT_T") + apply (drule max_pt_level_enum) + apply (auto simp: level_defs split: if_split_asm)[1] + apply (simp add: bit_simps) + apply word_eqI + apply (drule max_pt_level_enum) + by 
(auto split: if_split_asm) + + note vm_level.size_minus_one[simp] + from minus.prems + show ?case + apply (subst lookupPTFromLevel.simps, subst pt_lookup_from_level_simps) + apply (simp add: unlessE_whenE not_less) + apply (rule corres_gen_asm, simp) + apply (rule corres_initial_splitE[where r'=dc]) + apply (corresKsimp simp: lookup_failure_map_def) + apply (rule corres_splitEE[where r'=pte_relation']) + apply (simp, rule getObject_PTE_corres) + apply (rule whenE_throwError_corres) + apply (simp add: lookup_failure_map_def) + apply (rename_tac pte pte', case_tac pte; simp add: isPageTablePTE_def) + apply (rule corres_if) + apply (clarsimp simp: AARCH64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromPTE_def + paddr_from_ppn_def isPagePTE_def) + apply (rule corres_returnOk[where P=\ and P'=\], simp) + apply (clarsimp simp: checkPTAt_def) + apply (subst liftE_bindE, rule corres_stateAssert_implied) + apply (rule minus.hyps) + apply (simp add: minus.hyps(2)) + apply (clarsimp simp: AARCH64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromPTE_def + paddr_from_ppn_def isPagePTE_def) + apply clarsimp + apply (rule_tac x=NormalPT_T in exI) + apply (rule page_table_at_cross; assumption?) + apply (drule vs_lookup_table_pt_at; simp?) + apply (clarsimp simp: AARCH64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromPTE_def + paddr_from_ppn_def isPagePTE_def) + apply (simp add: level_type_def split: if_split_asm) + apply (simp add: state_relation_def) + apply wpsimp+ + apply (simp add: vm_level.neq_0_conv) + apply (frule (5) vs_lookup_table_is_aligned) + apply (rule conjI) + apply (drule (5) valid_vspace_objs_strongD) + apply (clarsimp simp: pte_at_def obj_at_def ptes_of_def in_omonad) + apply (rule conjI, fastforce) + apply (simp add: pt_slot_offset_def) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply (simp add: bit_simps) + apply (rule is_aligned_shiftl, simp) + apply clarsimp + apply (rule_tac x=asid in exI) + apply (rule_tac x="vref_step vref" in exI) + apply (clarsimp simp: vs_lookup_table_def in_omonad split: if_split_asm) + apply (rule conjI) + apply (clarsimp simp: level_defs) + apply (subst pt_walk_split_Some[where level'=level]; simp?) + apply (drule vm_level.pred) + apply simp + apply (subst pt_walk.simps) + apply (simp add: in_omonad) + apply wpsimp + done +qed + +declare in_set_zip_refl[simp] + +crunch typ_at' [wp]: storePTE "\s. P (typ_at' T p s)" + (wp: crunch_wps mapM_x_wp' simp: crunch_simps ignore_del: setObject) + +lemmas storePTE_typ_ats[wp] = typ_at_lifts [OF storePTE_typ_at'] + +lemma setObject_asid_typ_at' [wp]: + "\\s. P (typ_at' T p s)\ setObject p' (v::asidpool) \\_ s. P (typ_at' T p s)\" + by (wp setObject_typ_at') + +lemmas setObject_asid_typ_ats' [wp] = typ_at_lifts [OF setObject_asid_typ_at'] + +lemma getObject_pte_inv[wp]: + "\P\ getObject p \\rv :: pte. 
P\" + by (simp add: getObject_inv loadObject_default_inv) + +lemma corres_gets_global_pt [corres]: + "corres (=) valid_global_arch_objs \ + (gets global_pt) (gets (armKSGlobalUserVSpace \ ksArchState))" + by (clarsimp simp add: state_relation_def arch_state_relation_def) + +lemmas getObject_PTE_corres'[corres] = getObject_PTE_corres[@lift_corres_args] +lemmas storePTE_corres'[corres] = storePTE_corres[@lift_corres_args] + +lemma arch_cap_rights_update: + "acap_relation c c' \ + cap_relation (cap.ArchObjectCap (acap_rights_update (acap_rights c \ msk) c)) + (Arch.maskCapRights (rights_mask_map msk) c')" + apply (cases c, simp_all add: AARCH64_H.maskCapRights_def + acap_rights_update_def Let_def isCap_simps) + apply (simp add: maskVMRights_def vmrights_map_def rights_mask_map_def + validate_vm_rights_def vm_read_write_def vm_read_only_def + vm_kernel_only_def ) + done + +lemma arch_deriveCap_inv: + "\P\ Arch.deriveCap arch_cap u \\rv. P\" + apply (simp add: AARCH64_H.deriveCap_def + cong: if_cong + split del: if_split) + apply (wp undefined_valid) + apply (cases u; simp add: isCap_defs) + done + +lemma arch_deriveCap_valid: + "\valid_cap' (ArchObjectCap arch_cap)\ + Arch.deriveCap u arch_cap + \\rv. valid_cap' rv\,-" + apply (simp add: AARCH64_H.deriveCap_def split del: if_split) + apply (wp undefined_validE_R) + apply (cases arch_cap; simp add: isCap_defs) + apply (simp add: valid_cap'_def capAligned_def capUntypedPtr_def AARCH64_H.capUntypedPtr_def) + done + +lemma mdata_map_simps[simp]: + "mdata_map None = None" + "mdata_map (Some (asid, ref)) = Some (ucast asid, ref)" + by (auto simp add: mdata_map_def) + +lemma arch_deriveCap_corres: + "cap_relation (cap.ArchObjectCap c) (ArchObjectCap c') \ + corres (ser \ (\c c'. cap_relation c c')) + \ \ + (arch_derive_cap c) + (Arch.deriveCap slot c')" + unfolding arch_derive_cap_def AARCH64_H.deriveCap_def Let_def + apply (cases c, simp_all add: isCap_simps split: option.splits split del: if_split) + apply (clarify?, rule corres_noopE; wpsimp)+ + done + +definition + "vmattributes_map \ \R. 
VMAttributes (Execute \ R) (Device \ R)" + +lemma pte_relation'_Invalid_inv [simp]: + "pte_relation' x AARCH64_H.pte.InvalidPTE = (x = AARCH64_A.pte.InvalidPTE)" + by (cases x) auto + +lemma asidHighBitsOf [simp]: + "asidHighBitsOf asid = ucast (asid_high_bits_of (ucast asid))" + by (word_eqI_solve simp: asidHighBitsOf_def asid_high_bits_of_def asidHighBits_def asid_low_bits_def) + +lemma le_mask_asidBits_asid_wf: + "asid_wf asid \ asid \ mask asidBits" + by (simp add: asidBits_def asidHighBits_def asid_wf_def asid_bits_defs mask_def) + +lemma asid_le_mask_asidBits[simp]: + "UCAST(asid_len \ machine_word_len) asid \ mask asidBits" + by (rule ucast_leq_mask, simp add: asidBits_def asidHighBits_def asid_low_bits_def) + +lemma asid_case_zero[simp]: + "0 < asid \ 0 < UCAST(asid_len \ machine_word_len) asid" + by word_bitwise + +lemma find_vspace_for_asid_rewite: + "find_vspace_for_asid asid = + doE + unlessE (0 < asid) $ throwError ExceptionTypes_A.InvalidRoot; + entry_opt \ liftE $ gets (entry_for_asid asid); + case entry_opt of + Some entry \ returnOk (ap_vspace entry) + | None \ throwError ExceptionTypes_A.InvalidRoot + odE" + unfolding find_vspace_for_asid_def vspace_for_asid_def + apply (cases "0 < asid") + apply simp (* rewrite unlessE before unfolding things *) + apply (fastforce simp: bindE_def throw_opt_def liftE_def simpler_gets_def bind_def return_def + obind_None_eq + split: option.splits) + apply (simp add: liftE_def simpler_gets_def bindE_def bind_def return_def throw_opt_def + throwError_def) + done + +lemma getPoolPtr_corres: + "corres (=) (K (0 < asid)) \ (gets (pool_for_asid asid)) (getPoolPtr (ucast asid))" + unfolding pool_for_asid_def getPoolPtr_def asidRange_def + apply simp + apply corres_pre + apply (rule corres_assert_gen_asm) + apply (rule corres_assert_gen_asm) + apply (rule corres_trivial) + apply (clarsimp simp: gets_return_gets_eq state_relation_def arch_state_relation_def + ucast_up_ucast_id is_up) + apply (simp flip: mask_eq_exp_minus_1) + apply simp + done + +lemma getASIDPoolEntry_corres: + "corres (\r r'. 
r = map_option abs_asid_entry r') + (valid_vspace_objs and valid_asid_table and pspace_aligned and pspace_distinct + and K (0 < asid)) + (no_0_obj') + (gets (entry_for_asid asid)) + (getASIDPoolEntry (ucast asid))" + unfolding entry_for_asid_def getASIDPoolEntry_def K_def + apply (rule corres_gen_asm) + apply (clarsimp simp: gets_obind_bind_eq entry_for_pool_def obind_comp_dist + cong: option.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="(=)"]) + apply (rule getPoolPtr_corres) + apply (rule_tac x=pool_ptr and x'=poolPtr in option_corres) + apply (rule corres_trivial, simp) + apply clarsimp + apply (rule monadic_rewrite_corres_l) + apply (monadic_rewrite_l gets_oapply_liftM_rewrite) + apply (rule monadic_rewrite_refl) + apply (clarsimp simp: liftM_def) + apply (rule corres_split[OF getObject_ASIDPool_corres[OF refl]]) + apply (rule corres_trivial) + apply (case_tac rv', clarsimp) + apply (clarsimp simp: asid_pool_relation_def asid_low_bits_of_def ucast_ucast_mask2 + is_down asid_low_bits_def ucast_and_mask) + apply wpsimp+ + apply (drule (1) pool_for_asid_validD) + apply (simp add: asid_pools_at_eq) + apply simp + done + +lemma no_0_page_table: + "\ no_0_obj' s; page_table_at' pt_t 0 s \ \ False" + apply (clarsimp simp: page_table_at'_def) + apply (erule_tac x=0 in allE) + apply simp + done + +crunches getASIDPoolEntry + for no_0_obj'[wp]: no_0_obj' + (wp: getObject_inv simp: loadObject_default_def) + +lemma findVSpaceForASID_corres: + assumes "asid' = ucast asid" + shows "corres (lfr \ (=)) + (valid_vspace_objs and valid_asid_table + and pspace_aligned and pspace_distinct + and K (0 < asid)) + (no_0_obj') + (find_vspace_for_asid asid) (findVSpaceForASID asid')" (is "corres _ ?P ?Q _ _") + using assms + apply (simp add: findVSpaceForASID_def) + apply (rule corres_gen_asm) + apply (subst find_vspace_for_asid_rewite) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_initial_splitE[where r'="\r r'. r = map_option abs_asid_entry r'"]) + apply simp + apply (rule getASIDPoolEntry_corres) + apply (rule_tac Q="\entry s. pspace_aligned s \ pspace_distinct s \ + vspace_pt_at (ap_vspace entry) s" + in option_corres[where P=\ and P'=\ and Q'="\_. no_0_obj'"]) + apply (clarsimp simp: lookup_failure_map_def) + apply (rename_tac entry entry') + apply (case_tac entry') + apply (clarsimp simp: checkPTAt_def abs_asid_entry_def) + apply (rename_tac p) + apply (rule_tac Q="\s. \pt_t. page_table_at' pt_t p s \ no_0_obj' s" in corres_cross_over_guard) + apply clarsimp + apply (rule_tac x=VSRootPT_T in exI) + apply (erule (2) page_table_at_cross, simp add: state_relation_def) + apply (simp add: liftE_bindE assertE_liftE) + apply (rule corres_assert_assume) + apply (rule corres_stateAssert_assume) + apply (rule corres_returnOk, simp) + apply clarsimp + apply (fastforce dest: no_0_page_table) + apply simp + apply wpsimp + apply (clarsimp simp: entry_for_asid_def) + apply (drule (2) pool_for_asid_valid_vspace_objs) + apply (fastforce simp: entry_for_pool_def) + apply (wpsimp wp: hoare_drop_imps)+ + done + +lemma setObject_arch: + assumes X: "\p q n ko. \\s. P (ksArchState s)\ updateObject val p q n ko \\rv s. P (ksArchState s)\" + shows "\\s. P (ksArchState s)\ setObject t val \\rv s. P (ksArchState s)\" + apply (simp add: setObject_def split_def) + apply (wp X | simp)+ + done + +lemma setObject_ASID_arch [wp]: + "\\s. P (ksArchState s)\ setObject p (v::asidpool) \\_ s. 
P (ksArchState s)\" + apply (rule setObject_arch) + apply (simp add: updateObject_default_def) + apply wp + apply simp + done + +lemma setObject_PTE_arch [wp]: + "\\s. P (ksArchState s)\ setObject p (v::pte) \\_ s. P (ksArchState s)\" + apply (rule setObject_arch) + apply (simp add: updateObject_default_def) + apply wp + apply simp + done + +lemma setObject_ASID_valid_arch [wp]: + "setObject p (v::asidpool) \valid_arch_state'\" + by (wpsimp wp: valid_arch_state_lift' setObject_ko_wp_at) + (auto simp: objBits_simps pageBits_def is_vcpu'_def ko_wp_at'_def obj_at'_def) + +lemma setObject_PTE_valid_arch [wp]: + "\valid_arch_state'\ setObject p (v::pte) \\_. valid_arch_state'\" + by (wpsimp wp: valid_arch_state_lift' setObject_typ_at' setObject_ko_wp_at) + (auto simp: objBits_simps pageBits_def is_vcpu'_def ko_wp_at'_def obj_at'_def) + +lemma setObject_ASID_ct [wp]: + "\\s. P (ksCurThread s)\ setObject p (e::asidpool) \\_ s. P (ksCurThread s)\" + apply (simp add: setObject_def updateObject_default_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_pte_ct [wp]: + "\\s. P (ksCurThread s)\ setObject p (e::pte) \\_ s. P (ksCurThread s)\" + apply (simp add: setObject_def updateObject_default_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_ASID_cur_tcb' [wp]: + "\\s. cur_tcb' s\ setObject p (e::asidpool) \\_ s. cur_tcb' s\" + apply (simp add: cur_tcb'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply wp+ + done + +lemma setObject_pte_cur_tcb' [wp]: + "\\s. cur_tcb' s\ setObject p (e::pte) \\_ s. cur_tcb' s\" + apply (simp add: cur_tcb'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply wp+ + done + +lemma getASID_wp: + "\\s. \ko. ko_at' (ko::asidpool) p s \ Q ko s\ getObject p \Q\" + by (clarsimp simp: getObject_def split_def loadObject_default_def + in_magnitude_check pageBits_def in_monad valid_def obj_at'_def objBits_simps) + +lemma storePTE_ctes [wp]: + "\\s. P (ctes_of s)\ storePTE p pte \\_ s. P (ctes_of s)\" + apply (rule ctes_of_from_cte_wp_at [where Q=\, simplified]) + apply (rule storePTE_cte_wp_at') + done + +lemma setObject_ASID_cte_wp_at'[wp]: + "\\s. P (cte_wp_at' P' p s)\ + setObject ptr (asid::asidpool) + \\rv s. P (cte_wp_at' P' p s)\" + apply (wp setObject_cte_wp_at2'[where Q="\"]) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply (rule equals0I) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply simp + done + +lemma setObject_ASID_ctes_of'[wp]: + "\\s. P (ctes_of s)\ + setObject ptr (asid::asidpool) + \\rv s. P (ctes_of s)\" + by (rule ctes_of_from_cte_wp_at [where Q=\, simplified]) wp + +lemma clearMemory_vms': + "valid_machine_state' s \ + \x\fst (clearMemory ptr bits (ksMachineState s)). + valid_machine_state' (s\ksMachineState := snd x\)" + apply (clarsimp simp: valid_machine_state'_def + disj_commute[of "pointerInUserData p s" for p s]) + apply (drule_tac x=p in spec, simp) + apply (drule_tac P4="\m'. underlying_memory m' p = 0" + in use_valid[where P=P and Q="\_. P" for P], simp_all) + apply (rule clearMemory_um_eq_0) + done + +lemma dmo_clearMemory_invs'[wp]: + "\invs'\ doMachineOp (clearMemory w sz) \\_. 
invs'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def) + apply (rule conjI) + apply (simp add: valid_irq_masks'_def, elim allEI, clarsimp) + apply (drule use_valid) + apply (rule no_irq_clearMemory[simplified no_irq_def, rule_format]) + apply simp_all + apply (drule clearMemory_vms') + apply fastforce + done + +end +end diff --git a/proof/refine/AARCH64/ArchMove_R.thy b/proof/refine/AARCH64/ArchMove_R.thy new file mode 100644 index 0000000000..7779fdb2bc --- /dev/null +++ b/proof/refine/AARCH64/ArchMove_R.thy @@ -0,0 +1,43 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch specific lemmas that should be moved into theory files before Refine *) + +theory ArchMove_R +imports + Move_R +begin + +(* Use one of these forms everywhere, rather than choosing at random. *) +lemmas cte_index_repair = mult.commute[where a="(2::'a::len word) ^ cte_level_bits"] +lemmas cte_index_repair_sym = cte_index_repair[symmetric] + +lemma invs_valid_ioc[elim!]: "invs s \ valid_ioc s" + by (clarsimp simp add: invs_def valid_state_def) + +context begin interpretation Arch . + +lemma get_pt_mapM_x_lower: + assumes g: "\P pt x. \ \s. P (kheap s pt_ptr) \ g pt x \ \_ s. P (kheap s pt_ptr) \" + assumes y: "ys \ []" + notes [simp] = gets_map_def get_object_def gets_def get_def bind_def return_def + assert_opt_def fail_def opt_map_def + shows "do pt \ get_pt pt_ptr; mapM_x (g pt) ys od + = mapM_x (\y. get_pt pt_ptr >>= (\pt. g pt y)) ys" + apply (rule get_mapM_x_lower + [where P="\opt_pt s. case kheap s pt_ptr of + Some (ArchObj (PageTable pt)) \ opt_pt = Some pt + | _ \ opt_pt = None", + OF _ _ _ y]) + apply (wp g) + apply (case_tac "kheap s pt_ptr"; simp; rename_tac ko; case_tac ko; simp; + rename_tac ako; case_tac ako; simp)+ + done + +end + +end diff --git a/proof/refine/AARCH64/Arch_R.thy b/proof/refine/AARCH64/Arch_R.thy new file mode 100644 index 0000000000..4512ce955c --- /dev/null +++ b/proof/refine/AARCH64/Arch_R.thy @@ -0,0 +1,2046 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Top level architecture related proofs. +*) + +theory Arch_R +imports Untyped_R Finalise_R +begin + +unbundle l4v_word_context + +lemmas [datatype_schematic] = cap.sel list.sel(1) list.sel(3) + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare arch_cap.sel [datatype_schematic] +declare is_aligned_shiftl [intro!] +declare is_aligned_shiftr [intro!] + +definition + "asid_ci_map i \ + case i of AARCH64_A.MakePool frame slot parent base \ + AARCH64_H.MakePool frame (cte_map slot) (cte_map parent) (ucast base)" + +definition + "valid_aci' aci \ case aci of MakePool frame slot parent base \ + \s. cte_wp_at' (\c. cteCap c = NullCap) slot s \ + cte_wp_at' (\cte. \idx. cteCap cte = UntypedCap False frame pageBits idx) parent s \ + descendants_of' parent (ctes_of s) = {} \ + slot \ parent \ + ex_cte_cap_to' slot s \ + sch_act_simple s \ + is_aligned base asid_low_bits \ asid_wf base" + +lemma vp_strgs': + "valid_pspace' s \ pspace_distinct' s" + "valid_pspace' s \ pspace_aligned' s" + "valid_pspace' s \ valid_mdb' s" + by auto + +lemma safe_parent_strg': + "cte_wp_at' (\cte. 
cteCap cte = UntypedCap False frame pageBits idx) p s \ + descendants_of' p (ctes_of s) = {} \ + valid_pspace' s + \ safe_parent_for' (ctes_of s) p (ArchObjectCap (ASIDPoolCap frame base))" + apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of) + apply (case_tac cte) + apply (simp add: isCap_simps) + apply (subst conj_comms) + apply (rule context_conjI) + apply (drule ctes_of_valid_cap', fastforce) + apply (clarsimp simp: valid_cap'_def capAligned_def) + apply (drule is_aligned_no_overflow) + apply (clarsimp simp: capRange_def asid_low_bits_def bit_simps) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps capRange_def asid_low_bits_def bit_simps) + done + +lemma descendants_of'_helper: + "\P\ f \\r s. Q (descendants_of' t (null_filter' (ctes_of s)))\ + \ \P\ f \\r s. Q (descendants_of' t (ctes_of s))\" + apply (clarsimp simp:valid_def) + apply (subst null_filter_descendants_of') + prefer 2 + apply fastforce + apply simp + done + +lemma createObject_typ_at': + "\\s. koTypeOf ty = otype \ is_aligned ptr (objBitsKO ty) \ + pspace_aligned' s \ pspace_no_overlap' ptr (objBitsKO ty) s\ + createObjects' ptr (Suc 0) ty 0 + \\rv s. typ_at' otype ptr s\" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (clarsimp simp:createObjects'_def alignError_def split_def | wp unless_wp | wpc )+ + apply (clarsimp simp:obj_at'_def ko_wp_at'_def typ_at'_def pspace_distinct'_def)+ + apply (subgoal_tac "ps_clear ptr (objBitsKO ty) + (s\ksPSpace := \a. if a = ptr then Some ty else ksPSpace s a\)") + apply (simp add:ps_clear_def)+ + apply (rule ccontr) + apply (drule int_not_emptyD) + apply clarsimp + apply (unfold pspace_no_overlap'_def) + apply (erule allE)+ + apply (erule(1) impE) + apply (subgoal_tac "x \ mask_range x (objBitsKO y)") + apply (fastforce simp: is_aligned_neg_mask_eq) + apply (drule(1) pspace_alignedD') + apply (clarsimp simp: is_aligned_no_overflow_mask) + done + +lemma retype_region2_ext_retype_region_ArchObject: + "retype_region ptr n us (ArchObject x)= + retype_region2 ptr n us (ArchObject x)" + apply (rule ext) + apply (simp add: retype_region_def retype_region2_def bind_assoc + retype_region2_ext_def retype_region_ext_def default_ext_def) + apply (rule ext) + apply (intro monad_eq_split_tail ext)+ + apply simp + apply simp + apply (simp add:gets_def get_def bind_def return_def simpler_modify_def ) + apply (rule_tac x = xc in fun_cong) + apply (rule_tac f = do_extended_op in arg_cong) + apply (rule ext) + apply simp + apply simp + done + +lemma set_cap_device_and_range_aligned: + "is_aligned ptr sz \ \\_. True\ + set_cap + (cap.UntypedCap dev ptr sz idx) + aref + \\rv s. + \slot. + cte_wp_at + (\c. 
cap_is_device c = dev \ + up_aligned_area ptr sz \ cap_range c) + slot s\" + apply (subst is_aligned_neg_mask_eq[symmetric]) + apply simp + apply (wp set_cap_device_and_range) + done + +lemma performASIDControlInvocation_corres: + "asid_ci_map i = i' \ + corres dc + (einvs and ct_active and valid_aci i) + (invs' and ct_active' and valid_aci' i') + (perform_asid_control_invocation i) + (performASIDControlInvocation i')" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (cases i) + apply (rename_tac word1 prod1 prod2 word2) + apply (clarsimp simp: asid_ci_map_def) + apply (simp add: perform_asid_control_invocation_def placeNewObject_def2 + performASIDControlInvocation_def) + apply (rule corres_name_pre) + apply (clarsimp simp:valid_aci_def valid_aci'_def cte_wp_at_ctes_of cte_wp_at_caps_of_state) + apply (subgoal_tac "valid_cap' (capability.UntypedCap False word1 pageBits idx) s'") + prefer 2 + apply (case_tac ctea) + apply clarsimp + apply (erule ctes_of_valid_cap') + apply fastforce + apply (frule valid_capAligned) + apply (clarsimp simp: capAligned_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (erule deleteObjects_corres) + apply (simp add:pageBits_def) + apply (rule corres_split[OF getSlotCap_corres], simp) + apply (rule_tac F = " pcap = (cap.UntypedCap False word1 pageBits idxa)" in corres_gen_asm) + apply (rule corres_split[OF updateFreeIndex_corres]) + apply (clarsimp simp:is_cap_simps) + apply (simp add: free_index_of_def) + apply (rule corres_split) + apply (simp add: retype_region2_ext_retype_region_ArchObject ) + apply (rule corres_retype [where ty="Inl (KOArch (KOASIDPool F))" for F, + unfolded APIType_map2_def makeObjectKO_def, + THEN createObjects_corres',simplified, + where val = "makeObject::asidpool"]) + apply simp + apply (simp add: objBits_simps obj_bits_api_def arch_kobj_size_def + default_arch_object_def bit_simps)+ + apply (simp add: obj_relation_retype_def default_object_def + default_arch_object_def objBits_simps) + apply (simp add: other_obj_relation_def asid_pool_relation_def) + apply (simp add: makeObject_asidpool const_def inv_def) + apply (rule range_cover_full) + apply (simp add: obj_bits_api_def arch_kobj_size_def default_arch_object_def bit_simps + word_bits_def)+ + apply (rule corres_split) + apply (rule cteInsert_simple_corres, simp, rule refl, rule refl) + apply (rule_tac F="asid_low_bits_of word2 = 0" in corres_gen_asm) + apply (simp add: is_aligned_mask dc_def[symmetric]) + apply (rule corres_split[where P=\ and P'=\ and r'="\t t'. 
t = t' o ucast"]) + apply (clarsimp simp: state_relation_def arch_state_relation_def) + apply (rule corres_trivial) + apply (rule corres_modify) + apply (thin_tac "x \ state_relation" for x) + apply (clarsimp simp: state_relation_def arch_state_relation_def o_def) + apply (rule ext) + apply (clarsimp simp: up_ucast_inj_eq) + apply wp+ + apply (strengthen safe_parent_strg[where idx = "2^pageBits"]) + apply (strengthen invs_valid_objs invs_distinct + invs_psp_aligned invs_mdb + | simp cong:conj_cong)+ + apply (wp retype_region_plain_invs[where sz = pageBits] + retype_cte_wp_at[where sz = pageBits])+ + apply (strengthen vp_strgs' + safe_parent_strg'[where idx = "2^pageBits"]) + apply (simp cong: conj_cong) + apply (wp createObjects_valid_pspace' + [where sz = pageBits and ty="Inl (KOArch (KOASIDPool undefined))"]) + apply (simp add: makeObjectKO_def)+ + apply (simp add:objBits_simps range_cover_full valid_cap'_def)+ + apply (clarsimp simp:valid_cap'_def) + apply (wp createObject_typ_at' + createObjects_orig_cte_wp_at'[where sz = pageBits]) + apply (rule descendants_of'_helper) + apply (wp createObjects_null_filter' + [where sz = pageBits and ty="Inl (KOArch (KOASIDPool undefined))"]) + apply (clarsimp simp: conj_comms obj_bits_api_def arch_kobj_size_def + objBits_simps default_arch_object_def pred_conj_def) + apply (clarsimp simp: conj_comms + | strengthen invs_mdb invs_valid_pspace)+ + apply (simp add:region_in_kernel_window_def) + apply (wp set_untyped_cap_invs_simple[where sz = pageBits] + set_cap_cte_wp_at + set_cap_caps_no_overlap[where sz = pageBits] + set_cap_no_overlap + set_cap_device_and_range_aligned[where dev = False,simplified] + set_untyped_cap_caps_overlap_reserved[where sz = pageBits])+ + apply (clarsimp simp: conj_comms obj_bits_api_def arch_kobj_size_def + objBits_simps default_arch_object_def pred_conj_def + makeObjectKO_def range_cover_full + simp del: capFreeIndex_update.simps + | strengthen invs_valid_pspace' invs_pspace_aligned' + invs_pspace_distinct' + exI[where x="makeObject :: asidpool"])+ + apply (wp updateFreeIndex_forward_invs' + updateFreeIndex_pspace_no_overlap' + updateFreeIndex_caps_no_overlap'' + updateFreeIndex_descendants_of2 + updateFreeIndex_cte_wp_at + updateFreeIndex_caps_overlap_reserved + | simp add: descendants_of_null_filter' split del: if_split)+ + apply (wp get_cap_wp)+ + apply (subgoal_tac "word1 && ~~ mask pageBits = word1 \ pageBits \ word_bits \ word_size_bits \ pageBits") + prefer 2 + apply (clarsimp simp:bit_simps word_bits_def is_aligned_neg_mask_eq) + apply (simp only:delete_objects_rewrite) + apply wp+ + apply (clarsimp simp: conj_comms) + apply (clarsimp simp: conj_comms ex_disj_distrib + | strengthen invs_valid_pspace' invs_pspace_aligned' + invs_pspace_distinct')+ + apply (wp deleteObjects_invs'[where p="makePoolParent i'"] + deleteObjects_cte_wp_at' + deleteObjects_descendants[where p="makePoolParent i'"]) + apply (clarsimp split del: if_split simp:valid_cap'_def) + apply (wp hoare_vcg_ex_lift + deleteObjects_caps_no_overlap''[where slot="makePoolParent i'"] + deleteObject_no_overlap + deleteObjects_ct_active'[where cref="makePoolParent i'"]) + apply (clarsimp simp: is_simple_cap_def valid_cap'_def max_free_index_def is_cap_simps + cong: conj_cong) + apply (strengthen empty_descendants_range_in') + apply (wp deleteObjects_descendants[where p="makePoolParent i'"] + deleteObjects_cte_wp_at' + deleteObjects_null_filter[where p="makePoolParent i'"]) + apply (clarsimp simp:invs_mdb max_free_index_def invs_untyped_children) + apply 
(subgoal_tac "detype_locale x y sa" for x y) + prefer 2 + apply (simp add:detype_locale_def cte_wp_at_caps_of_state) + apply (thin_tac "caps_of_state s p = Some cap.NullCap" for s p) + apply (fastforce simp: cte_wp_at_caps_of_state descendants_range_def2 + empty_descendants_range_in invs_untyped_children) + apply (intro conjI) + apply (clarsimp) + apply (erule(1) caps_of_state_valid) + subgoal by (fastforce simp:cte_wp_at_caps_of_state descendants_range_def2 empty_descendants_range_in) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems by (clarsimp simp:invs_def valid_state_def)+ + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (drule detype_locale.non_null_present) + apply (fastforce simp:cte_wp_at_caps_of_state) + apply simp + apply (frule_tac ptr = "(aa,ba)" in detype_invariants [rotated 3]) + apply fastforce + apply simp + apply (simp add: cte_wp_at_caps_of_state) + apply (simp add: is_cap_simps) + apply (simp add:empty_descendants_range_in descendants_range_def2) + apply (frule intvl_range_conv[where bits = pageBits]) + apply (clarsimp simp:pageBits_def word_bits_def) + apply (clarsimp simp: invs_valid_objs cte_wp_at_caps_of_state range_cover_full + invs_psp_aligned invs_distinct cap_master_cap_simps is_cap_simps + is_simple_cap_def) + apply (clarsimp simp: conj_comms) + apply (rule conjI, clarsimp simp: is_aligned_asid_low_bits_of_zero) + apply (frule ex_cte_cap_protects) + apply (simp add:cte_wp_at_caps_of_state) + apply (simp add:empty_descendants_range_in) + apply fastforce + apply (rule subset_refl) + apply fastforce + apply (clarsimp simp: is_simple_cap_arch_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp simp: clear_um_def) + apply (simp add:detype_clear_um_independent) + apply (rule conjI) + apply clarsimp + apply (drule_tac p = "(aa,ba)" in cap_refs_in_kernel_windowD2[OF caps_of_state_cteD]) + apply fastforce + apply (clarsimp simp: region_in_kernel_window_def valid_cap_def + cap_aligned_def is_aligned_neg_mask_eq detype_def clear_um_def) + apply fastforce + apply (rule conjI,erule caps_no_overlap_detype[OF descendants_range_caps_no_overlapI]) + apply (clarsimp simp:is_aligned_neg_mask_eq cte_wp_at_caps_of_state) + apply (simp add:empty_descendants_range_in)+ + apply (rule conjI, rule pspace_no_overlap_subset, + rule pspace_no_overlap_detype[OF caps_of_state_valid]) + apply (simp add:invs_psp_aligned invs_valid_objs is_aligned_neg_mask_eq)+ + apply (clarsimp simp: detype_def clear_um_def detype_ext_def valid_sched_def valid_etcbs_def + st_tcb_at_kh_def obj_at_kh_def st_tcb_at_def obj_at_def is_etcb_at_def) + apply (simp add: detype_def clear_um_def) + apply (drule_tac x = "cte_map (aa,ba)" in pspace_relation_cte_wp_atI[OF state_relation_pspace_relation]) + apply (simp add:invs_valid_objs)+ + apply clarsimp + apply (drule cte_map_inj_eq) + apply ((fastforce simp:cte_wp_at_caps_of_state)+)[5] + apply (clarsimp simp:cte_wp_at_caps_of_state invs_valid_pspace' conj_comms cte_wp_at_ctes_of + valid_cap_simps') + apply (strengthen refl) + apply clarsimp + apply (frule empty_descendants_range_in') + apply (intro conjI, + simp_all add: is_simple_cap'_def isCap_simps descendants_range'_def2 + null_filter_descendants_of'[OF null_filter_simp'] + capAligned_def asid_low_bits_def) + apply (erule descendants_range_caps_no_overlapI') + apply (fastforce simp:cte_wp_at_ctes_of is_aligned_neg_mask_eq) + apply (simp add:empty_descendants_range_in') + apply (simp add:word_bits_def bit_simps) + apply (rule is_aligned_weaken) + apply (rule 
is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1,simplified]) + apply (simp add:pageBits_def) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range) + apply (fastforce simp:cte_wp_at_ctes_of) + apply assumption+ + apply fastforce + apply simp + apply clarsimp + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) + apply assumption+ + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) + apply fastforce + apply simp + done + +definition vcpu_invocation_map :: "vcpu_invocation \ vcpuinvocation" where + "vcpu_invocation_map vcpui \ case vcpui of + vcpu_invocation.VCPUSetTCB v t \ VCPUSetTCB v t + | vcpu_invocation.VCPUInjectIRQ obj n vreg \ VCPUInjectIRQ obj n vreg + | vcpu_invocation.VCPUReadRegister obj vreg \ VCPUReadRegister obj vreg + | vcpu_invocation.VCPUWriteRegister obj vreg word \ VCPUWriteRegister obj vreg word + | vcpu_invocation.VCPUAckVPPI obj irq \ VCPUAckVPPI obj irq" + +(* FIXME AARCH64: move to VSpace_R where page_table_invocation_map is *) +definition + "vspace_invocation_map vsi vsi' \ + case vsi of + AARCH64_A.VSpaceNothing \ vsi' = VSpaceNothing + | AARCH64_A.VSpaceFlush ty start end pstart space asid \ + vsi' = VSpaceFlush ty start end pstart space (ucast asid)" + +(* FIXME AARCH64: move to VSpace_R where valid_psi is *) +definition + "valid_vsi' vsi \ + case vsi of + VSpaceNothing \ \ + | VSpaceFlush ty start end pstart space asid \ \" + +definition + archinv_relation :: "arch_invocation \ Arch.invocation \ bool" +where + "archinv_relation ai ai' \ case ai of + arch_invocation.InvokeVSpace vsi \ + \vsi'. ai' = InvokeVSpace vsi' \ vspace_invocation_map vsi vsi' + | arch_invocation.InvokePageTable pti \ + \pti'. ai' = InvokePageTable pti' \ page_table_invocation_map pti pti' + | arch_invocation.InvokePage pgi \ + \pgi'. ai' = InvokePage pgi' \ page_invocation_map pgi pgi' + | arch_invocation.InvokeASIDControl aci \ + \aci'. ai' = InvokeASIDControl aci' \ aci' = asid_ci_map aci + | arch_invocation.InvokeASIDPool ap \ + \ap'. ai' = InvokeASIDPool ap' \ ap' = asid_pool_invocation_map ap + | arch_invocation.InvokeVCPU vcpui \ + \vcpui'. ai' = InvokeVCPU vcpui' \ vcpui' = vcpu_invocation_map vcpui" + +definition + valid_arch_inv' :: "Arch.invocation \ kernel_state \ bool" +where + "valid_arch_inv' ai \ case ai of + InvokeVSpace vsi \ valid_vsi' vsi + | InvokePageTable pti \ valid_pti' pti + | InvokePage pgi \ valid_page_inv' pgi + | InvokeASIDControl aci \ valid_aci' aci + | InvokeASIDPool ap \ valid_apinv' ap + | InvokeVCPU v \ valid_vcpuinv' v" + +lemma mask_vmrights_corres: + "maskVMRights (vmrights_map R) (rightsFromWord d) = + vmrights_map (mask_vm_rights R (data_to_rights d))" + by (clarsimp simp: rightsFromWord_def data_to_rights_def + vmrights_map_def Let_def maskVMRights_def + mask_vm_rights_def nth_ucast + validate_vm_rights_def vm_read_write_def + vm_kernel_only_def vm_read_only_def + split: bool.splits) + +lemma vm_attributes_corres: + "vmattributes_map (attribs_from_word w) = attribsFromWord w" + by (clarsimp simp: attribsFromWord_def attribs_from_word_def + Let_def vmattributes_map_def) + +lemma checkVPAlignment_corres: + "corres (ser \ dc) \ \ + (check_vp_alignment sz w) + (checkVPAlignment sz w)" + apply (simp add: check_vp_alignment_def checkVPAlignment_def) + apply (cases sz, simp_all add: corres_returnOk unlessE_whenE is_aligned_mask) + apply ((rule corres_guard_imp, rule corres_whenE, rule refl, auto)[1])+ + done + +lemma checkVP_wpR [wp]: + "\\s. 
vmsz_aligned w sz \ P () s\ + checkVPAlignment sz w \P\, -" + apply (simp add: checkVPAlignment_def unlessE_whenE cong: vmpage_size.case_cong) + apply (rule hoare_pre) + apply (wp whenE_wp|wpc)+ + apply (simp add: is_aligned_mask vmsz_aligned_def) + done + +lemma asidHighBits [simp]: + "asidHighBits = asid_high_bits" + by (simp add: asidHighBits_def asid_high_bits_def) + +declare word_unat_power [symmetric, simp del] + +lemma ARMMMU_improve_cases: + "(if isFrameCap cap then Q + else if isPageTableCap cap \ capPTType cap = NormalPT_T then R + else if isPageTableCap cap \ capPTType cap = VSRootPT_T then S + else if isASIDControlCap cap then T + else if isASIDPoolCap cap then U + else if isVCPUCap cap then V + else undefined) + = + (if isFrameCap cap then Q + else if isPageTableCap cap \ capPTType cap = NormalPT_T then R + else if isPageTableCap cap \ capPTType cap = VSRootPT_T then S + else if isASIDControlCap cap then T + else if isASIDPoolCap cap then U + else V)" + apply (cases cap; simp add: isCap_simps) + apply (rename_tac pt_t m) + apply (case_tac pt_t; simp) + done + +crunch inv[wp]: "AARCH64_H.decodeInvocation" "P" + (wp: crunch_wps mapME_x_inv_wp getASID_wp hoare_vcg_imp_lift' + simp: crunch_simps ARMMMU_improve_cases) + +lemma case_option_corresE: + assumes nonec: "corres r Pn Qn (nc >>=E f) (nc' >>=E g)" + and somec: "\v'. corres r (Ps v') (Qs v') (sc v' >>=E f) (sc' v' >>=E g)" + shows "corres r (case_option Pn Ps v) (case_option Qn Qs v) (case_option nc sc v >>=E f) (case_option nc' sc' v >>=E g)" + apply (cases v) + apply simp + apply (rule nonec) + apply simp + apply (rule somec) + done + + +lemma cap_relation_Untyped_eq: + "cap_relation c (UntypedCap d p sz f) = (c = cap.UntypedCap d p sz f)" + by (cases c) auto + +declare check_vp_alignment_inv[wp del] + +lemma select_ext_fa: + "free_asid_select asid_tbl \ S + \ ((select_ext (\_. free_asid_select asid_tbl) S) :: _ det_ext_monad) + = return (free_asid_select asid_tbl)" + by (simp add: select_ext_def get_def gets_def bind_def assert_def return_def fail_def) + +lemma select_ext_fap: + "free_asid_pool_select p b \ S + \ ((select_ext (\_. 
free_asid_pool_select p b) S) :: _ det_ext_monad) + = return (free_asid_pool_select p b)" + by (simp add: select_ext_def get_def gets_def bind_def assert_def return_def) + +lemmas vmsz_aligned_imp_aligned + = vmsz_aligned_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN is_aligned_weaken] + +lemma vmrights_map_vm_kernel_only[simp]: + "vmrights_map vm_kernel_only = VMKernelOnly" + by (simp add: vmrights_map_def vm_kernel_only_def) + +lemma not_in_vm_kernel_only[simp]: + "x \ vm_kernel_only" + by (simp add: vm_kernel_only_def) + +lemma vmrights_map_VMKernelOnly: + "vmrights_map (mask_vm_rights R r) = VMKernelOnly \ mask_vm_rights R r = vm_kernel_only" + by (auto simp: vmrights_map_def mask_vm_rights_def validate_vm_rights_def vm_read_write_def + vm_read_only_def split: if_splits) + +lemma vmrights_map_empty[simp]: + "vmrights_map {} = VMKernelOnly" + by (simp add: vmrights_map_def) + +lemma pte_relation_make_user[simp]: + "pte_relation' + (make_user_pte (addrFromPPtr p) + (attribs_from_word a) + (mask_vm_rights R (data_to_rights r)) + sz) + (makeUserPTE (addrFromPPtr p) + (maskVMRights (vmrights_map R) (rightsFromWord r)) + (attribsFromWord a) + sz)" + by (auto simp: make_user_pte_def makeUserPTE_def attribs_from_word_def + attribsFromWord_def mask_vmrights_corres) + +lemma below_user_vtop_in_user_region: + "p \ user_vtop \ p \ user_region" + by (simp add: user_region_def canonical_user_def user_vtop_def pptrUserTop_def bit_simps) + +lemma vmsz_aligned_user_region: + "\ vmsz_aligned p sz; p + mask (pageBitsForSize sz) \ user_vtop \ \ p \ user_region" + apply (simp add: vmsz_aligned_def) + apply (rule below_user_vtop_in_user_region) + apply (drule is_aligned_no_overflow_mask) + apply simp + done + +lemma checkVSpaceRoot_corres[corres]: + "\ cap_relation cap cap'; n' = n \ \ + corres (ser \ (\(pt, asid) (pt', asid'). pt' = pt \ asid' = ucast asid)) + \ \ + (check_vspace_root cap n) (checkVSpaceRoot cap' n')" + unfolding check_vspace_root_def checkVSpaceRoot_def + apply (corres_cases_both simp: cap_relation_def) (* takes a while, quadratic cap cases *) + apply (corres_cases_both simp: mdata_map_def)+ + apply (rule corres_trivial, rule corres_returnOk, simp) + apply clarsimp + apply clarsimp + done + +lemma labelToFlushType_corres: + "labelToFlushType l = label_to_flush_type l" + by (simp add: labelToFlushType_def label_to_flush_type_def + split: invocation_label.split arch_invocation_label.split) + +lemma decodeARMFrameInvocationFlush_corres[corres]: + "corres (ser \ archinv_relation) + (valid_vspace_objs and valid_asid_table and pspace_aligned and pspace_distinct and + K (\asid vref. 
opt = Some (asid, vref) \ 0 < asid)) + no_0_obj' + (decode_fr_inv_flush l args slot (arch_cap.FrameCap p R sz d opt) excaps) + (decodeARMFrameInvocationFlush l args (FrameCap p (vmrights_map R) sz d (mdata_map opt)))" + unfolding decode_fr_inv_flush_def decodeARMFrameInvocationFlush_def + apply (cases args; clarsimp) + apply (clarsimp simp: Let_def neq_Nil_conv) + apply (corres corres: corres_lookup_error findVSpaceForASID_corres corres_returnOkTT + term_simp: AARCH64_H.fromPAddr_def AARCH64.paddrTop_def AARCH64_H.paddrTop_def + AARCH64.pptrTop_def AARCH64_H.pptrTop_def + | corres_cases_both simp: mdata_map_def)+ + apply (fastforce simp: archinv_relation_def page_invocation_map_def mdata_map_def + labelToFlushType_corres) + apply wpsimp+ + done + +lemma decodeARMFrameInvocation_corres: + "\cap = arch_cap.FrameCap p R sz d opt; acap_relation cap cap'; + list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap cap) and + cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and + (\s. \x\set excaps. s \ fst x \ cte_wp_at (\_. True) (snd x) s)) + (invs' and valid_cap' (capability.ArchObjectCap cap') and + (\s. \x\set excaps'. valid_cap' (fst x) s \ cte_wp_at' (\_. True) (snd x) s)) + (decode_frame_invocation l args slot cap excaps) + (decodeARMFrameInvocation l args (cte_map slot) cap' excaps')" + apply (simp add: decode_frame_invocation_def decodeARMFrameInvocation_def Let_def isCap_simps + split del: if_split) + apply (cases "invocation_type l = ArchInvocationLabel ARMPageMap") + apply (case_tac "\(2 < length args \ excaps \ [])") + apply (auto simp: decode_fr_inv_map_def split: list.split)[1] + apply (simp add: decode_fr_inv_map_def Let_def neq_Nil_conv) + apply (elim exE conjE) + apply (simp split: list.split, intro conjI impI allI, simp_all)[1] + apply (simp add: decodeARMFrameInvocationMap_def) + apply (corres corres: corres_lookup_error findVSpaceForASID_corres checkVPAlignment_corres + term_simp: mask_def user_vtop_def + | corres_cases_both)+ + apply (simp add: mask_def user_vtop_def) + apply (corres corres: lookupPTSlot_corres[@lift_corres_args] + term_simp: lookup_failure_map_def + | corres_cases_both)+ + apply (rule corres_trivial, rule corres_returnOk) + apply (simp add: archinv_relation_def page_invocation_map_def mapping_map_def) + apply (wpsimp+)[3] + apply corres_cases_both + apply (corres simp: up_ucast_inj_eq) + apply (rule corres_trivial) + apply simp + apply (corres corres: lookupPTSlot_corres[@lift_corres_args]) + apply corres_cases_both + apply (corres term_simp: lookup_failure_map_def) + apply (rule corres_trivial) + apply (rule corres_returnOk) + apply (simp add: archinv_relation_def page_invocation_map_def mapping_map_def) + apply wpsimp+ + apply (fastforce simp: valid_cap_def wellformed_mapdata_def vmsz_aligned_user_region not_less + intro: vspace_for_asid_vs_lookup) + apply clarsimp + \ \PageUnmap\ + apply (simp split del: if_split) + apply (cases "invocation_type l = ArchInvocationLabel ARMPageUnmap") + apply simp + apply (rule corres_returnOk) + apply (clarsimp simp: archinv_relation_def page_invocation_map_def) + \ \PageGetAddress\ + apply (cases "invocation_type l = ArchInvocationLabel ARMPageGetAddress") + apply simp + apply (rule corres_returnOk) + apply (clarsimp simp: archinv_relation_def page_invocation_map_def) + \ \isPageFlushLabel\ + apply (cases "isPageFlushLabel (invocation_type l)") + apply simp + apply 
(corres_cases_right; + corres_cases_right?; + (solves \rule corres_trivial, simp add: isPageFlushLabel_def\)?; + corres_cases_right?) + apply corres+ + apply (fastforce simp: valid_cap_def wellformed_mapdata_def) + apply fastforce + \ \error cases\ + apply (fastforce split: invocation_label.splits arch_invocation_label.splits + simp: isPageFlushLabel_def) + done + +lemma VMReadWrite_vmrights_map[simp]: "vmrights_map vm_read_write = VMReadWrite" + by (simp add: vmrights_map_def vm_read_write_def) + +lemma gets_vspace_for_asid_is_catch: + "gets (vspace_for_asid a) = ((liftME Some (find_vspace_for_asid a)) const (return None))" + apply (simp add: find_vspace_for_asid_def liftME_def liftE_bindE catch_def) + apply (rule ext) + apply (clarsimp simp: bind_def simpler_gets_def throw_opt_def bindE_def throwError_def return_def + returnOk_def + split: option.splits) + done + +lemma maybeVSpaceForASID_corres: + "a' = ucast a \ + corres (=) + (valid_vspace_objs and valid_asid_table and pspace_aligned and pspace_distinct + and K (0 < a)) + no_0_obj' + (gets (vspace_for_asid a)) (maybeVSpaceForASID a')" + apply (simp add: maybeVSpaceForASID_def gets_vspace_for_asid_is_catch) + apply (rule corres_guard_imp) + apply (rule corres_split_catch) + apply (simp add: o_def) + apply (rule findVSpaceForASID_corres, simp) + apply (rule corres_trivial, simp) + apply wpsimp+ + done + +(* FIXME AARCH64: move to ArchAcc_R *) +lemma pageBits_leq_table_size[simp, intro!]: + "pageBits \ table_size (pt_type pt)" + by (simp add: bit_simps) + +lemma decodeARMPageTableInvocation_corres: + "\cap = arch_cap.PageTableCap p pt_t opt; acap_relation cap cap'; + list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap cap) and + cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and + (\s. \x\set excaps. s \ fst x \ cte_wp_at (\_. True) (snd x) s)) + (invs' and valid_cap' (capability.ArchObjectCap cap') and + (\s. \x\set excaps'. valid_cap' (fst x) s \ cte_wp_at' (\_. True) (snd x) s)) + (decode_page_table_invocation l args slot cap excaps) + (decodeARMPageTableInvocation l args (cte_map slot) cap' excaps')" + supply option.case_cong[cong] + apply (simp add: decode_page_table_invocation_def decodeARMPageTableInvocation_def Let_def + isCap_simps + split del: if_split) + \ \PageTableMap\ + apply (cases "invocation_type l = ArchInvocationLabel ARMPageTableMap") + apply (simp add: decode_pt_inv_map_def + split: invocation_label.split arch_invocation_label.splits split del: if_split) + apply (simp split: list.split, intro conjI impI allI, simp_all)[1] + apply (clarsimp simp: neq_Nil_conv Let_def decodeARMPageTableInvocationMap_def) + apply (rule whenE_throwError_corres_initial; (fastforce simp: mdata_map_def)?) 
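+  (* The corres' step below chains findVSpaceForASID_corres with lookupPTSlot_corres and
+     returns the map invocation on both sides; the residual subgoal relates ppn_from_pptr to
+     the abstract page-table pointer, using that the table is aligned and lies in the kernel
+     window (kernel_window_addrFromPPtr). *)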
+ apply (corres' \fastforce\ + term_simp: user_vtop_def + corres: corres_lookup_error findVSpaceForASID_corres + lookupPTSlot_corres[@lift_corres_args] + corres_returnOk[where P="pspace_aligned and pt_at pt_t p and + pspace_in_kernel_window and valid_uses" + and P'=\] + | corres_cases_both)+ + apply (clarsimp simp: archinv_relation_def page_table_invocation_map_def + ppn_from_pptr_def obj_at_def) + apply (frule (1) pspace_alignedD) + apply (rule kernel_window_addrFromPPtr[symmetric]) + apply (erule (3) pspace_in_kw_bounded) + apply (erule is_aligned_weaken) + apply simp + apply wpsimp+ + apply (fastforce simp: valid_cap_def wellformed_mapdata_def below_user_vtop_in_user_region + not_less pt_lookup_slot_pte_at + intro!: vspace_for_asid_vs_lookup) + apply fastforce + \ \PageTableUnmap\ + apply (clarsimp simp: isCap_simps)+ + apply (cases "invocation_type l = ArchInvocationLabel ARMPageTableUnmap") + apply (clarsimp simp: unlessE_whenE liftE_bindE) + apply (rule stronger_corres_guard_imp) + apply (rule corres_symb_exec_r_conj) + apply (rule_tac F="isArchCap isPageTableCap (cteCap cteVal)" + in corres_gen_asm2) + apply (rule corres_split[OF isFinalCapability_corres[where ptr=slot]]) + apply (drule mp) + apply (clarsimp simp: isCap_simps final_matters'_def) + apply (rule whenE_throwError_corres; simp) + apply (rule option_corres) + apply (cases opt; simp add: mdata_map_def) + apply (rule corres_trivial, simp add: returnOk_def archinv_relation_def + page_table_invocation_map_def) + apply (cases opt, clarsimp simp: mdata_map_def) + apply (clarsimp simp: bind_bindE_assoc) + apply datatype_schem + apply (rule corres_trivial, simp add: returnOk_def archinv_relation_def + page_table_invocation_map_def) + apply (cases opt; simp add: mdata_map_def) + apply (simp | wp getCTE_wp' | wp (once) hoare_drop_imps)+ + apply (clarsimp) + apply (rule no_fail_pre, rule no_fail_getCTE) + apply (erule conjunct2) + apply (clarsimp simp: cte_wp_at_caps_of_state invs_vspace_objs + invs_valid_asid_table invs_psp_aligned invs_distinct) + apply (clarsimp simp: valid_cap_def wellformed_mapdata_def) + apply (clarsimp simp: cte_wp_at_ctes_of cap_rights_update_def acap_rights_update_def + cte_wp_at_caps_of_state) + apply (drule pspace_relation_ctes_ofI[OF _ caps_of_state_cteD, rotated], + erule invs_pspace_aligned', clarsimp+) + apply (simp add: isCap_simps invs_no_0_obj') + apply (simp add: isCap_simps split del: if_split) + by (clarsimp split: invocation_label.splits arch_invocation_label.splits) + +lemma list_all2_Cons: "list_all2 f (x#xs) b \ \y ys. b = y # ys" + by (induct b; simp) + +lemma corres_gets_numlistregs[corres]: + "corres (=) \ \ + (gets (arm_gicvcpu_numlistregs \ arch_state)) (gets (armKSGICVCPUNumListRegs \ ksArchState))" + by (clarsimp simp: state_relation_def arch_state_relation_def) + +theorem corres_throwError_ser[corres]: + "corres (ser \ r) (\_. b = syscall_error_map a) (\_. True) (throwError a) (throwError b)" + by simp + +lemmas corres_liftE_rel_sumI = corres_liftE_rel_sum[THEN iffD2] +lemmas corres_liftMI = corres_liftM_simp[THEN iffD2] +lemmas corres_liftM2I = corres_liftM2_simp[THEN iffD2] + +lemma get_vcpu_LR_corres[corres]: + "corres (r \ (\vcpu lr. 
vgic_lr (vcpu_vgic vcpu) = lr)) (vcpu_at v) (vcpu_at' v) + (liftE (get_vcpu v)) (liftE (liftM (vgicLR \ vcpuVGIC) (getObject v)))" + apply simp + apply (rule corres_rel_imp, rule getObject_vcpu_corres) + apply (rename_tac vcpu', case_tac vcpu') + apply (clarsimp simp: vcpu_relation_def vgic_map_def) + done + +lemma decodeARMVCPUInvocation_corres: + "\acap_relation arch_cap arch_cap'; list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps')\ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap arch_cap) + and (\s. \x\set excaps. s \ fst x \ cte_wp_at (\_. True) (snd x) s)) + (invs' and valid_cap' (capability.ArchObjectCap arch_cap') + and (\s. \x\set excaps'. valid_cap' (fst x) s \ cte_wp_at' (\_. True) (snd x) s)) + (decode_vcpu_invocation label args arch_cap excaps) + (decodeARMVCPUInvocation label args cptr' cte arch_cap' excaps')" + apply (simp add: decode_vcpu_invocation_def decodeARMVCPUInvocation_def) + apply (cases arch_cap; cases "invocation_type label"; simp add: isVCPUCap_def) + apply (rename_tac vcpui) + apply (case_tac vcpui; simp split del: if_split) + (* set_tcb *) + apply (simp add: decode_vcpu_set_tcb_def decodeVCPUSetTCB_def Let_def isVCPUCap_def) + apply (cases excaps; simp add: null_def) + apply (frule list_all2_Cons) + apply clarsimp + apply (case_tac a; clarsimp simp add: cap_relation_def) + apply (corresK corres: corres_returnOkTT) + apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def) + (* inject_irq *) + apply (simp add: decode_vcpu_inject_irq_def decodeVCPUInjectIRQ_def isVCPUCap_def) + apply (cases args; clarsimp) + apply (clarsimp simp add: rangeCheck_def range_check_def unlessE_whenE) + apply (clarsimp simp: shiftL_nat whenE_bindE_throwError_to_if) + apply (corresKsimp wp: get_vcpu_wp) + apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def + valid_cap'_def valid_cap_def isVIRQActive_def is_virq_active_def + virqType_def virq_type_def + make_virq_def makeVIRQ_def) + (* read register *) + apply (clarsimp simp: decode_vcpu_read_register_def decodeVCPUReadReg_def) + apply (cases args; clarsimp simp: isCap_simps whenE_def split: if_split) + apply (rule corres_returnOk) + apply (simp add: archinv_relation_def vcpu_invocation_map_def) + (* write register *) + apply (clarsimp simp: decode_vcpu_write_register_def decodeVCPUWriteReg_def) + apply (cases args; clarsimp simp: isCap_simps) + apply (case_tac list; clarsimp) + apply (rule corres_returnOk) + apply (simp add: archinv_relation_def vcpu_invocation_map_def) + (* ack vppi *) + apply (simp add: decode_vcpu_ack_vppi_def decodeVCPUAckVPPI_def isVCPUCap_def) + apply (cases args; clarsimp simp: isCap_simps) + apply (simp add: arch_check_irq_def rangeCheck_def ucast_nat_def minIRQ_def unlessE_def + word_le_not_less) + apply (case_tac "a > ucast maxIRQ"; simp add: ucast_nat_def word_le_not_less) + apply (clarsimp simp: irq_vppi_event_index_def irqVPPIEventIndex_def maxIRQ_def + word_le_not_less[symmetric] word_le_nat_alt) + apply (fastforce simp: archinv_relation_def vcpu_invocation_map_def ucast_nat_def IRQ_def + intro: corres_returnOk + split: if_splits) + done + +lemma lookupPTSlot_gets_corres[@lift_corres_args, corres]: + "corres (\fr (bits, b'). 
case fr of + Some (level, b) \ bits = pt_bits_left level \ b' = b + | _ \ False) + (pspace_aligned and pspace_distinct and valid_vspace_objs + and valid_asid_table and \\(max_pt_level,pt) + and K (vptr \ user_region)) + \ + (gets (pt_lookup_slot pt vptr \ ptes_of)) (lookupPTSlot pt vptr)" + apply (rule corres_rrel_pre) + apply (rule corres_gets_the_gets) + apply (rule lookupPTSlot_corres) + apply clarsimp + done + +lemma lookupFrame_corres[@lift_corres_args, corres]: + "corres (\fr fr'. case (fr, fr') of + (Some (vmsz, b), Some (bits, b')) \ bits = pageBitsForSize vmsz \ b' = b + | (None, None) \ True + | _ \ False) + (invs and \\ (max_pt_level, vspace) and K (vaddr \ user_region)) + \ + (gets (lookup_frame vspace vaddr \ ptes_of)) (lookupFrame vspace vaddr)" + unfolding lookup_frame_def lookupFrame_def + apply (simp add: gets_obind_bind_eq obind_comp_dist) + apply corres + apply corres_cases_left + apply (rule corres_trivial, simp) + apply corres_cases_right + apply (simp add: gets_obind_bind_eq prod_o_comp gets_prod_comp obind_comp_dist + cong: corres_weaker_cong) + apply corres_cases_left + apply (rename_tac level slot) + apply corres_split + apply (rule corres_gets_the_gets) + apply (simp add: gets_the_oapply2_comp cong: corres_weaker_cong) + apply corres + apply corres_cases_left + apply (rule corres_trivial, simp) + apply (rule corres_if_r') + apply (rename_tac pte) + apply (prop_tac "AARCH64_A.is_PagePTE pte") + apply (case_tac pte; simp add: isPagePTE_def) + apply (simp cong: corres_weaker_cong) + apply (rule_tac F="AARCH64_A.is_PagePTE pte \ level \ max_page_level" in corres_gen_asm) (* FIXME AARCH64: 2 -> max_page_level in spec *) + apply (rule corres_trivial) + apply (clarsimp simp: max_page_level_def AARCH64_A.is_PagePTE_def pte_base_addr_def) + apply (rule corres_inst[where P'=\]) + apply (rename_tac pte) + apply (prop_tac "\ (AARCH64_A.is_PagePTE pte)") + apply (case_tac pte; simp add: isPagePTE_def) + apply simp (* needs separate step to get ofail *) + apply (simp add: ofail_def) + apply (wpsimp wp: getPTE_wp)+ + apply (clarsimp simp: invs_implies invs_valid_asid_table) + apply (frule vs_lookup_table_asid_not_0, simp, assumption, fastforce) + apply (frule pt_lookup_slot_vs_lookup_slotI[rotated]) + apply (clarsimp simp: vspace_for_asid_def entry_for_asid_def vspace_for_pool_def in_omonad + vs_lookup_table_def word_neq_0_conv) + apply (erule conjI[rotated]) + apply fastforce + apply (fastforce simp: pte_at_def AARCH64_A.is_PagePTE_def dest: valid_vspace_objs_strong_slotD) + apply simp + done + +lemma decodeARMVSpaceInvocation_corres[corres]: + "\ cap = arch_cap.PageTableCap pt VSRootPT_T map_data; acap_relation cap cap'; + list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap cap) and + cte_wp_at ((=) (cap.ArchObjectCap cap)) slot and + (\s. \x\set excaps. s \ fst x \ cte_at (snd x) s)) + (invs' and valid_cap' (ArchObjectCap cap') and + (\s. \x\set excaps'. 
valid_cap' (fst x) s \ cte_at' (snd x) s)) + (decode_vspace_invocation (mi_label mi) args slot cap excaps) + (decodeARMVSpaceInvocation (mi_label mi) args cap')" + unfolding decodeARMVSpaceInvocation_def decode_vspace_invocation_def + apply (clarsimp simp: Let_def isCap_simps split del: if_split) + apply (cases "isVSpaceFlushLabel (invocation_type (mi_label mi))"; simp) + apply (clarsimp simp: decode_vs_inv_flush_def split del: if_split) + apply (cases args; clarsimp) + apply (clarsimp simp: neq_Nil_conv) + apply (corres corres: corres_lookup_error findVSpaceForASID_corres corres_returnOkTT + simp: checkValidMappingSize_def + term_simp: archinv_relation_def vspace_invocation_map_def labelToFlushType_corres + page_base_def pageBase_def pageBitsForSize_pt_bits_left + | corres_cases_both)+ + apply (fastforce simp: not_less user_vtop_def valid_cap_def wellformed_mapdata_def + intro!: below_user_vtop_in_user_region vspace_for_asid_vs_lookup) + apply clarsimp + done + +lemma dom_ucast_eq: + "is_aligned y asid_low_bits \ + (- dom (\a::asid_low_index. map_option abs_asid_entry (p (ucast a :: machine_word))) \ + {x. ucast x + (y::AARCH64_A.asid) \ 0} = {}) = + (- dom p \ {x. x \ 2 ^ asid_low_bits - 1 \ x + ucast y \ 0} = {})" + apply safe + apply clarsimp + apply (rule ccontr) + apply (erule_tac x="ucast x" in in_emptyE) + apply (clarsimp simp: p2_low_bits_max) + apply (rule conjI) + apply (clarsimp simp: ucast_ucast_mask) + apply (subst (asm) less_mask_eq) + apply (rule word_less_sub_le [THEN iffD1]) + apply (simp add: word_bits_def) + apply (simp add: asid_low_bits_def) + apply simp + apply (clarsimp simp: mask_2pm1[symmetric] ucast_ucast_mask2 is_down is_aligned_mask) + apply (frule and_mask_eq_iff_le_mask[THEN iffD2]) + apply (simp add: asid_low_bits_def) + apply (erule notE) + apply (subst word_plus_and_or_coroll) + apply word_eqI_solve + apply (subst (asm) word_plus_and_or_coroll; word_bitwise, clarsimp simp: word_size) + apply (clarsimp simp: p2_low_bits_max) + apply (rule ccontr) + apply simp + apply (erule_tac x="ucast x" in in_emptyE) + apply clarsimp + apply (rule conjI, blast) + apply (rule conjI) + apply (rule word_less_sub_1) + apply (rule order_less_le_trans) + apply (rule ucast_less, simp) + apply (simp add: asid_low_bits_def) + apply clarsimp + apply (erule notE) + apply (simp add: is_aligned_mask asid_low_bits_def) + apply (subst word_plus_and_or_coroll) + apply word_eqI_solve + apply (subst (asm) word_plus_and_or_coroll) + apply (word_bitwise, clarsimp simp: word_size) + apply (word_bitwise) + done + +lemma assocs_map_option: + "assocs (\x. map_option f (pool x)) = map (\(x,y). (x, map_option f y)) (assocs pool)" + by (simp add: assocs_def) + +lemma fst_hd_map_eq: + "xs \ [] \ fst (hd (map (\p. (fst p, f (snd p))) xs)) = fst (hd xs)" + by (induct xs; simp) + +lemma assocs_dom_comp_split: + "set (map fst (filter (\x. P (fst x) \ snd x = None) (assocs f))) = (- dom f \ Collect P)" + apply (clarsimp simp: in_assocs_is_fun) + apply (rule set_eqI) + apply clarsimp + apply (rule iffI, clarsimp) + apply (erule conjE) + apply (drule not_in_domD) + apply (rule_tac x="(x,None)" in image_eqI) + apply simp + apply simp + done + +lemma arch_decodeInvocation_corres: + "\ acap_relation arch_cap arch_cap'; + list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\s s'. s' = cte_map s) (map snd excaps) (map snd excaps') \ \ + corres (ser \ archinv_relation) + (invs and valid_cap (cap.ArchObjectCap arch_cap) and + cte_wp_at ((=) (cap.ArchObjectCap arch_cap)) slot and + (\s. 
\x\set excaps. s \ fst x \ cte_at (snd x) s)) + (invs' and valid_cap' (capability.ArchObjectCap arch_cap') and + (\s. \x\set excaps'. s \' fst x \ cte_at' (snd x) s)) + (arch_decode_invocation (mi_label mi) args (to_bl cptr') slot arch_cap excaps) + (Arch.decodeInvocation (mi_label mi) args cptr' (cte_map slot) arch_cap' excaps')" + (* FIXME: check_vp_inv shadowed check_vp_wpR. Instead, + check_vp_wpR should probably be generalised to replace check_vp_inv. *) + supply check_vp_inv[wp del] check_vp_wpR[wp] + apply (simp add: arch_decode_invocation_def + AARCH64_H.decodeInvocation_def + decodeARMMMUInvocation_def + split del: if_split) + apply (cases arch_cap) + \ \ASIDPoolCap\ + apply (simp add: isCap_simps decodeARMMMUInvocation_def decode_asid_pool_invocation_def + decodeARMASIDPoolInvocation_def Let_def + split del: if_split) + apply (cases "invocation_type (mi_label mi) \ ArchInvocationLabel ARMASIDPoolAssign") + apply (simp split: invocation_label.split arch_invocation_label.split) + apply (rename_tac ap asid) + apply (cases "excaps", simp) + apply (cases "excaps'", simp) + apply clarsimp + apply (rename_tac excap0 exslot0 excaps0 excap0' exslot0' excaps0') + apply (case_tac excap0; simp) + apply (rename_tac exarch_cap) + apply (case_tac exarch_cap; simp) + apply (rename_tac pt pt_t map_data) + apply (case_tac "map_data \ None") + apply (clarsimp simp add: mdata_map_def split: pt_type.splits) + apply clarsimp + apply (case_tac pt_t; simp add: mdata_map_def isVTableRoot_def cong: pt_type.case_cong) + apply (corres term_simp: lookup_failure_map_def) + apply (rule_tac F="is_aligned asid asid_low_bits" in corres_gen_asm) + apply (corres' \fastforce\ simp: liftME_def bind_bindE_assoc) + apply (clarsimp simp: asid_pool_relation_def) + apply (subst conj_assoc [symmetric]) + apply (subst assocs_empty_dom_comp [symmetric]) + apply (case_tac rv, simp) + apply (clarsimp simp: o_def dom_ucast_eq) + apply (frule dom_hd_assocsD) + apply (simp add: select_ext_fap[simplified free_asid_pool_select_def] + free_asid_pool_select_def cong: corres_weaker_cong) + apply (simp add: returnOk_liftE[symmetric]) + apply (rule corres_returnOkTT) + apply (simp add: archinv_relation_def asid_pool_invocation_map_def) + apply (case_tac rv, simp add: asid_pool_relation_def) + apply (subst ucast_fst_hd_assocs) + apply (clarsimp simp: o_def dom_map_option) + apply simp + apply (simp add: o_def assocs_map_option filter_map split_def) + apply (subst fst_hd_map_eq; simp?) 
+ apply (clarsimp simp: dom_map_option) + apply (drule arg_cong[where f="map fst" and y="[]"]) + apply (drule arg_cong[where f=set and y="map fst []"]) + apply (subst (asm) assocs_dom_comp_split) + apply (clarsimp simp: split_def) + apply wpsimp+ + apply (fastforce simp: valid_cap_def) + apply simp + \ \ASIDControlCap\ + apply (simp add: isCap_simps decodeARMMMUInvocation_def decode_asid_control_invocation_def + Let_def decodeARMASIDControlInvocation_def + split del: if_split) + apply (cases "invocation_type (mi_label mi) \ ArchInvocationLabel ARMASIDControlMakePool") + apply (simp split: invocation_label.split arch_invocation_label.split) + apply (subgoal_tac "length excaps' = length excaps") + prefer 2 + apply (simp add: list_all2_iff) + apply (cases args, simp) + apply (rename_tac a0 as) + apply (case_tac as, simp) + apply (rename_tac a1 as') + apply (cases excaps, simp) + apply (rename_tac excap0 exs) + apply (case_tac exs) + apply (auto split: list.split)[1] + apply (rename_tac excap1 exss) + apply (case_tac excap0) + apply (rename_tac c0 slot0) + apply (case_tac excap1) + apply (rename_tac c1 slot1) + apply (clarsimp simp: Let_def split del: if_split) + apply (cases excaps', simp) + apply (case_tac list, simp) + apply (rename_tac c0' exs' c1' exss') + apply (clarsimp split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[where r'="\p p'. p = p' o ucast"]) + apply (rule corres_trivial) + apply (clarsimp simp: state_relation_def arch_state_relation_def) + apply (rule corres_splitEE) + apply (rule corres_whenE) + apply (subst assocs_empty_dom_comp [symmetric]) + apply (simp add: o_def) + apply (rule dom_ucast_eq_8) + apply (rule corres_trivial, simp, simp) + apply (simp split del: if_split) + apply (rule_tac F="- dom (asidTable \ ucast) \ {x. 
x \ 2 ^ asid_high_bits - 1} \ {}" in corres_gen_asm) + apply (drule dom_hd_assocsD) + apply (simp add: select_ext_fa[simplified free_asid_select_def] + free_asid_select_def o_def returnOk_liftE[symmetric] split del: if_split) + apply (thin_tac "fst a \ b \ P" for a b P) + apply (case_tac "isUntypedCap a \ capBlockSize a = objBits (makeObject::asidpool) \ \ capIsDevice a") + prefer 2 + apply (rule corres_guard_imp) + apply (rule corres_trivial) + apply (case_tac ad; simp add: isCap_simps split del: if_split) + apply (case_tac x21; simp split del: if_split) + apply (clarsimp simp: objBits_simps split del: if_split) + apply clarsimp + apply (rule TrueI)+ + apply (clarsimp simp: isCap_simps cap_relation_Untyped_eq lookupTargetSlot_def + objBits_simps bindE_assoc split_def) + apply (rule corres_splitEE) + apply (rule ensureNoChildren_corres, rule refl) + apply (rule corres_splitEE) + apply (erule lookupSlotForCNodeOp_corres, rule refl) + apply (rule corres_splitEE) + apply (rule ensureEmptySlot_corres) + apply clarsimp + apply (rule corres_returnOk[where P="\"]) + apply (clarsimp simp add: archinv_relation_def asid_ci_map_def split_def) + apply (clarsimp simp add: ucast_assocs[unfolded o_def] split_def + filter_map asid_high_bits_def) + apply (simp add: ord_le_eq_trans [OF word_n1_ge]) + apply (wp hoare_drop_imps)+ + apply (simp add: o_def validE_R_def) + apply clarsimp + (* for some reason it takes significantly longer if we don't split off the first conjuncts *) + apply (rule conjI, fastforce)+ + apply (fastforce simp: asid_high_bits_def) + apply clarsimp + apply (simp add: null_def split_def asid_high_bits_def word_le_make_less) + apply (subst hd_map, assumption) + (* need abstract guard to show list nonempty *) + apply (simp add: word_le_make_less) + apply (simp add: ucast_ucast_mask2 is_down) + apply (frule hd_in_set) + apply clarsimp + apply (prop_tac "\x::machine_word. 
x < 2^asid_high_bits \ x && mask asid_high_bits = x") + apply (clarsimp simp: and_mask_eq_iff_le_mask le_mask_iff_lt_2n[THEN iffD1] asid_high_bits_def) + apply (simp add: asid_high_bits_def) + apply (erule allE, erule (1) impE) + apply (simp add: ucast_shiftl) + apply (subst ucast_ucast_len) + apply (drule hd_in_set) + apply (rule shiftl_less_t2n; simp add: asid_low_bits_def) + apply (fastforce) + + \ \FrameCap\ + apply (rename_tac word cap_rights vmpage_size option) + apply (simp add: isCap_simps decodeARMMMUInvocation_def Let_def split del: if_split) + apply (rule decodeARMFrameInvocation_corres; simp) + + \ \PageTableCap\ + apply (rename_tac pt_t map_data) + apply (simp add: isCap_simps decodeARMMMUInvocation_def Let_def split del: if_split) + apply (case_tac pt_t; clarsimp simp: isCap_simps) + apply (rule decodeARMVSpaceInvocation_corres; simp) + apply (rule decodeARMPageTableInvocation_corres; simp) + + \ \VCPU\ + apply (simp add: isCap_simps acap_relation_def) + apply (rule corres_guard_imp[OF decodeARMVCPUInvocation_corres]; simp) + done + +lemma invokeVCPUInjectIRQ_corres: + "corres (=) (vcpu_at v and pspace_distinct and pspace_aligned) \ + (do y \ invoke_vcpu_inject_irq v index virq; + return [] + od) + (invokeVCPUInjectIRQ v index virq)" + unfolding invokeVCPUInjectIRQ_def invoke_vcpu_inject_irq_def + supply corres_machine_op_Id_eq[corres_term del] + apply (corres corres: corres_machine_op_Id_dc simp: bind_assoc) + apply (fastforce dest: vcpu_at_cross) + done + +lemma invokeVCPUReadReg_corres: + "corres (=) (vcpu_at v and pspace_distinct and pspace_aligned) (no_0_obj') + (invoke_vcpu_read_register v r) + (invokeVCPUReadReg v r)" + unfolding invoke_vcpu_read_register_def invokeVCPUReadReg_def read_vcpu_register_def readVCPUReg_def + apply (rule corres_discard_r) + apply (corres simp: bind_assoc | corres_cases_both)+ + apply (fastforce dest: vcpu_at_cross) + apply (wpsimp simp: getCurThread_def)+ + done + +lemma invokeVCPUWriteReg_corres: + "corres (=) (vcpu_at vcpu and pspace_distinct and pspace_aligned) (no_0_obj') + (do y \ invoke_vcpu_write_register vcpu r v; + return [] + od) + (invokeVCPUWriteReg vcpu r v)" + unfolding invokeVCPUWriteReg_def invoke_vcpu_write_register_def write_vcpu_register_def + writeVCPUReg_def + apply (rule corres_discard_r) + apply (corres simp: bind_assoc | corres_cases_both)+ + apply (fastforce dest: vcpu_at_cross) + apply wpsimp+ + done + +lemma archThreadSet_VCPU_Some_corres[corres]: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set (tcb_vcpu_update (\_. Some v)) t) (archThreadSet (atcbVCPUPtr_update (\_. Some v)) t)" + apply (rule archThreadSet_corres) + apply (simp add: arch_tcb_relation_def) + done + +crunches dissociateVCPUTCB + for no_0_obj'[wp]: no_0_obj' + and ksCurThread[wp]: "\s. P (ksCurThread s)" + (simp: crunch_simps wp: crunch_wps) + +lemma vcpuSwitch_corres'': + "vcpu' = vcpu + \ corres dc (\s. 
(vcpu \ None \ vcpu_at (the vcpu) s) \ valid_arch_state s) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (vcpu_switch vcpu) + (vcpuSwitch vcpu')" + apply (corres corres: vcpuSwitch_corres') + apply (clarsimp simp: valid_arch_state_def is_vcpu_def obj_at_def cur_vcpu_def in_omonad) + apply fastforce + done + +lemma associateVCPUTCB_corres: + "corres (=) (invs and vcpu_at v and tcb_at t) invs' + (do y \ associate_vcpu_tcb v t; + return [] + od) + (associateVCPUTCB v t)" + unfolding associate_vcpu_tcb_def associateVCPUTCB_def + apply (corres simp: bind_assoc term_simp: vcpu_relation_def + corres: getObject_vcpu_corres setObject_VCPU_corres vcpuSwitch_corres'' + wp: hoare_drop_imps get_vcpu_wp getVCPU_wp + | corres_cases_both simp: vcpu_relation_def)+ + apply (rule_tac Q="\_. invs and tcb_at t" in hoare_strengthen_post) + apply wp + apply clarsimp + apply (rule conjI) + apply (frule (1) sym_refs_vcpu_tcb, fastforce) + apply (clarsimp simp: obj_at_def in_omonad) + apply (fastforce simp: obj_at_def in_omonad) + apply wpsimp+ + apply (rule_tac Q="\_. invs' and tcb_at' t and vcpu_at' v" in hoare_strengthen_post) + apply wpsimp + apply fastforce + apply (wpsimp wp: arch_thread_get_wp archThreadGet_wp)+ + apply (clarsimp simp: invs_implies) + apply (rule conjI; clarsimp) + apply (frule (1) sym_refs_vcpu_tcb, fastforce) + apply (clarsimp simp: obj_at_def in_omonad) + apply (frule (1) sym_refs_tcb_vcpu, fastforce) + apply (clarsimp simp: obj_at_def) + apply clarsimp + apply (fastforce dest: vcpu_at_cross tcb_at_cross) + done + +lemma invokeVCPUAckVPPI_corres: + "corres (=) (vcpu_at vcpu and pspace_distinct and pspace_aligned) \ + (do y \ invoke_vcpu_ack_vppi vcpu vppi; + return [] + od) + (invokeVCPUAckVPPI vcpu vppi)" + unfolding invokeVCPUAckVPPI_def invoke_vcpu_ack_vppi_def write_vcpu_register_def + writeVCPUReg_def + by (corresKsimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) + (auto simp: vcpu_relation_def dest: vcpu_at_cross split: option.splits) + +lemma performARMVCPUInvocation_corres: + notes inv_corres = invokeVCPUInjectIRQ_corres invokeVCPUReadReg_corres + invokeVCPUWriteReg_corres associateVCPUTCB_corres + invokeVCPUAckVPPI_corres + shows "corres (=) (einvs and ct_active and valid_vcpu_invocation iv) + (invs' and ct_active' and valid_vcpuinv' (vcpu_invocation_map iv)) + (perform_vcpu_invocation iv) (performARMVCPUInvocation (vcpu_invocation_map iv))" + unfolding perform_vcpu_invocation_def performARMVCPUInvocation_def + apply (cases iv; simp add: vcpu_invocation_map_def valid_vcpu_invocation_def valid_vcpuinv'_def) + apply (rule inv_corres [THEN corres_guard_imp]; simp add: invs_no_0_obj' invs_implies)+ + done + +lemma arch_performInvocation_corres: + "archinv_relation ai ai' \ + corres (dc \ (=)) + (einvs and ct_active and valid_arch_inv ai) + (invs' and ct_active' and valid_arch_inv' ai') + (arch_perform_invocation ai) (Arch.performInvocation ai')" + apply (clarsimp simp: arch_perform_invocation_def + AARCH64_H.performInvocation_def + performARMMMUInvocation_def) + apply (clarsimp simp: archinv_relation_def) + apply (cases ai) + + \ \InvokeVSpace\ + apply (clarsimp simp: performARMMMUInvocation_def perform_vspace_invocation_def + performVSpaceInvocation_def) + apply ((corres simp: perform_flush_def do_flush_def doFlush_def + corres: corres_machine_op_Id_dc + term_simp: vspace_invocation_map_def + | corres_cases_both simp: vspace_invocation_map_def)+)[1] + + \ \InvokePageTable\ + apply (clarsimp simp: archinv_relation_def performARMMMUInvocation_def) + 
apply (rule corres_guard_imp, rule corres_split_nor) + apply (rule performPageTableInvocation_corres; wpsimp) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (fastforce simp: valid_arch_inv_def) + apply (fastforce simp: valid_arch_inv'_def) + + \ \InvokePage\ + apply (clarsimp simp: archinv_relation_def performARMMMUInvocation_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule performPageInvocation_corres) + apply (simp add: page_invocation_map_def) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (fastforce simp: valid_arch_inv_def) + apply (fastforce simp: valid_arch_inv'_def) + + \ \InvokeASIDControl\ + apply (clarsimp simp: archinv_relation_def performARMMMUInvocation_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule performASIDControlInvocation_corres; wpsimp) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (fastforce simp: valid_arch_inv_def) + apply (fastforce simp: valid_arch_inv'_def) + apply (clarsimp simp: archinv_relation_def) + + \ \InvokeASIDPool\ + apply (clarsimp simp: archinv_relation_def performARMMMUInvocation_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule performASIDPoolInvocation_corres; wpsimp) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (fastforce simp: valid_arch_inv_def) + apply (fastforce simp: valid_arch_inv'_def) + + \ \InvokeVCPU\ + apply (clarsimp simp: archinv_relation_def) + apply (rule corres_guard_imp[OF performARMVCPUInvocation_corres]; + clarsimp simp: valid_arch_inv_def valid_arch_inv'_def)+ + done + +lemma asid_pool_typ_at_ext': + "asid_pool_at' = obj_at' (\::asidpool \ bool)" + apply (rule ext)+ + apply (simp add: typ_at_to_obj_at_arches) + done + +lemma st_tcb_strg': + "st_tcb_at' P p s \ tcb_at' p s" + by (auto simp: pred_tcb_at') + +lemma performASIDControlInvocation_tcb_at': + "\st_tcb_at' active' p and invs' and ct_active' and valid_aci' aci\ + performASIDControlInvocation aci + \\y. 
tcb_at' p\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) + apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ + apply (clarsimp simp: projectKO_opts_defs) + apply (strengthen st_tcb_strg' [where P=\]) + apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] + hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp + updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ + apply (case_tac ctea) + apply (clarsimp) + apply (frule ctes_of_valid_cap') + apply (simp add:invs_valid_objs')+ + apply (clarsimp simp:valid_cap'_def capAligned_def cte_wp_at_ctes_of) + apply (strengthen refl order_refl + pred_tcb'_weakenE[mk_strg I E]) + apply (clarsimp simp: conj_comms invs_valid_pspace' isCap_simps + descendants_range'_def2 empty_descendants_range_in') + apply (frule ctes_of_valid', clarsimp, simp, + drule capFreeIndex_update_valid_cap'[where fb="2 ^ pageBits", rotated -1], + simp_all) + apply (simp add: pageBits_def is_aligned_def untypedBits_defs) + apply (simp add: valid_cap_simps' range_cover_def objBits_simps untypedBits_defs + capAligned_def unat_eq_0 and_mask_eq_iff_shiftr_0[symmetric] + word_bw_assocs) + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range, + fastforce simp add: cte_wp_at_ctes_of, assumption, simp_all) + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) + apply clarsimp + done + +crunches performVSpaceInvocation, performARMVCPUInvocation + for tcb_at'[wp]: "\s. tcb_at' p s" + +lemma invokeArch_tcb_at': + "\invs' and valid_arch_inv' ai and ct_active' and st_tcb_at' active' p\ + Arch.performInvocation ai + \\rv. tcb_at' p\" + apply (simp add: AARCH64_H.performInvocation_def performARMMMUInvocation_def) + apply (wpsimp simp: performARMMMUInvocation_def pred_tcb_at' valid_arch_inv'_def + wp: performASIDControlInvocation_tcb_at') + done + +crunch pspace_no_overlap'[wp]: setThreadState "pspace_no_overlap' w s" + (simp: unless_def) + +lemma sts_cte_cap_to'[wp]: + "\ex_cte_cap_to' p\ setThreadState st t \\rv. ex_cte_cap_to' p\" + by (wp ex_cte_cap_to'_pres) + + +lemma sts_valid_arch_inv': (* FIXME AARCH64 cleanup *) + "\valid_arch_inv' ai\ setThreadState st t \\rv. valid_arch_inv' ai\" + apply (cases ai, simp_all add: valid_arch_inv'_def) + apply (clarsimp simp: valid_vsi'_def split: vspace_invocation.splits) + apply (rule conjI|clarsimp|wpsimp)+ + apply (clarsimp simp: valid_pti'_def split: page_table_invocation.splits) + apply (rule conjI|clarsimp|wpsimp)+ + apply (rename_tac page_invocation) + apply (case_tac page_invocation, simp_all add: valid_page_inv'_def)[1] + apply ((wp|simp)+)[2] + apply (clarsimp simp: isCap_simps pred_conj_def) + apply wpsimp + apply wpsimp + apply (clarsimp simp: valid_aci'_def split: asidcontrol_invocation.splits) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule hoare_pre, wp) + apply clarsimp + apply (clarsimp simp: valid_apinv'_def split: asidpool_invocation.splits) + apply (rule hoare_pre, wp) + apply simp + apply (rename_tac vcpui) + apply (case_tac vcpui; wpsimp simp: valid_vcpuinv'_def) + done + +lemma inv_ASIDPool: + "inv ASIDPool = (\v. 
case v of ASIDPool a \ a)" + by (rule ext) + (simp split: asidpool.splits) + +lemma eq_arch_update': + "ArchObjectCap cp = cteCap cte \ is_arch_update' (ArchObjectCap cp) cte" + by (clarsimp simp: is_arch_update'_def isCap_simps) + +lemma decodeARMFrameInvocationFlush_valid_arch_inv'[wp]: + "\\\ + decodeARMFrameInvocationFlush label args (FrameCap word vmrights vmpage_size d option) + \valid_arch_inv'\, -" + unfolding decodeARMFrameInvocationFlush_def + by (wpsimp simp: valid_arch_inv'_def valid_page_inv'_def cong: if_cong) + +lemma decodeARMFrameInvocationMap_valid_arch_inv'[wp]: + "\invs' and valid_cap' (ArchObjectCap (FrameCap word vmrights vmpage_size d option)) and + cte_wp_at' ((=) (ArchObjectCap (FrameCap word vmrights vmpage_size d option)) \ cteCap) slot and + valid_cap' vspaceCap\ + decodeARMFrameInvocationMap slot (FrameCap word vmrights vmpage_size d option) + vptr rightsMask attr vspaceCap + \valid_arch_inv'\, -" + unfolding valid_arch_inv'_def decodeARMFrameInvocationMap_def + supply checkVPAlignment_inv[wp del] checkVP_wpR[wp] + apply (wpsimp wp: lookupPTSlot_inv getASID_wp + simp: checkVSpaceRoot_def if_apply_def2 valid_page_inv'_def valid_cap'_def + capAligned_def + split_del: if_split cong: if_cong + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule_tac t="cteCap cte" in sym) + apply (clarsimp simp: valid_cap'_def wellformed_mapdata'_def is_arch_update'_def capAligned_def + isCap_simps not_less) + apply (fastforce simp: wellformed_mapdata'_def vmsz_aligned_user_region user_vtop_def mask_def) + done + +lemma decode_page_inv_wf[wp]: + "cap = (arch_capability.FrameCap word vmrights vmpage_size d option) \ + \invs' and valid_cap' (capability.ArchObjectCap cap ) and + cte_wp_at' ((=) (capability.ArchObjectCap cap) \ cteCap) slot and + (\s. \x\set excaps. cte_wp_at' ((=) (fst x) \ cteCap) (snd x) s) and + sch_act_simple\ + decodeARMFrameInvocation label args slot cap excaps + \valid_arch_inv'\, -" + apply (simp add: decodeARMFrameInvocation_def Let_def isCap_simps + cong: if_cong split del: if_split) + apply (wpsimp simp: valid_arch_inv'_def valid_page_inv'_def) + apply (clarsimp simp: isCap_simps cte_wp_at_ctes_of is_arch_update'_def) + apply (drule_tac t="cteCap _" in sym)+ + apply clarsimp + apply (drule ctes_of_valid', fastforce)+ + apply clarsimp + done + +lemma below_pptrUserTop_in_user_region: + "p \ pptrUserTop \ p \ user_region" + apply (simp add: user_region_def canonical_user_def pptrUserTop_def) + apply (simp add: bit_simps is_aligned_mask) + done + +lemma checkVSpaceRoot_wp[wp]: + "\\s. \vspace asid x. cap = ArchObjectCap (PageTableCap vspace VSRootPT_T (Some (asid, x))) \ + Q (vspace, asid) s\ + checkVSpaceRoot cap n + \Q\, -" + unfolding checkVSpaceRoot_def + by wpsimp + +lemma decode_page_table_inv_wf[wp]: + "arch_cap = PageTableCap word pt_t option \ + \invs' and valid_cap' (capability.ArchObjectCap arch_cap) and + cte_wp_at' ((=) (capability.ArchObjectCap arch_cap) \ cteCap) slot and + (\s. \x\set excaps. 
cte_wp_at' ((=) (fst x) \ cteCap) (snd x) s) and + sch_act_simple\ + decodeARMPageTableInvocation label args slot arch_cap excaps + \valid_arch_inv'\, - " + supply if_cong[cong] if_split [split del] + apply (clarsimp simp: decodeARMPageTableInvocation_def Let_def isCap_simps) + apply (wpsimp simp: decodeARMPageTableInvocationMap_def valid_arch_inv'_def valid_pti'_def + maybeVSpaceForASID_def o_def if_apply_def2 + wp: getPTE_wp hoare_vcg_all_lift hoare_vcg_const_imp_lift + lookupPTSlot_inv isFinalCapability_inv + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: not_le isCap_simps cte_wp_at_ctes_of eq_arch_update') + apply (drule_tac t="cteCap cte" in sym) + apply (simp add: valid_cap'_def capAligned_def) + apply (clarsimp simp: is_arch_update'_def isCap_simps + split: if_split) + apply (drule_tac t="cteCap ctea" in sym) + apply (drule ctes_of_valid', fastforce)+ + apply (clarsimp simp: valid_cap'_def) + apply (simp add: wellformed_mapdata'_def below_pptrUserTop_in_user_region neg_mask_user_region) + done + +lemma capMaster_isPageTableCap: + "capMasterCap cap' = capMasterCap cap \ + isArchCap isPageTableCap cap' = isArchCap isPageTableCap cap" + by (simp add: capMasterCap_def isArchCap_def isPageTableCap_def + split: capability.splits arch_capability.splits) + +lemma decodeARMVCPUInvocation_valid_arch_inv'[wp]: + "\invs' and valid_cap' (ArchObjectCap (VCPUCap vcpu)) and + cte_wp_at' ((=) (ArchObjectCap (VCPUCap vcpu)) \ cteCap) slot and + (\s. \x\set excaps. cte_wp_at' ((=) (fst x) \ cteCap) (snd x) s) and + (\s. \x\set excaps. \r\cte_refs' (fst x) (irq_node' s). ex_cte_cap_wp_to' (\_. True) r s) and + (\s. \x\set excaps. valid_cap' (fst x) s) and + sch_act_simple\ + decodeARMVCPUInvocation label args cap_index slot (VCPUCap vcpu) excaps + \valid_arch_inv'\, -" + unfolding decodeARMVCPUInvocation_def + apply (wpsimp simp: decodeVCPUSetTCB_def decodeVCPUInjectIRQ_def Let_def decodeVCPUReadReg_def + decodeVCPUWriteReg_def decodeVCPUAckVPPI_def + wp: getVCPU_wp + split_del: if_split) + apply (clarsimp simp: valid_arch_inv'_def valid_vcpuinv'_def isCap_simps null_def neq_Nil_conv) + apply (rename_tac t_slot excaps0 t) + apply (rule conjI) + apply (clarsimp simp: valid_cap'_def) + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rule conjI) + apply (drule_tac t="cteCap cte" for cte in sym) + apply fastforce + apply (rename_tac tcb_cte) + apply (drule_tac t="cteCap tcb_cte" in sym) + apply clarsimp + apply (rule_tac x=t_slot in exI) + apply fastforce + done + +lemma decodeARMVSpaceInvocation_valid_arch_inv'[wp]: + "\\\ + decodeARMVSpaceInvocation label args (PageTableCap vspace VSRootPT_T map_data) + \valid_arch_inv'\, -" + unfolding decodeARMVSpaceInvocation_def + by (wpsimp simp: Let_def valid_arch_inv'_def valid_vsi'_def + cong: if_cong + split_del: if_split) + +lemma arch_decodeInvocation_wf[wp]: + shows "\invs' and valid_cap' (ArchObjectCap arch_cap) and + cte_wp_at' ((=) (ArchObjectCap arch_cap) o cteCap) slot and + (\s. \x \ set excaps. cte_wp_at' ((=) (fst x) o cteCap) (snd x) s) and + (\s. \x \ set excaps. \r \ cte_refs' (fst x) (irq_node' s). ex_cte_cap_to' r s) and + (\s. \x \ set excaps. 
s \' fst x) and + sch_act_simple\ + Arch.decodeInvocation label args cap_index slot arch_cap excaps + \valid_arch_inv'\,-" + apply (cases arch_cap) + apply (simp add: decodeARMMMUInvocation_def AARCH64_H.decodeInvocation_def + Let_def split_def isCap_simps decodeARMASIDControlInvocation_def + cong: if_cong invocation_label.case_cong arch_invocation_label.case_cong list.case_cong prod.case_cong + split del: if_split) + apply (rule hoare_pre) + apply ((wp whenE_throwError_wp ensureEmptySlot_stronger| + wpc| + simp add: valid_arch_inv'_def valid_aci'_def is_aligned_shiftl_self + split del: if_split)+)[1] + apply (rule_tac Q'= + "\rv. K (fst (hd [p\assocs asidTable . fst p \ 2 ^ asid_high_bits - 1 \ snd p = None]) + << asid_low_bits \ 2 ^ asid_bits - 1) and + real_cte_at' rv and + ex_cte_cap_to' rv and + cte_wp_at' (\cte. \idx. cteCap cte = (UntypedCap False frame pageBits idx)) (snd (excaps!0)) and + sch_act_simple and + (\s. descendants_of' (snd (excaps!0)) (ctes_of s) = {}) " + in hoare_post_imp_R) + apply (simp add: lookupTargetSlot_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of asid_wf_def mask_def) + apply (simp split del: if_split) + apply (wp ensureNoChildren_sp whenE_throwError_wp|wpc)+ + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: null_def neq_Nil_conv) + apply (drule filter_eq_ConsD) + apply clarsimp + apply (rule shiftl_less_t2n) + apply (simp add: asid_bits_def asid_low_bits_def asid_high_bits_def) + apply unat_arith + apply (simp add: asid_bits_def) + apply clarsimp + apply (rule conjI, fastforce) + apply (clarsimp simp: cte_wp_at_ctes_of objBits_simps) + + \ \ASIDPool cap\ + apply (simp add: decodeARMMMUInvocation_def AARCH64_H.decodeInvocation_def + Let_def split_def isCap_simps decodeARMASIDPoolInvocation_def + cong: if_cong split del: if_split) + apply (wpsimp simp: valid_arch_inv'_def valid_apinv'_def wp: getASID_wp cong: if_cong) + apply (clarsimp simp: word_neq_0_conv valid_cap'_def valid_arch_inv'_def valid_apinv'_def) + apply (rule conjI) + apply (erule cte_wp_at_weakenE') + apply (simp, drule_tac t="cteCap c" in sym, simp add: isCap_simps) + apply (subst (asm) conj_assoc [symmetric]) + apply (subst (asm) assocs_empty_dom_comp [symmetric]) + apply (drule dom_hd_assocsD) + apply (simp add: capAligned_def asid_wf_def mask_def) + apply (elim conjE) + apply (subst field_simps, erule is_aligned_add_less_t2n) + apply assumption + apply (simp add: asid_low_bits_def asid_bits_def) + apply assumption + + \ \PageCap\ + apply (simp add: decodeARMMMUInvocation_def isCap_simps AARCH64_H.decodeInvocation_def + cong: if_cong split del: if_split) + apply (wp decode_page_inv_wf, rule refl) + apply clarsimp + + \ \PageTableCap\ + apply (simp add: decodeARMMMUInvocation_def isCap_simps AARCH64_H.decodeInvocation_def + cong: if_cong split del: if_split) + apply (rename_tac pt_t map_data) + apply (case_tac pt_t; clarsimp) + apply wp + apply (wp decode_page_table_inv_wf, rule refl) + apply clarsimp + + \ \VCPUCap\ + apply (clarsimp simp: AARCH64_H.decodeInvocation_def) + apply wp + done + +crunch nosch[wp]: setMRs "\s. P (ksSchedulerAction s)" + (ignore: getRestartPC setRegister transferCapsToSlots + wp: hoare_drop_imps hoare_vcg_split_case_option + mapM_wp' + simp: split_def zipWithM_x_mapM) + +crunch nosch [wp]: performARMMMUInvocation "\s. 
P (ksSchedulerAction s)" + (simp: crunch_simps + wp: crunch_wps getObject_cte_inv getASID_wp) + +lemmas setObject_cte_st_tcb_at' [wp] = setCTE_pred_tcb_at' [unfolded setCTE_def] + +crunch st_tcb_at': performPageTableInvocation, + performPageInvocation, + performASIDPoolInvocation "st_tcb_at' P t" + (wp: crunch_wps getASID_wp getObject_cte_inv simp: crunch_simps pteAtIndex_def) + +lemma performASIDControlInvocation_st_tcb_at': + "\st_tcb_at' (P and (\) Inactive and (\) IdleThreadState) t and + valid_aci' aci and invs' and ct_active'\ + performASIDControlInvocation aci + \\y. st_tcb_at' P t\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) + apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) + apply (rule hoare_pre) + apply (wp createObjects_orig_obj_at'[where P="P \ tcbState", folded st_tcb_at'_def] + updateFreeIndex_pspace_no_overlap' getSlotCap_wp + hoare_vcg_ex_lift + deleteObjects_cte_wp_at' deleteObjects_invs_derivatives + deleteObjects_st_tcb_at' + hoare_weak_lift_imp + | simp add: placeNewObject_def2)+ + apply (case_tac ctea) + apply (clarsimp) + apply (frule ctes_of_valid_cap') + apply (simp add:invs_valid_objs')+ + apply (clarsimp simp:valid_cap'_def capAligned_def cte_wp_at_ctes_of) + apply (rule conjI) + apply clarsimp + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) + apply assumption+ + subgoal by (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) + subgoal by fastforce + apply simp + apply (rule conjI,assumption) + apply (clarsimp simp:invs_valid_pspace' objBits_simps range_cover_full descendants_range'_def2 + isCap_simps) + apply (intro conjI) + apply (fastforce simp:empty_descendants_range_in')+ + apply clarsimp + apply (drule (1) cte_cap_in_untyped_range) + apply (fastforce simp add: cte_wp_at_ctes_of) + apply assumption+ + apply (clarsimp simp: invs'_def valid_state'_def if_unsafe_then_cap'_def cte_wp_at_ctes_of) + apply fastforce + apply simp + apply auto + done + +lemmas arch_finalise_cap_aligned' = ArchRetypeDecls_H_AARCH64_H_finaliseCap_aligned' + +lemmas arch_finalise_cap_distinct' = ArchRetypeDecls_H_AARCH64_H_finaliseCap_distinct' + +crunch st_tcb_at' [wp]: "Arch.finaliseCap" "st_tcb_at' P t" + (wp: crunch_wps getASID_wp simp: crunch_simps) + +lemma archThreadSet_ex_nonz_cap_to'[wp]: + "archThreadSet f t \ex_nonz_cap_to' v\" + unfolding ex_nonz_cap_to'_def cte_wp_at_ctes_of by wp + +lemma assoc_invs': + "\invs' and + ko_at' (vcpu\vcpuTCBPtr:= None\) v and + obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = None) t and + ex_nonz_cap_to' v and ex_nonz_cap_to' t\ + do y \ archThreadSet (atcbVCPUPtr_update (\_. Some v)) t; + setObject v (vcpuTCBPtr_update (\_. Some t) vcpu) + od + \\_. 
invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_tcb_valid_objs setObject_vcpu_valid_objs' + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb valid_arch_tcb'_def + | wp (once) hoare_vcg_imp_lift)+ + apply (rule conjI) + apply (clarsimp simp: typ_at_to_obj_at_arches obj_at'_def) + apply (rule conjI) + apply (clarsimp simp: typ_at_tcb' obj_at'_def) + apply (rule_tac rfs'="state_hyp_refs_of' s" in delta_sym_refs, assumption) + supply fun_upd_apply[simp] + apply (clarsimp simp: hyp_live'_def arch_live'_def) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: state_hyp_refs_of'_def obj_at'_def projectKOs tcb_vcpu_refs'_def + split: option.splits if_split_asm) + apply (clarsimp simp: hyp_live'_def arch_live'_def) + done + +lemma asUser_obj_at_vcpu[wp]: + "\obj_at' (P :: vcpu \ bool) t\ + asUser t' f + \\rv. obj_at' P t\" + apply (simp add: asUser_def threadGet_stateAssert_gets_asUser) + apply (wpsimp wp: threadSet_ko_wp_at2' simp: obj_at'_real_def) + done + +lemma archThreadSet_obj_at'_vcpu[wp]: + "archThreadSet f t \obj_at' (P::vcpu \ bool) p\" + unfolding archThreadSet_def + by (wpsimp wp: obj_at_setObject2 simp: updateObject_default_def in_monad) + +lemma asUser_atcbVCPUPtr[wp]: + "asUser t' f \obj_at' (\t. P (atcbVCPUPtr (tcbArch t))) t\" + unfolding asUser_def threadGet_stateAssert_gets_asUser + by (wpsimp simp: asUser_fetch_def obj_at'_def projectKOs atcbContextGet_def atcbContextSet_def) + +lemma dissociateVCPUTCB_no_vcpu[wp]: + "\\s. t \ t' \ obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = None) t s\ + dissociateVCPUTCB vcpu t' \\rv. obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = None) t\" + unfolding dissociateVCPUTCB_def + by (wpsimp wp: getVCPU_wp setObject_tcb_strongest simp: archThreadSet_def archThreadGet_def) + +lemma dissociateVCPUTCB_no_tcb[wp]: + "\ko_at' v vcpu\ dissociateVCPUTCB vcpu tcb \\rv. ko_at' (vcpuTCBPtr_update Map.empty v) vcpu\" + unfolding dissociateVCPUTCB_def + apply (wpsimp wp: obj_at_setObject3 getVCPU_wp + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def archThreadGet_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma dissociateVCPUTCB_ex_nonz_cap_to'[wp]: + "dissociateVCPUTCB v' t \ex_nonz_cap_to' v\" + unfolding ex_nonz_cap_to'_def cte_wp_at_ctes_of by wp + +lemma vcpuTCBPtr_update_Some_vcpu_live[wp]: + "\if vcpuPtr = vcpuPtr' + then ko_wp_at' is_vcpu' vcpuPtr + else ko_wp_at' (is_vcpu' and hyp_live') vcpuPtr\ + setObject vcpuPtr' (vcpuTCBPtr_update (\_. Some tcbPtr) vcpu) + \\_. ko_wp_at' (is_vcpu' and hyp_live') vcpuPtr\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps archObjSize_def) + apply (clarsimp simp: vcpuBits_def pageBits_def) + by (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def hyp_live'_def + arch_live'_def + split: if_splits) + +lemma vcpuTCBPtr_update_Some_valid_arch_state'[wp]: + "setObject vcpuPtr (vcpuTCBPtr_update (\_. 
Some tptr) vcpu) \valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift + | rule hoare_lift_Pf[where f=ksArchState]) + by (auto simp: pred_conj_def o_def ko_wp_at'_def) + +definition associateVCPUTCB_helper where + "associateVCPUTCB_helper vcpu v t = do + y \ archThreadSet (atcbVCPUPtr_update (\_. Some v)) t; + setObject v (vcpuTCBPtr_update (\_. Some t) vcpu) + od" + +lemma associateVCPUTCB_invs'[wp]: + "\invs' and ex_nonz_cap_to' vcpu and ex_nonz_cap_to' tcb and vcpu_at' vcpu\ + associateVCPUTCB vcpu tcb + \\_. invs'\" + apply (clarsimp simp: associateVCPUTCB_def) + apply (subst bind_assoc[symmetric], fold associateVCPUTCB_helper_def) + apply wpsimp + apply (rule_tac Q="\_ s. invs' s \ ko_wp_at' (is_vcpu' and hyp_live') vcpu s" in hoare_post_imp) + apply simp + apply (rule hoare_vcg_conj_lift) + apply (wpsimp wp: assoc_invs'[folded associateVCPUTCB_helper_def]) + apply (clarsimp simp: associateVCPUTCB_helper_def) + apply (wpsimp simp: vcpu_at_is_vcpu'[symmetric])+ + apply (wpsimp wp: getVCPU_wp) + apply (rule_tac Q="\_. invs' and obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = None) tcb and + ex_nonz_cap_to' vcpu and ex_nonz_cap_to' tcb and vcpu_at' vcpu" + in hoare_strengthen_post) + apply wpsimp + apply (clarsimp simp: obj_at'_def) + apply (rename_tac v obj) + apply (case_tac v, simp) + apply (wpsimp wp: getObject_tcb_wp simp: archThreadGet_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma invokeVCPUInjectIRQ_invs'[wp]: + "invokeVCPUInjectIRQ v ir idx \invs'\" + unfolding invokeVCPUInjectIRQ_def + apply (wpsimp wp: dmo_invs' + simp: set_gic_vcpu_ctrl_lr_def machine_op_lift_def machine_rest_lift_def) + apply (clarsimp simp: in_monad select_f_def) + done + +lemma invokeVCPUAckVPPI_invs'[wp]: + "invokeVCPUAckVPPI vcpu_ptr irq \invs'\" + unfolding invokeVCPUAckVPPI_def + by (wpsimp wp: dmo_invs' setVCPU_VPPIMasked_invs' + simp: set_gic_vcpu_ctrl_lr_def machine_op_lift_def machine_rest_lift_def vcpuUpdate_def) + +lemma invokeVCPUReadReg_inv[wp]: + "invokeVCPUReadReg vcpu r \P\" + unfolding invokeVCPUReadReg_def readVCPUReg_def vcpuReadReg_def + by (wpsimp wp: dmo_inv' simp: readVCPUHardwareReg_def getSCTLR_def) + +lemma invokeVCPUWriteReg_invs'[wp]: + "invokeVCPUWriteReg vcpu r v \invs'\" + unfolding invokeVCPUWriteReg_def writeVCPUReg_def vcpuWriteReg_def vcpuUpdate_def + by (wpsimp wp: dmo_machine_op_lift_invs' setVCPU_regs_invs') + +lemma performARMVCPUInvocation_invs'[wp]: + "\invs' and valid_vcpuinv' i\ performARMVCPUInvocation i \\_. invs'\" + unfolding performARMVCPUInvocation_def valid_vcpuinv'_def by wpsimp + + +lemma invs_asid_table_strengthen': + "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ + invs' (s\ksArchState := + armKSASIDTable_update (\_. 
((armKSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" + apply (clarsimp simp: invs'_def valid_state'_def) + apply (rule conjI) + apply (clarsimp simp: valid_global_refs'_def global_refs'_def) + apply (clarsimp simp: valid_arch_state'_def) + apply (clarsimp simp: valid_asid_table'_def ran_def mask_def) + apply (rule conjI) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: mask_def) + apply (rule conjI) + apply (clarsimp simp: valid_pspace'_def) + apply (simp add: valid_machine_state'_def split: option.splits prod.splits) + done + +lemma ex_cte_not_in_untyped_range: + "\(ctes_of s) cref = Some (CTE (capability.UntypedCap d ptr bits idx) mnode); + descendants_of' cref (ctes_of s) = {}; invs' s; + ex_cte_cap_wp_to' (\_. True) x s; valid_global_refs' s\ + \ x \ mask_range ptr bits" + apply clarsimp + apply (drule(1) cte_cap_in_untyped_range) + apply (fastforce simp:cte_wp_at_ctes_of)+ + done + +lemma makeObject_ASIDPool_not_live[simp]: + "\ (live' (KOArch (KOASIDPool makeObject)))" + by (simp add: makeObject_asidpool live'_def hyp_live'_def arch_live'_def) + +lemma performASIDControlInvocation_invs' [wp]: + "\invs' and ct_active' and valid_aci' aci\ + performASIDControlInvocation aci + \\y. invs'\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: performASIDControlInvocation_def valid_aci'_def + placeNewObject_def2 cte_wp_at_ctes_of + split: asidcontrol_invocation.splits) + apply (rename_tac w1 w2 w3 w4 cte ctea idx) + apply (case_tac ctea) + apply (clarsimp) + apply (frule ctes_of_valid_cap') + apply fastforce + apply (rule hoare_pre) + apply (wp hoare_vcg_const_imp_lift) + apply (strengthen invs_asid_table_strengthen') + apply (wp cteInsert_simple_invs) + apply (wp createObjects'_wp_subst[OF + createObjects_no_cte_invs[where sz = pageBits and ty="Inl (KOArch (KOASIDPool pool))" for pool]] + createObjects_orig_cte_wp_at'[where sz = pageBits] hoare_vcg_const_imp_lift + |simp add: makeObjectKO_def asid_pool_typ_at_ext' valid_cap'_def cong: rev_conj_cong + |strengthen safe_parent_strg'[where idx= "2^ pageBits"])+ + apply (rule hoare_vcg_conj_lift) + apply (rule descendants_of'_helper) + apply (wp createObjects_null_filter' + [where sz = pageBits and ty="Inl (KOArch (KOASIDPool ap))" for ap] + createObjects_valid_pspace' + [where sz = pageBits and ty="Inl (KOArch (KOASIDPool ap))" for ap] + | simp add: makeObjectKO_def asid_pool_typ_at_ext' valid_cap'_def + cong: rev_conj_cong)+ + apply (simp add: objBits_simps valid_cap'_def capAligned_def range_cover_full) + apply (wp createObjects'_wp_subst[OF createObjects_ex_cte_cap_to[where sz = pageBits]] + createObjects_orig_cte_wp_at'[where sz = pageBits] + hoare_vcg_const_imp_lift + |simp add: makeObjectKO_def asid_pool_typ_at_ext' valid_cap'_def + isCap_simps + cong: rev_conj_cong + |strengthen safe_parent_strg'[where idx = "2^ pageBits"] + | simp add: bit_simps)+ + apply (simp add:asid_pool_typ_at_ext'[symmetric]) + apply (wp createObject_typ_at') + apply (simp add: objBits_simps valid_cap'_def + capAligned_def range_cover_full makeObjectKO_def + asid_pool_typ_at_ext' + cong: rev_conj_cong) + apply (clarsimp simp:conj_comms + descendants_of_null_filter' + | strengthen invs_pspace_aligned' invs_pspace_distinct' + invs_pspace_aligned' invs_valid_pspace')+ + apply (wp updateFreeIndex_forward_invs' + updateFreeIndex_cte_wp_at + updateFreeIndex_pspace_no_overlap' + updateFreeIndex_caps_no_overlap'' + updateFreeIndex_descendants_of2 + updateFreeIndex_caps_overlap_reserved + updateCap_cte_wp_at_cases hoare_weak_lift_imp + 
getSlotCap_wp)+ + apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask + | strengthen invs_valid_pspace' invs_pspace_aligned' + invs_pspace_distinct' empty_descendants_range_in')+ + apply (wp deleteObjects_invs'[where p="makePoolParent aci"] + hoare_vcg_ex_lift + deleteObjects_caps_no_overlap''[where slot="makePoolParent aci"] + deleteObject_no_overlap + deleteObjects_cap_to'[where p="makePoolParent aci"] + deleteObjects_ct_active'[where cref="makePoolParent aci"] + deleteObjects_descendants[where p="makePoolParent aci"] + deleteObjects_cte_wp_at' + deleteObjects_null_filter[where p="makePoolParent aci"]) + apply (frule valid_capAligned) + apply (clarsimp simp: invs_mdb' invs_valid_pspace' capAligned_def + cte_wp_at_ctes_of is_simple_cap'_def isCap_simps) + apply (strengthen refl ctes_of_valid_cap'[mk_strg I E]) + apply (clarsimp simp: conj_comms invs_valid_objs') + apply (frule_tac ptr="w1" in descendants_range_caps_no_overlapI'[where sz = pageBits]) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (simp add:empty_descendants_range_in') + apply (frule(1) if_unsafe_then_capD'[OF _ invs_unsafe_then_cap',rotated]) + apply (fastforce simp:cte_wp_at_ctes_of) + apply (drule ex_cte_not_in_untyped_range[rotated -2]) + apply (simp add:invs_valid_global')+ + apply (drule ex_cte_not_in_untyped_range[rotated -2]) + apply (simp add:invs_valid_global')+ + apply (subgoal_tac "is_aligned (2 ^ pageBits) minUntypedSizeBits") + prefer 2 + apply (rule is_aligned_weaken) + apply (rule is_aligned_shiftl_self[unfolded shiftl_t2n,where p = 1, simplified]) + apply (simp add: pageBits_def untypedBits_defs) + apply (frule_tac cte="CTE (capability.UntypedCap False a b c) m" for a b c m in valid_global_refsD', clarsimp) + apply (simp add: Int_commute) + by (auto simp:empty_descendants_range_in' objBits_simps max_free_index_def + asid_low_bits_def word_bits_def + range_cover_full descendants_range'_def2 is_aligned_mask + null_filter_descendants_of'[OF null_filter_simp'] bit_simps + valid_cap_simps' mask_def) + +lemma performVSpaceInvocation_invs[wp]: + "performVSpaceInvocation vspace \invs'\" + unfolding performVSpaceInvocation_def + by wpsimp + +lemma arch_performInvocation_invs': + "\invs' and ct_active' and valid_arch_inv' invocation\ + Arch.performInvocation invocation + \\rv. invs'\" + unfolding AARCH64_H.performInvocation_def + apply (cases invocation; clarsimp simp: performARMMMUInvocation_def valid_arch_inv'_def) + apply wpsimp+ + done + +end + +end diff --git a/proof/refine/AARCH64/Bits_R.thy b/proof/refine/AARCH64/Bits_R.thy new file mode 100644 index 0000000000..a548deb670 --- /dev/null +++ b/proof/refine/AARCH64/Bits_R.thy @@ -0,0 +1,460 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Bits_R +imports Corres +begin + +crunch_ignore (add: + bind withoutFailure throw catchFailure rethrowFailure capFaultOnFailure lookupErrorOnFailure + nullCapOnFailure nothingOnFailure withoutPreemption preemptionPoint maskInterrupt unifyFailure + ignoreFailure emptyOnFailure clearMemoryVM assertDerived + setObject getObject updateObject loadObject) + +context Arch +begin + +crunch_ignore (add: lookupPTSlotFromLevel lookupPTFromLevel) + +end + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma throwE_R: "\\\ throw f \P\,-" + by (simp add: validE_R_def) wp + +lemma withoutFailure_wp [wp]: + "\P\ f \Q\ \ \P\ withoutFailure f \Q\,\E\" + "\P\ f \Q\ \ \P\ withoutFailure f \Q\,-" + "\\\ withoutFailure f -,\E\" + by (auto simp: validE_R_def validE_E_def valid_def) + +lemma no_fail_typeError [simp, wp]: + "no_fail \ (typeError xs ko)" + by (simp add: typeError_def) + +lemma isCap_simps: + "isZombie v = (\v0 v1 v2. v = Zombie v0 v1 v2)" + "isArchObjectCap v = (\v0. v = ArchObjectCap v0)" + "isThreadCap v = (\v0. v = ThreadCap v0)" + "isCNodeCap v = (\v0 v1 v2 v3. v = CNodeCap v0 v1 v2 v3)" + "isNotificationCap v = (\v0 v1 v2 v3. v = NotificationCap v0 v1 v2 v3)" + "isEndpointCap v = (\v0 v1 v2 v3 v4 v5. v = EndpointCap v0 v1 v2 v3 v4 v5)" + "isUntypedCap v = (\d v0 v1 f. v = UntypedCap d v0 v1 f)" + "isReplyCap v = (\v0 v1 v2. v = ReplyCap v0 v1 v2)" + "isIRQControlCap v = (v = IRQControlCap)" + "isIRQHandlerCap v = (\v0. v = IRQHandlerCap v0)" + "isNullCap v = (v = NullCap)" + "isDomainCap v = (v = DomainCap)" + "isFrameCap w = (\v0 v1 v2 v3 v4. w = FrameCap v0 v1 v2 v3 v4)" + "isArchFrameCap v = (\v0 v1 v2 v3 v4. v = ArchObjectCap (FrameCap v0 v1 v2 v3 v4))" + "isPageTableCap w = (\v0 v1 v2. w = PageTableCap v0 v1 v2)" + "isASIDControlCap w = (w = ASIDControlCap)" + "isASIDPoolCap w = (\v0 v1. w = ASIDPoolCap v0 v1)" + "isVCPUCap w = (\v. w = VCPUCap v)" + by (auto simp: isCap_defs split: capability.splits arch_capability.splits) + +lemma untyped_not_null [simp]: + "\ isUntypedCap NullCap" by (simp add: isCap_simps) + +text \Miscellaneous facts about low level constructs\ + +lemma projectKO_tcb: + "(projectKO_opt ko = Some t) = (ko = KOTCB t)" + by (cases ko) (auto simp: projectKO_opts_defs) + +lemma projectKO_cte: + "(projectKO_opt ko = Some t) = (ko = KOCTE t)" + by (cases ko) (auto simp: projectKO_opts_defs) + +lemma projectKO_ep: + "(projectKO_opt ko = Some t) = (ko = KOEndpoint t)" + by (cases ko) (auto simp: projectKO_opts_defs) + +lemma projectKO_ntfn: + "(projectKO_opt ko = Some t) = (ko = KONotification t)" + by (cases ko) (auto simp: projectKO_opts_defs) + +lemma projectKO_ASID: + "(projectKO_opt ko = Some t) = (ko = KOArch (KOASIDPool t))" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemma projectKO_PTE: + "(projectKO_opt ko = Some t) = (ko = KOArch (KOPTE t))" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemma projectKO_user_data: + "(projectKO_opt ko = Some (t :: user_data)) = (ko = KOUserData)" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemma projectKO_user_data_device: + "(projectKO_opt ko = Some (t :: user_data_device)) = (ko = KOUserDataDevice)" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemma projectKO_VCPU: + "(projectKO_opt ko = Some t) = (ko = KOArch (KOVCPU t))" + by (cases ko) + (auto simp: projectKO_opts_defs split: arch_kernel_object.splits) + +lemmas projectKOs[simp] = + projectKO_ntfn projectKO_ep projectKO_cte projectKO_tcb + projectKO_ASID projectKO_PTE projectKO_user_data projectKO_user_data_device projectKO_VCPU + projectKO_eq projectKO_eq2 + +lemma capAligned_epI: + "ep_at' p s \ capAligned (EndpointCap p a b c d e)" + apply (clarsimp simp: obj_at'_real_def capAligned_def + objBits_simps word_bits_def) + apply (drule ko_wp_at_norm) + apply clarsimp + apply (drule ko_wp_at_aligned) + apply (simp add: objBits_simps capUntypedPtr_def isCap_simps 
objBits_defs) + done + +lemma capAligned_ntfnI: + "ntfn_at' p s \ capAligned (NotificationCap p a b c)" + apply (clarsimp simp: obj_at'_real_def capAligned_def + objBits_simps word_bits_def capUntypedPtr_def isCap_simps) + apply (fastforce dest: ko_wp_at_norm + dest!: ko_wp_at_aligned simp: objBits_simps') + done + +lemma capAligned_tcbI: + "tcb_at' p s \ capAligned (ThreadCap p)" + apply (clarsimp simp: obj_at'_real_def capAligned_def + objBits_simps word_bits_def capUntypedPtr_def isCap_simps) + apply (fastforce dest: ko_wp_at_norm + dest!: ko_wp_at_aligned simp: objBits_simps') + done + +lemma capAligned_reply_tcbI: + "tcb_at' p s \ capAligned (ReplyCap p m r)" + apply (clarsimp simp: obj_at'_real_def capAligned_def + objBits_simps word_bits_def capUntypedPtr_def isCap_simps) + apply (fastforce dest: ko_wp_at_norm + dest!: ko_wp_at_aligned simp: objBits_simps') + done + +lemma ko_at_valid_objs': + assumes ko: "ko_at' k p s" + assumes vo: "valid_objs' s" + assumes k: "\ko. projectKO_opt ko = Some k \ injectKO k = ko" + shows "valid_obj' (injectKO k) s" using ko vo + by (clarsimp simp: valid_objs'_def obj_at'_def project_inject ranI) + +lemma obj_at_valid_objs': + "\ obj_at' P p s; valid_objs' s \ \ + \k. P k \ + ((\ko. projectKO_opt ko = Some k \ injectKO k = ko) + \ valid_obj' (injectKO k) s)" + apply (drule obj_at_ko_at') + apply clarsimp + apply (rule_tac x=ko in exI) + apply clarsimp + apply (erule (1) ko_at_valid_objs') + apply simp + done + +lemma tcb_in_valid_state': + "\ st_tcb_at' P t s; valid_objs' s \ \ \st. P st \ valid_tcb_state' st s" + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_valid_objs') + apply fastforce + apply (fastforce simp add: valid_obj'_def valid_tcb'_def) + done + +lemma getCurThread_corres: "corres (=) \ \ (gets cur_thread) getCurThread" + by (simp add: getCurThread_def curthread_relation) + +lemma gct_wp [wp]: "\\s. P (ksCurThread s) s\ getCurThread \P\" + by (unfold getCurThread_def, wp) + +lemma getIdleThread_corres: + "corres (=) \ \ (gets idle_thread) getIdleThread" + by (simp add: getIdleThread_def state_relation_def) + +lemma git_wp [wp]: "\\s. P (ksIdleThread s) s\ getIdleThread \P\" + by (unfold getIdleThread_def, wp) + +lemma gsa_wp [wp]: "\\s. P (ksSchedulerAction s) s\ getSchedulerAction \P\" + by (unfold getSchedulerAction_def, wp) + +text \Shorthand names for the relations between faults, errors and failures\ + +definition + fr :: "ExceptionTypes_A.fault \ Fault_H.fault \ bool" +where + fr_def[simp]: + "fr x y \ (y = fault_map x)" + +definition + ser :: "ExceptionTypes_A.syscall_error \ Fault_H.syscall_error \ bool" +where + ser_def[simp]: + "ser x y \ (y = syscall_error_map x)" + +definition + lfr :: "ExceptionTypes_A.lookup_failure \ Fault_H.lookup_failure \ bool" +where + lfr_def[simp]: + "lfr x y \ (y = lookup_failure_map x)" + +text \Correspondence and weakest precondition + rules for the "on failure" transformers\ + +lemma corres_injection: + assumes x: "t = injection_handler fn" + assumes y: "t' = injection_handler fn'" + assumes z: "\ft ft'. 
f' ft ft' \ f (fn ft) (fn' ft')" + shows "corres (f' \ r) P P' m m' + \ corres (f \ r) P P' (t m) (t' m')" + apply (simp add: injection_handler_def handleE'_def x y) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply assumption + apply (case_tac v, (clarsimp simp: z)+) + apply (rule wp_post_taut) + apply (rule wp_post_taut) + apply simp + apply simp + done + +lemma rethrowFailure_injection: + "rethrowFailure = injection_handler" + by (intro ext, simp add: rethrowFailure_def injection_handler_def o_def) + +lemma capFault_injection: + "capFaultOnFailure addr b = injection_handler (Fault_H.CapFault addr b)" + apply (rule ext) + apply (simp add: capFaultOnFailure_def rethrowFailure_injection) + done + +lemma lookupError_injection: + "lookupErrorOnFailure b = injection_handler (Fault_H.FailedLookup b)" + apply (rule ext) + apply (simp add: lookupErrorOnFailure_def rethrowFailure_injection) + done + +lemma corres_cap_fault: + "corres (lfr \ r) P P' f g \ + corres (fr \ r) P P' (cap_fault_on_failure addr b f) + (capFaultOnFailure addr b g)" + by (fastforce intro: corres_injection[where f'=lfr] + simp: cap_fault_injection capFault_injection) + +lemmas capFault_wp[wp] = injection_wp[OF capFault_injection] +lemmas capFault_wp_E[wp] = injection_wp_E[OF capFault_injection] + +lemmas capFault_bindE = injection_bindE[OF capFault_injection capFault_injection] + +lemmas capFault_liftE[simp] = injection_liftE[OF capFault_injection] + +lemma corres_lookup_error: + "\ corres (lfr \ r) P P' f g \ + \ corres (ser \ r) P P' (lookup_error_on_failure b f) (lookupErrorOnFailure b g)" + by (fastforce intro: corres_injection[where f'=lfr] + simp: lookup_error_injection lookupError_injection) + +lemmas lookupError_wp[wp] = injection_wp[OF lookupError_injection] +lemmas lookupError_wp_E[wp] = injection_wp_E[OF lookupError_injection] + +lemmas lookupError_bindE = injection_bindE[OF lookupError_injection lookupError_injection] + +lemmas lookupError_liftE[simp] = injection_liftE[OF lookupError_injection] + + +lemma unifyFailure_injection: + "unifyFailure = injection_handler (\x. ())" + by (rule ext, + simp add: unifyFailure_def injection_handler_def + rethrowFailure_def o_def) + +lemmas unifyFailure_injection_corres + = corres_injection [where f=dc, simplified, OF _ unifyFailure_injection] + +lemmas unifyFailure_discard + = unifyFailure_injection_corres [OF id_injection, simplified] + +lemmas unifyFailure_wp = injection_wp [OF unifyFailure_injection] + +lemmas unifyFailure_wp_E[wp] = injection_wp_E [OF unifyFailure_injection] + +lemmas corres_unify_failure = + corres_injection [OF unify_failure_injection unifyFailure_injection, rotated] + +lemma ignoreFailure_wp[wp_split]: + "\P\ v \\rv. Q ()\,\\rv. Q ()\ \ + \P\ ignoreFailure v \Q\" + by (simp add: ignoreFailure_def const_def) wp + +lemma ep'_cases_weak_wp: + assumes "\P_A\ a \Q\" + assumes "\q. \P_B\ b q \Q\" + assumes "\q. \P_C\ c q \Q\" + shows + "\P_A and P_B and P_C\ + case ts of + IdleEP \ a + | SendEP q \ b q + | RecvEP q \ c q \Q\" + apply (cases ts) + apply (simp, rule hoare_weaken_pre, rule assms, simp)+ + done + +lemma ntfn'_cases_weak_wp: + assumes "\P_A\ a \Q\" + assumes "\q. \P_B\ b q \Q\" + assumes "\bdg. \P_C\ c bdg \Q\" + shows + "\P_A and P_B and P_C\ + case ts of + IdleNtfn \ a + | WaitingNtfn q \ b q + | ActiveNtfn bdg \ c bdg \Q\" + apply (cases ts) + apply (simp, rule hoare_weaken_pre, rule assms, simp)+ + done + +lemma ko_at_imp_cte_wp_at': + fixes x :: cte + shows "\ ko_at' x ptr s \ \ cte_wp_at' (\cte. 
cte = x) ptr s" + apply (erule obj_atE') + apply (clarsimp simp: objBits_simps') + apply (erule cte_wp_at_cteI'; simp add: cte_level_bits_def) + done + +lemma modify_map_casesD: + "modify_map m p f p' = Some cte \ + (p \ p' \ m p' = Some cte) \ + (p = p' \ (\cap node. m p = Some (CTE cap node) \ f (CTE cap node) = cte))" + apply (simp add: modify_map_def split: if_split_asm) + apply clarsimp + apply (case_tac z) + apply auto + done + +lemma modify_map_casesE: + "\ modify_map m p f p' = Some cte; + \ p \ p'; m p' = Some cte \ \ P; + \cap node. \ p = p'; m p = Some (CTE cap node); cte = f (CTE cap node) \ \ P + \ \ P" + by (auto dest: modify_map_casesD) + + +lemma modify_map_cases: + "(modify_map m p f p' = Some cte) = + ((p \ p' \ m p' = Some cte) \ + (p = p' \ (\cap node. m p = Some (CTE cap node) \ f (CTE cap node) = cte)))" + apply (rule iffI) + apply (erule modify_map_casesD) + apply (clarsimp simp: modify_map_def) + done + + +lemma no_0_modify_map [simp]: + "no_0 (modify_map m p f) = no_0 m" + by (simp add: no_0_def modify_map_def) + + +lemma modify_map_0 [simp]: + "no_0 m \ modify_map m 0 f = m" + by (rule ext) (auto simp add: modify_map_def no_0_def) + + +lemma modify_map_exists: + "\cap node. m p = Some (CTE cap node) \ \cap' node'. modify_map m q f p = Some (CTE cap' node')" + apply clarsimp + apply (case_tac "f (CTE cap node)") + apply (cases "q=p") + apply (auto simp add: modify_map_cases) + done + + +lemma modify_map_exists_rev: + "modify_map m q f p = Some (CTE cap node) \ \cap' node'. m p = Some (CTE cap' node')" + apply (case_tac "f (CTE cap node)") + apply (cases "q=p") + apply (auto simp add: modify_map_cases) + done + + +lemma modify_map_if: + "(modify_map m p f p' = Some cte) = + (if p = p' + then \cap node. m p = Some (CTE cap node) \ f (CTE cap node) = cte + else \cap node. m p' = Some (CTE cap node) \ cte = CTE cap node)" + apply (cases cte) + apply (rule iffI) + apply (drule modify_map_casesD) + apply auto[1] + apply (auto simp: modify_map_def) + done + +lemma corres_empty_on_failure: + "corres ((\x y. r [] []) \ r) P P' m m' \ + corres r P P' (empty_on_failure m) (emptyOnFailure m')" + apply (simp add: empty_on_failure_def emptyOnFailure_def) + apply (rule corres_guard_imp) + apply (rule corres_split_catch) + apply assumption + apply (rule corres_trivial, simp) + apply wp+ + apply simp+ + done + + + +lemma emptyOnFailure_wp[wp]: + "\P\ m \Q\,\\rv. Q []\ \ \P\ emptyOnFailure m \Q\" + by (simp add: emptyOnFailure_def) wp + +lemma withoutPreemption_lift: + "\P\ f \Q\ \ \P\ withoutPreemption f \Q\, \E\" + by simp + +lemma withoutPreemption_R: + "\\\ withoutPreemption f -, \Q\" + by (wp withoutPreemption_lift) + +lemma ko_at_cte_ipcbuffer: + "ko_at' tcb p s \ cte_wp_at' (\x. x = tcbIPCBufferFrame tcb) (p + tcbIPCBufferSlot * 0x20) s" + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule (2) cte_wp_at_tcbI') + apply (fastforce simp add: tcb_cte_cases_def tcbIPCBufferSlot_def cteSizeBits_def) + apply simp + done + +lemma set_ep_arch': "\\s. P (ksArchState s)\ setEndpoint ntfn p \\_ s. P (ksArchState s)\" + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv|simp)+ + done + +lemma corres_const_on_failure: + "corres ((\_ _. 
r x y) \ r) P P' m m' \ + corres r P P' (const_on_failure x m) (constOnFailure y m')" + apply (simp add: const_on_failure_def constOnFailure_def) + apply (rule corres_guard_imp) + apply (rule corres_split_catch) + apply assumption + apply (rule corres_trivial, simp) + apply (clarsimp simp: const_def) + apply wp+ + apply simp+ + done + +lemma constOnFailure_wp : + "\P\ m \Q\, \\rv. Q n\ \ \P\ constOnFailure n m \Q\" + apply (simp add: constOnFailure_def const_def) + apply (wp|simp)+ + done + +end +end diff --git a/proof/refine/AARCH64/CNodeInv_R.thy b/proof/refine/AARCH64/CNodeInv_R.thy new file mode 100644 index 0000000000..f724a14ba8 --- /dev/null +++ b/proof/refine/AARCH64/CNodeInv_R.thy @@ -0,0 +1,9037 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Results about CNode Invocations, particularly the + recursive revoke and delete operations. +*) + +theory CNodeInv_R +imports Ipc_R Invocations_R +begin + +unbundle l4v_word_context + +context begin interpretation Arch . (*FIXME: arch_split*) + +primrec + valid_cnode_inv' :: "Invocations_H.cnode_invocation \ kernel_state \ bool" +where + "valid_cnode_inv' (Insert cap ptr ptr') = + (valid_cap' cap and + (\s. cte_wp_at' (is_derived' (ctes_of s) ptr cap \ cteCap) ptr s) and + cte_wp_at' (untyped_derived_eq cap \ cteCap) ptr and + cte_wp_at' (\c. cteCap c = NullCap) ptr' and (\s. ptr \ ptr') and + ex_cte_cap_to' ptr')" +| "valid_cnode_inv' (Move cap ptr ptr') = + (cte_wp_at' (\c. weak_derived' cap (cteCap c)) ptr and + cte_wp_at' (\c. isUntypedCap (cteCap c) \ (cteCap c) = cap) ptr and + cte_wp_at' (\c. cteCap c \ NullCap) ptr and valid_cap' cap and + cte_wp_at' (\c. cteCap c = NullCap) ptr' and ex_cte_cap_to' ptr')" +| "valid_cnode_inv' (Revoke ptr) = cte_at' ptr" +| "valid_cnode_inv' (Delete ptr) = cte_at' ptr" +| "valid_cnode_inv' (Rotate s_cap p_cap src pivot dest) = + (valid_cap' s_cap and valid_cap' p_cap and + cte_wp_at' (\c. weak_derived' s_cap (cteCap c)) src and + cte_wp_at' (\c. isUntypedCap (cteCap c) \ (cteCap c) = s_cap) src and + cte_wp_at' (\c. weak_derived' p_cap (cteCap c)) pivot and + cte_wp_at' (\c. isUntypedCap (cteCap c) \ (cteCap c) = p_cap) pivot and + K (src \ pivot \ pivot \ dest \ s_cap \ capability.NullCap \ + p_cap \ capability.NullCap) and + (\s. src \ dest \ cte_wp_at' (\c. cteCap c = NullCap) dest s) and + (\s. ex_cte_cap_to' pivot s \ ex_cte_cap_to' dest s))" +| "valid_cnode_inv' (SaveCaller slot) = + (ex_cte_cap_to' slot and cte_wp_at' (\c. cteCap c = NullCap) slot)" +| "valid_cnode_inv' (CancelBadgedSends cap) = + (valid_cap' cap and K (hasCancelSendRights cap))" + +lemma rightsFromWord_correspondence: + "rightsFromWord w = rights_mask_map (data_to_rights w)" + by (simp add: rightsFromWord_def rights_mask_map_def data_to_rights_def Let_def) + +primrec + cnodeinv_relation :: "Invocations_A.cnode_invocation \ Invocations_H.cnode_invocation \ bool" +where + "cnodeinv_relation (InsertCall c cp1 cp2) x = ( + \c'. cap_relation c c' \ (x = + Insert c' (cte_map cp1) (cte_map cp2)))" +| "cnodeinv_relation (MoveCall c cp1 cp2) x = ( + \c'. cap_relation c c' \ (x = + Move c' (cte_map cp1) (cte_map cp2)))" +| "cnodeinv_relation (RevokeCall cp) x = (x = + Revoke (cte_map cp))" +| "cnodeinv_relation (DeleteCall cp) x = (x = + Delete (cte_map cp))" +| "cnodeinv_relation (RotateCall sc pc src pvt dst) x = (\sc' pc'. 
+ cap_relation sc sc' \ cap_relation pc pc' \ + x = Rotate sc' pc' (cte_map src) (cte_map pvt) (cte_map dst))" +| "cnodeinv_relation (SaveCall p) x = (x = SaveCaller (cte_map p))" +| "cnodeinv_relation (CancelBadgedSendsCall c) x = (\c'. cap_relation c c' \ x = CancelBadgedSends c')" + + +lemma cap_relation_NullCap: + "cap_relation cap cap' \ + (update_cap_data P x cap = cap.NullCap) = (RetypeDecls_H.updateCapData P x cap' = capability.NullCap)" + apply (cases cap) + apply (simp_all add: Let_def mask_cap_def cap_rights_update_def update_cap_data_closedform + arch_update_cap_data_def word_bits_def updateCapData_def isCap_simps + split del: if_split) + apply simp + apply simp + apply (clarsimp simp: word_size word_size_def cnode_padding_bits_def cnode_guard_size_bits_def + cteRightsBits_def cteGuardBits_def) + apply (clarsimp simp: AARCH64_H.updateCapData_def isCap_simps split del: if_split) + done + +(* Sometimes I need something about the state. This is neater (IMHO) and req *) +lemma whenE_throwError_corres': + assumes P: "frel f f'" + assumes Q: "\s s'. \(s, s') \ state_relation; R s; R' s'\ \ P = P'" + assumes R: "\ P \ corres (frel \ rvr) Q Q' m m'" + shows "corres (frel \ rvr) (R and Q) (R' and Q') + (whenE P (throwError f ) >>=E (\_. m )) + (whenE P' (throwError f') >>=E (\_. m'))" + unfolding whenE_def + apply (rule corres_req) + apply (erule Q) + apply simp + apply simp + apply (cases P) + apply (simp add: P) + apply simp + apply (erule corres_guard_imp [OF R]) + apply simp + apply simp + done + +(* FIXME: move *) +lemma corres_split_liftM2: + assumes corr: "corres (\x y. r' x (f y)) P P' a c" + and r1: "\rv rv'. r' rv rv' \ corres r (R rv) (R' rv') (b rv) (d rv')" + and h1: "\Q\ a \R\" and h2: "\Q'\ c \\x. R' (f x)\" + shows "corres r (P and Q) (P' and Q') (a >>= b) (liftM f c >>= d)" + apply (rule corres_guard_imp) + apply (rule corres_split[OF _ _ h1]) + apply (simp add: o_def) + apply (rule corr) + apply (erule r1) + apply wp + apply (simp add: o_def) + apply (rule h2) + apply simp + apply simp + done + +lemma cap_relation_NullCapI: + "cap_relation c c' \ (c = cap.NullCap) = (c' = NullCap)" + by (cases c, auto) + +lemma isCNodeCap_CNodeCap: + "isCNodeCap (CNodeCap a b c d)" + by (simp add: isCap_simps) + +lemma get_cap_corres': + "cte_ptr' = cte_map cte_ptr \ + corres (\x y. cap_relation x (cteCap y)) (cte_at cte_ptr) + (pspace_aligned' and pspace_distinct') (get_cap cte_ptr) + (getCTE cte_ptr')" + by (simp add: get_cap_corres) + +lemma cnode_invok_case_cleanup: + "i \ {CNodeRevoke, CNodeDelete, CNodeCancelBadgedSends, CNodeRotate, CNodeSaveCaller} + \ (case i of CNodeRevoke \ P | CNodeDelete \ Q | CNodeCancelBadgedSends \ R + | CNodeRotate \ S | CNodeSaveCaller \ T + | _ \ U) = U" + by (simp split: gen_invocation_labels.split) + +lemma cancelSendRightsEq: + "cap_relation cap cap' \ hasCancelSendRights cap' = has_cancel_send_rights cap" + by (auto simp: hasCancelSendRights_def has_cancel_send_rights_def all_rights_def + vmrights_map_def + split: cap.splits bool.splits if_splits | + case_tac x)+ + +lemma decodeCNodeInvocation_corres: + "\ cap_relation (cap.CNodeCap w n list) cap'; list_all2 cap_relation cs cs'; + length list \ 64 \ \ + corres + (ser \ cnodeinv_relation) + (invs and cap_table_at n w and K (n \ 0) and (\s. \x \ set cs. s \ x)) (invs' and valid_cap' cap' and (\s. \x \ set cs'. 
s \' x)) + (decode_cnode_invocation (mi_label mi) args + (cap.CNodeCap w n list) cs) + (decodeCNodeInvocation (mi_label mi) args + cap' cs')" + apply (rule decode_cnode_cases2[where args=args and exs=cs and label="mi_label mi"]) + \ \Move / Insert\ + apply (clarsimp simp: list_all2_Cons1 decode_cnode_invocation_def + decodeCNodeInvocation_def split_def Let_def + unlessE_whenE isCNodeCap_CNodeCap + cnode_invok_case_cleanup + split del: if_split + cong: if_cong list.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (rule corres_splitEE) + apply (rule ensureEmptySlot_corres; simp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp(no_asm) add: liftE_bindE del: de_Morgan_conj split del: if_split) + apply (rule corres_split[OF get_cap_corres']) + apply (simp add: split_def) + apply (rule whenE_throwError_corres) + apply (simp add: lookup_failure_map_def) + apply auto[1] + apply (rule_tac r'="\a b. fst b = rights_mask_map (fst a) + \ snd b = fst (snd a) + \ snd (snd a) = (gen_invocation_type (mi_label mi) + \ {CNodeMove, CNodeMutate})" + in corres_splitEE) + apply (rule corres_trivial) + subgoal by (auto split: list.split gen_invocation_labels.split, + auto simp: returnOk_def all_rights_def + rightsFromWord_correspondence) + apply (rule_tac r'=cap_relation in corres_splitEE) + apply (simp add: returnOk_def del: imp_disjL) + apply (rule conjI[rotated], rule impI) + apply (rule deriveCap_corres) + apply (clarsimp simp: cap_relation_mask + cap_map_update_data + split: option.split) + apply clarsimp + apply (clarsimp simp: cap_map_update_data + split: option.split) + apply (rule corres_trivial) + subgoal by (auto simp add: whenE_def, auto simp add: returnOk_def) + apply (wp | wpc | simp(no_asm))+ + apply (wp hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift + hoare_vcg_all_lift_R hoare_vcg_all_lift lsfco_cte_at' hoare_drop_imps + | clarsimp)+ + subgoal by (auto elim!: valid_cnode_capI) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + \ \Revoke\ + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE del: ser_def split del: if_split) + apply (rule corres_guard_imp, rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp add: split_beta) + apply (rule corres_returnOkTT) + apply simp + apply wp+ + apply (auto elim!: valid_cnode_capI)[1] + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + \ \Delete\ + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE del: ser_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp add: split_beta) + apply (rule corres_returnOkTT) + apply simp + apply wp+ + apply (auto elim!: valid_cnode_capI)[1] + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + \ \SaveCall\ + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE del: ser_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp add: split_beta) + apply (rule corres_split_norE) + apply (rule ensureEmptySlot_corres) + apply simp + apply (rule corres_returnOkTT) + apply simp + apply (wp hoare_drop_imps)+ + apply (auto elim!: valid_cnode_capI)[1] + apply (clarsimp 
simp: invs'_def valid_state'_def valid_pspace'_def) + \ \CancelBadgedSends\ + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE del: ser_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (simp(no_asm) add: split_beta liftE_bindE) + apply (rule corres_split[OF get_cap_corres'], simp) + apply (rule corres_split_norE) + apply (simp add: cancelSendRightsEq) + apply (rule corres_trivial, auto simp add: whenE_def returnOk_def)[1] + apply (rule corres_trivial) + apply (clarsimp simp add: returnOk_def) + apply (wp get_cap_wp getCTE_wp | simp only: whenE_def | clarsimp)+ + apply (rule hoare_trivE_R[where P="\"]) + apply (simp add: cte_wp_at_ctes_of pred_conj_def cong: conj_cong) + apply (fastforce elim!: valid_cnode_capI simp: invs_def valid_state_def valid_pspace_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + \ \Rotate\ + apply (frule list_all2_lengthD) + apply (clarsimp simp: list_all2_Cons1) + apply (simp add: le_diff_conv2 split_def decode_cnode_invocation_def decodeCNodeInvocation_def + isCap_simps Let_def unlessE_whenE whenE_whenE_body + del: disj_not1 ser_def split del: if_split) + apply (rule corres_guard_imp, rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (rename_tac dest_slot destSlot) + apply (rule corres_splitEE, (rule lookupSlotForCNodeOp_corres; simp))+ + apply (rule_tac R = "\s. cte_at pivot_slot s \ cte_at dest_slot s + \ cte_at src_slot s \ invs s" in + whenE_throwError_corres' [where R' = \]) + apply simp + apply (elim conjE) + apply rule + apply fastforce + apply (erule disjE) + apply (clarsimp simp add: split_def) + apply (drule (2) cte_map_inj_eq, clarsimp+)[1] + apply (clarsimp simp add: split_def) + apply (drule (2) cte_map_inj_eq, clarsimp+)[1] + apply (rule corres_split_norE) + apply (rule_tac F = "(src_slot \ dest_slot) = (srcSlot \ destSlot)" + and P = "\s. 
cte_at src_slot s \ cte_at dest_slot s \ invs s" and P' = invs' in corres_req) + apply simp + apply rule + apply clarsimp + apply clarsimp + apply (drule (2) cte_map_inj_eq, clarsimp+)[1] + apply (rule corres_guard_imp) + apply (erule corres_whenE) + apply (rule ensureEmptySlot_corres) + apply clarsimp + apply simp + apply clarsimp + apply clarsimp + apply (simp add: liftE_bindE del: de_Morgan_conj disj_not1 split del: if_split) + apply (rule corres_split_liftM2, simp only: split_beta, rule get_cap_corres) + apply (rule whenE_throwError_corres) + apply (simp add: lookup_failure_map_def) + apply (erule cap_relation_NullCapI) + apply (rule corres_split_liftM2, simp only: split_beta, rule get_cap_corres) + apply (rule whenE_throwError_corres) + apply (simp add: lookup_failure_map_def) + apply (erule cap_relation_NullCapI) + apply (rule whenE_throwError_corres) + apply simp + apply (simp add: cap_relation_NullCap) + apply (rule corres_returnOkTT) + apply simp + apply (intro conjI) + apply (erule cap_map_update_data)+ + apply (wp hoare_drop_imps)+ + apply simp + apply (wp lsfco_cte_at' lookup_cap_valid lookup_cap_valid') + apply (simp add: if_apply_def2) + apply (wp hoare_drop_imps) + apply wp + apply simp + apply (wp lsfco_cte_at' lookup_cap_valid lookup_cap_valid' hoare_drop_imps + | simp add: if_apply_def2 del: de_Morgan_conj split del: if_split)+ + apply (auto elim!: valid_cnode_capI)[1] + apply (clarsimp dest!: list_all2_lengthD simp: invs'_def valid_state'_def valid_pspace'_def) + \ \Errors\ + apply (elim disjE) + apply (simp add: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap unlessE_whenE + split: list.split) + apply (clarsimp simp: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap unlessE_whenE) + apply (clarsimp simp: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap unlessE_whenE) + apply clarsimp + apply (elim disjE) + apply (clarsimp simp: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap split_def unlessE_whenE + cnode_invok_case_cleanup + split del: if_split cong: if_cong) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; simp) + apply (rule corres_trivial, clarsimp split: list.split_asm) + apply wp+ + apply (auto elim!: valid_cnode_capI)[1] + apply fastforce + apply (clarsimp simp: decode_cnode_invocation_def decodeCNodeInvocation_def + isCNodeCap_CNodeCap split_def unlessE_whenE + split del: if_split cong: if_cong) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF lookupSlotForCNodeOp_corres _ wp_post_tautE wp_post_tautE]) + apply simp + apply simp + apply (clarsimp simp: list_all2_Cons1 list_all2_Nil + split: list.split_asm split del: if_split) + apply (auto elim!: valid_cnode_capI)[1] + apply fastforce + done + +lemma capBadge_updateCapData_True: + "updateCapData True x c \ NullCap \ capBadge (updateCapData True x c) = capBadge c" + apply (simp add: updateCapData_def isCap_simps Let_def + split: if_split_asm split del: if_split) + apply (simp add: AARCH64_H.updateCapData_def) + done + +lemma badge_derived_updateCapData: + "\ updateCapData False x cap \ NullCap; badge_derived' cap cap' \ + \ badge_derived' (updateCapData False x cap) cap'" + by (simp add: badge_derived'_def updateCapData_Master + updateCapData_ordering) + +lemma deriveCap_Null_helper: + assumes "\P\ deriveCap x cap \\rv s. rv \ NullCap \ Q rv s\,-" + shows "\\s. cap \ NullCap \ P s\ deriveCap x cap \\rv s. 
rv \ NullCap \ Q rv s\,-" + apply (cases "cap = NullCap") + apply (simp add: deriveCap_def isCap_simps) + apply (wp | simp)+ + apply (rule hoare_post_imp_R, rule assms) + apply simp + done + +lemma hasCancelSendRights_not_Null: + "hasCancelSendRights cap \ isEndpointCap cap" + by (clarsimp simp: hasCancelSendRights_def isCap_simps split: capability.splits) + +declare if_split [split del] + +lemma untyped_derived_eq_maskCapRights: + "untyped_derived_eq (RetypeDecls_H.maskCapRights m cap) cap' + = untyped_derived_eq cap cap'" + apply (simp add: untyped_derived_eq_def) + apply (rule imp_cong) + apply (rule capMaster_isUntyped, simp) + apply (clarsimp simp: isCap_simps) + done + +lemma untyped_derived_eq_updateCapData: + "RetypeDecls_H.updateCapData x y cap \ NullCap + \ untyped_derived_eq (RetypeDecls_H.updateCapData x y cap) cap' + = untyped_derived_eq cap cap'" + apply (simp add: untyped_derived_eq_def) + apply (rule imp_cong) + apply (rule capMaster_isUntyped) + apply (erule updateCapData_Master) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: updateCapData_def isCap_simps) + done + +lemma untyped_derived_eq_refl: + "untyped_derived_eq c c" + by (simp add: untyped_derived_eq_def) + +lemma decodeCNodeInv_wf[wp]: + "\invs' and valid_cap' (CNodeCap w n w2 n2) + and (\s. \r\cte_refs' (CNodeCap w n w2 n2) (irq_node' s). + ex_cte_cap_to' r s) + and (\s. \cap \ set cs. s \' cap) + and (\s. \cap \ set cs. \r\cte_refs' cap (irq_node' s). ex_cte_cap_to' r s)\ + decodeCNodeInvocation label args + (CNodeCap w n w2 n2) cs + \valid_cnode_inv'\, -" + apply (rule decode_cnode_cases2[where label=label and args=args and exs=cs]) + \ \Move/Insert\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap + split_def cnode_invok_case_cleanup unlessE_whenE + cong: if_cong bool.case_cong list.case_cong) + apply (rule hoare_pre) + apply (wp whenE_throwError_wp) + apply (rule deriveCap_Null_helper) + apply (simp add: imp_conjR) + apply ((wp deriveCap_derived deriveCap_untyped_derived + | wp (once) hoare_drop_imps)+)[1] + apply (wp whenE_throwError_wp getCTE_wp | wpc | simp(no_asm))+ + apply (rule_tac Q'="\rv. invs' and cte_wp_at' (\cte. 
cteCap cte = NullCap) destSlot + and ex_cte_cap_to' destSlot" + in hoare_post_imp_R, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule invs_valid_objs') + apply (simp add: ctes_of_valid' valid_updateCapDataI + weak_derived_updateCapData capBadge_updateCapData_True + badge_derived_updateCapData + badge_derived_mask untyped_derived_eq_maskCapRights + untyped_derived_eq_updateCapData + untyped_derived_eq_refl) + apply (auto simp:isCap_simps updateCapData_def)[1] + apply (wp ensureEmptySlot_stronger | simp | wp (once) hoare_drop_imps)+ + \ \Revoke\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_whenE + cong: if_cong bool.case_cong list.case_cong) + apply (rule hoare_pre) + apply (wp lsfco_cte_at' | simp)+ + apply clarsimp + \ \Delete\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_whenE + cong: if_cong bool.case_cong list.case_cong) + apply (rule hoare_pre) + apply (wp lsfco_cte_at' | simp)+ + apply clarsimp + \ \SaveCaller\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_whenE) + apply (rule hoare_pre) + apply (wp lsfco_cte_at' | simp | wp (once) hoare_drop_imps)+ + \ \CancelBadgedSends\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_whenE) + apply (rule hoare_pre) + apply (wp whenE_throwError_wp getCTE_wp | simp)+ + apply (rule_tac Q'="\rv s. invs' s \ cte_wp_at' (\_. True) rv s" in hoare_post_imp_R) + apply (wp lsfco_cte_at') + apply (simp add: cte_wp_at_ctes_of imp_ex hasCancelSendRights_not_Null) + apply (clarsimp simp: ctes_of_valid' invs_valid_objs') + apply (simp add: invs_valid_objs') + \ \Rotate\ + apply (simp add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + unlessE_def) + apply (rule hoare_pre) + apply (wp whenE_throwError_wp getCTE_wp ensureEmptySlot_stronger + | simp add: o_def)+ + apply (rule_tac Q'="\rv s. cte_at' rv s \ cte_at' destSlot s + \ cte_at' srcSlot s \ ex_cte_cap_to' rv s + \ ex_cte_cap_to' destSlot s + \ invs' s" in hoare_post_imp_R) + apply (wp lsfco_cte_at') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule invs_valid_objs') + apply (simp add: weak_derived_updateCapData capBadge_updateCapData_True + valid_updateCapDataI ctes_of_valid') + apply (fastforce simp:isCap_simps updateCapData_def) + apply (wp lsfco_cte_at')+ + apply clarsimp + \ \Errors\ + apply (elim disjE exE conjE, + simp_all add: decodeCNodeInvocation_def isCNodeCap_CNodeCap + unlessE_whenE cnode_invok_case_cleanup + split: list.split_asm list.split) + by (auto simp: valid_def validE_def validE_R_def in_monad) + +lemma decodeCNodeInvocation_inv[wp]: + "\P\ decodeCNodeInvocation label args cap cs \\rv. 
P\" + apply (cases "\isCNodeCap cap") + apply (simp only: decodeCNodeInvocation_def Let_def split_def + fst_conv snd_conv, simp) + apply (rule decode_cnode_cases2[where label=label and args=args and exs=cs]) + apply (simp_all add: decodeCNodeInvocation_def isCNodeCap_CNodeCap split_def + Let_def whenE_def unlessE_def cnode_invok_case_cleanup + split del: if_split cong del: if_cong)[6] + apply (fold_subgoals (prefix))[6] + subgoal premises prems + by (safe intro!: hoare_pre[where P=P], + (wp hoare_drop_imps | simp | wpcw)+) + apply (elim disjE exE conjE, + simp_all add: decodeCNodeInvocation_def isCNodeCap_CNodeCap + cnode_invok_case_cleanup unlessE_whenE + split: list.split_asm split del: if_split) + apply (simp_all split: list.split add: unlessE_whenE) + apply safe + apply (wp | simp)+ + done + +text \Various proofs about the two recursive deletion operations. + These call out to various functions in Tcb and Ipc, and are + thus better proved here than in CSpace_R.\ + +text \Proving the termination of rec_del\ + +crunch typ_at[wp]: cancel_ipc "\s. P (typ_at T p s)" + (wp: crunch_wps hoare_vcg_if_splitE simp: crunch_simps) + +declare if_split [split] + +text \Proving desired properties about rec_del/cap_delete\ + +declare of_nat_power [simp del] + +text \Proving desired properties about recursiveDelete/cteDelete\ + +text \Proving the termination of finaliseSlot\ + +definition + not_recursive_ctes :: "kernel_state \ machine_word set" +where + "not_recursive_ctes s \ {ptr. \cap. cteCaps_of s ptr = Some cap + \ \ (isZombie cap \ capZombiePtr cap = ptr)}" + +lemma not_recursive_ctes_wu [simp]: + "not_recursive_ctes (ksWorkUnitsCompleted_update f s) = not_recursive_ctes s" + by (simp add: not_recursive_ctes_def) + +lemma not_recursive_ctes_irq_state_independent[simp, intro!]: + "not_recursive_ctes (s \ ksMachineState := ksMachineState s \ irq_state := x \\) = not_recursive_ctes s" + by (simp add: not_recursive_ctes_def) + +lemma capSwap_not_recursive: + "\\s. card (not_recursive_ctes s) \ n + \ cte_wp_at' (\cte. \ (isZombie (cteCap cte) \ capZombiePtr (cteCap cte) = p1)) p1 s + \ cte_wp_at' (\cte. isZombie (cteCap cte) \ capZombiePtr (cteCap cte) = p1) p2 s + \ p1 \ p2\ + capSwapForDelete p1 p2 + \\rv s. card (not_recursive_ctes s) < n\" + apply (simp add: not_recursive_ctes_def cteSwap_def capSwapForDelete_def) + apply (wp | simp add: o_def | rule getCTE_cteCap_wp)+ + apply (simp add: cte_wp_at_ctes_of modify_map_def cteCaps_of_def + cong: option.case_cong) + apply (elim conjE exE) + apply (simp cong: conj_cong) + apply (erule order_less_le_trans[rotated]) + apply (rule psubset_card_mono) + apply simp + apply (rule psubsetI) + apply clarsimp + apply (rule_tac f="\S. p1 \ S" in distinct_lemma) + apply simp + done + +lemma updateCap_not_recursive: + "\\s. card (not_recursive_ctes s) \ n + \ cte_wp_at' (\cte. isZombie (cteCap cte) \ capZombiePtr (cteCap cte) = ptr + \ isZombie cap \ capZombiePtr cap = ptr) + ptr s\ + updateCap ptr cap + \\rv s. card (not_recursive_ctes s) \ n\" + apply (simp add: not_recursive_ctes_def) + apply wp + apply clarsimp + apply (erule order_trans[rotated]) + apply (rule card_mono, simp) + apply clarsimp + apply (simp add: modify_map_def split: if_split_asm) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of) + done + +lemma suspend_ctes_of_thread: + "\\s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\ + suspend t + \\rv s. \node. 
ctes_of s x = Some (CTE (ThreadCap t) node)\" + apply (rule hoare_chain) + apply (rule suspend_cte_wp_at'[where P="(=) (ThreadCap t)" and p=x]) + apply (clarsimp simp add: finaliseCap_def Let_def isCap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, simp) + done + +lemma unbindNotification_ctes_of_thread: + "\\s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\ + unbindNotification t + \\rv s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\" + by wp + +lemma prepareThreadDelete_ctes_of_thread: + "\\s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\ + prepareThreadDelete t + \\rv s. \node. ctes_of s x = Some (CTE (ThreadCap t) node)\" + by (wpsimp simp: prepareThreadDelete_def fpuThreadDelete_def) + +lemma suspend_not_recursive_ctes: + "\\s. P (not_recursive_ctes s)\ + suspend t + \\rv s. P (not_recursive_ctes s)\" + apply (simp only: suspend_def not_recursive_ctes_def cteCaps_of_def updateRestartPC_def) + apply (wp threadSet_ctes_of | simp add: unless_def del: o_apply)+ + apply (fold cteCaps_of_def) + apply (wp cancelIPC_cteCaps_of) + apply (clarsimp elim!: rsubst[where P=P] intro!: set_eqI) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (auto simp: isCap_simps finaliseCap_def Let_def) + done + +lemma unbindNotification_not_recursive_ctes: + "\\s. P (not_recursive_ctes s)\ + unbindNotification t + \\rv s. P (not_recursive_ctes s)\" + apply (simp only: not_recursive_ctes_def cteCaps_of_def) + apply wp + done + +lemma prepareThreadDelete_not_recursive_ctes: + "\\s. P (not_recursive_ctes s)\ + prepareThreadDelete t + \\rv s. P (not_recursive_ctes s)\" + by (wpsimp simp: prepareThreadDelete_def not_recursive_ctes_def cteCaps_of_def fpuThreadDelete_def) + +definition + finaliseSlot_recset :: "((machine_word \ bool \ kernel_state) \ (machine_word \ bool \ kernel_state)) set" +where + "finaliseSlot_recset \ + wf_sum (\(slot, exposed, state). exposed) + (inv_image (less_than <*lex*> less_than) + (\(x, exp, s). case ctes_of s x of + Some (CTE NullCap node) \ (0, 0) + | Some (CTE (Zombie p zb n) node) \ + (if p = x then 1 else 2, n) + | _ \ (3, 0))) + (measure (\(x, exp, s). card (not_recursive_ctes s)))" + +lemma finaliseSlot_recset_wf: "wf finaliseSlot_recset" + unfolding finaliseSlot_recset_def + by (intro wf_sum_wf wf_rdcall_finalise_ord_lift wf_measure + wf_inv_image wf_lex_prod wf_less_than) + +lemma in_preempt': + "(Inr rv, s') \ fst (preemptionPoint s) \ + \f g. s' = ksWorkUnitsCompleted_update f + (s \ ksMachineState := ksMachineState s \ irq_state := g (irq_state (ksMachineState s)) \\)" + apply (simp add: preemptionPoint_def alternative_def in_monad + getActiveIRQ_def doMachineOp_def split_def + select_f_def select_def getWorkUnits_def setWorkUnits_def + modifyWorkUnits_def return_def returnOk_def + split: option.splits if_splits) + apply (erule disjE) + apply (cases "workUnitsLimit \ ksWorkUnitsCompleted s + 1", drule (1) mp, + rule exI[where x="\x. 0"], rule exI[where x=Suc], force, + rule exI[where x="\x. x + 1"], rule exI[where x=id], force)+ + apply (rule exI[where x="\x. 
x + 1"], rule exI[where x=id], force) + done + +lemma updateCap_implies_cte_at: + "(rv, s') \ fst (updateCap ptr cap s) + \ cte_at' ptr s" + apply (clarsimp simp: updateCap_def in_monad) + apply (frule in_inv_by_hoareD [OF getCTE_inv]) + apply (drule use_valid [OF _ getCTE_cte_wp_at], simp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma case_Zombie_assert_fold: + "(case cap of Zombie ptr zb n \ haskell_assertE (P ptr) str | _ \ returnOk ()) + = assertE (isZombie cap \ P (capZombiePtr cap))" + by (cases cap, simp_all add: isCap_simps assertE_def) + +termination finaliseSlot' + apply (rule finaliseSlot'.termination, + rule finaliseSlot_recset_wf) + apply (simp add: finaliseSlot_recset_def wf_sum_def) + apply (clarsimp simp: in_monad dest!: in_preempt') + apply (drule in_inv_by_hoareD [OF isFinalCapability_inv]) + apply (frule use_valid [OF _ getCTE_cte_wp_at, OF _ TrueI]) + apply (drule in_inv_by_hoareD [OF getCTE_inv]) + apply (clarsimp simp: in_monad split: if_split_asm) + apply (clarsimp simp: Let_def in_monad finaliseSlot_recset_def + wf_sum_def liftM_def + case_Zombie_assert_fold) + apply (frule use_valid [OF _ getCTE_cte_wp_at, OF _ TrueI]) + apply (drule in_inv_by_hoareD [OF getCTE_inv]) + apply clarsimp + apply (erule use_valid [OF _ capSwap_not_recursive]) + apply (simp add: cte_wp_at_ctes_of) + apply (frule updateCap_implies_cte_at) + apply (erule use_valid [OF _ hoare_vcg_conj_lift, + OF _ updateCap_not_recursive updateCap_ctes_of_wp]) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def) + apply (frule use_valid [OF _ finaliseCap_cases], simp) + apply (case_tac rv, simp) + apply (simp add: isCap_simps, elim conjE disjE exE) + apply simp + apply (clarsimp simp: finaliseCap_def Let_def isCap_simps in_monad + getThreadCSpaceRoot_def locateSlot_conv) + apply (frule(1) use_valid [OF _ unbindNotification_ctes_of_thread, OF _ exI]) + apply (frule(1) use_valid [OF _ suspend_ctes_of_thread]) + apply (frule(1) use_valid [OF _ prepareThreadDelete_ctes_of_thread]) + apply clarsimp + apply (erule use_valid [OF _ prepareThreadDelete_not_recursive_ctes]) + apply (erule use_valid [OF _ suspend_not_recursive_ctes]) + apply (erule use_valid [OF _ unbindNotification_not_recursive_ctes]) + apply simp + apply (clarsimp simp: finaliseCap_def Let_def isCap_simps in_monad) + apply (clarsimp simp: finaliseCap_def Let_def isCap_simps in_monad) + apply (clarsimp simp: in_monad Let_def locateSlot_conv + finaliseSlot_recset_def wf_sum_def + cte_wp_at_ctes_of cong: if_cong) + apply (clarsimp split: if_split_asm + simp: in_monad + dest!: in_getCTE) + apply (erule use_valid [OF _ updateCap_ctes_of_wp])+ + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def) + apply (case_tac ourCTE) + apply (rename_tac cap node) + apply (case_tac rv, simp) + apply (rename_tac cap' node') + apply (case_tac cap'; simp) + apply (erule use_valid [OF _ updateCap_ctes_of_wp])+ + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def) + apply (frule use_valid [OF _ finaliseCap_cases], simp) + apply (case_tac ourCTE, case_tac rv, + clarsimp simp: isCap_simps) + apply (elim disjE conjE exE, simp_all)[1] + apply (clarsimp simp: finaliseCap_def Let_def isCap_simps in_monad) + apply (frule use_valid [OF _ finaliseCap_cases], simp) + apply (case_tac rv, case_tac ourCTE) + apply (clarsimp simp: isCap_simps cte_wp_at_ctes_of) + apply (elim disjE conjE exE, simp_all)[1] + done + +lemmas finaliseSlot'_simps_ext = + finaliseSlot'.simps [THEN ext [where f="finaliseSlot' slot exp" for slot exp]] + +lemmas finalise_spec_induct = 
finaliseSlot'.induct[where P= + "\sl exp s. s \ \P sl exp\ finaliseSlot' sl exp \Q sl exp\,\E sl exp\" for P Q E] + +lemma finaliseSlot'_preservation: + assumes wp: + "\cap final. \P\ finaliseCap cap final False \\rv. P\" + "\sl opt. \P\ emptySlot sl opt \\rv. P\" + "\sl1 sl2. \P\ capSwapForDelete sl1 sl2 \\rv. P\" + "\sl cap. \P\ updateCap sl cap \\rv. P\" + "\f s. P (ksWorkUnitsCompleted_update f s) = P s" + assumes irq: "irq_state_independent_H P" + shows + "st \ \P\ finaliseSlot' slot exposed \\rv. P\, \\rv. P\" +proof (induct rule: finalise_spec_induct) + case (1 sl exp s) + show ?case + apply (rule hoare_pre_spec_validE) + apply (subst finaliseSlot'_simps_ext) + apply (simp only: split_def) + apply wp + apply (simp, wp wp) + apply (wp "1.hyps") + apply (unfold Let_def split_def fst_conv snd_conv + case_Zombie_assert_fold haskell_fail_def) + apply (wp wp preemptionPoint_inv| simp add: o_def irq)+ + apply (wp hoare_drop_imps) + apply (wp wp | simp)+ + apply (wp hoare_drop_imps | simp(no_asm))+ + apply (wp wp)[1] + apply (simp(no_asm)) + apply (rule "1.hyps", (assumption | rule refl)+) + apply (wp wp hoare_drop_imps isFinalCapability_inv + | simp add: locateSlot_conv)+ + done +qed + +lemmas finaliseSlot_preservation + = validE_valid [OF use_spec(2) [OF finaliseSlot'_preservation], + folded finaliseSlot_def] + +lemma cteDelete_preservation: + assumes wp: + "\cap final. \P\ finaliseCap cap final False \\rv. P\" + "\sl opt. \P\ emptySlot sl opt \\rv. P\" + "\sl1 sl2. \P\ capSwapForDelete sl1 sl2 \\rv. P\" + "\sl cap. \P\ updateCap sl cap \\rv. P\" + "\f s. P (ksWorkUnitsCompleted_update f s) = P s" + assumes irq: "irq_state_independent_H P" + shows + "\P\ cteDelete p e \\rv. P\" + apply (simp add: cteDelete_def whenE_def split_def) + apply (wp wp) + apply (simp only: simp_thms cases_simp) + apply (wpsimp wp: finaliseSlot_preservation wp simp: irq)+ + done + +crunch aligned'[wp]: capSwapForDelete pspace_aligned' +crunch distinct'[wp]: capSwapForDelete pspace_distinct' + +lemma cte_wp_at_ctes_ofI: + "\ cte_wp_at' ((=) cte) ptr s \ \ ctes_of s ptr = Some cte" + by (rule ctes_of_eq_cte_wp_at') + +declare modify_map_dom[simp] + +(* subsumes update_prev_next_trancl *) +lemma modify_map_next_trancl: + assumes nxt: "m \ x \\<^sup>+ y" + and inv: "\cte. mdbNext (cteMDBNode (f cte)) = mdbNext (cteMDBNode cte)" + shows "(modify_map m ptr f) \ x \\<^sup>+ y" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some cte) + let ?m = "m(ptr \ f cte)" + + from nxt have "?m \ x \\<^sup>+ y" + proof induct + case (base y) + thus ?case using Some inv r_into_trancl next_unfold' + by fastforce + next + case (step q r) + show ?case + proof (rule trancl_into_trancl) + show "?m \ q \ r" using step(2) Some inv + by (simp only: mdb_next_update, clarsimp simp: next_unfold') + qed fact+ + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + + +(* subsumes update_prev_next_trancl2 *) +lemma modify_map_next_trancl2: + assumes nxt: "(modify_map m ptr f) \ x \\<^sup>+ y" + and inv: "\cte. 
mdbNext (cteMDBNode (f cte)) = mdbNext (cteMDBNode cte)" + shows "m \ x \\<^sup>+ y" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some cte) + let ?m = "m(ptr \ f cte)" + + from nxt have "m \ x \\<^sup>+ y" + proof induct + case (base y) + thus ?case using Some inv + by (auto intro!: r_into_trancl + simp: modify_map_def mdb_next_update next_unfold' split: if_split_asm) + next + case (step q r) + show ?case + proof + show "m \ q \ r" using step(2) Some inv + by (auto simp: modify_map_def mdb_next_update next_unfold' split: if_split_asm) + qed fact+ + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma modify_map_next_trancl_iff: + assumes inv: "\cte. mdbNext (cteMDBNode (f cte)) = mdbNext (cteMDBNode cte)" + shows "(modify_map m ptr f) \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + using inv + by (auto intro: modify_map_next_trancl modify_map_next_trancl2) + +lemma mdb_chain_0_cap_update: + "mdb_chain_0 (modify_map ctemap ptr (cteCap_update f)) = + mdb_chain_0 ctemap" + unfolding mdb_chain_0_def + by (auto simp: modify_map_next_trancl_iff) + +lemma modify_map_dlist: + assumes nxt: "valid_dlist m" + and inv: "\cte. cteMDBNode (f cte) = cteMDBNode cte" + shows "valid_dlist (modify_map m ptr f)" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some ptrcte) + let ?m = "m(ptr \ f ptrcte)" + + have "valid_dlist ?m" + proof + fix p cte + assume cp: "?m p = Some cte" and n0: "mdbPrev (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'.(m(ptr \ f ptrcte)) (mdbPrev (cteMDBNode cte)) = Some cte' \ + mdbNext (cteMDBNode cte') = p" + + { + assume peq: "p = ptr" + + hence mdb: "cteMDBNode cte = cteMDBNode ptrcte" using cp Some + by (clarsimp simp: inv) + + hence "\cte'. m (mdbPrev (cteMDBNode cte)) = Some cte' \ mdbNext (cteMDBNode cte') = p" + using nxt Some n0 peq + by (auto elim: valid_dlistEp) + hence ?thesis using peq mdb cp Some + by (cases "ptr = mdbPrev (cteMDBNode cte)") simp_all + } moreover + { + assume pne: "p \ ptr" + hence ?thesis using cp Some nxt n0 + by (cases "(mdbPrev (cteMDBNode cte)) = ptr") (auto elim: valid_dlistEp simp: inv) + } + ultimately show ?thesis by (cases "p = ptr") auto + next + fix p cte + assume cp: "?m p = Some cte" and n0: "mdbNext (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'.(m(ptr \ f ptrcte)) (mdbNext (cteMDBNode cte)) = Some cte' \ + mdbPrev (cteMDBNode cte') = p" + + { + assume peq: "p = ptr" + + hence mdb: "cteMDBNode cte = cteMDBNode ptrcte" using cp Some + by (clarsimp simp: inv) + + hence "\cte'. m (mdbNext (cteMDBNode cte)) = Some cte' \ mdbPrev (cteMDBNode cte') = p" + using nxt Some n0 peq + by (auto elim: valid_dlistEn) + hence ?thesis using peq mdb cp Some + by (cases "ptr = mdbNext (cteMDBNode cte)") simp_all + } moreover + { + assume pne: "p \ ptr" + hence ?thesis using cp Some nxt n0 + by (cases "(mdbNext (cteMDBNode cte)) = ptr") (auto elim: valid_dlistEn simp: inv) + } + ultimately show ?thesis by (cases "p = ptr") auto + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma modify_map_dlist2: + assumes nxt: "valid_dlist (modify_map m ptr f)" + and inv: "\cte. 
cteMDBNode (f cte) = cteMDBNode cte" + shows "valid_dlist m" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some ptrcte) + let ?m = "modify_map m ptr f" + + have "valid_dlist m" + proof + fix p cte + assume cp: "m p = Some cte" and n0: "mdbPrev (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'. m (mdbPrev (cteMDBNode cte)) = Some cte' \ mdbNext (cteMDBNode cte') = p" + + { + assume peq: "p = ptr" + + hence mdb: "cteMDBNode cte = cteMDBNode ptrcte" using cp Some + by (clarsimp simp: inv) + + hence "\cte'. ?m (mdbPrev (cteMDBNode cte)) = Some cte' \ mdbNext (cteMDBNode cte') = p" + using nxt Some n0 peq + by (auto elim: valid_dlistEp [where p = ptr] simp: modify_map_same inv) + hence ?thesis using peq cp Some + by (cases "ptr = mdbPrev (cteMDBNode cte)") (clarsimp simp: inv modify_map_same modify_map_other)+ + } moreover + { + assume pne: "p \ ptr" + hence ?thesis using cp Some nxt n0 + by (cases "(mdbPrev (cteMDBNode cte)) = ptr") (auto elim!: valid_dlistEp simp: inv modify_map_apply) + } + ultimately show ?thesis by (cases "p = ptr") auto + next + fix p cte + assume cp: "m p = Some cte" and n0: "mdbNext (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'. m (mdbNext (cteMDBNode cte)) = Some cte' \ mdbPrev (cteMDBNode cte') = p" + + { + assume peq: "p = ptr" + + hence mdb: "cteMDBNode cte = cteMDBNode ptrcte" using cp Some + by (clarsimp simp: inv) + + hence "\cte'. ?m (mdbNext (cteMDBNode cte)) = Some cte' \ mdbPrev (cteMDBNode cte') = p" + using nxt Some n0 peq + by (auto elim: valid_dlistEn [where p = ptr] simp: modify_map_same inv) + hence ?thesis using peq cp Some + by (cases "ptr = mdbNext (cteMDBNode cte)") (clarsimp simp: inv modify_map_same modify_map_other)+ + } moreover + { + assume pne: "p \ ptr" + hence ?thesis using cp Some nxt n0 + by (cases "(mdbNext (cteMDBNode cte)) = ptr") (auto elim!: valid_dlistEn simp: inv modify_map_apply) + } + ultimately show ?thesis by (cases "p = ptr") auto + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma modify_map_dlist_iff: + assumes inv: "\cte. cteMDBNode (f cte) = cteMDBNode cte" + shows "valid_dlist (modify_map m ptr f) = valid_dlist m" + using inv + by (auto intro: modify_map_dlist modify_map_dlist2) + +lemma mdb_chain_0_modify_map_inv: + "\ mdb_chain_0 m; \cte. mdbNext (cteMDBNode (f cte)) = mdbNext (cteMDBNode cte) \ \ mdb_chain_0 (modify_map m ptr f)" + unfolding mdb_chain_0_def + by (auto simp: modify_map_next_trancl_iff) + +lemma mdb_chain_0_modify_map_replace: + assumes unf: "mdb_chain_0 (modify_map m p (cteMDBNode_update (mdbNext_update (%_. (mdbNext node)))))" + shows "mdb_chain_0 (modify_map m p (cteMDBNode_update (\m. node)))" +proof - + have "modify_map m p (cteMDBNode_update (\m. node)) = + modify_map (modify_map (modify_map (modify_map m p (cteMDBNode_update (mdbNext_update (%_. (mdbNext node))))) p + (cteMDBNode_update (mdbPrev_update (%_. (mdbPrev node))))) p + (cteMDBNode_update (mdbRevocable_update (%_. (mdbRevocable node))))) p + (cteMDBNode_update (mdbFirstBadged_update (%_. 
(mdbFirstBadged node))))" + apply (cases node) + apply (cases "m p") + apply (simp add: modify_map_None) + apply (case_tac a) + apply (rename_tac mdbnode) + apply (case_tac mdbnode) + apply (clarsimp simp add: next_update_is_modify [symmetric]) + done + + thus ?thesis + apply simp + apply (rule mdb_chain_0_modify_map_inv) + apply (rule mdb_chain_0_modify_map_inv) + apply (rule mdb_chain_0_modify_map_inv [OF unf]) + apply simp_all + done +qed + +lemmas mdb_chain_0_mm_rep_next = + mdb_chain_0_modify_map_replace [OF mdb_chain_0_modify_map_next] + +lemma setCTE_cte_wp_at_other: + "\cte_wp_at' P p and (\s. ptr \ p)\ + setCTE ptr cte + \\uu s. cte_wp_at' P p s\" + apply (simp add: cte_wp_at_ctes_of) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +(* CLAG from _next *) +lemma mdb_chain_0_modify_map_0: + assumes chain: "mdb_chain_0 m" + and no0: "no_0 m" + shows + "mdb_chain_0 (modify_map m ptr (cteMDBNode_update (mdbNext_update (%_. 0))))" + (is "mdb_chain_0 ?M") + unfolding mdb_chain_0_def +proof + fix x + assume "x \ dom ?M" + hence xd: "x \ dom m" + by (clarsimp simp: modify_map_def dom_def split: if_split_asm) + hence x0: "m \ x \\<^sup>+ 0" using chain unfolding mdb_chain_0_def by simp + + show "?M \ x \\<^sup>+ 0" + proof (cases "m ptr") + case None + thus ?thesis + by (simp add: modify_map_def) (rule x0) + next + case (Some cte) + show ?thesis + proof (cases "m \ x \\<^sup>* ptr") + case False + thus ?thesis + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule mdb_trancl_other_update [OF x0]) + done + next + case True + hence "?M \ x \\<^sup>* ptr" + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule next_rtrancl_tranclE) + apply simp + apply (rule trancl_into_rtrancl) + apply (erule no_loops_upd_last [OF mdb_chain_0_no_loops [OF chain no0]]) + done + moreover have "?M \ ptr \ 0" + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (simp add: mdb_next_update) + done + ultimately show ?thesis by simp + qed + qed +qed + +lemma no_0_lhs_tranclI: "\ no_0 m; dest \ 0 \ \ \ m \ 0 \\<^sup>* dest" + apply rule + apply (erule next_rtrancl_tranclE) + apply simp + apply (drule (1) no_0_lhs_trancl) + apply simp + done + +lemma no_next_prev_rtrancl: + assumes c0: "valid_mdb_ctes m" + and src: "m src = Some (CTE cap src_node)" + and "mdbPrev src_node \ 0" + shows "\ m \ mdbNext src_node \\<^sup>* mdbPrev src_node" +proof + assume asm: "m \ mdbNext src_node \\<^sup>* mdbPrev src_node" + + from c0 have n0: "no_0 m" .. + from c0 have chain: "mdb_chain_0 m" .. + + have "m \ src \\<^sup>+ mdbPrev src_node" + using src + by - (rule rtrancl_into_trancl2 [OF _ asm], clarsimp simp: next_unfold') + + moreover + from c0 have vd: "valid_dlist m" .. + have "m \ mdbPrev src_node \ src" by (rule prev_leadstoI [OF _ _ vd]) fact+ + ultimately have "m \ src \\<^sup>+ src" .. + thus False using mdb_chain_0_no_loops [OF chain n0] + by (simp add: no_loops_trancl_simp) +qed + +lemma ctes_of_strng: + "(\cte. ctes_of s ptr = Some cte \ P cte) + \ (\cte. cte_wp_at' ((=) cte) ptr s \ P cte)" + by (clarsimp simp: cte_wp_at_ctes_of) + +lemma updateCap_valid_cap [wp]: + "\valid_cap' cap\ updateCap ptr cap' \\r. 
valid_cap' cap\" + unfolding updateCap_def + by (wp setCTE_valid_cap getCTE_wp) (clarsimp dest!: cte_at_cte_wp_atD) + +lemma mdb_chain_0_trancl: + assumes chain: "mdb_chain_0 m" + and n0: "no_0 m" + and ab: "m \ a \\<^sup>+ b" + shows "m \ b \\<^sup>* 0" + using ab +proof induct + case (base y) + thus ?case using chain + by (clarsimp simp: next_unfold') (erule (1) mdb_chain_0_nextD) +next + case (step y z) + thus ?case using n0 + apply - + apply (erule next_rtrancl_tranclE) + apply (simp add: next_unfold') + apply (drule tranclD [where x = y]) + apply clarsimp + apply (drule (1) next_single_value) + apply simp + done +qed + +lemma mdb_chain_0_cases [consumes 3, case_names srcdest destsrc indep]: + assumes chain: "mdb_chain_0 m" + and no: "no_0 m" + and ds: "dest \ src" + and srcdest: "\ m \ src \\<^sup>+ dest; \ m \ dest \\<^sup>* src; m \ dest \\<^sup>* 0 \ \ R" + and destsrc: "\ m \ dest \\<^sup>+ src; \ m \ src \\<^sup>* dest; m \ src \\<^sup>* 0 \ \ R" + and neither: "\ \ m \ src \\<^sup>+ dest; \ m \ dest \\<^sup>+ src \ \ R" + shows "R" +proof (cases "m \ src \\<^sup>+ dest") + case True + + thus ?thesis + proof (rule srcdest) + show "\ m \ dest \\<^sup>* src" by (rule no_loops_tranclE [OF mdb_chain_0_no_loops]) fact+ + + show "m \ dest \\<^sup>* 0" + by (rule mdb_chain_0_trancl) fact+ + qed +next + case False + + note F = False + + show ?thesis + proof (cases "m \ dest \\<^sup>+ src") + case True + thus ?thesis + proof (rule destsrc) + show "\ m \ src \\<^sup>* dest" using False ds + by (clarsimp elim!: next_rtrancl_tranclE) + show "m \ src \\<^sup>* 0" + by (rule mdb_chain_0_trancl) fact+ + qed + next + case False + with F show ?thesis + by (rule neither) + qed +qed + +lemma next_fold: + "\ m a = Some cte; mdbNext (cteMDBNode cte) = b\ \ m \ a \ b" + by (clarsimp simp: next_unfold') + + +lemma cteMDBNode_update_comp [simp]: + "(cteMDBNode_update f \ cteMDBNode_update g) = cteMDBNode_update (f \ g)" + by rule (case_tac x, simp) + +lemma modify_map_lhs_trancl: + "\ m p = Some cte; \ m \ mdbNext (cteMDBNode (f cte)) \\<^sup>* p \ \ + modify_map m p f \ p \\<^sup>+ x = m \ mdbNext (cteMDBNode (f cte)) \\<^sup>* x" + by (clarsimp simp: next_update_is_modify [symmetric] intro!: next_update_lhs_trancl) + +lemma modify_map_lhs_rtrancl: + "\ m p = Some cte; \ m \ mdbNext (cteMDBNode (f cte)) \\<^sup>* p \ \ + modify_map m p f \ p \\<^sup>* x = (x = p \ m \ mdbNext (cteMDBNode (f cte)) \\<^sup>* x)" + apply rule + apply (erule next_rtrancl_tranclE) + apply simp + apply (drule (2) iffD1 [OF modify_map_lhs_trancl]) + apply simp + apply (erule disjE) + apply simp + apply (drule (2) iffD2 [OF modify_map_lhs_trancl]) + apply (erule trancl_into_rtrancl) + done + +lemma next_prev: + assumes cte: "m p = Some cte" + and vd: "valid_dlist m" + and no0: "no_0 m" + and nxt: "m \ q \ p" + shows "q = mdbPrev (cteMDBNode cte)" +proof - + from no0 have p0: "p \ 0" using cte unfolding no_0_def + by - (rule, clarsimp) + + thus ?thesis + using nxt vd cte + apply - + apply (simp add: next_unfold') + apply (erule exE conjE)+ + apply (erule (1) valid_dlistEn, fastforce) + apply simp + done +qed + +declare modify_map_ndom[simp] + +lemma mdb_trancl_other_update_iff: + "\ m \ x \\<^sup>* p \ m(p \ cte) \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + by (auto intro: mdb_trancl_other_update mdb_trancl_update_other) + + + +lemma modify_map_trancl_other_iff: + "\ m \ x \\<^sup>* p \ modify_map m p f \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + apply - + apply (cases "m p") + apply (simp add: modify_map_None) + apply (subst 
next_update_is_modify [symmetric]) + apply assumption + apply simp + apply (erule mdb_trancl_other_update_iff) + done + +lemma next_modify_map_trancl_last: + assumes chain: "mdb_chain_0 m" + and no0: "no_0 m" + and nxt: "m \ x \\<^sup>+ p" + shows "modify_map m p f \ x \\<^sup>+ p" +proof - + note noloop = mdb_chain_0_no_loops [OF chain no0] + + from noloop nxt have xp: "x \ p" + by (clarsimp dest!: neg_no_loopsI) + + from nxt show ?thesis using xp + proof (induct rule: converse_trancl_induct') + case (base y) + hence "modify_map m p f \ y \ p" + by (clarsimp simp: next_unfold' modify_map_other) + + thus ?case .. + next + case (step y z) + + from noloop step have xp: "z \ p" + by (clarsimp dest!: neg_no_loopsI) + + hence "modify_map m p f \ y \ z" using step + by (clarsimp simp: next_unfold' modify_map_other) + moreover from xp have "modify_map m p f \ z \\<^sup>+ p" using step.hyps by simp + ultimately show ?case by (rule trancl_into_trancl2) + qed +qed + +lemma next_modify_map_trancl_last2: + assumes chain: "mdb_chain_0 (modify_map m p f)" + and no0: "no_0 m" + and nxt: "modify_map m p f \ x \\<^sup>+ p" + shows "m \ x \\<^sup>+ p" +proof - + let ?m = "modify_map m p f" + have no0': "no_0 ?m" using no0 by simp + note noloop = mdb_chain_0_no_loops [OF chain no0'] + + from noloop nxt have xp: "x \ p" + by (clarsimp dest!: neg_no_loopsI) + + from nxt show ?thesis using xp + proof (induct rule: converse_trancl_induct') + case (base y) + hence "m \ y \ p" + by (clarsimp simp: next_unfold' modify_map_other) + + thus ?case .. + next + case (step y z) + + from noloop step have xp: "z \ p" + by (clarsimp dest!: neg_no_loopsI) + + hence "m \ y \ z" using step + by (clarsimp simp: next_unfold' modify_map_other) + moreover from xp have "m \ z \\<^sup>+ p" using step.hyps by simp + ultimately show ?case by (rule trancl_into_trancl2) + qed +qed + +lemma next_modify_map_trancl_last_iff: + assumes c1: "mdb_chain_0 m" + and chain: "mdb_chain_0 (modify_map m p f)" + and no0: "no_0 m" + shows "modify_map m p f \ x \\<^sup>+ p = m \ x \\<^sup>+ p" + using c1 chain no0 + by (auto intro: next_modify_map_trancl_last next_modify_map_trancl_last2) + +lemma next_modify_map_last: + shows "x \ p \ modify_map m p f \ x \ p = m \ x \ p" + by (clarsimp simp: next_unfold' modify_map_other) + +lemma next_rtrancl_nx: + assumes node: "m ptr = Some (CTE cap node)" + and nl: "m \ ptr \\<^sup>+ ptr'" + shows "m \ mdbNext node \\<^sup>* ptr'" + using nl node + by (clarsimp dest!: tranclD elim!: next_rtrancl_tranclE simp: next_unfold') + +lemma next_trancl_nx: + assumes node: "m ptr = Some (CTE cap node)" + and nl: "m \ ptr \\<^sup>+ ptr'" + and neq: "mdbNext node \ ptr'" + shows "m \ mdbNext node \\<^sup>+ ptr'" + using nl node neq + by (clarsimp dest!: tranclD elim!: next_rtrancl_tranclE simp: next_unfold') + +lemma next_rtrancl_xp: + assumes node: "m ptr' = Some (CTE cap node)" + and vd: "valid_dlist m" + and no0: "no_0 m" + and nl: "m \ ptr \\<^sup>+ ptr'" + shows "m \ ptr \\<^sup>* mdbPrev node" + using nl node + apply - + apply (drule tranclD2) + apply clarsimp + apply (drule (1) next_prev [OF _ vd no0]) + apply simp + done + +lemma next_trancl_xp: + assumes node: "m ptr' = Some (CTE cap node)" + and vd: "valid_dlist m" + and no0: "no_0 m" + and neq: "mdbPrev node \ ptr" + and nl: "m \ ptr \\<^sup>+ ptr'" + shows "m \ ptr \\<^sup>+ mdbPrev node" + using neq node nl + apply - + apply (drule (1) next_rtrancl_xp [OF _ vd no0]) + apply (erule next_rtrancl_tranclE) + apply simp + apply simp + done + +lemma next_trancl_np: + 
assumes node: "m ptr = Some (CTE cap node)" + and node': "m ptr' = Some (CTE cap' node')" + and vd: "valid_dlist m" + and no0: "no_0 m" + and neq: "mdbPrev node' \ ptr" + and neq': "mdbNext node \ mdbPrev node'" + and nl: "m \ ptr \\<^sup>+ ptr'" + shows "m \ mdbNext node \\<^sup>+ mdbPrev node'" + by (rule next_trancl_nx [OF _ next_trancl_xp]) fact+ + +lemma neg_next_trancl_nx: + assumes node: "m ptr = Some (CTE cap node)" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ mdbNext node \\<^sup>+ ptr'" + using nl +proof (rule contrapos_nn) + assume "m \ mdbNext node \\<^sup>+ ptr'" + show "m \ ptr \\<^sup>+ ptr'" + proof (rule trancl_into_trancl2) + show "m \ ptr \ mdbNext node" using node by (rule next_fold, simp) + qed fact+ +qed + +lemma neg_next_rtrancl_nx: + assumes node: "m ptr = Some (CTE cap node)" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ mdbNext node \\<^sup>* ptr'" + using nl +proof (rule contrapos_nn) + assume "m \ mdbNext node \\<^sup>* ptr'" + show "m \ ptr \\<^sup>+ ptr'" + proof (rule rtrancl_into_trancl2) + show "m \ ptr \ mdbNext node" using node by (rule next_fold, simp) + qed fact+ +qed + +lemma dom_into_not0 [intro?]: + "\ no_0 m; p \ dom m \ \ p \ 0" + by (rule, clarsimp) + +lemma neg_next_trancl_xp: + assumes node: "m ptr' = Some (CTE cap node)" + and dom: "mdbPrev node \ dom m" + and no0: "no_0 m" + and vd: "valid_dlist m" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ ptr \\<^sup>+ mdbPrev node" + using nl +proof (rule contrapos_nn) + assume "m \ ptr \\<^sup>+ mdbPrev node" + + show "m \ ptr \\<^sup>+ ptr'" + proof (rule trancl_into_trancl) + have "mdbPrev node \ 0" using assms by auto + thus "m \ mdbPrev node \ ptr'" using vd node + apply - + apply (erule (1) valid_dlistEp) + apply simp + apply (rule next_fold) + apply simp + apply simp + done + qed fact+ +qed + +lemma neg_next_trancl_np: + assumes node: "m ptr = Some (CTE cap node)" + and node': "m ptr' = Some (CTE cap' node')" + and dom: "mdbPrev node' \ dom m" + and no0: "no_0 m" + and vd: "valid_dlist m" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ mdbNext node \\<^sup>+ mdbPrev node'" + by (rule neg_next_trancl_nx [OF _ neg_next_trancl_xp]) fact+ + +lemma neg_next_rtrancl_np: + assumes node: "m ptr = Some (CTE cap node)" + and node': "m ptr' = Some (CTE cap' node')" + and dom: "mdbPrev node' \ dom m" + and no0: "no_0 m" + and vd: "valid_dlist m" + and nl: "\ m \ ptr \\<^sup>+ ptr'" + shows "\ m \ mdbNext node \\<^sup>* mdbPrev node'" + by (rule neg_next_rtrancl_nx [OF _ neg_next_trancl_xp]) fact+ + +lemma neg_next_trancl_trancl: + assumes nxt: "m \ a \\<^sup>* a'" + and ab: "\ m \ b \\<^sup>* a'" + and nl: "\ m \ a' \\<^sup>* b" + shows "\ m \ a \\<^sup>+ b" + using nl nxt + apply - + apply (erule contrapos_nn) + apply (erule next_rtrancl_tranclE) + apply simp + apply (erule (1) next_trancl_split_tt [OF _ _ ab]) + done + +declare domE[elim?] + +lemma ndom_is_0D: + "\ mdbNext node \ dom m; mdb_chain_0 m; no_0 m; m ptr = Some (CTE cap node) \ + \ mdbNext node = 0" + apply - + apply (frule (1) mdb_chain_0_nextD) + apply simp + apply (erule next_rtrancl_tranclE) + apply simp + apply (drule tranclD) + apply (clarsimp simp: next_unfold') + done + +end + +(* almost exactly 1000 lines --- yuck. There is a lot of redundancy here, but I doubt it is worth + exploiting above the cut'n'paste already here. + *) + +lemma (in mdb_swap) cteSwap_chain: + "mdb_chain_0 n" +proof - + have chain: "mdb_chain_0 m" using valid .. 
+ + let ?m = "(modify_map + (modify_map + (modify_map + (modify_map (modify_map m (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) + (mdbNext src_node) (cteMDBNode_update (mdbPrev_update (%_. dest)))) + src (cteMDBNode_update (\m. dest2_node))) + dest (cteMDBNode_update (\m. src_node))) + (mdbPrev dest2_node) (cteMDBNode_update (mdbNext_update (%_. src))))" + + let ?n' = "modify_map m src (cteMDBNode_update (mdbNext_update (%_. (mdbNext dest_node))))" + + have [simp]: "src \ dom m" by (rule domI, rule src) + have [simp]: "dest \ dom m" by (rule domI, rule dest) + + have dn: "m \ dest \ mdbNext dest_node" using dest by (rule next_fold, simp) + + have dp: "mdbPrev dest_node \ dom m + \ m \ mdbPrev dest_node \ dest" + proof - + assume "mdbPrev dest_node \ dom m" + hence "mdbPrev dest_node \ 0" using no_0 by - (rule, clarsimp) + thus ?thesis using dest + apply - + apply (clarsimp dest!: dest_prev [where p = "mdbPrev dest_node", simplified]) + apply (erule next_fold) + apply simp + done + qed + + have [simp]: "\ m \ dest \\<^sup>+ dest" + using mdb_chain_0_no_loops [OF chain no_0] + by (simp add: no_loops_trancl_simp) + + have [simp]: "\ m \ src \\<^sup>+ src" + using mdb_chain_0_no_loops [OF chain no_0] + by (simp add: no_loops_trancl_simp) + + have [simp]: "\ m \ mdbNext src_node \\<^sup>* src" + by (rule neg_next_rtrancl_nx, rule src, simp) + + + have sn: "mdbPrev src_node \ dom m + \ m \ mdbPrev src_node \ src" + proof - + assume "mdbPrev src_node \ dom m" + hence "mdbPrev src_node \ 0" using no_0 by - (rule, clarsimp) + thus ?thesis using src + apply - + apply (clarsimp dest!: src_prev [where p = "mdbPrev src_node", simplified]) + apply (erule next_fold) + apply simp + done + qed + + from chain no_0 neq [symmetric] + have "mdb_chain_0 ?m" + proof (cases rule: mdb_chain_0_cases) + case srcdest + + note [simp] = neg_rtrancl_into_trancl [OF srcdest(2)] + note [simp] = srcdest(2) + + have dsneq: "dest \ mdbPrev src_node" + proof + assume "dest = mdbPrev src_node" + hence "m \ dest \\<^sup>* src" + by - (rule r_into_rtrancl, rule next_fold [where m = m, OF dest], simp) + + thus False using srcdest by simp + qed + + from dest have n1 [simp]:"\ m \ mdbNext dest_node \\<^sup>* src" + by (rule neg_next_rtrancl_nx [OF _ neg_rtrancl_into_trancl]) fact+ + + have chain_n': "mdb_chain_0 ?n'" + proof (cases "mdbNext dest_node \ dom m") + case True + thus ?thesis using n1 + by (rule mdb_chain_0_modify_map_next [OF chain no_0]) + next + case False + thus ?thesis using dest chain no_0 + by - (drule (3) ndom_is_0D, simp, erule (1) mdb_chain_0_modify_map_0) + qed + + from dest src + have n4: "mdbPrev src_node \ dom m \ \ m \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + using neg_next_rtrancl_np [OF _ _ _ no_0 dlist neg_rtrancl_into_trancl] + by auto + + hence n2 [simp]: "\ ?n' \ src \\<^sup>* dest" + using dn src + by (auto dest: rtrancl_into_trancl2 simp: modify_map_lhs_rtrancl) + + hence n3: "mdbPrev src_node \ dom m + \ \ modify_map ?n' dest (cteMDBNode_update (mdbNext_update (%_. 
src))) \ dest \\<^sup>* mdbPrev src_node" + using dest dsneq src n1 + by (simp add: modify_map_lhs_rtrancl modify_map_app) (rule n4) + + from srcdest(1) + show ?thesis + proof (cases rule: tranclE2') + case base + hence ds: "dest = mdbNext src_node" by (clarsimp simp: next_unfold' src) + hence d2: "dest2_node = MDB (mdbNext dest_node) dest (mdbRevocable dest_node) (mdbFirstBadged dest_node)" + using dsneq + unfolding dest2_node_def by clarsimp + + let ?m' = "(modify_map + (modify_map ?n' dest (cteMDBNode_update (mdbNext_update (%_. src)))) + (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest))))" + + let ?goal = "mdb_chain_0 ?m'" + { + assume d1: "mdbPrev src_node \ dom m" and d2: "mdbNext dest_node \ dom m" + hence ?goal + apply (intro mdb_chain_0_modify_map_next) + apply (auto simp: no_0 chain n1 n2 n3[OF d1]) + done + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and "mdbNext dest_node \ dom m" + hence ?goal + by simp ((rule mdb_chain_0_modify_map_next)+, simp_all add: no_0 chain n1 n2) + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and "mdbNext dest_node \ dom m" + hence m0: "mdbNext dest_node = 0" + by (clarsimp dest!: dest_next [where p = "mdbNext dest_node", simplified]) + + have ?goal using chain_n' d1 src dest + apply - + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_next [OF chain_n']) + apply (simp_all add: no_0 chain n1 n2 n3 [OF d1]) + done + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and "mdbNext dest_node \ dom m" + hence m0: "mdbNext dest_node = 0" + by (clarsimp dest!: dest_next [where p = "mdbNext dest_node", simplified]) + + have ?goal using d1 chain_n' + apply simp + apply (rule mdb_chain_0_modify_map_next) + apply (simp_all add: no_0 chain n1 n2) + done + } + ultimately have ?goal + apply (cases "mdbPrev src_node \ dom m") + apply (cases "mdbNext dest_node \ dom m") + apply (auto)[2] + apply (cases "mdbNext dest_node \ dom m") + apply auto + done + + thus ?thesis using ds [symmetric] d2 neqs dsneq + apply simp + apply (subst modify_map_addr_com [OF neqs(2)]) + apply (subst modify_map_comp [symmetric]) + apply (subst modify_map_comp [symmetric]) + apply (simp) + apply (simp add: o_def) + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (subst modify_map_addr_com [where x = src]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (subst modify_map_addr_com [OF dsneq [symmetric]]) + apply (subst modify_map_addr_com [where y = src], simp)+ + apply assumption + done + next + case (trancl c) + hence dsneq': "dest \ mdbNext src_node" using src + apply - + apply rule + apply simp + apply (drule next_fold) + apply simp + apply (drule (1) next_single_value) + apply simp + done + + hence d2n: "dest2_node = dest_node" + unfolding dest2_node_def + by (cases dest_node, simp add: dsneq) + + from trancl obtain d where dnext: "m \ d \ dest" and ncd: "m \ c \\<^sup>* d" + by (clarsimp dest!: tranclD2) + + have ddest: "d = mdbPrev (cteMDBNode (CTE dest_cap dest_node))" + using dest dlist no_0 dnext + by (rule next_prev) + + hence d2: "mdbPrev dest_node \ dom m" using dnext + by (clarsimp simp: next_unfold') + + have dnz: "mdbPrev dest_node \ 0" + by (rule dom_into_not0 [OF no_0 d2]) + + have n5 [simp]: "\ ?n' \ src \\<^sup>* mdbPrev dest_node" + proof - + have "src \ mdbPrev dest_node" + by (simp add: dsneq' [symmetric]) + hence "?n' \ mdbPrev dest_node \ dest" using dp [OF d2] + by (clarsimp simp: next_unfold' modify_map_other) + thus ?thesis using n2 + by - (erule 
contrapos_nn, erule (1) rtrancl_into_rtrancl) + qed + + let ?n2 = "modify_map ?n' (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. src)))" + have chain_n2: "mdb_chain_0 ?n2" + by ((rule chain_n' | rule mdb_chain_0_modify_map_next)+, simp_all add: no_0) + + have r [simp]: "\ m \ mdbNext dest_node \\<^sup>* mdbPrev dest_node" + by (rule neg_next_rtrancl_np [OF _ _ d2 no_0 dlist], rule dest, rule dest, simp) + + have r3 [simp]: "\ m \ mdbNext dest_node \\<^sup>* src" + by (rule neg_next_rtrancl_nx, rule dest, simp) + + have r4 [simp]: "\ m \ dest \\<^sup>+ mdbPrev dest_node" + by (rule neg_next_trancl_xp [OF _ d2 no_0 dlist], rule dest, simp) + + let ?m'' = + "(modify_map (modify_map + (modify_map ?n' (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. src)))) + (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) + dest (cteMDBNode_update (mdbNext_update (%_. (mdbNext src_node)))))" + + have n2_2 [simp]: + "?n2 \ mdbNext src_node \\<^sup>* mdbPrev dest_node" + apply (cases "mdbNext src_node = mdbPrev dest_node") + apply simp + apply (rule trancl_into_rtrancl) + apply (rule next_modify_map_trancl_last [OF chain_n'], simp add: no_0) + apply (subst modify_map_trancl_other_iff) + apply simp + apply (rule next_trancl_np [OF _ _ dlist no_0]) + apply (rule src, rule dest) + apply (simp add: dsneq' [symmetric]) + apply assumption + apply (rule srcdest(1)) + done + + hence n2_3 [simp]: "\ ?n2 \ mdbNext src_node \\<^sup>+ dest" + proof (rule neg_next_trancl_trancl) + show "\ ?n2 \ dest \\<^sup>* mdbPrev dest_node" + apply (rule neg_rtranclI) + apply simp + apply (subst next_modify_map_trancl_last_iff [OF chain_n' chain_n2]) + apply (simp add: no_0) + apply (simp add: modify_map_trancl_other_iff) + done + + show "\ ?n2 \ mdbPrev dest_node \\<^sup>* dest" using d2 + by (clarsimp simp: modify_map_lhs_rtrancl modify_map_other dsneq' [symmetric]) + qed + + have r5 [simp]: "mdbPrev src_node \ dom m \ \ m \ dest \\<^sup>+ mdbPrev src_node" + by (rule neg_next_trancl_xp [OF _ _ no_0 dlist], rule src, simp_all) + + have n2_4 [simp]: + "mdbPrev src_node \ dom m \ \ ?n2 \ dest \\<^sup>* mdbPrev src_node" + apply - + apply (rule neg_rtranclI [OF dsneq]) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply (simp_all add: modify_map_trancl_other_iff) + done + + let ?goal = "mdb_chain_0 ?m''" + { + assume d1: "mdbPrev src_node \ dom m" and d3: "mdbNext src_node \ dom m" + + have r2 [simp]: "\ m \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + using dest src + by (rule neg_next_rtrancl_np [OF _ _ _ no_0 dlist neg_rtrancl_into_trancl]) fact+ + + have ?goal + proof ((rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + + have n2_1: + "\ ?n2 \ mdbPrev dest_node \\<^sup>* mdbPrev src_node" using d2 dsneq' [symmetric] + apply - + apply (erule domE) + apply (subst modify_map_lhs_rtrancl) + apply (clarsimp simp: modify_map_other) + apply simp + apply simp + apply (simp add: dom_into_not0 [OF no_0 d2]) + apply (subst modify_map_lhs_rtrancl, rule src) + apply simp + apply (simp) + done + + have "\ ?n' \ mdbPrev src_node \\<^sup>+ mdbPrev dest_node" + apply (rule neg_next_rtrancl_trancl [where y = src]) + apply (subst modify_map_lhs_rtrancl) + apply (rule src) + apply simp + apply (simp add: dsneq' [symmetric]) + apply (subst next_modify_map_last) + apply simp + apply (rule sn [OF d1]) + done + hence "mdbPrev src_node \ 0 \ \ ?n2 \ mdbPrev src_node \\<^sup>* mdbPrev dest_node" + apply - + apply (rule neg_rtranclI) + 
apply simp + apply (subst next_modify_map_trancl_last_iff [OF chain_n' chain_n2]) + apply (simp add: no_0) + apply assumption + done + moreover from no_0 have "mdbPrev src_node \ 0" using d1 by auto + ultimately show + "\ modify_map ?n2 (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest))) \ mdbNext src_node \\<^sup>* dest" using n2_1 + apply - + apply (rule neg_rtranclI) + apply (simp add: dsneq' [symmetric]) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply simp + apply (rule neg_next_trancl_trancl [OF n2_2]) + apply auto + done + qed fact+ + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and d3: "mdbNext src_node \ dom m" + + have ?goal + proof (simp add: d1, (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1) + show "\ ?n2 \ mdbNext src_node \\<^sup>* dest" + by (rule neg_rtranclI [OF _ n2_3], simp add: dsneq' [symmetric]) + qed fact+ + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and d3: "mdbNext src_node \ dom m" + hence m0: "mdbNext src_node = 0" + by (clarsimp dest!: src_next [where p = "mdbNext src_node", simplified]) + + have ?goal + by (simp add: m0, + (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_0 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + } moreover + { + assume d1: "mdbPrev src_node \ dom m" and d3: "mdbNext src_node \ dom m" + hence m0: "mdbNext src_node = 0" + by (clarsimp dest!: src_next [where p = "mdbNext src_node", simplified]) + + have ?goal + by (simp add: m0 d1, + (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_0 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + } ultimately have ?goal + apply (cases "mdbPrev src_node \ dom m") + apply (cases "mdbNext src_node \ dom m") + apply (auto)[2] + apply (cases "mdbNext src_node \ dom m") + apply auto + done + + thus ?thesis using no_0 d2n + apply simp + apply (subst modify_map_addr_com [where y = "mdbPrev dest_node"]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [where x = src]) + apply (simp add: dsneq' [symmetric]) + apply (subst modify_map_addr_com [where x = src]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [where y = dest], simp add: dsneq [symmetric] dsneq')+ + apply (subst modify_map_addr_com [where y = "mdbPrev src_node"], simp add: dsneq) + apply (subst modify_map_addr_com [where y = "mdbPrev dest_node"], simp add: dsneq dnz)+ + apply (subst modify_map_addr_com [where y = src], simp add: dsneq dsneq' [symmetric] dnz)+ + apply assumption + done + qed + next + case destsrc (* Dual of srcdest *) + + let ?n' = "modify_map m dest (cteMDBNode_update (mdbNext_update (%_. 
(mdbNext src_node))))" + + note [simp] = neg_rtrancl_into_trancl [OF destsrc(2)] + note [simp] = destsrc(2) + + have dsneq: "src \ mdbPrev dest_node" + proof + assume "src = mdbPrev dest_node" + hence "m \ src \\<^sup>* dest" + by - (rule r_into_rtrancl, rule next_fold [where m = m, OF src], simp) + + thus False using destsrc by simp + qed + + from src have n1 [simp]:"\ m \ mdbNext src_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx [OF _ neg_rtrancl_into_trancl]) fact+ + + have chain_n': "mdb_chain_0 ?n'" + proof (cases "mdbNext src_node \ dom m") + case True + thus ?thesis using n1 + by (rule mdb_chain_0_modify_map_next [OF chain no_0]) + next + case False + thus ?thesis using src chain no_0 + by - (drule (3) ndom_is_0D, simp, erule (1) mdb_chain_0_modify_map_0) + qed + + from src dest + have n4: "mdbPrev dest_node \ dom m \ \ m \ mdbNext src_node \\<^sup>* mdbPrev dest_node" + using neg_next_rtrancl_np [OF _ _ _ no_0 dlist neg_rtrancl_into_trancl] + by auto + + hence n2 [simp]: "\ ?n' \ dest \\<^sup>* src" + using sn dest + by (auto dest: rtrancl_into_trancl2 simp: modify_map_lhs_rtrancl) + + hence n3: "mdbPrev dest_node \ dom m + \ \ modify_map ?n' src (cteMDBNode_update (mdbNext_update (%_. dest))) \ src \\<^sup>* mdbPrev dest_node" + using dest dsneq src n1 + by (simp add: modify_map_lhs_rtrancl modify_map_app) (rule n4) + + from destsrc(1) + show ?thesis + proof (cases rule: tranclE2') + case base + hence ds: "src = mdbNext dest_node" by (clarsimp simp: next_unfold' dest) + hence d2: "dest2_node = MDB dest (mdbPrev dest_node) (mdbRevocable dest_node) (mdbFirstBadged dest_node)" + using dsneq + unfolding dest2_node_def by simp + + let ?m' = "(modify_map + (modify_map ?n' src (cteMDBNode_update (mdbNext_update (%_. dest)))) + (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. 
src))))" + + let ?goal = "mdb_chain_0 ?m'" + { + assume d1: "mdbPrev dest_node \ dom m" and "mdbNext src_node \ dom m" + hence ?goal + apply (intro mdb_chain_0_modify_map_next) + apply (auto simp: no_0 chain n1 n2 n3 [OF d1]) + done + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and "mdbNext src_node \ dom m" + hence ?goal + by simp ((rule mdb_chain_0_modify_map_next)+, simp_all add: no_0 chain n1 n2) + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and "mdbNext src_node \ dom m" + hence m0: "mdbNext src_node = 0" + by (clarsimp dest!: src_next [where p = "mdbNext src_node", simplified]) + + have ?goal using chain_n' d1 src dest + apply - + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_next [OF chain_n']) + apply (simp_all add: no_0 chain n1 n2 n3 [OF d1]) + done + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and "mdbNext src_node \ dom m" + hence m0: "mdbNext src_node = 0" + by (clarsimp dest!: src_next [where p = "mdbNext src_node", simplified]) + + have ?goal using d1 chain_n' + apply simp + apply (rule mdb_chain_0_modify_map_next) + apply (simp_all add: no_0 chain n1 n2) + done + } + ultimately have ?goal + apply (cases "mdbPrev dest_node \ dom m") + apply (cases "mdbNext src_node \ dom m") + apply (auto)[2] + apply (cases "mdbNext src_node \ dom m") + apply auto + done + thus ?thesis using ds [symmetric] d2 neqs dsneq + apply simp + apply (subst modify_map_addr_com [where x = "mdbNext src_node"], simp)+ + apply (subst modify_map_addr_com [OF neqs(1)]) + apply (subst modify_map_comp [symmetric]) + apply (simp) + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [where x = src]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (subst modify_map_addr_com [where x = dest], simp)+ + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [where y = src], simp)+ + apply (subst modify_map_addr_com [where y = dest], simp)+ + apply assumption + done + next + case (trancl c) + hence dsneq': "src \ mdbNext dest_node" using dest + apply - + apply rule + apply simp + apply (drule next_fold) + apply simp + apply (drule (1) next_single_value) + apply simp + done + + hence d2n: "dest2_node = dest_node" + unfolding dest2_node_def using dsneq + by simp + + from trancl obtain d where dnext: "m \ d \ src" and ncd: "m \ c \\<^sup>* d" + by (clarsimp dest!: tranclD2) + + have ddest: "d = mdbPrev (cteMDBNode (CTE src_cap src_node))" + using src dlist no_0 dnext + by (rule next_prev) + + hence d2: "mdbPrev src_node \ dom m" using dnext + by (clarsimp simp: next_unfold') + + have dnz: "mdbPrev src_node \ 0" + by (rule dom_into_not0 [OF no_0 d2]) + + have n5 [simp]: "\ ?n' \ dest \\<^sup>* mdbPrev src_node" + proof - + have "dest \ mdbPrev src_node" + by (simp add: dsneq' [simplified, symmetric]) + hence "?n' \ mdbPrev src_node \ src" using sn [OF d2] + by (clarsimp simp: next_unfold' modify_map_other) + thus ?thesis using n2 + by - (erule contrapos_nn, erule (1) rtrancl_into_rtrancl) + qed + + let ?n2 = "modify_map ?n' (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. 
dest)))" + have chain_n2: "mdb_chain_0 ?n2" + by ((rule chain_n' | rule mdb_chain_0_modify_map_next)+, simp_all add: no_0) + + have r [simp]: "\ m \ mdbNext src_node \\<^sup>* mdbPrev src_node" + by (rule neg_next_rtrancl_np [OF _ _ d2 no_0 dlist], rule src, rule src, simp) + + have r3 [simp]: "\ m \ mdbNext src_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx, rule src, simp) + + have r5 [simp]: "\ m \ mdbNext dest_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx, rule dest, simp) + + have r4 [simp]: "\ m \ src \\<^sup>+ mdbPrev src_node" + by (rule neg_next_trancl_xp [OF _ d2 no_0 dlist], rule src, simp) + + let ?m'' = + "(modify_map (modify_map + (modify_map ?n' (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) + (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. src)))) + src (cteMDBNode_update (mdbNext_update (%_. (mdbNext dest_node)))))" + + have n2_2 [simp]: + "?n2 \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + apply (cases "mdbNext dest_node = mdbPrev src_node") + apply simp + apply (rule trancl_into_rtrancl) + apply (rule next_modify_map_trancl_last [OF chain_n'], simp add: no_0) + apply (subst modify_map_trancl_other_iff) + apply simp + apply (rule next_trancl_np [OF _ _ dlist no_0]) + apply (rule dest, rule src) + apply (simp add: dsneq' [simplified]) + apply assumption + apply (rule destsrc(1)) + done + + hence n2_3 [simp]: "\ ?n2 \ mdbNext dest_node \\<^sup>+ src" + proof (rule neg_next_trancl_trancl) + show "\ ?n2 \ src \\<^sup>* mdbPrev src_node" + apply (rule neg_rtranclI) + apply simp + apply (subst next_modify_map_trancl_last_iff [OF chain_n' chain_n2]) + apply (simp add: no_0) + apply (simp add: modify_map_trancl_other_iff) + done + + show "\ ?n2 \ mdbPrev src_node \\<^sup>* src" using d2 + by (clarsimp simp: modify_map_lhs_rtrancl modify_map_other dsneq' [simplified, symmetric]) + qed + + have r6 [simp]: "mdbPrev dest_node \ dom m \ \ m \ src \\<^sup>+ mdbPrev dest_node" + by (rule neg_next_trancl_xp [OF _ _ no_0 dlist], rule dest, simp_all) + + have n2_4 [simp]: + "mdbPrev dest_node \ dom m \ \ ?n2 \ src \\<^sup>* mdbPrev dest_node" + apply - + apply (rule neg_rtranclI [OF dsneq]) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply (simp_all add: modify_map_trancl_other_iff) + done + + let ?goal = "mdb_chain_0 ?m''" + { + assume d1: "mdbPrev dest_node \ dom m" and d3: "mdbNext dest_node \ dom m" + + have r2 [simp]: "\ m \ mdbNext src_node \\<^sup>* mdbPrev dest_node" + using src dest + by (rule neg_next_rtrancl_np [OF _ _ _ no_0 dlist neg_rtrancl_into_trancl]) fact+ + + have ?goal + proof ((rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + + have n2_1: + "\ ?n2 \ mdbPrev src_node \\<^sup>* mdbPrev dest_node" using d2 dsneq' [symmetric] + apply - + apply (erule domE) + apply (subst modify_map_lhs_rtrancl) + apply (clarsimp simp: modify_map_other) + apply simp + apply simp + apply (simp add: dom_into_not0 [OF no_0 d2]) + apply (subst modify_map_lhs_rtrancl, rule dest) + apply simp + apply (simp) + done + have "\ ?n' \ mdbPrev dest_node \\<^sup>+ mdbPrev src_node" + apply (rule neg_next_rtrancl_trancl [where y = dest]) + apply (subst modify_map_lhs_rtrancl) + apply (rule dest) + apply simp + apply (simp add: dsneq' [simplified]) + apply (subst next_modify_map_last) + apply simp + apply (rule dp [OF d1]) + done + hence "mdbPrev dest_node \ 0 \ \ ?n2 \ mdbPrev dest_node \\<^sup>* mdbPrev src_node" + apply - + apply (rule neg_rtranclI) + apply simp + apply 
(subst next_modify_map_trancl_last_iff [OF chain_n' chain_n2]) + apply (simp add: no_0) + apply assumption + done + moreover from no_0 have "mdbPrev dest_node \ 0" using d1 by auto + ultimately show + "\ modify_map ?n2 (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. src))) \ mdbNext dest_node \\<^sup>* src" using n2_1 dsneq' [symmetric] + apply - + apply (rule neg_rtranclI) + apply (simp) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply simp + apply (rule neg_next_trancl_trancl [OF n2_2]) + apply auto + done + qed fact+ + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and d3: "mdbNext dest_node \ dom m" + + have ?goal + proof (simp add: d1, (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1) + show "\ ?n2 \ mdbNext dest_node \\<^sup>* src" + by (rule neg_rtranclI [OF _ n2_3], simp add: dsneq' [simplified]) + qed fact+ + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and d3: "mdbNext dest_node \ dom m" + hence m0: "mdbNext dest_node = 0" + by (clarsimp dest!: dest_next [where p = "mdbNext dest_node", simplified]) + + have ?goal + by (simp add: m0, + (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_0 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + } moreover + { + assume d1: "mdbPrev dest_node \ dom m" and d3: "mdbNext dest_node \ dom m" + hence m0: "mdbNext dest_node = 0" + by (clarsimp dest!: dest_next [where p = "mdbNext dest_node", simplified]) + + have ?goal + by (simp add: m0 d1, + (rule chain_n' | rule chain_n2 | rule mdb_chain_0_modify_map_0 | rule mdb_chain_0_modify_map_next)+, + simp_all add: no_0 chain n1 d1) + } ultimately have ?goal + apply (cases "mdbPrev dest_node \ dom m") + apply (cases "mdbNext dest_node \ dom m") + apply (auto)[2] + apply (cases "mdbNext dest_node \ dom m") + apply auto + done + thus ?thesis using no_0 d2n dsneq dsneq' + apply simp + apply (subst modify_map_addr_com [where y = "mdbPrev dest_node"]) + apply simp + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [where x = src], simp)+ + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [where y = src], simp)+ + apply (subst modify_map_addr_com [where y = "mdbPrev dest_node"], simp add: dnz)+ + apply (subst modify_map_addr_com [where y = "mdbPrev src_node"], simp add: dnz)+ + apply (subst modify_map_addr_com [where y = dest], simp add: dnz)+ + apply assumption + done + qed + next + case indep + + have indep_rt1: "\ m \ src \\<^sup>* dest" + by (rule neg_rtranclI, simp) fact+ + + have indep_rt2: "\ m \ dest \\<^sup>* src" + by (rule neg_rtranclI, simp) fact+ + + have dsneq: "src \ mdbPrev dest_node" + proof + assume "src = mdbPrev dest_node" + hence "m \ src \\<^sup>+ dest" + by - (rule r_into_trancl, rule next_fold [where m = m, OF src], simp) + + thus False using indep by simp + qed + + note [simp] = dsneq [simplified] + + have sdneq: "dest \ mdbPrev src_node" + proof + assume "dest = mdbPrev src_node" + hence "m \ dest \\<^sup>+ src" + by - (rule r_into_trancl, rule next_fold [where m = m, OF dest], simp) + + thus False using indep by simp + qed + + note [simp] = sdneq [simplified] + + have dsneq' [simp]: "dest \ mdbNext src_node" + proof + assume "dest = mdbNext src_node" + hence "m \ src \\<^sup>+ dest" + apply - + apply (rule r_into_trancl) + apply (rule next_fold) + apply (rule src) + apply simp + done + thus False using indep by simp + qed + + 
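(* In this case neither src nor dest is reachable from the other (indep_rt1/indep_rt2 above),
+    so dest is first spliced into src's old position (?n' and ?n2 below); the remaining pointer
+    updates are then handled by cases2 on whether mdbPrev src_node and mdbPrev dest_node lie
+    in dom m. *)
+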
have dsnp: "mdbPrev src_node \ dom m \ mdbNext dest_node \ mdbPrev src_node" + proof + assume "mdbPrev src_node \ dom m" and "mdbNext dest_node = mdbPrev src_node" + hence "m \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + by simp + moreover have "m \ dest \ mdbNext dest_node" using dest by (rule next_fold, simp) + moreover have "m \ mdbPrev src_node \ src" by (rule sn) fact+ + ultimately have "m \ dest \\<^sup>+ src" by auto + thus False using indep by simp + qed + + have d2n: "dest2_node = dest_node" + unfolding dest2_node_def by (cases dest_node, simp) + + let ?n' = "modify_map m dest (cteMDBNode_update (mdbNext_update (%_. (mdbNext src_node))))" + + let ?n2 = "modify_map ?n' (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))" + + from src have n1 [simp]:"\ m \ mdbNext src_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx [OF _ neg_rtrancl_into_trancl]) (rule indep_rt1) + + have chain_n': "mdb_chain_0 ?n'" + proof (cases "mdbNext src_node \ dom m") + case True + thus ?thesis using n1 + by (rule mdb_chain_0_modify_map_next [OF chain no_0]) + next + case False + thus ?thesis using src chain no_0 + by - (drule (3) ndom_is_0D, simp, erule (1) mdb_chain_0_modify_map_0) + qed + + have chain_n2: "mdb_chain_0 ?n2" + apply (cases "mdbPrev src_node \ dom m") + apply ((rule chain_n' | rule mdb_chain_0_modify_map_next)+, simp_all add: no_0) + apply (subst modify_map_lhs_rtrancl) + apply (rule dest) + apply simp + apply (simp add: sdneq [symmetric]) + apply (rule neg_next_rtrancl_np [OF _ _ _ no_0 dlist]) + apply (rule src, rule src) + apply assumption + apply simp + apply (rule chain_n') + done + + let ?m' = "(modify_map + (modify_map ?n2 + src (cteMDBNode_update (mdbNext_update (%_. (mdbNext dest_node))))) + (mdbPrev dest_node) (cteMDBNode_update (mdbNext_update (%_. 
src))))" + + have r1 [simp]: "mdbPrev src_node \ dom m \ \ m \ src \\<^sup>+ mdbPrev src_node" + apply (rule neg_next_trancl_xp) + apply (rule src, assumption, rule no_0, rule dlist) + apply simp + done + + have r [simp]: "mdbPrev src_node \ dom m \ \ ?n' \ src \\<^sup>+ mdbPrev src_node" + by (simp add: modify_map_trancl_other_iff [OF indep_rt1]) + + have r2 [simp]: "mdbPrev dest_node \ dom m \ \ m \ mdbNext src_node \\<^sup>* mdbPrev dest_node" + using src dest indep neg_next_rtrancl_np [OF _ _ _ no_0 dlist] + by auto + + have n2 [simp]: "\ ?n' \ dest \\<^sup>* src" + using sn dest + by (auto dest: rtrancl_into_trancl2 simp: modify_map_lhs_rtrancl) + + have n5 [simp]: "mdbPrev src_node \ dom m \ \ ?n' \ dest \\<^sup>* mdbPrev src_node" + proof - + assume d2: "mdbPrev src_node \ dom m" + have "?n' \ mdbPrev src_node \ src" using sn [OF d2] + by (clarsimp simp: next_unfold' modify_map_other) + thus ?thesis using n2 + by - (erule contrapos_nn, erule (1) rtrancl_into_rtrancl) + qed + + have r4 [simp]: "mdbPrev src_node \ dom m \ \ m \ mdbNext dest_node \\<^sup>+ mdbPrev src_node" + apply (rule neg_next_trancl_np [OF _ _ _ no_0 dlist]) + apply (rule dest) + apply (rule src) + apply assumption + apply (rule indep(2)) + done + + have r5 [simp]: "\ m \ mdbNext dest_node \\<^sup>* dest" + by (rule neg_next_rtrancl_nx, rule dest, simp) + have r6 [simp]: " \ m \ mdbNext dest_node \\<^sup>+ src" + by (rule neg_next_trancl_nx, rule dest, rule indep(2)) + have r7 [simp]: " mdbPrev dest_node \ dom m \ \ m \ mdbNext dest_node \\<^sup>+ mdbPrev dest_node" + apply (rule neg_next_trancl_np [OF _ _ _ no_0 dlist]) + apply (rule dest) + apply (rule dest) + apply assumption + apply simp + done + + have n6 [simp]: "\ ?n' \ mdbNext dest_node \\<^sup>+ src" + by (subst modify_map_trancl_other_iff) simp_all + + have n6_r [simp]: "\ ?n' \ mdbNext dest_node \\<^sup>* src" + by (rule neg_rtranclI) (simp_all add: sdneq [symmetric]) + + have n2_3 [simp]: "mdbPrev src_node \ dom m \ \ ?n2 \ mdbNext dest_node \\<^sup>+ src" + apply (subst modify_map_trancl_other_iff) + apply (rule neg_rtranclI) + apply (simp add: dsnp) + apply (subst modify_map_trancl_other_iff) + apply (rule neg_next_rtrancl_nx) + apply (rule dest) + apply simp_all + done + + have n7 [simp]: "mdbPrev src_node \ dom m \ \ ?n' \ mdbNext dest_node \\<^sup>* mdbPrev src_node" + apply (rule neg_rtranclI) + apply (erule dsnp) + apply (subst modify_map_trancl_other_iff) + apply simp_all + done + + have n8 [simp]: "mdbPrev dest_node \ dom m + \ \ ?n' \ mdbNext dest_node \\<^sup>+ mdbPrev dest_node" + by (simp add: modify_map_trancl_other_iff) + + have n2_5 [simp]: "mdbPrev dest_node \ dom m \ \ ?n2 \ mdbNext dest_node \\<^sup>+ mdbPrev dest_node" + by (cases "mdbPrev src_node \ dom m", simp_all add: modify_map_trancl_other_iff) + + have n2_4 [simp]: "mdbPrev dest_node \ dom m \ \ ?n2 \ mdbNext dest_node \\<^sup>* mdbPrev dest_node" + apply (frule dom_into_not0 [OF no_0]) + apply (cases "mdbPrev src_node \ dom m") + apply (rule neg_rtranclI) + apply (drule dom_into_not0 [OF no_0]) + apply simp + apply simp + apply simp + apply (rule neg_rtranclI) + apply simp + apply simp + done + + have n9 [simp]: "mdbPrev dest_node \ dom m \ + \ modify_map ?n' src (cteMDBNode_update (mdbNext_update (%_. 
(mdbNext dest_node)))) \ src \\<^sup>* mdbPrev dest_node" + apply (subst modify_map_lhs_rtrancl) + apply (simp add: src modify_map_other) + apply simp + apply simp + apply (rule neg_rtranclI) + apply (drule dom_into_not0 [OF no_0]) + apply simp + apply simp + done + + have chain_n3: "mdbPrev src_node \ dom m \ mdb_chain_0 + (modify_map + (modify_map (modify_map m dest (cteMDBNode_update (mdbNext_update (%_. (mdbNext src_node))))) + (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) + src (cteMDBNode_update (mdbNext_update (%_. (mdbNext dest_node)))))" + apply - + apply (cases "mdbNext dest_node \ dom m") + apply (rule mdb_chain_0_modify_map_next [OF chain_n2]) + apply (simp add: no_0) + apply simp + apply (rule neg_rtranclI) + apply (simp add: sdneq [symmetric]) + apply simp + apply (frule ndom_is_0D [OF _ chain no_0]) + apply (rule dest) + apply simp + apply (rule mdb_chain_0_modify_map_0 [OF chain_n2]) + apply (simp_all add: no_0) + done + + have "mdb_chain_0 ?m'" + proof (cases rule: cases2 [of "mdbPrev src_node \ dom m" "mdbPrev dest_node \ dom m"]) + case pos_pos + + thus ?thesis + apply - + apply (rule mdb_chain_0_modify_map_next [OF chain_n3]) + apply (simp_all add: no_0) + apply (subst modify_map_lhs_rtrancl) + apply (simp add: modify_map_other src) + apply simp + apply (rule neg_rtranclI) + apply (simp add: sdneq [symmetric]) + apply simp + apply simp + done + next + case pos_neg + thus ?thesis + by simp (rule chain_n3) + next + case neg_pos + thus ?thesis using no_0 + apply - + apply simp + apply (cases "mdbNext dest_node \ dom m") + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_next [OF chain_n']) + apply simp_all + apply (drule ndom_is_0D [OF _ chain no_0], rule dest) + apply simp + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_0 [OF chain_n']) + apply simp_all + apply (subst modify_map_lhs_rtrancl) + apply (simp add: modify_map_other src) + apply simp_all + apply (rule no_0_no_0_lhs_rtrancl) + apply simp + apply (erule (1) dom_into_not0) + done + next + case neg_neg + thus ?thesis using no_0 + apply - + apply (cases "mdbNext dest_node \ dom m") + apply simp + apply (rule mdb_chain_0_modify_map_next [OF chain_n']) + apply simp + apply simp + apply simp + apply (drule ndom_is_0D [OF _ chain no_0], rule dest) + apply simp + apply (rule mdb_chain_0_modify_map_0 [OF chain_n']) + apply simp + done + qed + + thus ?thesis using d2n + apply simp + apply (subst modify_map_addr_com [where x = dest], simp)+ + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [where x = src], simp)+ + apply (rule mdb_chain_0_modify_map_replace) + apply simp + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [where y = dest], simp add: sdneq [symmetric])+ + apply (subst modify_map_addr_com [where y = src], simp) + apply assumption + done + qed + thus ?thesis + unfolding n_def n'_def + apply (simp add: const_def) + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_com [where g = "cteCap_update (%_. scap)"], case_tac x, simp)+ + apply (rule mdb_chain_0_modify_map_inv) + apply (subst modify_map_com [where g = "cteCap_update (%_. 
dcap)"], case_tac x, simp)+ + apply (rule mdb_chain_0_modify_map_inv) + apply simp_all + done +qed + +lemma (in mdb_swap) next_m_n2: + "n \ p \ p' = m \ s_d_swp p \ s_d_swp p'" + by (simp add: next_m_n) + +lemma (in mdb_swap) n_src [simp]: + "n src = Some (CTE dcap dest2_node)" + unfolding n_def n'_def + apply (simp) + apply (subst modify_map_same | subst modify_map_other, simp add: dest2_node_def)+ + apply (simp add: src) + done + +lemma (in mdb_swap) swap_cases [case_names src_dest dest_src other]: + assumes src_dest: + "\mdbNext src_node = dest; mdbPrev dest_node = src; mdbNext dest_node \ src; mdbPrev src_node \ dest\ \ P" + and dest_src: + "\mdbNext dest_node = src; mdbPrev src_node = dest; mdbNext src_node \ dest; mdbPrev dest_node \ src\ \ P" + and other: + "\mdbNext src_node \ dest; mdbPrev dest_node \ src; mdbNext dest_node \ src; mdbPrev src_node \ dest \ \ P" + shows "P" +proof (cases "mdbNext src_node = dest") + case True + thus ?thesis + proof (rule src_dest) + from True show "mdbPrev dest_node = src" + by simp + show "mdbNext dest_node \ src" + proof + assume "mdbNext dest_node = src" + hence "m \ dest \ src" using dest + by - (rule next_fold, simp+) + moreover have "m \ src \ dest" using src True + by - (rule next_fold, simp+) + finally show False by simp + qed + show "mdbPrev src_node \ dest" + proof + assume "mdbPrev src_node = dest" + hence "mdbNext dest_node = src" using src + by (clarsimp elim: dlistEp) + hence "m \ dest \ src" using dest + by - (rule next_fold, simp+) + moreover have "m \ src \ dest" using src True + by - (rule next_fold, simp+) + finally show False by simp + qed + qed +next + case False + + note firstFalse = False + + show ?thesis + proof (cases "mdbNext dest_node = src") + case True + thus ?thesis + proof (rule dest_src) + from True show "mdbPrev src_node = dest" by simp + show "mdbPrev dest_node \ src" + proof + assume "mdbPrev dest_node = src" + hence "mdbNext src_node = dest" using dest + by (clarsimp elim: dlistEp) + hence "m \ src \ dest" using src + by - (rule next_fold, simp+) + moreover have "m \ dest \ src" using dest True + by - (rule next_fold, simp+) + finally show False by simp + qed + qed fact+ + next + case False + from firstFalse show ?thesis + proof (rule other) + show "mdbPrev dest_node \ src" and "mdbPrev src_node \ dest" using False firstFalse + by simp+ + qed fact+ + qed +qed + +lemma (in mdb_swap) src_prev_next [intro?]: + "mdbPrev src_node \ 0 \ m \ mdbPrev src_node \ src" + using src + apply - + apply (erule dlistEp) + apply simp + apply (rule next_fold) + apply simp + apply simp + done + +lemma (in mdb_swap) dest_prev_next [intro?]: + "mdbPrev dest_node \ 0 \ m \ mdbPrev dest_node \ dest" + using dest + apply - + apply (erule dlistEp) + apply simp + apply (rule next_fold) + apply simp + apply simp + done + +lemma (in mdb_swap) n_dest: + "n dest = Some (CTE scap (MDB (if mdbNext src_node = dest then src else mdbNext src_node) (if mdbPrev src_node = dest then src else mdbPrev src_node) (mdbRevocable src_node) (mdbFirstBadged src_node)))" + unfolding n_def n'_def using dest p_0 + apply (simp only: dest2_next dest2_prev) + apply (cases "mdbPrev src_node = dest") + apply (subgoal_tac "dest \ mdbNext src_node") + apply (simp add: modify_map_same modify_map_other) + apply (cases src_node, simp) + apply clarsimp + apply (cases "mdbNext src_node = dest") + apply (simp add: modify_map_same modify_map_other) + apply (cases src_node, simp) + apply (simp add: modify_map_same modify_map_other) + done + +lemma (in mdb_swap) n_dest_prev: + 
assumes md: "m (mdbPrev dest_node) = Some cte" + shows "\cte'. n (mdbPrev dest_node) = Some cte' + \ mdbNext (cteMDBNode cte') = (if dest = mdbNext src_node then mdbNext dest_node else src) + \ mdbPrev (cteMDBNode cte') = + (if (mdbNext src_node = mdbPrev dest_node \ dest = mdbNext src_node) then dest else + mdbPrev (cteMDBNode cte))" +proof - + have nz: "(mdbPrev dest_node) \ 0" using md + by (rule dom_into_not0 [OF no_0 domI]) + + show ?thesis + proof (cases rule: cases2 [of "dest = mdbNext src_node" "mdbNext src_node = mdbPrev dest_node"]) + case pos_pos thus ?thesis by simp + next + case neg_pos + thus ?thesis using nz md + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + next + case pos_neg + + hence "(mdbPrev dest_node) = src" by simp + thus ?thesis using pos_neg md p_0 + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (simp add: modify_map_same modify_map_other del: dest2_parts ) + apply (simp only: next_unfold' dest2_next dest2_prev) + apply (subst if_not_P) + apply simp+ + done + next + case neg_neg + thus ?thesis using md nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + qed +qed + +(* Dual of above *) +lemma (in mdb_swap) n_dest_next: + assumes md: "m (mdbNext dest_node) = Some cte" + shows "\cte'. n (mdbNext dest_node) = Some cte' + \ mdbNext (cteMDBNode cte') = (if (src = mdbNext dest_node \ mdbNext dest_node = mdbPrev src_node) then dest else mdbNext (cteMDBNode cte)) + \ mdbPrev (cteMDBNode cte') = (if src = mdbNext dest_node then mdbPrev dest_node else src)" +proof - + have nz: "(mdbNext dest_node) \ 0" using md + by (rule dom_into_not0 [OF no_0 domI]) + + show ?thesis + proof (cases rule: cases2 [of "src = mdbNext dest_node" "mdbNext dest_node = mdbPrev src_node"]) + case pos_pos thus ?thesis by simp + next + case neg_pos + hence "(mdbPrev src_node) \ dest" + by - (rule, simp add: next_dest_prev_src_sym) + thus ?thesis using nz md neg_pos + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + next + case pos_neg + hence pd: "mdbPrev src_node = dest" by simp + + have "mdbNext src_node \ dest" + proof + assume a: "mdbNext src_node = dest" + from pd have "mdbPrev src_node \ 0" by simp + hence "m \ mdbPrev src_node \ src" .. + also have "m \ src \ dest" using src next_fold a + by auto + finally show False using pd by simp + qed + thus ?thesis using md p_0 pd pos_neg nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (simp add: modify_map_same modify_map_other del: dest2_parts ) + apply (simp only: dest2_next dest2_prev) + apply (subst if_P [OF refl]) + apply simp+ + done + next + case neg_neg + thus ?thesis using md nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + qed +qed + +lemma (in mdb_swap) n_src_prev: + assumes md: "m (mdbPrev src_node) = Some cte" + shows "\cte'. 
n (mdbPrev src_node) = Some cte' + \ mdbNext (cteMDBNode cte') = (if src = mdbNext dest_node then mdbNext src_node else dest) + \ mdbPrev (cteMDBNode cte') = + (if (mdbNext dest_node = mdbPrev src_node \ src = mdbNext dest_node) then src else + mdbPrev (cteMDBNode cte))" +proof - + have nz: "(mdbPrev src_node) \ 0" using md + by (rule dom_into_not0 [OF no_0 domI]) + + show ?thesis + proof (cases rule: cases2 [of "dest = mdbNext src_node" "mdbNext src_node = mdbPrev dest_node"]) + case pos_pos thus ?thesis by simp + next + case neg_pos + thus ?thesis using nz md + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other) + done + next + case pos_neg + + hence "(mdbPrev dest_node) = src" by simp + thus ?thesis using pos_neg md p_0 + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + apply (clarsimp simp add: modify_map_same modify_map_other del: dest2_parts ) + done + next + case neg_neg + thus ?thesis using md nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + by (clarsimp simp add: modify_map_same modify_map_other) + qed +qed + +(* Dual of above *) +lemma (in mdb_swap) n_src_next: + assumes md: "m (mdbNext src_node) = Some cte" + shows "\cte'. n (mdbNext src_node) = Some cte' + \ mdbNext (cteMDBNode cte') = (if (dest = mdbNext src_node \ mdbNext src_node = mdbPrev dest_node) then src else mdbNext (cteMDBNode cte)) + \ mdbPrev (cteMDBNode cte') = (if dest = mdbNext src_node then mdbPrev src_node else dest)" +proof - + have nz: "(mdbNext src_node) \ 0" using md + by (rule dom_into_not0 [OF no_0 domI]) + + show ?thesis + proof (cases rule: cases2 [of "src = mdbNext dest_node" "mdbNext dest_node = mdbPrev src_node"]) + case pos_pos thus ?thesis by simp + next + case neg_pos + hence "(mdbPrev src_node) \ dest" + by - (rule, simp add: next_dest_prev_src_sym) + thus ?thesis using nz md neg_pos + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + by (clarsimp simp add: modify_map_same modify_map_other) + next + case pos_neg + hence pd: "mdbPrev src_node = dest" by simp + + have "mdbNext src_node \ dest" + proof + assume a: "mdbNext src_node = dest" + from pd have "mdbPrev src_node \ 0" by simp + hence "m \ mdbPrev src_node \ src" .. 
+ also have "m \ src \ dest" using src using a next_fold by auto + finally show False using pd by simp + qed + thus ?thesis using md p_0 pd pos_neg nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + by (clarsimp simp add: modify_map_same modify_map_other del: dest2_parts ) + next + case neg_neg + thus ?thesis using md nz + unfolding n_def n'_def + apply (simp only: dest2_next dest2_prev) + by (clarsimp simp add: modify_map_same modify_map_other) + qed +qed + +lemma (in mdb_swap) dest2_node_next: + "mdbNext dest2_node = (if dest = mdbPrev src_node then dest else mdbNext dest_node)" + unfolding dest2_node_def + by simp + +lemma (in mdb_swap) dest2_node_prev: + "mdbPrev dest2_node = (if dest = mdbNext src_node then dest else mdbPrev dest_node)" + unfolding dest2_node_def + by simp + +lemma (in mdb_swap) n_other: + assumes other: "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" + shows "n p = m p" + using other + unfolding n_def n'_def + by (simp add: modify_map_other dest2_node_next dest2_node_prev) + +lemma (in mdb_swap) dom_n_m: + "dom n = dom m" + unfolding n_def n'_def by simp + +lemma (in mdb_swap) other_src_next_dest_src: + fixes cte + defines "p \ mdbNext (cteMDBNode cte)" + assumes dest_src: "mdbNext dest_node = src" + and ps: "m (mdbNext src_node) = Some cte" + and p0: "p \ 0" + shows "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" +proof - + have sn: "m \ src \ mdbNext src_node" .. + also have pn: "m \ mdbNext src_node \ p" using ps + by (simp add: next_unfold' p_def) + finally have sp [intro?]: "m \ src \\<^sup>+ p" . + + have "m \ dest \ mdbNext dest_node" .. + also have "mdbNext dest_node = src" by fact+ + finally have ds [intro?]: "m \ dest \ src" . + + show "p \ mdbPrev src_node" + proof + assume a: "p = mdbPrev src_node" + hence "mdbPrev src_node \ 0" using p0 by simp + hence "m \ mdbPrev src_node \ src" .. + hence "m \ p \ src" using a by simp + thus False using sp by - (drule (1) trancl_into_trancl2, simp) + qed + + show "p \ src" + proof + assume "p = src" + also have "m \ src \ mdbNext src_node" .. + also have "m \ mdbNext src_node \ p" by (rule pn) + finally show False by simp + qed + + show "p \ mdbNext src_node" using pn + by clarsimp + + show "p \ mdbPrev dest_node" + proof + assume a: "p = mdbPrev dest_node" + hence "mdbPrev dest_node \ 0" using p0 by simp + hence "m \ mdbPrev dest_node \ dest" .. + also have "m \ dest \ src" .. + also have "m \ src \\<^sup>+ p" .. + finally show False using a by simp + qed + + show "p \ dest" + proof + assume "p = dest" + also have "m \ dest \ src" .. + also have "m \ src \\<^sup>+ p" .. + finally show False by simp + qed + + show "p \ mdbNext dest_node" + proof + assume "p = mdbNext dest_node" + also have "mdbNext dest_node = src" by fact+ + also have "m \ src \\<^sup>+ p" .. 
+ finally show False by simp + qed +qed + +lemma (in mdb_swap) other_src_prev_src_dest: + fixes cte + defines "p \ mdbPrev (cteMDBNode cte)" + assumes src_dest: "mdbNext src_node = dest" + and ps: "m (mdbPrev src_node) = Some cte" + and p0: "p \ 0" + shows "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" +proof - + note really_annoying_simps [simp del] = word_neq_0_conv + + have pp: "m \ p \ mdbPrev src_node" + using p0 ps unfolding p_def + by (cases cte, simp) (erule (1) prev_leadstoI [OF _ _ dlist]) + also have "mdbPrev src_node \ 0" using ps no_0 + by (rule no_0_neq) + hence "m \ mdbPrev src_node \ src" .. + finally have ps' [intro?]: "m \ p \\<^sup>+ src" . + + from src_dest src have sd [intro?]: "m \ src \ dest" + by (simp add: next_unfold') + + from ps' sd have pd [intro?]: "m \ p \\<^sup>+ dest" .. + + show "p \ mdbPrev src_node" using pp + by clarsimp + + show "p \ src" using ps' by clarsimp + + show "p \ mdbNext src_node" + proof + assume a: "p = mdbNext src_node" + also have "m \ src \ mdbNext src_node" .. + also have "m \ p \\<^sup>+ src" .. + finally show False by simp + qed + + from src_dest have "mdbPrev dest_node = src" by simp + hence "mdbPrev dest_node \ 0" using mdb_ptr_src.p_0 + by (rule ssubst) + thus "p \ mdbPrev dest_node" + unfolding p_def using ps src_dest + by (cases cte, auto simp add: p_prev_qe) + + show "p \ dest" + proof + assume "p = dest" + hence "dest = p" .. + also have "m \ p \\<^sup>+ src" .. + also have "m \ src \ dest" .. + finally show False by simp + qed + + show "p \ mdbNext dest_node" + proof + assume "p = mdbNext dest_node" + also have "m \ dest \ mdbNext dest_node" .. + also have "m \ p \\<^sup>+ src" .. + also have "m \ src \ dest" .. + finally show False by simp + qed +qed + +lemma (in mdb_swap) other_dest_next_src_dest: + fixes cte + defines "p \ mdbNext (cteMDBNode cte)" + assumes src_dest: "mdbNext src_node = dest" + and ps: "m (mdbNext dest_node) = Some cte" + and p0: "p \ 0" + shows "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" +proof - + have sn: "m \ dest \ mdbNext dest_node" .. + also have pn: "m \ mdbNext dest_node \ p" using ps + by (simp add: next_unfold' p_def) + finally have sp [intro?]: "m \ dest \\<^sup>+ p" . + + have "m \ src \ mdbNext src_node" .. + also have "mdbNext src_node = dest" by fact+ + finally have ds [intro?]: "m \ src \ dest" . + + show "p \ mdbPrev dest_node" + proof + assume a: "p = mdbPrev dest_node" + hence "mdbPrev dest_node \ 0" using p0 by simp + hence "m \ mdbPrev dest_node \ dest" .. + hence "m \ p \ dest" using a by simp + thus False using sp by - (drule (1) trancl_into_trancl2, simp) + qed + + show "p \ dest" + proof + assume "p = dest" + also have "m \ dest \ mdbNext dest_node" .. + also have "m \ mdbNext dest_node \ p" by (rule pn) + finally show False by simp + qed + + show "p \ mdbNext dest_node" using pn + by clarsimp + + show "p \ mdbPrev src_node" + proof + assume a: "p = mdbPrev src_node" + hence "mdbPrev src_node \ 0" using p0 by simp + hence "m \ mdbPrev src_node \ src" .. + also have "m \ src \ dest" .. + also have "m \ dest \\<^sup>+ p" .. + finally show False using a by simp + qed + + show "p \ src" + proof + assume "p = src" + also have "m \ src \ dest" .. + also have "m \ dest \\<^sup>+ p" .. 
+ finally show False by simp + qed + + show "p \ mdbNext src_node" + proof + assume "p = mdbNext src_node" + also have "mdbNext src_node = dest" by fact+ + also have "m \ dest \\<^sup>+ p" .. + finally show False by simp + qed +qed + +lemma (in mdb_swap) other_dest_prev_dest_src: + fixes cte + defines "p \ mdbPrev (cteMDBNode cte)" + assumes dest_src: "mdbNext dest_node = src" + and ps: "m (mdbPrev dest_node) = Some cte" + and p0: "p \ 0" + shows "p \ mdbPrev src_node" "p \ src" "p \ mdbNext src_node" + "p \ mdbPrev dest_node" "p \ dest" "p \ mdbNext dest_node" +proof - + note really_annoying_simps [simp del] = word_neq_0_conv + + have pp: "m \ p \ mdbPrev dest_node" + using p0 ps unfolding p_def + by (cases cte, simp) (erule (1) prev_leadstoI [OF _ _ dlist]) + also have "mdbPrev dest_node \ 0" using ps no_0 + by (rule no_0_neq) + hence "m \ mdbPrev dest_node \ dest" .. + finally have ps' [intro?]: "m \ p \\<^sup>+ dest" . + + from dest_src dest have sd [intro?]: "m \ dest \ src" + by (simp add: next_unfold') + + from ps' sd have pd [intro?]: "m \ p \\<^sup>+ src" .. + + show "p \ mdbPrev dest_node" using pp + by clarsimp + + show "p \ dest" using ps' by clarsimp + + show "p \ mdbNext dest_node" + proof + assume a: "p = mdbNext dest_node" + also have "m \ dest \ mdbNext dest_node" .. + also have "m \ p \\<^sup>+ dest" .. + finally show False by simp + qed + + from dest_src have "mdbPrev src_node = dest" by simp + hence s0: "mdbPrev src_node \ 0" using p_0 + by (rule ssubst) + have sn: "mdbNext src_node \ dest" using dest_src + by (clarsimp simp: s0) + show "p \ mdbPrev src_node" + unfolding p_def using ps dest_src + by (cases cte) (clarsimp simp: mdb_ptr_src.p_prev_qe sn s0) + + show "p \ src" + proof + assume "p = src" + hence "src = p" .. + also have "m \ p \\<^sup>+ dest" .. + also have "m \ dest \ src" .. + finally show False by simp + qed + + show "p \ mdbNext src_node" + proof + assume "p = mdbNext src_node" + also have "m \ src \ mdbNext src_node" .. + also have "m \ p \\<^sup>+ dest" .. + also have "m \ dest \ src" .. 
+ finally show False by simp + qed +qed + +lemma (in mdb_swap) swap_ptr_cases [case_names p_src_prev p_src p_src_next p_dest_prev p_dest p_dest_next p_other]: + "\p = mdbPrev src_node \ P; p = src \ P; p = mdbNext src_node \ P; + p = mdbPrev dest_node \ P; p = dest \ P; p = mdbNext dest_node \ P; + \p \ mdbPrev src_node; p \ src; p \ mdbNext src_node; + p \ mdbPrev dest_node; p \ dest; p \ mdbNext dest_node\ \ P\ \ P" + by auto + +lemma (in mdb_swap) prev_not0_into_dom: + assumes np: "n p = Some cte" + and n0: "mdbPrev (cteMDBNode cte) \ 0" + shows "mdbPrev (cteMDBNode cte) \ dom m" +proof - + note p_next_qe_src = mdb_ptr_src.p_next_qe + + note annoying_simps [simp del] + = next_dest_prev_src next_dest_prev_src_sym prev_dest_next_src prev_dest_next_src_sym + + note really_annoying_simps [simp del] = word_neq_0_conv + + from np have "p \ dom n" by (rule domI) + then obtain ctep where mp: "m p = Some ctep" + by (clarsimp simp add: dom_n_m) + + show ?thesis + proof (cases rule: swap_ptr_cases [where p = p]) + case p_src_prev + thus ?thesis using mp np n0 src dest + apply simp + apply (frule n_src_prev) + apply (auto simp: elim: dlistEp) + done + next + case p_src + thus ?thesis using mp np n0 src dest + apply (clarsimp simp add: dest2_node_prev) + apply safe + apply simp+ + apply (erule dlistEp, fastforce) + apply simp + done + next + case p_src_next + thus ?thesis using mp np n0 src dest + apply simp + apply (frule n_src_next) + apply (auto simp: elim: dlistEp) + done + next + case p_dest_prev + thus ?thesis using mp np n0 src dest + apply simp + apply (frule n_dest_prev) + apply (auto elim: dlistEp) + done + next + case p_dest + thus ?thesis using mp np n0 src dest + apply (clarsimp simp: n_dest) + apply (erule dlistEp, fastforce) + apply simp + done + next + case p_dest_next + thus ?thesis using mp np n0 src dest + apply simp + apply (frule n_dest_next) + apply (auto simp: elim: dlistEp) + done + next + case p_other + thus ?thesis using mp np n0 src dest + by (auto simp: n_other elim: dlistEp) + qed +qed + +lemma (in mdb_swap) cteSwap_dlist_helper: + shows "valid_dlist n" +proof + fix p cte + assume np: "n p = Some cte" and n0: "mdbPrev (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'. n (mdbPrev (cteMDBNode cte)) = Some cte' \ mdbNext (cteMDBNode cte') = p" + let ?mn = "mdbPrev (cteMDBNode cte)" + + note p_prev_qe_src = mdb_ptr_src.p_prev_qe + + note annoying_simps [simp del] + = next_dest_prev_src next_dest_prev_src_sym prev_dest_next_src prev_dest_next_src_sym + + note really_annoying_simps [simp del] = word_neq_0_conv + + from np have domn: "p \ dom n" by (rule domI) + then obtain ctep where mp: "m p = Some ctep" + by (clarsimp simp add: dom_n_m) + + have dd: "mdbPrev (cteMDBNode cte) \ dom n" + by (subst dom_n_m, rule prev_not0_into_dom) fact+ + then obtain cte' where mmn: "m (mdbPrev (cteMDBNode cte)) = Some cte'" + by (clarsimp simp add: dom_n_m) + + have dest_src_pn: "\mdbPrev src_node \ 0; mdbNext src_node = dest \ + \ mdbNext dest_node \ mdbPrev src_node" + proof (rule not_sym, rule) + assume "mdbPrev src_node = mdbNext dest_node" and "mdbPrev src_node \ 0" + and msd: "mdbNext src_node = dest" + hence "m \ mdbNext dest_node \ src" + by (auto dest!: src_prev intro: next_fold) + also have "m \ src \ dest" using src next_fold msd by auto + also have "m \ dest \ mdbNext dest_node" .. 
+ finally show False by simp + qed + + have src_dest_pn': "\ mdbPrev dest_node \ 0; mdbNext dest_node = src \ + \ mdbNext src_node \ mdbPrev dest_node" + proof (rule not_sym, rule) + assume a: "mdbPrev dest_node = mdbNext src_node" and "mdbPrev dest_node \ 0" + and msd: "mdbNext dest_node = src" + hence a': "mdbPrev dest_node \ 0" by simp + have "m \ src \ mdbPrev dest_node" by (rule next_fold, rule src, simp add: a) + also have "m \ mdbPrev dest_node \ dest" using a' .. + also have "m \ dest \ src" using dest msd + by - (rule next_fold, simp+) + finally show False by simp + qed + + from domn have domm: "p \ dom m" by (simp add: dom_n_m) + with no_0 have p0: "p \ 0" + by (rule dom_into_not0) + + show ?thesis + proof (cases rule: swap_ptr_cases [where p = p]) + case p_src_prev + + hence psrc [intro?]: "m \ p \ src" using p0 + by (clarsimp intro!: src_prev_next) + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = src" using p_src_prev dest src np n0 + using [[hypsubst_thin = true]] + apply clarsimp + apply (drule n_src_prev) + apply (clarsimp simp: dest_src ) + done + thus ?thesis using p_src_prev mmn dest_src + by (simp add: dest2_node_def) + next + case src_dest + + hence "mdbNext dest_node \ mdbPrev src_node" using p_src_prev p0 + by - (rule dest_src_pn, simp) + hence "?mn = mdbPrev (cteMDBNode ctep)" using p_src_prev src np mp p0 dest src_dest + by simp (drule n_src_prev, clarsimp) + thus ?thesis using p_src_prev src_dest mmn n0 mp + apply simp + apply (subst n_other [OF other_src_prev_src_dest]) + apply simp+ + apply (erule dlistEp [OF mp, simplified]) + apply simp + done + next + case other + + show ?thesis + proof (cases "mdbPrev src_node = mdbNext dest_node") + case True thus ?thesis using p_src_prev mmn other np mp other + by simp (drule n_dest_next, simp add: dest2_node_next split: if_split_asm) + next + let ?mn' = "mdbPrev (cteMDBNode ctep)" + case False + hence mnmn: "?mn = ?mn'" using p_src_prev src np mp p0 dest other + by simp (drule n_src_prev, clarsimp) + + have mnp: "m \ ?mn' \ p" using mp mnmn n0 dlist + by (cases ctep, auto intro!: prev_leadstoI) + + note superFalse = False + + show ?thesis + proof (cases "?mn' = mdbNext dest_node") + case True + thus ?thesis using mmn p_src_prev superFalse n0 mp + by (simp add: mnmn) (frule n_dest_next, auto elim: dlistEp simp: other [symmetric]) + next + case False + + have eq: "n ?mn' = m ?mn'" + proof (rule n_other) + + show "?mn' \ mdbPrev dest_node" using mp other p_src_prev n0 mnmn + by (cases ctep, simp add: p_prev_qe) + + show "?mn' \ dest" + proof + assume "?mn' = dest" + hence "mdbNext dest_node = mdbPrev src_node" using mnp dest p_src_prev + by (simp add: next_unfold') + thus False using superFalse by simp + qed + + show "?mn' \ mdbNext dest_node" by fact+ + + show "?mn' \ mdbPrev src_node" using mp other p_src_prev n0 mnmn + by (cases ctep, simp add: p_prev_qe_src) + + show "?mn' \ src" using src mnp p_src_prev p0 + by (clarsimp simp add: next_unfold') + + show "?mn' \ mdbNext src_node" + proof + assume a: "?mn' = mdbNext src_node" + have "m \ ?mn' \ p" using mnp . + also have "m \ p \ src" .. + also have "m \ src \ mdbNext src_node" .. 
+ finally show False using a by simp + qed + qed + thus ?thesis using mnmn mmn mp p_src_prev n0 + by - (erule dlistEp [where p = p], simp+) + qed + qed + qed + next + case p_src + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = dest" using p_src src dest np + by (cases cte, clarsimp simp add: dest2_node_def) + thus ?thesis using p_src src_dest + by (simp add: n_dest) + next + case dest_src + hence "?mn = mdbPrev dest_node" using p_src src np + by (clarsimp simp: dest2_node_def) + thus ?thesis using p_src mmn dest_src + apply (simp add: n_dest dest2_node_prev) + apply (drule n_dest_prev) + apply clarsimp + done + next + case other + hence "?mn = mdbPrev dest_node" using p_src src np + by (clarsimp simp add: dest2_node_def) + thus ?thesis using p_src mmn other + by simp (drule n_dest_prev, clarsimp) + qed + next + case p_src_next + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = mdbPrev src_node" using p_src_next src dest np mp + by (clarsimp simp: n_dest) + thus ?thesis using p_src_next mmn src_dest + by simp (drule n_src_prev, clarsimp) + next + case dest_src + hence "?mn = dest" using p_src_next src np mp + by simp (drule n_src_next, simp) + thus ?thesis using p_src_next dest_src + by (simp add: n_dest) + next + case other + hence "?mn = dest" using p_src_next src np mp + by simp (drule n_src_next, simp) + thus ?thesis using p_src_next mmn other + by (simp add: n_dest) + qed + next + case p_dest_prev + + hence pdest [intro?]: "m \ p \ dest" using p0 + by (clarsimp intro!: dest_prev_next) + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = dest" using p_dest_prev src dest np n0 + using [[hypsubst_thin = true]] + apply clarsimp + apply (drule n_dest_prev) + apply (clarsimp simp: src_dest ) + done + thus ?thesis using p_dest_prev mmn src_dest + by (simp add: n_dest) + next + case dest_src + + hence "mdbNext src_node \ mdbPrev dest_node" using p_dest_prev p0 + by - (rule src_dest_pn', simp) + hence "?mn = mdbPrev (cteMDBNode ctep)" using p_dest_prev dest np mp p0 src dest_src + by simp (drule n_dest_prev, clarsimp) + thus ?thesis using p_dest_prev dest_src mmn n0 mp + apply simp + apply (subst n_other [OF other_dest_prev_dest_src]) + apply simp+ + apply (erule dlistEp [OF mp, simplified]) + apply simp + done + next + case other + + show ?thesis + proof (cases "mdbNext src_node = mdbPrev dest_node") + case True thus ?thesis using p_dest_prev mmn other np mp other + by simp (drule n_dest_prev, simp add: n_dest) + next + let ?mn' = "mdbPrev (cteMDBNode ctep)" + case False + hence mnmn: "?mn = ?mn'" using p_dest_prev src np mp p0 dest other + by simp (drule n_dest_prev, clarsimp) + + have mnp: "m \ ?mn' \ p" using mp mnmn n0 dlist + by (cases ctep, auto intro!: prev_leadstoI) + + note superFalse = False + + show ?thesis + proof (cases "?mn' = mdbNext src_node") + case True + thus ?thesis using mmn p_dest_prev superFalse n0 mp + by (simp add: mnmn) (frule n_src_next, auto elim: dlistEp simp: other [symmetric]) + next + case False + + have eq: "n ?mn' = m ?mn'" + proof (rule n_other) + show "?mn' \ mdbPrev src_node" using mp other p_dest_prev n0 mnmn + by (cases ctep, simp add: p_prev_qe_src) + + show "?mn' \ src" + proof + assume "?mn' = src" + hence "mdbNext src_node = mdbPrev dest_node" using mnp src p_dest_prev + by (simp add: next_unfold') + thus False using superFalse by simp + qed + + show "?mn' \ mdbNext src_node" by fact+ + + show "?mn' \ mdbPrev dest_node" using mp other p_dest_prev n0 mnmn + by 
(cases ctep, simp add: p_prev_qe) + + show "?mn' \ dest" using dest mnp p_dest_prev p0 + by (clarsimp simp add: next_unfold') + + show "?mn' \ mdbNext dest_node" + proof + assume a: "?mn' = mdbNext dest_node" + have "m \ ?mn' \ p" using mnp . + also have "m \ p \ dest" .. + also have "m \ dest \ mdbNext dest_node" .. + finally show False using a by simp + qed + qed + thus ?thesis using mnmn mmn mp p_dest_prev n0 + by - (erule dlistEp [where p = p], simp+) + qed + qed + qed + next + case p_dest + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = src" using p_dest dest src np + by (cases cte, clarsimp simp add: n_dest) + thus ?thesis using p_dest dest_src + by (simp add: dest2_node_next) + next + case src_dest + hence "?mn = mdbPrev src_node" using p_dest dest np + by (clarsimp simp: n_dest) + thus ?thesis using p_dest mmn src_dest + apply (simp add: n_src n_dest) + apply (drule n_src_prev) + apply clarsimp + done + next + case other + hence "?mn = mdbPrev src_node" using p_dest dest np + by (clarsimp simp add: n_dest) + thus ?thesis using p_dest mmn other + by simp (drule n_src_prev, clarsimp) + qed + next + case p_dest_next + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = mdbPrev dest_node" using p_dest_next dest src np mp + by (clarsimp simp: dest2_node_def) + thus ?thesis using p_dest_next mmn dest_src + by simp (drule n_dest_prev, clarsimp) + next + case src_dest + hence "?mn = src" using p_dest_next dest np mp + by simp (drule n_dest_next, simp) + thus ?thesis using p_dest_next src_dest + by (simp add: dest2_node_def) + next + case other + hence "?mn = src" using p_dest_next dest np mp + by simp (drule n_dest_next, simp) + thus ?thesis using p_dest_next mmn other + by (simp add: dest2_node_def) + qed + next + case p_other + hence eq: "n p = m p" by (rule n_other) + hence eq': "cte = ctep" using mp np by simp + + have mns: "?mn \ src" + proof + assume "?mn = src" + hence "p = mdbNext src_node" using mp mmn src eq' n0 + by (auto elim: dlistEp) + thus False using p_other by simp + qed + + have mnsn: "?mn \ mdbPrev src_node" + proof + assume "?mn = mdbPrev src_node" + hence "src = p" using mp eq' n0 + by (cases ctep, clarsimp dest!: p_prev_qe_src) + thus False using p_other by simp + qed + + have mnd: "?mn \ dest" + proof + assume "?mn = dest" + hence "p = mdbNext dest_node" using mp mmn dest eq' n0 + by (auto elim: dlistEp) + thus False using p_other by simp + qed + + have mndn: "?mn \ mdbPrev dest_node" + proof + assume "?mn = mdbPrev dest_node" + hence "dest = p" using mp eq' n0 + by (cases ctep, clarsimp dest!: p_prev_qe) + thus False using p_other by simp + qed + + from dd obtain cten where nmn: "n ?mn = Some cten" by auto + + have mnext: "mdbNext (cteMDBNode cte') = p" using mp mmn + by - (erule dlistEp, rule dom_into_not0 [OF no_0], (clarsimp simp: eq')+) + + show ?thesis + proof (cases rule: cases2 [of "?mn = mdbNext src_node" "?mn = mdbNext dest_node"]) + case pos_pos + thus ?thesis using n0 by simp + next + case pos_neg + thus ?thesis using mmn nmn mnd mndn + by simp (drule n_src_next, simp add: mnext eq' next_dest_prev_src_sym) + next + case neg_pos + thus ?thesis using mmn nmn mns mnsn + by simp (drule n_dest_next, simp add: mnext eq' annoying_simps) + next + case neg_neg + thus ?thesis using mmn nmn mns mnsn mnd mndn mnext + by (simp add: n_other) + qed + qed +next + fix p cte + assume np: "n p = Some cte" and n0: "mdbNext (cteMDBNode cte) \ 0" + let ?thesis = + "\cte'. 
n (mdbNext (cteMDBNode cte)) = Some cte' \ mdbPrev (cteMDBNode cte') = p" + let ?mn = "mdbNext (cteMDBNode cte)" + + note p_next_qe_src = mdb_ptr_src.p_next_qe + + note annoying_simps [simp del] + = next_dest_prev_src next_dest_prev_src_sym prev_dest_next_src prev_dest_next_src_sym + + from np have domn: "p \ dom n" by (rule domI) + then obtain ctep where mp: "m p = Some ctep" + by (clarsimp simp add: dom_n_m) + + from n0 have dd: "mdbNext (cteMDBNode cte) \ dom n" using np + apply - + apply (erule contrapos_pp) + apply (cases cte) + apply (drule ndom_is_0D [OF _ cteSwap_chain no_0_n, where ptr = p]) + apply simp+ + done + + then obtain cte' where mmn: "m (mdbNext (cteMDBNode cte)) = Some cte'" + by (clarsimp simp add: dom_n_m) + + have src_dest_pn: "\mdbNext dest_node \ 0; mdbNext src_node = dest \ + \ mdbPrev src_node \ mdbNext dest_node" + proof + assume "mdbPrev src_node = mdbNext dest_node" and "mdbNext dest_node \ 0" + and msd: "mdbNext src_node = dest" + hence "m \ mdbNext dest_node \ src" + by (auto dest!: src_prev intro: next_fold) + also have "m \ src \ dest" using src using msd next_fold by auto + also have "m \ dest \ mdbNext dest_node" .. + finally show False by simp + qed + + have src_dest_pn': "\ mdbNext src_node \ 0; mdbNext dest_node = src \ + \ mdbPrev dest_node \ mdbNext src_node" + proof + assume a: "mdbPrev dest_node = mdbNext src_node" and "mdbNext src_node \ 0" + and msd: "mdbNext dest_node = src" + hence a': "mdbPrev dest_node \ 0" by simp + have "m \ src \ mdbPrev dest_node" by (rule next_fold, rule src, simp add: a) + also have "m \ mdbPrev dest_node \ dest" using a' .. + also have "m \ dest \ src" using dest msd + by - (rule next_fold, simp+) + finally show False by simp + qed + + from domn have domm: "p \ dom m" by (simp add: dom_n_m) + with no_0 have p0: "p \ 0" + by (rule dom_into_not0) + + from np have npp: "n \ p \ mdbNext (cteMDBNode cte)" + by (simp add: next_fold) + hence swp: "m \ s_d_swp p \ s_d_swp (mdbNext (cteMDBNode cte))" + by (simp add: next_m_n) + + show ?thesis + proof (cases rule: swap_ptr_cases [where p = p]) + case p_src_prev + + hence p0': "mdbPrev src_node \ 0" using p0 by simp + hence stp: "m \ mdbPrev src_node \ src" .. 
+ + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = dest" using stp np mp p_src_prev + by (simp add: next_m_n s_d_swap_def next_unfold') (drule n_src_prev, clarsimp) + thus ?thesis using p_src_prev n_dest src_dest + by auto + next + case dest_src + hence "?mn = mdbNext src_node" using stp np mp p_src_prev + by (clarsimp simp add: next_m_n s_d_swap_def next_unfold' n_dest) + thus ?thesis using p_src_prev mmn dest_src + by simp (drule n_src_next, clarsimp) + next + case other + hence "?mn = dest" using stp np mp p_src_prev + by (clarsimp simp add: next_m_n s_d_swap_def next_unfold' annoying_simps + dest!: n_src_prev) + thus ?thesis using p_src_prev other + by (simp add: n_dest) + qed + next + case p_src + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = mdbNext dest_node" using p_src src np + by (cases cte, clarsimp simp add: dest2_node_def) + thus ?thesis using p_src mmn src_dest + by simp (drule n_dest_next, clarsimp) + next + case dest_src + hence "?mn = dest" using p_src src np + by (cases cte, clarsimp simp add: dest2_node_def) + thus ?thesis using p_src mmn dest_src + by (simp add: n_dest) + next + case other + hence "?mn = mdbNext dest_node" using p_src src np + by (cases cte, clarsimp simp add: dest2_node_def) + thus ?thesis using p_src mmn other + by simp (drule n_dest_next, clarsimp) + qed + next + case p_src_next + + show ?thesis + proof (cases rule: swap_cases) + case src_dest + hence "?mn = src" using p_src_next dest np + by (cases cte, clarsimp simp: n_dest) + thus ?thesis using p_src_next mmn src_dest + by (simp add: dest2_node_def) + next + case dest_src + + hence "mdbPrev dest_node \ mdbNext src_node" using p_src_next p0 + by - (rule src_dest_pn', simp+) + hence "?mn = mdbNext (cteMDBNode ctep)" using p_src_next src np mp p0 dest dest_src + by simp (drule n_src_next, clarsimp) + thus ?thesis using p_src_next dest_src mmn n0 mp + apply simp + apply (subst n_other [OF other_src_next_dest_src]) + apply simp+ + apply (erule dlistEn [OF mp, simplified]) + apply simp + done + next + case other + + show ?thesis + proof (cases "mdbNext src_node = mdbPrev dest_node") + case True thus ?thesis using p_src_next mmn other np mp other + by simp (drule n_dest_prev, simp add: dest2_node_prev split: if_split_asm) + next + let ?mn' = "mdbNext (cteMDBNode ctep)" + case False + hence mnmn: "?mn = ?mn'" using p_src_next src np mp p0 dest other + by simp (drule n_src_next, clarsimp) + + note superFalse = False + + show ?thesis + proof (cases "?mn' = mdbPrev dest_node") + case True + thus ?thesis using mmn p_src_next superFalse n0 mp + by (simp add: mnmn) (frule n_dest_prev, auto elim: dlistEn) + next + case False + + have eq: "n ?mn' = m ?mn'" + proof (rule n_other) + have "m \ src \ mdbNext src_node" .. + hence sp [intro?]: "m \ src \ p" by (simp add: p_src_next) + also have mmn'[intro?]: "m \ p \ ?mn'" using mp by (simp add: next_unfold') + finally have smn [intro?]: "m \ src \\<^sup>+ ?mn'" . + (* Sigh *) + + show "?mn' \ mdbPrev src_node" + proof + assume a: "?mn' = mdbPrev src_node" + also have "mdbPrev src_node \ 0" using mmn + by - (rule dom_into_not0 [OF no_0 domI], simp add: a [symmetric] mnmn) + hence "m \ mdbPrev src_node \ src" .. + also have "m \ src \\<^sup>+ ?mn'" .. + finally show False by simp + qed + + show "?mn' \ src" using smn + by clarsimp + + show "?mn' \ mdbNext src_node" + proof + assume "?mn' = mdbNext src_node" + also have "mdbNext src_node = p" by (simp add: p_src_next) + also have "m \ p \ ?mn'" .. 
+ finally show False by simp + qed + + show "?mn' \ mdbPrev dest_node" by fact+ + show "?mn' \ dest" using src mp p_src_next mnmn swp + by (clarsimp simp add: next_unfold' s_d_swap_def split: if_split_asm) + show "?mn' \ mdbNext dest_node" using mnmn mp p_src_next swp False superFalse other n0 + by (cases ctep, clarsimp simp add: next_unfold' s_d_swap_def dest!: p_next_eq) + qed + thus ?thesis using mnmn mmn mp p_src_next n0 + by - (erule dlistEn [where p = p], simp+) + qed + qed + qed + next + case p_dest_prev + hence p0': "mdbPrev dest_node \ 0" using p0 by simp + hence stp: "m \ mdbPrev dest_node \ dest" .. + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = src" using stp np mp p_dest_prev + by (simp add: next_m_n s_d_swap_def next_unfold') (drule n_dest_prev, clarsimp) + thus ?thesis using p_dest_prev dest_src + by (simp add: n_src dest2_node_prev) + next + case src_dest + hence "?mn = mdbNext dest_node" using stp np mp p_dest_prev + by (simp add: annoying_simps) (drule n_dest_prev, clarsimp) + thus ?thesis using p_dest_prev mmn src_dest + by simp (drule n_dest_next, clarsimp) + next + case other + hence "?mn = src" using stp np mp p_dest_prev + by simp (drule n_dest_prev, simp) + thus ?thesis using p_dest_prev other + by (simp add: n_src dest2_node_prev) + qed + next + case p_dest + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = mdbNext src_node" using p_dest dest src np + by (cases cte, clarsimp simp add: n_dest) + thus ?thesis using p_dest mmn dest_src + by simp (drule n_src_next, clarsimp) + next + case src_dest + hence "?mn = src" using p_dest dest np + by (cases cte, clarsimp simp add: n_dest) + thus ?thesis using p_dest mmn src_dest + by (simp add: n_src dest2_node_prev) + next + case other + hence "?mn = mdbNext src_node" using p_dest dest np + by (cases cte, clarsimp simp add: n_dest) + thus ?thesis using p_dest mmn other + by simp (drule n_src_next, clarsimp) + qed + next + case p_dest_next + + show ?thesis + proof (cases rule: swap_cases) + case dest_src + hence "?mn = dest" using p_dest_next src np + by (cases cte, clarsimp simp: n_src dest2_node_def) + thus ?thesis using p_dest_next mmn dest_src + by (simp add: dest2_node_def n_dest) + next + case src_dest + + hence "mdbPrev src_node \ mdbNext dest_node" using p_dest_next p0 + by - (rule src_dest_pn, simp+) + hence "?mn = mdbNext (cteMDBNode ctep)" using p_dest_next dest np mp p0 src src_dest + by simp (drule n_dest_next, clarsimp) + thus ?thesis using p_dest_next src_dest mmn n0 mp + apply simp + apply (subst n_other [OF other_dest_next_src_dest]) + apply simp+ + apply (erule dlistEn [OF mp, simplified]) + apply simp + done + next + case other + + show ?thesis + proof (cases "mdbNext dest_node = mdbPrev src_node") + case True thus ?thesis using p_dest_next mmn other np mp other + by simp (drule n_src_prev, simp add: dest2_node_prev n_dest ) + next + let ?mn' = "mdbNext (cteMDBNode ctep)" + case False + hence mnmn: "?mn = ?mn'" using p_dest_next src np mp p0 dest other + by simp (drule n_dest_next, clarsimp) + + note superFalse = False + + show ?thesis + proof (cases "?mn' = mdbPrev src_node") + case True + thus ?thesis using mmn p_dest_next superFalse n0 mp + by (simp add: mnmn) (frule n_src_prev, auto elim: dlistEn) + next + case False + + have eq: "n ?mn' = m ?mn'" + proof (rule n_other) + have "m \ dest \ mdbNext dest_node" .. 
+ hence sp [intro?]: "m \ dest \ p" by (simp add: p_dest_next) + also have mmn'[intro?]: "m \ p \ ?mn'" using mp by (simp add: next_unfold') + finally have smn [intro?]: "m \ dest \\<^sup>+ ?mn'" . + (* Sigh *) + + show "?mn' \ mdbPrev dest_node" + proof + assume a: "?mn' = mdbPrev dest_node" + also have "mdbPrev dest_node \ 0" using mmn + by - (rule dom_into_not0 [OF no_0 domI], simp add: a [symmetric] mnmn) + hence "m \ mdbPrev dest_node \ dest" .. + also have "m \ dest \\<^sup>+ ?mn'" .. + finally show False by simp + qed + + show "?mn' \ dest" using smn + by clarsimp + + show "?mn' \ mdbNext dest_node" + proof + assume "?mn' = mdbNext dest_node" + also have "mdbNext dest_node = p" by (simp add: p_dest_next) + also have "m \ p \ ?mn'" .. + finally show False by simp + qed + + show "?mn' \ mdbPrev src_node" by fact+ + show "?mn' \ src" using dest mp p_dest_next mnmn swp + by (clarsimp simp add: next_unfold' s_d_swap_def split: if_split_asm) + show "?mn' \ mdbNext src_node" using mnmn mp p_dest_next swp False superFalse other n0 + by (cases ctep, clarsimp simp add: next_unfold' s_d_swap_def + dest!: p_next_qe_src) + qed + thus ?thesis using mnmn mmn mp p_dest_next n0 + by - (erule dlistEn [where p = p], simp+) + qed + qed + qed + next + case p_other + hence eq: "n p = m p" by (rule n_other) + hence eq': "cte = ctep" using mp np by simp + + have mns: "?mn \ src" + proof + assume "?mn = src" + hence "p = mdbPrev src_node" using mp mmn src eq' n0 + by (auto elim: dlistEn) + thus False using p_other by simp + qed + + have mnsn: "?mn \ mdbNext src_node" + proof + assume "?mn = mdbNext src_node" + hence "src = p" using mp eq' n0 + by (cases ctep, clarsimp dest!: p_next_qe_src) + thus False using p_other by simp + qed + + have mnd: "?mn \ dest" + proof + assume "?mn = dest" + hence "p = mdbPrev dest_node" using mp mmn dest eq' n0 + by (auto elim: dlistEn) + thus False using p_other by simp + qed + + have mndn: "?mn \ mdbNext dest_node" + proof + assume "?mn = mdbNext dest_node" + hence "dest = p" using mp eq' n0 + by (cases ctep, clarsimp dest!: p_next_qe) + thus False using p_other by simp + qed + + from dd obtain cten where nmn: "n ?mn = Some cten" by auto + + have mprev: "mdbPrev (cteMDBNode cte') = p" using mp mmn + by - (erule dlistEn, rule dom_into_not0 [OF no_0], (clarsimp simp: eq')+) + + show ?thesis + proof (cases rule: cases2 [of "?mn = mdbPrev src_node" "?mn = mdbPrev dest_node"]) + case pos_pos + thus ?thesis using n0 by simp + next + case pos_neg + thus ?thesis using mmn nmn mnd mndn + by simp (drule n_src_prev, simp add: mprev eq' next_dest_prev_src_sym) + next + case neg_pos + thus ?thesis using mmn nmn mns mnsn + by simp (drule n_dest_prev, simp add: mprev eq' annoying_simps) + next + case neg_neg + thus ?thesis using mmn nmn mns mnsn mnd mndn mprev + by (simp add: n_other) + qed + qed +qed + +lemma sameRegionAs_eq_child: + "\ sameRegionAs cap c; weak_derived' c c' \ + \ sameRegionAs cap c'" + by (clarsimp simp: weak_derived'_def sameRegionAs_def2) + +lemma sameRegionAs_eq_parent: + "\ sameRegionAs c cap; weak_derived' c c' \ + \ sameRegionAs c' cap" + by (clarsimp simp: weak_derived'_def sameRegionAs_def2) + +context mdb_swap +begin + +lemma sameRegionAs_dcap_parent: + "sameRegionAs dcap cap = sameRegionAs dest_cap cap" + apply (rule iffI) + apply (erule sameRegionAs_eq_parent, rule weak_derived_sym', rule dest_derived) + apply (erule sameRegionAs_eq_parent, rule dest_derived) + done + +lemma sameRegionAs_dcap_child: + "sameRegionAs cap dcap = sameRegionAs cap dest_cap" + apply 
(rule iffI)
+   apply (erule sameRegionAs_eq_child, rule weak_derived_sym', rule dest_derived)
+  apply (erule sameRegionAs_eq_child, rule dest_derived)
+  done
+
+lemma sameRegionAs_scap_parent:
+  "sameRegionAs scap cap = sameRegionAs src_cap cap"
+  apply (rule iffI)
+   apply (erule sameRegionAs_eq_parent, rule weak_derived_sym', rule src_derived)
+  apply (erule sameRegionAs_eq_parent, rule src_derived)
+  done
+
+lemma sameRegionAs_scap_child:
+  "sameRegionAs cap scap = sameRegionAs cap src_cap"
+  apply (rule iffI)
+   apply (erule sameRegionAs_eq_child, rule weak_derived_sym', rule src_derived)
+  apply (erule sameRegionAs_eq_child, rule src_derived)
+  done
+
+lemmas region_simps =
+  sameRegionAs_scap_child sameRegionAs_scap_parent
+  sameRegionAs_dcap_child sameRegionAs_dcap_parent
+
+lemma master_srcI:
+  "\<lbrakk> \<And>cap. F (capMasterCap cap) = F cap \<rbrakk>
+     \<Longrightarrow> F scap = F src_cap"
+  using src_derived
+  by (clarsimp simp: weak_derived'_def elim!: master_eqI)
+
+lemma isEPsrc:
+  "isEndpointCap scap = isEndpointCap src_cap"
+  by (rule master_srcI, rule isCap_Master)
+
+lemma isEPbadge_src:
+  "isEndpointCap src_cap \<Longrightarrow> capEPBadge scap = capEPBadge src_cap"
+  using src_derived
+  by (clarsimp simp: isCap_simps weak_derived'_def)
+
+lemma isNTFNsrc:
+  "isNotificationCap scap = isNotificationCap src_cap"
+  by (rule master_srcI, rule isCap_Master)
+
+lemma isNTFNbadge_src:
+  "isNotificationCap src_cap \<Longrightarrow> capNtfnBadge scap = capNtfnBadge src_cap"
+  using src_derived
+  by (clarsimp simp: isCap_simps weak_derived'_def)
+
+lemma isEPdest:
+  "isEndpointCap dcap = isEndpointCap dest_cap"
+  using dest_derived by (fastforce simp: isCap_simps weak_derived'_def)
+
+lemma isEPbadge_dest:
+  "isEndpointCap dest_cap \<Longrightarrow> capEPBadge dcap = capEPBadge dest_cap"
+  using dest_derived by (auto simp: weak_derived'_def isCap_simps)
+
+lemma isNTFNdest:
+  "isNotificationCap dcap = isNotificationCap dest_cap"
+  using dest_derived by (auto simp: weak_derived'_def isCap_simps)
+
+lemma isNTFNbadge_dest:
+  "isNotificationCap dest_cap \<Longrightarrow> capNtfnBadge dcap = capNtfnBadge dest_cap"
+  using dest_derived by (auto simp: weak_derived'_def isCap_simps)
+
+lemmas ep_simps =
+  isEPsrc isEPbadge_src isNTFNsrc isNTFNbadge_src
+  isEPdest isEPbadge_dest isNTFNdest isNTFNbadge_dest
+
+end
+
+lemma sameRegion_ep:
+  "\<lbrakk> sameRegionAs cap cap'; isEndpointCap cap \<rbrakk> \<Longrightarrow> isEndpointCap cap'"
+  by (auto simp: isCap_simps sameRegionAs_def3)
+
+lemma sameRegion_ntfn:
+  "\<lbrakk> sameRegionAs cap cap'; isNotificationCap cap \<rbrakk> \<Longrightarrow> isNotificationCap cap'"
+  by (auto simp: isCap_simps sameRegionAs_def3)
+
+lemma (in mdb_swap) cteSwap_valid_badges:
+  "valid_badges n"
+proof -
+  from valid
+  have "valid_badges m" ..
+ thus ?thesis using src dest + apply (clarsimp simp add: valid_badges_def next_m_n2) + apply (frule_tac p=p in n_cap) + apply (frule_tac p=p' in n_cap) + apply (drule badge_n)+ + apply (clarsimp simp: s_d_swap_def sameRegion_ntfn sameRegion_ep + ep_simps region_simps + split: if_split_asm) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + done +qed + +lemma (in mdb_swap) m_trancl: + assumes "m \ p \\<^sup>+ p'" + shows "n \ s_d_swp p \\<^sup>+ s_d_swp p'" + using assms +proof induct + case (base x) + thus ?case by (fastforce simp: next_m_n) +next + case (step x y) + thus ?case by (fastforce simp: next_m_n elim: trancl_trans) +qed + +lemma (in mdb_swap) n_trancl: + "n \ p \\<^sup>+ p' = m \ s_d_swp p \\<^sup>+ s_d_swp p'" +proof + assume "n \ p \\<^sup>+ p'" + thus "m \ s_d_swp p \\<^sup>+ s_d_swp p'" + by induct (auto simp: next_m_n2 elim!: trancl_trans) +next + assume "m \ s_d_swp p \\<^sup>+ s_d_swp p'" + thus "n \ p \\<^sup>+ p'" + by (fastforce dest: m_trancl) +qed + +lemma (in mdb_swap) n_rtrancl: + "n \ p \\<^sup>* p' = m \ s_d_swp p \\<^sup>* s_d_swp p'" + by (simp add: rtrancl_eq_or_trancl n_trancl) + +lemma (in mdb_swap) n_cap_eq': + "(\n'. n p = Some (CTE cap n')) = + (if p = src + then cap = dcap + else if p = dest + then cap = scap + else \n'. m p = Some (CTE cap n'))" + using src dest + apply simp + apply (rule conjI, clarsimp) + apply (rule iffI) + apply (fastforce dest: n_cap) + subgoal by (simp add: n_def modify_map_if dest2_node_def n'_def, auto) + apply clarsimp + apply (rule conjI, fastforce) + apply clarsimp + apply (rule iffI) + apply (fastforce dest: n_cap) + apply (simp add: n_def modify_map_cases n'_def) + apply (simp add: dest2_node_def) + apply auto[1] + apply (cases "mdbNext dest_node = 0") + apply (cases "mdbNext src_node = 0") + apply simp + apply simp + apply (cases "mdbPrev dest_node = mdbNext src_node") + apply simp + apply simp + apply simp + apply (cases "mdbPrev dest_node = mdbNext src_node") + apply simp + apply simp + apply (cases "mdbNext dest_node = p") + apply simp + apply fastforce + apply simp + apply (cases "mdbPrev dest_node = p") + apply simp + apply simp + apply (cases "mdbNext dest_node = p") + apply simp + apply (cases "mdbPrev dest_node = p") + apply simp + apply fastforce + apply simp + apply (cases "mdbPrev src_node = p", simp) + apply simp + apply simp + apply (cases "mdbPrev dest_node = p", simp) + apply fastforce + apply simp + apply (cases "mdbPrev src_node = p", simp) + apply simp + apply (cases "mdbNext dest_node = p") + apply simp + apply (cases "mdbPrev dest_node = p") + apply simp + apply fastforce + apply simp + apply (cases "mdbPrev src_node = p", simp) + apply simp + apply simp + apply (cases "mdbPrev dest_node = p", simp) + apply fastforce + apply simp + apply (cases "mdbPrev src_node = p", simp) + apply simp + done + +lemma (in mdb_swap) n_cap_eq: + "(\n'. n p = Some (CTE cap n')) = + (\n'. if p = src then m (s_d_swp p) = Some (CTE dest_cap n') \ cap = dcap + else if p = dest then m (s_d_swp p) = Some (CTE src_cap n') \ cap = scap + else m (s_d_swp p) = Some (CTE cap n'))" + apply (simp add: s_d_swp_def n_cap_eq' src dest) + apply (auto simp: s_d_swap_def) + done + +lemma (in mdb_swap) cteSwap_chunked: + "mdb_chunked n" +proof - + from valid + have "mdb_chunked m" .. 
+ thus ?thesis + apply (clarsimp simp add: mdb_chunked_def is_chunk_def n_trancl n_rtrancl n_cap_eq) + apply (case_tac "p = dest") + apply simp + apply (case_tac "p' = src") + apply (clarsimp simp add: region_simps) + apply (erule_tac x=src in allE) + apply (erule_tac x=dest in allE) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (case_tac "p'' = dest", simp) + apply simp + apply (case_tac "p'' = src") + apply (clarsimp simp: region_simps) + apply simp + apply clarsimp + apply (drule (1) trancl_trans) + apply simp + apply simp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans) + apply simp + apply clarsimp + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (case_tac "p'' = dest") + apply (clarsimp simp: region_simps) + apply simp + apply (case_tac "p'' = src", simp) + apply simp + apply (clarsimp simp: region_simps) + apply (erule_tac x=src in allE) + apply clarsimp + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply clarsimp + apply (erule impE) + apply (clarsimp simp: s_d_swap_def) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest", simp) + apply clarsimp + apply (case_tac "p''=src") + apply (clarsimp simp: dest) + apply (clarsimp simp: region_simps) + apply (erule_tac x=dest in allE) + apply (clarsimp simp: dest) + apply clarsimp + apply clarsimp + apply (drule (1) trancl_trans, simp) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, simp) + apply clarsimp + apply (case_tac "p''=dest") + apply (clarsimp simp: region_simps) + apply (erule_tac x=src in allE) + apply clarsimp + apply clarsimp + apply (case_tac "p''=src") + apply (simp add: dest region_simps) + apply (erule_tac x=dest in allE) + apply (clarsimp simp: dest) + apply simp + apply clarsimp + apply (case_tac "p'=dest") + apply clarsimp + apply (case_tac "p=src") + apply (clarsimp simp: region_simps) + apply (erule_tac x=dest in allE) + apply (erule_tac x=src in allE) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: region_simps) + apply simp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply simp + apply clarsimp + apply (drule (1) trancl_trans) + apply simp + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans) + apply simp + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: region_simps) + apply simp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (erule_tac x="dest" in allE) + apply simp + apply simp + apply clarsimp + apply (erule_tac x="s_d_swap p src dest" in allE) + apply (erule_tac x="src" in allE) + apply (clarsimp simp: region_simps) + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: region_simps) + apply (case_tac "p''=src") + apply (simp add: region_simps dest) + apply (erule_tac x=dest in allE) + apply (clarsimp simp: dest) + apply simp + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: region_simps) + apply (case_tac "p''=src") + apply (simp add: region_simps dest) + apply (erule_tac x=dest in allE) + apply (clarsimp simp: dest) + apply simp + apply clarsimp + apply (case_tac "p'=src") + apply clarsimp + apply (erule_tac x="s_d_swap p src dest" in allE) + apply (erule_tac x=dest in allE) + apply 
(clarsimp simp: region_simps) + apply (erule impE) + apply (clarsimp simp: s_d_swap_def) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply clarsimp + apply clarsimp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply clarsimp + apply clarsimp + apply (case_tac "p=src") + apply clarsimp + apply (erule_tac x="dest" in allE) + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply (clarsimp simp: region_simps) + apply (erule impE) + apply (clarsimp simp: s_d_swap_def) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply simp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply simp + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply simp + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (erule_tac x=dest in allE) + apply (simp add: dest) + apply simp + apply clarsimp + apply (erule_tac x="s_d_swap p src dest" in allE) + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (erule_tac x=dest in allE) + apply (simp add: dest) + apply simp + apply clarsimp + apply (case_tac "p''=dest") + apply (simp add: src region_simps) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply (case_tac "p''=src") + apply (simp add: region_simps) + apply (erule_tac x=dest in allE) + apply (simp add: dest) + apply simp + done +qed + +(* FIXME: make this a locale from the start *) +locale weak_der' = + fixes old new + assumes derived: "weak_derived' new old" +begin + +lemma isUntyped_new: + "isUntypedCap new = isUntypedCap old" + using derived by (auto simp: weak_derived'_def isCap_simps) + +lemma capRange_new: + "capRange new = capRange old" + using derived + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule capRange_Master) + apply simp + done + +lemma untypedRange_new: + "untypedRange new = untypedRange old" + using derived + apply (clarsimp simp add: weak_derived'_def) + apply (rule master_eqI, rule untypedRange_Master) + apply simp + done + +lemmas range_simps [simp] = + isUntyped_new capRange_new untypedRange_new + +lemma isReplyMaster_eq: + "(isReplyCap new \ capReplyMaster new) + = (isReplyCap old \ capReplyMaster old)" + using derived + by (fastforce simp: weak_derived'_def isCap_simps) + +end + +lemma master_eqE: + "\ capMasterCap cap = capMasterCap cap'; + \cap. 
F (capMasterCap cap) = F cap \ + \ F cap = F cap'" + by (rule master_eqI, assumption, simp) + +lemma weak_derived_Null' [simp]: + "weak_derived' cap NullCap = (cap = NullCap)" + by (auto simp add: weak_derived'_def) + +lemma Null_weak_derived_Null' [simp]: + "weak_derived' NullCap cap = (cap = NullCap)" + by (auto simp add: weak_derived'_def) + + + +lemma distinct_zombies_switchE: + "\ distinct_zombies m; m x = Some old_x; m y = Some old_y; + capMasterCap (cteCap old_x) = capMasterCap (cteCap new_y); + capMasterCap (cteCap old_y) = capMasterCap (cteCap new_x) \ + \ distinct_zombies (m(x \ new_x, y \ new_y))" + apply (cases "x = y") + apply clarsimp + apply (erule(1) distinct_zombies_sameMasterE) + apply simp + apply (drule_tac F="\cap. (isUntypedCap cap, isZombie cap, isArchFrameCap cap, + capClass cap, capUntypedPtr cap, capBits cap)" + in master_eqE, + simp add: isCap_Master capClass_Master capUntyped_Master capBits_Master)+ + apply (simp add: distinct_zombies_def distinct_zombie_caps_def + split del: if_split) + apply (intro allI) + apply (drule_tac x="(id (x := y, y := x)) ptr" in spec) + apply (drule_tac x="(id (x := y, y := x)) ptr'" in spec) + apply (clarsimp split del: if_split) + apply (clarsimp simp: isCap_Master + capBits_Master + capClass_Master + capUntyped_Master + split: if_split_asm ) + done + +context mdb_swap +begin + +lemma weak_der_src: + "weak_der' src_cap scap" + apply unfold_locales + apply (rule weak_derived_sym') + apply (rule src_derived) + done + +lemma weak_der_dest: + "weak_der' dest_cap dcap" + apply unfold_locales + apply (rule weak_derived_sym') + apply (rule dest_derived) + done + +lemmas src_range_simps [simp] = weak_der'.range_simps [OF weak_der_src] +lemmas dest_range_simps [simp] = weak_der'.range_simps [OF weak_der_dest] + +lemma caps_contained: + "caps_contained' n" + using valid + apply (clarsimp simp: valid_mdb_ctes_def caps_contained'_def) + apply (drule n_cap)+ + apply (simp split: if_split_asm) + apply (clarsimp dest!: capRange_untyped) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + apply (clarsimp dest!: capRange_untyped) + apply fastforce + apply fastforce + apply fastforce + done + +lemma untyped_mdb_n: + "untyped_mdb' n" + using untyped_mdb + apply (simp add: n_cap_eq untyped_mdb'_def descendants_of'_def parency) + apply clarsimp + apply (case_tac "p=dest") + apply clarsimp + apply (case_tac "p'=dest", simp) + apply (case_tac "p'=src", simp) + apply clarsimp + apply clarsimp + apply (case_tac "p'=dest") + apply clarsimp + apply (case_tac "p=src", simp) + apply clarsimp + apply clarsimp + apply (case_tac "p=src") + apply clarsimp + apply (case_tac "p'=src",simp) + apply clarsimp + apply clarsimp + apply (case_tac "p'=src",simp) + apply clarsimp + done + + +lemma untyped_inc_n: + assumes untyped_eq: "isUntypedCap src_cap \ scap = src_cap" + "isUntypedCap dest_cap \ dcap = dest_cap" + shows "untyped_inc' n" + using untyped_inc + apply (simp add: n_cap_eq untyped_inc'_def descendants_of'_def parency) + apply clarsimp + apply (erule_tac x="s_d_swap p src dest" in allE) + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply (case_tac "p=dest") + apply simp + apply (case_tac "p'=src", simp) + apply (clarsimp simp:untyped_eq) + apply (case_tac "p'=dest", simp) + apply (clarsimp simp: s_d_swap_def untyped_eq) + apply clarsimp + apply (case_tac "p=src") + apply clarsimp + apply (case_tac "p'=dest", simp) + apply (clarsimp simp:untyped_eq) + apply (case_tac "p'=src", simp) + apply (clarsimp simp:untyped_eq) + apply 
clarsimp + apply (case_tac "p'=src") + apply (clarsimp simp:untyped_eq) + apply simp + apply (case_tac "p'=dest", clarsimp simp:untyped_eq) + apply (clarsimp simp:untyped_eq) + done + +lemma n_next: + "n p = Some cte \ \z. m (s_d_swp p) = Some z \ s_d_swp (mdbNext (cteMDBNode cte)) = mdbNext (cteMDBNode z)" + apply (drule conjI [THEN exI [THEN next_m_n2 [THEN iffD1, unfolded mdb_next_unfold]]]) + apply (rule refl) + apply assumption + done + +lemma n_prevD: + notes if_cong [cong] option.case_cong [cong] + shows "n \ p \ p' \ m \ s_d_swp p \ s_d_swp p'" + apply (cases "p'=0") + apply (simp add: mdb_prev_def) + apply (cases "p=0") + apply (clarsimp simp: mdb_prev_def s_d_swap_def) + apply (rule conjI) + apply clarsimp + apply (simp add: n_dest) + apply (case_tac z) + apply (clarsimp simp: src split: if_split_asm) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: dest) + apply (simp add: dest2_node_def split: if_split_asm) + apply clarsimp + apply (case_tac z) + apply clarsimp + apply (simp add: n_def n'_def modify_map_if dest2_node_def) + apply (insert src dest)[1] + apply (clarsimp split: if_split_asm) + apply (simp add: Invariants_H.valid_dlist_prevD [OF cteSwap_dlist_helper, symmetric]) + apply (simp add: Invariants_H.valid_dlist_prevD [OF dlist, symmetric] next_m_n2) + done + +lemma n_prev: + "n p = Some cte \ \z. m (s_d_swp p) = Some z \ s_d_swp (mdbPrev (cteMDBNode cte)) = mdbPrev (cteMDBNode z)" + apply (drule conjI [THEN exI [THEN n_prevD [unfolded mdb_prev_def]]]) + apply (rule refl) + apply assumption + done + +lemma nullcaps_n: "valid_nullcaps n" +proof - + from valid have "valid_nullcaps m" .. + thus ?thesis using dest_derived src_derived + apply (clarsimp simp: valid_nullcaps_def) + apply (frule n_cap) + apply (frule revokable) + apply (frule badge_n) + apply (frule n_prev) + apply (drule n_next) + apply (insert src dest) + apply (frule_tac x=src in spec) + apply (frule_tac x=dest in spec) + apply (erule_tac x=p in allE) + apply simp + apply (case_tac n) + apply (clarsimp simp: s_d_swap_def nullMDBNode_def AARCH64_H.nullPointer_def split: if_split_asm) + done +qed + +lemma ut_rev_n: "ut_revocable' n" +proof - + from valid have "ut_revocable' m" .. 
+ thus ?thesis using dest_derived src_derived src dest + apply (clarsimp simp: ut_revocable'_def) + + apply (frule n_cap) + apply (frule revokable) + by (auto simp: weak_derived'_def dest2_node_def + split: if_split_asm) +qed + +lemma scap_class[simp]: + "capClass scap = capClass src_cap" + using src_derived + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule capClass_Master) + apply simp + done + +lemma dcap_class[simp]: + "capClass dcap = capClass dest_cap" + using dest_derived + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule capClass_Master) + apply simp + done + +lemma class_links_n: "class_links n" +proof - + from valid have "class_links m" + by (simp add: valid_mdb_ctes_def) + thus ?thesis + apply (clarsimp simp: class_links_def) + apply (case_tac cte, case_tac cte', clarsimp) + apply (drule n_cap)+ + apply (simp add: imp_conjL[symmetric]) + apply (subst(asm) conj_commute) + apply (simp add: imp_conjL) + apply (simp add: imp_conjL[symmetric]) + apply (subst(asm) conj_commute) + apply (simp add: imp_conjL next_m_n2) + apply (elim allE, drule(1) mp) + apply (auto simp: s_d_swap_def src dest + split: if_split_asm) + done +qed + +lemma irq_control_n: "irq_control n" + using src dest dest_derived src_derived + apply (clarsimp simp: irq_control_def) + apply (frule revokable) + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: weak_derived'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (clarsimp simp: weak_derived'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (clarsimp simp: weak_derived'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (drule (1) irq_controlD, rule irq_control) + apply clarsimp + apply (drule (1) irq_controlD, rule irq_control) + apply clarsimp + done + +lemma distinct_zombies_m: + "distinct_zombies m" + using valid by auto + +lemma distinct_zombies_n: + "distinct_zombies n" + using distinct_zombies_m + apply (simp add: n_def distinct_zombies_nonCTE_modify_map) + apply (simp add: n'_def distinct_zombies_nonCTE_modify_map) + apply (simp add: modify_map_apply src dest) + apply (erule distinct_zombies_switchE, rule src, rule dest) + apply (cut_tac weak_der_src) + apply (clarsimp simp: weak_der'_def weak_derived'_def) + apply (cut_tac weak_der_dest) + apply (clarsimp simp: weak_der'_def weak_derived'_def) + done + +lemma reply_masters_rvk_fb_m: + "reply_masters_rvk_fb m" + using valid by auto + +lemma reply_masters_rvk_fb_n: + "reply_masters_rvk_fb n" + using reply_masters_rvk_fb_m + weak_der'.isReplyMaster_eq[OF weak_der_src] + weak_der'.isReplyMaster_eq[OF weak_der_dest] + apply (simp add: reply_masters_rvk_fb_def) + apply (frule bspec, rule ranI, rule m_p) + apply (frule bspec, rule ranI, rule mdb_ptr_src.m_p) + apply (clarsimp simp: ball_ran_eq) + apply (case_tac cte, clarsimp) + apply (frule n_cap, frule revokable, frule badge_n) + apply (simp split: if_split_asm) + apply clarsimp + apply (elim allE, 
drule(1) mp) + apply simp + done + +lemma cteSwap_valid_mdb_helper: + assumes untyped_eq: "isUntypedCap src_cap \ scap = src_cap" + "isUntypedCap dest_cap \ dcap = dest_cap" + shows "valid_mdb_ctes n" + using cteSwap_chain cteSwap_dlist_helper cteSwap_valid_badges + cteSwap_chunked caps_contained untyped_mdb_n untyped_inc_n + nullcaps_n ut_rev_n class_links_n irq_control_n + distinct_zombies_n reply_masters_rvk_fb_n + by (auto simp:untyped_eq) + +end + +lemma cteSwap_ifunsafe'[wp]: + "\if_unsafe_then_cap' and ex_cte_cap_to' c1 and ex_cte_cap_to' c2 + and cte_wp_at' (\cte. cte_refs' (cteCap cte) = cte_refs' c) c1 + and cte_wp_at' (\cte. cte_refs' (cteCap cte) = cte_refs' c') c2\ + cteSwap c c1 c' c2 + \\rv. if_unsafe_then_cap'\" + apply (simp add: ifunsafe'_def3 cteSwap_def) + apply (wp | simp add: o_def | rule getCTE_wp)+ + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (subgoal_tac "ex_cte_cap_to' cref s") + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="(id (c1 := c2, c2 := c1)) crefc" in exI) + apply (clarsimp simp: modify_map_def) + apply fastforce + apply (clarsimp dest!: modify_map_K_D + split: if_split_asm) + apply (drule_tac x=cref in spec) + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply fastforce + done + +lemma cteSwap_iflive'[wp]: + "\if_live_then_nonz_cap' + and cte_wp_at' (\cte. zobj_refs' (cteCap cte) = zobj_refs' c) c1 + and cte_wp_at' (\cte. zobj_refs' (cteCap cte) = zobj_refs' c') c2\ + cteSwap c c1 c' c2 + \\rv. if_live_then_nonz_cap'\" + apply (simp add: cteSwap_def) + apply (wp | simp)+ + apply (rule hoare_post_imp, + simp only: if_live_then_nonz_cap'_def imp_conv_disj + ex_nonz_cap_to'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) if_live_then_nonz_capE') + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="(id (c1 := c2, c2 := c1)) cref" in exI) + apply auto + done + +lemmas tcbSlots = + tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def tcbCallerSlot_def tcbIPCBufferSlot_def + +lemma cteSwap_valid_pspace'[wp]: + "\valid_pspace' and + cte_wp_at' (weak_derived' c o cteCap) c1 and + cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c) c1 and + cte_wp_at' (weak_derived' c' o cteCap) c2 and + cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c') c2 and + valid_cap' c and valid_cap' c' and + K (c1 \ c2)\ + cteSwap c c1 c' c2 + \\rv. valid_pspace'\" + unfolding cteSwap_def + apply (simp add: pred_conj_def valid_pspace'_def valid_mdb'_def) + apply (rule hoare_pre) + apply wp + apply (wp getCTE_inv getCTE_wp) + apply (strengthen imp_consequent, strengthen ctes_of_strng) + apply ((wp sch_act_wf_lift valid_queues_lift + cur_tcb_lift updateCap_no_0 updateCap_ctes_of_wp + hoare_vcg_ex_lift getCTE_wp + | simp add: cte_wp_at_ctes_ofI o_def + | rule hoare_drop_imps)+)[6] + apply (clarsimp simp: valid_pspace_no_0[unfolded valid_pspace'_def valid_mdb'_def] + cte_wp_at_ctes_of) + apply (subgoal_tac "c2 \ dom (modify_map + (modify_map + (modify_map + (modify_map (ctes_of s) c1 (cteCap_update (%_. c'))) c2 + (cteCap_update (%_. c))) + (mdbPrev (cteMDBNode cte)) + (cteMDBNode_update (mdbNext_update (%_. c2)))) + (mdbNext (cteMDBNode cte)) + (cteMDBNode_update (mdbPrev_update (%_. 
c2))))") + apply (erule domE) + apply (intro exI) + apply (rule conjI) + apply (clarsimp simp: modify_map_def cte_wp_at_ctes_of) + apply (rule refl) + apply (case_tac cte) + apply (case_tac cteb) + apply (rule_tac dest_node = "cteMDBNode cteb" in + mdb_swap.cteSwap_valid_mdb_helper [simplified const_def]) + apply (rule mdb_swap.intro) + apply (rule mdb_ptr.intro) + apply (erule vmdb.intro) + apply (rule mdb_ptr_axioms.intro) + apply simp + apply (rule mdb_ptr.intro) + apply (erule vmdb.intro) + apply (rule mdb_ptr_axioms.intro) + apply (simp add: cte_wp_at_ctes_of) + apply (erule mdb_swap_axioms.intro) + apply clarsimp + apply (erule weak_derived_sym') + apply clarsimp + apply (erule weak_derived_sym') + apply (simp) + apply clarsimp+ + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunch tcb_at [wp]: cteSwap "tcb_at' t" +crunch sch [wp]: cteSwap "\s. P (ksSchedulerAction s)" +crunch inQ [wp]: cteSwap "obj_at' (inQ d p) tcb" +crunch ksQ [wp]: cteSwap "\s. P (ksReadyQueues s)" +crunch sym [wp]: cteSwap "\s. sym_refs (state_refs_of' s)" +crunch sym_hyp [wp]: cteSwap "\s. sym_refs (state_hyp_refs_of' s)" +crunch cur [wp]: cteSwap "\s. P (ksCurThread s)" +crunch ksCurDomain [wp]: cteSwap "\s. P (ksCurDomain s)" +crunch ksDomSchedule [wp]: cteSwap "\s. P (ksDomSchedule s)" +crunch it [wp]: cteSwap "\s. P (ksIdleThread s)" +crunch tcbDomain_obj_at'[wp]: cteSwap "obj_at' (\tcb. x = tcbDomain tcb) t" + +lemma cteSwap_idle'[wp]: + "\valid_idle'\ + cteSwap c c1 c' c2 + \\rv s. valid_idle' s\" + apply (simp add: cteSwap_def) + apply (wp updateCap_idle' | simp)+ + done + +lemma weak_derived_zobj: + "weak_derived' c c' \ zobj_refs' c' = zobj_refs' c" + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule zobj_refs_Master) + apply simp + done + +lemma weak_derived_cte_refs: + "weak_derived' c c' \ cte_refs' c' = cte_refs' c" + apply (clarsimp simp: weak_derived'_def) + apply (rule master_eqI, rule cte_refs_Master) + apply simp + done + +lemma weak_derived_capRange_capBits: + "weak_derived' c c' \ capRange c' = capRange c \ capBits c' = capBits c" + apply (clarsimp simp: weak_derived'_def) + apply (metis capRange_Master capBits_Master) + done + +lemma cteSwap_refs[wp]: + "\valid_global_refs' and cte_wp_at' (weak_derived' c \ cteCap) c1 + and cte_wp_at' (weak_derived' c' \ cteCap) c2\ + cteSwap c c1 c' c2 + \\rv. valid_global_refs'\" + apply (simp add: cteSwap_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) valid_global_refsD_with_objSize)+ + apply (drule weak_derived_capRange_capBits)+ + apply (clarsimp simp: global_refs'_def Int_Un_distrib2) + done + +crunch ksInterrupt[wp]: cteSwap "\s. P (ksInterruptState s)" + +crunch ksArch[wp]: cteSwap "\s. P (ksArchState s)" + +crunch typ_at'[wp]: cteSwap "\s. P (typ_at' T p s)" + +lemma cteSwap_valid_irq_handlers[wp]: + "\valid_irq_handlers' and cte_wp_at' (weak_derived' c \ cteCap) c1 + and cte_wp_at' (weak_derived' c' \ cteCap) c2\ + cteSwap c c1 c' c2 + \\rv. 
valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def irq_issued'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=ksInterruptState, OF cteSwap_ksInterrupt]) + apply (simp add: cteSwap_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def ran_def) + apply (clarsimp simp add: modify_map_def split: if_split_asm) + apply (auto simp add: weak_derived'_def isCap_simps) + done + +lemma weak_derived_untypedZeroRange: + "\ weak_derived' c c'; isUntypedCap c' \ c' = c \ + \ untypedZeroRange c = untypedZeroRange c'" + apply (clarsimp simp: untypedZeroRange_def isCap_simps) + apply (clarsimp simp: weak_derived'_def) + done + +lemma cteSwap_urz[wp]: + "\untyped_ranges_zero' and valid_pspace' + and cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c) c1 + and cte_wp_at' (weak_derived' c' o cteCap) c2 + and cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c') c2 + and cte_wp_at' (weak_derived' c \ cteCap) c1 + and K (c1 \ c2)\ + cteSwap c c1 c' c2 + \\rv. untyped_ranges_zero'\" + apply (simp add: cteSwap_def) + apply (rule hoare_pre) + apply (rule untyped_ranges_zero_lift) + apply wp+ + apply clarsimp + apply (erule untyped_ranges_zero_delta[where xs="[c1, c2]"]) + apply (simp add: modify_map_def) + apply clarsimp + apply clarsimp + apply (clarsimp simp: ran_restrict_map_insert cte_wp_at_ctes_of + cteCaps_of_def modify_map_def) + apply (drule(1) weak_derived_untypedZeroRange)+ + apply auto + done + +crunch valid_arch_state'[wp]: cteSwap "valid_arch_state'" + +crunch irq_states'[wp]: cteSwap "valid_irq_states'" + +crunch vq'[wp]: cteSwap "valid_queues'" + +crunch ksqsL1[wp]: cteSwap "\s. P (ksReadyQueuesL1Bitmap s)" + +crunch ksqsL2[wp]: cteSwap "\s. P (ksReadyQueuesL2Bitmap s)" + +crunch st_tcb_at'[wp]: cteSwap "st_tcb_at' P t" + +crunch vms'[wp]: cteSwap "valid_machine_state'" + +crunch pspace_domain_valid[wp]: cteSwap "pspace_domain_valid" + +crunch ct_not_inQ[wp]: cteSwap "ct_not_inQ" + +crunch ksDomScheduleIdx [wp]: cteSwap "\s. P (ksDomScheduleIdx s)" + +lemma cteSwap_invs'[wp]: + "\invs' and valid_cap' c and valid_cap' c' and + ex_cte_cap_to' c1 and ex_cte_cap_to' c2 and + cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c) c1 and + cte_wp_at' (weak_derived' c' o cteCap) c2 and + cte_wp_at' (\cc. isUntypedCap (cteCap cc) \ (cteCap cc) = c') c2 and + cte_wp_at' (weak_derived' c \ cteCap) c1 and + K (c1 \ c2)\ + cteSwap c c1 c' c2 + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def pred_conj_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_conj_lift sch_act_wf_lift + valid_queues_lift cur_tcb_lift + valid_irq_node_lift irqs_masked_lift tcb_in_cur_domain'_lift + ct_idle_or_in_cur_domain'_lift2) + apply (clarsimp simp: cte_wp_at_ctes_of weak_derived_zobj weak_derived_cte_refs + weak_derived_capRange_capBits) + done + +lemma capSwap_invs'[wp]: + "\invs' and ex_cte_cap_to' c1 and ex_cte_cap_to' c2\ + capSwapForDelete c1 c2 + \\rv. 
invs'\" + apply (simp add: capSwapForDelete_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (auto dest!: ctes_of_valid') + done + +lemma Zombie_isZombie[simp]: + "isZombie (Zombie x y z)" + by (simp add: isZombie_def) + +lemmas sameObject_sameRegion = sameObjectAs_sameRegionAs + +lemma mdb_next_cap_upd: + "m sl = Some (CTE cap' mdbnode) \ + m (sl \ CTE cap mdbnode) \ p \ p' = m \ p \ p'" + by (simp add: mdb_next_unfold) + +lemma trancl_cap_upd: + "m sl = Some (CTE cap' mdbnode) \ + m (sl \ CTE cap mdbnode) \ p \\<^sup>+ p' = m \ p \\<^sup>+ p'" + apply (rule iffI) + apply (erule trancl_induct) + apply (fastforce simp: mdb_next_cap_upd simp del: fun_upd_apply) + apply (fastforce simp: mdb_next_cap_upd simp del: fun_upd_apply elim: trancl_trans) + apply (erule trancl_induct) + apply (fastforce simp: mdb_next_cap_upd simp del: fun_upd_apply) + apply (fastforce simp: mdb_next_cap_upd simp del: fun_upd_apply elim: trancl_trans) + done + +lemma rtrancl_cap_upd: + "m sl = Some (CTE cap' mdbnode) \ + m (sl \ CTE cap mdbnode) \ p \\<^sup>* p' = m \ p \\<^sup>* p'" + by (simp add: trancl_cap_upd rtrancl_eq_or_trancl) + +lemma no_loops_tranclD: + "\ m \ p \\<^sup>+ p'; no_loops m \ \ \ m \ p' \\<^sup>+ p" + apply clarsimp + apply (drule (1) trancl_trans) + apply (simp add: no_loops_def) + done + +lemmas mdb_chain_0_tranclD = no_loops_tranclD [OF _ mdb_chain_0_no_loops] + +lemma caps_contained_subrange: + "\ caps_contained' m; m sl = Some (CTE cap n'); capRange cap' \ capRange cap; \isUntypedCap cap; \ isUntypedCap cap' \ + \ caps_contained' (modify_map m sl (cteCap_update (%_. cap')))" + apply (simp add: caps_contained'_def modify_map_apply notUntypedRange) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=sl in allE) + apply simp + apply blast + done +lemma ex_cte_cap_to'_cteCap: + "ex_cte_cap_to' p = (\s. \p' c. cteCaps_of s p' = Some c \ p \ cte_refs' c (irq_node' s))" + apply (simp add: ex_cte_cap_to'_def cte_wp_at_ctes_of cteCaps_of_def) + apply (fastforce intro!: ext) + done + +lemma updateCap_ifunsafe': + "\\s. if_unsafe_then_cap' s \ valid_objs' s + \ cte_wp_at' (\cte. \r\cte_refs' (cteCap cte) (irq_node' s) - cte_refs' cap (irq_node' s). + cte_wp_at' (\cte. cteCap cte = NullCap) r s + \ (r = sl \ cap = NullCap)) sl s + \ (cap \ NullCap \ ex_cte_cap_to' sl s)\ + updateCap sl cap + \\rv. if_unsafe_then_cap'\" + apply (simp add: ifunsafe'_def3 o_def) + apply wp + apply clarsimp + apply (subgoal_tac "ex_cte_cap_to' cref s") + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x=crefa in exI) + apply (clarsimp simp: cteCaps_of_def modify_map_def) + apply (rule ccontr, drule bspec, clarsimp, erule(1) conjI) + apply (clarsimp split: if_split_asm) + apply (drule_tac x=cref in spec) + apply (clarsimp dest!: modify_map_K_D + simp: ex_cte_cap_to'_cteCap + split: if_split_asm) + done + +lemma valid_vmdb [elim!]: + "valid_mdb' s \ vmdb (ctes_of s)" + by unfold_locales (simp add: valid_mdb'_def) + +lemma class_links_update: + "\ class_links m; \cte. 
m x = Some cte + \ mdbNext (cteMDBNode cte) = mdbNext (cteMDBNode cte') + \ capClass (cteCap cte) = capClass (cteCap cte') \ + \ class_links (m(x \ cte'))" + apply clarsimp + apply (unfold class_links_def) + apply (erule allEI, erule allEI) + apply (clarsimp simp: mdb_next_unfold split del: if_split split: if_split_asm) + done + +lemma sameRegionAs_Zombie[simp]: + "\ sameRegionAs (Zombie p zb n) cap" + by (simp add: sameRegionAs_def3 isCap_simps) + +lemma descendants_of_subset_untyped: + assumes adj: "\x. ((m x = None) = (m' x = None)) + \ (\cte cte'. m x = Some cte \ m' x = Some cte' + \ (isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte')) + \ (capRange (cteCap cte) = capRange (cteCap cte')) + \ (isUntypedCap (cteCap cte) \ cteCap cte = cteCap cte'))" + and desc: "\x. descendants_of' x m \ descendants_of' x m'" + shows "(untyped_inc' m \ untyped_inc' m') + \ (untyped_mdb' m \ untyped_mdb' m')" +proof + have P: "\x cte. \ m' x = Some cte; isUntypedCap (cteCap cte) \ + \ \node. m x = Some (CTE (cteCap cte) node) \ m' x = Some cte" + apply (cut_tac x=x in adj) + apply clarsimp + apply (case_tac y, simp) + done + + show "untyped_inc' m \ untyped_inc' m'" + unfolding untyped_inc'_def + apply (rule impI, erule allEI, erule allEI) + apply clarsimp + apply (drule P | simp)+ + apply clarsimp + apply (cut_tac x=p in desc) + apply (cut_tac x=p' in desc) + by blast + + have Q: "\x cte. m' x = Some cte + \ \cap node. m x = Some (CTE cap node) + \ isUntypedCap cap = isUntypedCap (cteCap cte) + \ capRange cap = capRange (cteCap cte)" + apply (cut_tac x=x in adj) + apply clarsimp + apply (case_tac y, simp) + done + + show "untyped_mdb' m \ untyped_mdb' m'" + unfolding untyped_mdb'_def + apply (rule impI, erule allEI, erule allEI) + apply clarsimp + apply (drule_tac x=p in P, simp) + apply (drule_tac x=p' in Q, simp) + apply clarsimp + apply (cut_tac x=p in desc) + apply blast + done + +qed + +lemma invalid_Thread_CNode: + "\ isThreadCap cap; isCNodeCap cap'; s \' cap; s \' cap' \ + \ capUntypedPtr cap \ capUntypedPtr cap'" + apply (clarsimp simp: valid_cap'_def isCap_simps) + apply (drule_tac x=0 in spec) + apply (clarsimp simp: obj_at'_def) + done + +lemma Final_notUntyped_capRange_disjoint: + "\ isFinal cap sl (cteCaps_of s); cteCaps_of s sl' = Some cap'; + sl \ sl'; capUntypedPtr cap = capUntypedPtr cap'; capBits cap = capBits cap'; + isThreadCap cap \ isCNodeCap cap; s \' cap; + \ isUntypedCap cap'; \ isArchFrameCap cap'; \ isZombie cap'; + capClass cap' = PhysicalClass; valid_objs' s \ + \ P" + apply (clarsimp simp add: isFinal_def) + apply (drule_tac x=sl' in spec) + apply (clarsimp simp: cteCaps_of_def) + apply (drule(1) ctes_of_valid') + apply (elim disjE isCapDs[elim_format]) + apply (clarsimp simp: valid_cap'_def valid_arch_cap'_def valid_arch_cap_ref'_def bit_simps + obj_at'_def objBits_simps + typ_at'_def ko_wp_at'_def + page_table_at'_def + split: capability.split_asm zombie_type.split_asm + arch_capability.split_asm option.split_asm + dest!: spec[where x=0], + (clarsimp simp: sameObjectAs_def3 isCap_simps)?)+ + done + +lemma capBits_capUntyped_capRange: + "\ capBits cap = capBits cap'; + capUntypedPtr cap = capUntypedPtr cap'; + capClass cap = capClass cap' \ + \ capRange cap = capRange cap'" + by (simp add: capRange_def) + +lemma ztc_phys: + "\ isCNodeCap cap \ isThreadCap cap \ isZombie cap \ + \ capClass cap = PhysicalClass" + by (auto simp: isCap_simps) + +lemma ztc_sameRegion: + "\ isCNodeCap cap \ isThreadCap cap \ isZombie cap \ + \ sameRegionAs cap cap' = sameObjectAs cap cap'" 
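+  (* Note: for CNode, Thread and Zombie capabilities, sameRegionAs collapses to
+     sameObjectAs, because such caps are never Untyped, ArchFrame or IRQControl
+     caps; the subgoal_tac below discharges exactly those side conditions. *)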
+ apply (subgoal_tac "\ isUntypedCap cap \ \ isArchFrameCap cap + \ \ isIRQControlCap cap") + apply (simp add: sameRegionAs_def3 sameObjectAs_def3) + apply (auto simp: isCap_simps) + done + +lemma distinct_zombies_seperate_if_zombiedE: + "\ distinct_zombies m; m x = Some cte; + isUntypedCap (cteCap cte) \ isUntypedCap (cteCap cte'); + isArchFrameCap (cteCap cte) \ isArchFrameCap (cteCap cte'); + capClass (cteCap cte') = capClass (cteCap cte); + capBits (cteCap cte') = capBits (cteCap cte); + capUntypedPtr (cteCap cte') = capUntypedPtr (cteCap cte); + \y cte''. \ m y = Some cte''; x \ y; + isZombie (cteCap cte'); \ isZombie (cteCap cte); + \ isZombie (cteCap cte''); + \ isUntypedCap (cteCap cte''); \ isArchFrameCap (cteCap cte''); + capClass (cteCap cte'') = PhysicalClass; + capUntypedPtr (cteCap cte'') = capUntypedPtr (cteCap cte); + capBits (cteCap cte'') = capBits (cteCap cte) + \ \ False \ + \ distinct_zombies (m (x \ cte'))" + apply (cases "isZombie (cteCap cte') \ \ isZombie (cteCap cte)") + apply (subgoal_tac "\y cte''. m y = Some cte'' \ y \ x + \ capUntypedPtr (cteCap cte'') = capUntypedPtr (cteCap cte) + \ capBits (cteCap cte'') = capBits (cteCap cte) + \ \ isZombie (cteCap cte'')") + apply (erule distinct_zombies_seperateE) + apply (drule_tac x=y in spec, clarsimp) + apply auto[1] + apply (clarsimp simp add: distinct_zombies_def distinct_zombie_caps_def) + apply (drule_tac x=y in spec, drule_tac x=x in spec) + apply (frule isZombie_capClass[where cap="cteCap cte'"]) + apply clarsimp + apply (auto simp: isCap_simps)[1] + apply clarsimp + apply (erule(7) distinct_zombies_unzombieE) + done + +lemma mdb_chunked_update_final: + assumes chunked: "mdb_chunked m" + and slot: "m slot = Some (CTE cap node)" + and Fin1: "\x cte. m x = Some cte \ x \ slot + \ \ sameRegionAs cap (cteCap cte)" + and Fin2: "\x cte. m x = Some cte \ x \ slot + \ \ sameRegionAs cap' (cteCap cte)" + and Fin3: "\x cte. m x = Some cte \ x \ slot + \ sameRegionAs (cteCap cte) cap + \ isUntypedCap (cteCap cte)" + and Fin4: "\x cte. m x = Some cte \ x \ slot + \ sameRegionAs (cteCap cte) cap' + \ isUntypedCap (cteCap cte)" + and capR: "capRange cap = capRange cap'" + shows "mdb_chunked (m (slot \ CTE cap' node))" + (is "mdb_chunked ?m'") +proof - + note trancl[simp] = trancl_cap_upd [where m=m, OF slot] + note rtrancl[simp] = rtrancl_cap_upd [where m=m, OF slot] + + have sameRegionAs: + "\x cte. \ m x = Some cte; x \ slot; sameRegionAs (cteCap cte) cap' \ + \ sameRegionAs (cteCap cte) cap" + apply (frule(2) Fin4) + apply (clarsimp simp: sameRegionAs_def3 capR) + apply (clarsimp simp: isCap_simps) + done + + have is_chunk: + "\x cap n p p'. \ is_chunk m cap p p'; m x = Some (CTE cap n); x \ slot \ \ + is_chunk ?m' cap p p'" + apply (simp add: is_chunk_def split del: if_split) + apply (erule allEI) + apply (clarsimp simp: slot) + apply (frule(1) Fin3, simp) + apply (clarsimp simp: sameRegionAs_def3 capR) + apply (clarsimp simp: isCap_simps) + done + + have not_chunk: "\p. 
\ m \ slot \\<^sup>+ p; p \ slot \ \ \ is_chunk m cap slot p" + apply (simp add: is_chunk_def) + apply (rule_tac x=p in exI) + apply clarsimp + apply (frule(1) Fin1) + apply simp + done + + show ?thesis using chunked + apply (simp add: mdb_chunked_def split del: if_split) + apply (erule allEI, erule allEI) + apply (clarsimp split del: if_split) + apply (clarsimp simp: slot split: if_split_asm) + apply (frule(1) Fin2[OF _ not_sym], simp) + apply (frule(1) sameRegionAs, clarsimp+) + apply (simp add: not_chunk is_chunk) + apply (simp add: is_chunk) + done +qed + +lemma distinct_zombiesD: + "\ m x = Some cte; distinct_zombies m; isZombie (cteCap cte); + y \ x; m y = Some cte'; capBits (cteCap cte') = capBits (cteCap cte); + capUntypedPtr (cteCap cte') = capUntypedPtr (cteCap cte); + \ isUntypedCap (cteCap cte'); \ isArchFrameCap (cteCap cte'); + capClass (cteCap cte') = PhysicalClass \ + \ False" + apply (simp add: distinct_zombies_def distinct_zombie_caps_def) + apply (drule_tac x=x in spec, drule_tac x=y in spec) + apply clarsimp + apply auto + done + +lemma ztc_replace_update_final: + assumes chunk: "mdb_chunked m" + and slot: "m x = Some (CTE cap node)" + and ztc1: "isCNodeCap cap \ isThreadCap cap \ isZombie cap" + and ztc2: "isCNodeCap cap' \ isThreadCap cap' \ isZombie cap'" + and unt: "capUntypedPtr cap = capUntypedPtr cap'" + and bits: "capBits cap = capBits cap'" + and distz: "distinct_zombies m" + and Fin: "isFinal cap x (option_map cteCap \ m)" + and valid: "s \' cap" "s \' cap'" + shows "mdb_chunked (m (x \ CTE cap' node))" +proof (rule mdb_chunked_update_final [OF chunk, OF slot]) + have cases: "capMasterCap cap = capMasterCap cap' + \ isZombie cap \ isZombie cap'" + using bits unt ztc1 ztc2 + invalid_Thread_CNode [OF _ _ valid] + invalid_Thread_CNode [OF _ _ valid(2) valid(1)] + by (auto simp: isCap_simps) + + have Fin': "\y cte. \ m y = Some cte; y \ x \ \ \ sameObjectAs cap (cteCap cte)" + using Fin + apply (clarsimp simp: isFinal_def) + apply (drule_tac x=y in spec) + apply (clarsimp simp: sameObjectAs_def3) + done + + show Fin1: "\y cte. \ m y = Some cte; y \ x \ \ \ sameRegionAs cap (cteCap cte)" + by (clarsimp simp: ztc_sameRegion [OF ztc1] Fin') + + show capR: "capRange cap = capRange cap'" + using unt bits ztc_phys[OF ztc1] ztc_phys[OF ztc2] + by (simp add: capRange_def) + + have capR_neq: "capRange cap' \ {}" + using capAligned_capUntypedPtr [OF valid_capAligned, OF valid(2)] + by (clarsimp simp add: ztc_phys [OF ztc2]) + + have zombie_case_helper: + "\y cte. \ m y = Some cte; y \ x; isZombie cap \ + \ \ sameObjectAs cap' (cteCap cte)" + apply (clarsimp simp: ztc_sameRegion ztc1 ztc2 + elim!: sameObjectAsE) + apply (rule distinct_zombiesD [OF slot distz], simp_all)[1] + apply (drule master_eqE, rule capBits_Master) + apply (simp add: bits) + apply (drule arg_cong[where f=capUntypedPtr]) + apply (simp add: capUntyped_Master unt) + apply (drule arg_cong[where f=isUntypedCap]) + apply (simp add: isCap_Master) + apply (drule arg_cong[where f=isArchFrameCap]) + apply (clarsimp simp add: isCap_Master) + apply (cut_tac ztc2, clarsimp simp: isCap_simps) + apply (drule arg_cong[where f=capClass]) + apply (simp add: capClass_Master ztc_phys[OF ztc2]) + done + + show Fin2: "\y cte. \ m y = Some cte; y \ x \ \ \ sameRegionAs cap' (cteCap cte)" + using capR + apply clarsimp + apply (frule(1) Fin1) + apply (rule disjE [OF cases]) + apply (clarsimp simp: ztc_sameRegion ztc1 ztc2 sameObjectAs_def3) + apply (drule_tac F="\cap. 
(isNullCap cap, isZombie cap, + isUntypedCap cap, isArchFrameCap cap, + capRange cap)" in master_eqE, + simp add: isCap_Master capRange_Master del: isNullCap)+ + using valid apply (auto simp: isCap_Master capRange_Master)[1] + apply (erule disjE) + apply (drule(2) zombie_case_helper) + apply (simp add: ztc_sameRegion ztc1 ztc2) + apply (clarsimp simp: ztc_sameRegion ztc1 ztc2 + elim!: sameObjectAsE) + done + + have untyped_helper: + "\cap cap'. \ isCNodeCap cap' \ isThreadCap cap' \ isZombie cap'; + sameRegionAs cap cap' \ + \ isUntypedCap cap \ sameRegionAs cap' cap" + apply (erule sameRegionAsE) + apply (clarsimp simp: ztc_sameRegion sameObjectAs_def3) + apply (drule_tac F="\cap. (isNullCap cap, isZombie cap, + isUntypedCap cap, isArchFrameCap cap, + capRange cap)" in master_eqE, + simp add: isCap_Master capRange_Master del: isNullCap)+ + apply (auto simp: isCap_Master capRange_Master isCap_simps)[1] + apply simp + apply (clarsimp simp: isCap_simps)+ + done + + show Fin3: "\y cte. \ m y = Some cte; y \ x; sameRegionAs (cteCap cte) cap \ + \ isUntypedCap (cteCap cte)" + apply (frule(1) Fin1) + apply (drule untyped_helper[OF ztc1]) + apply simp + done + + show Fin4: "\y cte. \ m y = Some cte; y \ x; sameRegionAs (cteCap cte) cap' \ + \ isUntypedCap (cteCap cte)" + apply (frule(1) Fin2) + apply (drule untyped_helper[OF ztc2]) + apply simp + done + +qed + +lemma updateCap_untyped_ranges_zero_simple: + "\cte_wp_at' ((\cp. untypedZeroRange cp = untypedZeroRange cap) o cteCap) sl and untyped_ranges_zero'\ + updateCap sl cap + \\_. untyped_ranges_zero'\" + apply (rule hoare_pre, rule untyped_ranges_zero_lift, wp+) + apply (clarsimp simp: modify_map_def cteCaps_of_def cte_wp_at_ctes_of) + apply (simp add: untyped_ranges_zero_inv_def) + apply (rule arg_cong[where f=ran]) + apply (simp add: fun_eq_iff map_comp_def) + done + +crunch tcb_in_cur_domain'[wp]: updateCap "tcb_in_cur_domain' t" + (wp: crunch_wps simp: crunch_simps rule: tcb_in_cur_domain'_lift) + +lemma make_zombie_invs': + "\\s. invs' s \ s \' cap \ + cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s \ + cte_wp_at' (\cte. capClass (cteCap cte) = PhysicalClass \ + capUntypedPtr (cteCap cte) = capUntypedPtr cap \ + capBits (cteCap cte) = capBits cap \ + (\r\cte_refs' (cteCap cte) (irq_node' s) - cte_refs' cap (irq_node' s). + cte_wp_at' (\cte. cteCap cte = NullCap) r s) \ + (isZombie cap \ isThreadCap cap \ isCNodeCap cap) \ + final_matters' (cteCap cte) \ + (isThreadCap (cteCap cte) \ isCNodeCap (cteCap cte) + \ isZombie (cteCap cte)) \ \ isUntypedCap (cteCap cte) \ + (\p \ threadCapRefs (cteCap cte). + st_tcb_at' ((=) Inactive) p s + \ bound_tcb_at' ((=) None) p s + \ obj_at' (Not \ tcbQueued) p s + \ ko_wp_at' (Not \ hyp_live') p s + \ (\pr. p \ set (ksReadyQueues s pr)))) sl s\ + updateCap sl cap + \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_irq_handlers'_def irq_issued'_def) + apply (wp updateCap_ctes_of_wp sch_act_wf_lift valid_queues_lift cur_tcb_lift + updateCap_iflive' updateCap_ifunsafe' updateCap_idle' + valid_irq_node_lift ct_idle_or_in_cur_domain'_lift2 + updateCap_untyped_ranges_zero_simple + | simp split del: if_split)+ + apply clarsimp + apply (intro conjI[rotated]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (auto simp: untypedZeroRange_def isCap_simps)[1] + apply (clarsimp simp: modify_map_def ran_def split del: if_split + split: if_split_asm) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of isCap_simps) + apply auto[1] + + apply (clarsimp simp: disj_comms cte_wp_at_ctes_of + dest!: ztc_phys capBits_capUntyped_capRange) + apply (frule(1) capBits_capUntyped_capRange, simp) + apply (clarsimp dest!: valid_global_refsD_with_objSize) + + apply (clarsimp simp: disj_comms cte_wp_at_ctes_of + dest!: ztc_phys capBits_capUntyped_capRange) + apply (frule(1) capBits_capUntyped_capRange, simp) + apply (clarsimp dest!: valid_global_refsD_with_objSize) + + apply (auto elim: if_unsafe_then_capD' simp: isCap_simps)[1] + + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule bspec[where x=sl], simp) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (subgoal_tac "st_tcb_at' ((=) Inactive) p' s + \ obj_at' (Not \ tcbQueued) p' s + \ bound_tcb_at' ((=) None) p' s + \ ko_wp_at' (Not \ hyp_live') p' s") + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def live'_def hyp_live'_def) + apply (auto dest!: isCapDs)[1] + apply (clarsimp simp: cte_wp_at_ctes_of disj_ac + dest!: isCapDs) + apply (frule ztc_phys[where cap=cap]) + apply (frule(1) capBits_capUntyped_capRange, simp) + apply (case_tac cte) + apply clarsimp + apply (simp add: valid_mdb_ctes_def) + apply (rule conjI) + apply (subst modify_map_dlist_iff) + apply (case_tac cte, simp) + apply simp + apply (rule conjI) + apply (rule mdb_chain_0_modify_map_inv, simp) + apply simp + apply (rule conjI) + apply (clarsimp simp: modify_map_apply) + apply (simp add: valid_badges_def del: fun_upd_apply) + apply clarify + apply (thin_tac "\ isUntypedCap cap" for cap) + apply (clarsimp simp: isCap_simps split: if_split_asm) + subgoal by ((elim disjE | clarsimp simp: isCap_simps)+) + subgoal by (fastforce simp: isCap_simps sameRegionAs_def3) + apply (clarsimp simp: mdb_next_unfold) + apply (erule_tac x=p in allE) + apply (erule_tac x="mdbNext node" in allE) + subgoal by simp + apply (rule conjI) + apply clarsimp + apply (erule (1) caps_contained_subrange, simp) + subgoal by (clarsimp simp: isCap_simps) + apply (clarsimp simp add: isCap_simps) + apply (subgoal_tac "valid_mdb' s") + prefer 2 + apply (simp add: valid_mdb'_def valid_mdb_ctes_def) + apply (rule conjI) + defer + apply (cut_tac m="ctes_of s" + and m'="(modify_map (ctes_of s) sl + (cteCap_update (\_. 
cap)))" + in descendants_of_subset_untyped) + apply (clarsimp simp: modify_map_def) + apply (rule conjI, clarsimp simp: isCap_simps) + apply clarsimp + apply (simp only: modify_map_apply) + apply (rule use_update_ztc_two [OF descendants_of_update_ztc]) + apply (rule exEI, rule vmdb.isFinal_untypedParent) + apply (rule vmdb.intro, simp add: valid_mdb'_def) + apply assumption + apply (simp add: cteCaps_of_def) + apply (clarsimp simp: isCap_simps) + apply assumption + apply (clarsimp simp: isCap_simps) + apply assumption + apply (simp add: disj_comms) + apply (simp add: capRange_def) + apply (simp add: capRange_def) + apply (rule valid_capAligned) + apply (erule(1) ctes_of_valid') + apply (simp add: disj_comms) + apply clarsimp + apply (erule(1) mdb_chain_0_no_loops) + apply (erule (3) isFinal_no_descendants) + apply (clarsimp simp: modify_map_apply) + apply (rule conjI, clarsimp simp: valid_nullcaps_def isCap_simps) + apply (rule conjI, clarsimp simp: ut_revocable'_def isCap_simps) + apply (rule conjI, clarsimp elim!: class_links_update) + apply (rule conjI) + apply (erule(1) distinct_zombies_seperate_if_zombiedE) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply simp + apply simp + apply simp + apply (erule_tac sl'=y in Final_notUntyped_capRange_disjoint, + simp add: cteCaps_of_def, + simp_all add: disj_ac)[1] + apply (erule(1) ctes_of_valid_cap') + apply (rule conjI) + apply (subgoal_tac "cap \ IRQControlCap") + apply (clarsimp simp: irq_control_def) + apply (clarsimp simp: isCap_simps) + apply (simp add: reply_masters_rvk_fb_def, erule ball_ran_fun_updI) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: modify_map_apply) + apply (erule(1) ztc_replace_update_final, simp_all) + apply (simp add: cteCaps_of_def) + apply (erule(1) ctes_of_valid_cap') + done + +lemma isFinal_Zombie: + "isFinal (Zombie p' b n) p cs" + by (simp add: isFinal_def sameObjectAs_def isCap_simps) + +lemma shrink_zombie_invs': + "\invs' and (K (isZombie cap)) + and cte_wp_at' (\cte. cteCap cte = Zombie (capZombiePtr cap) (capZombieType cap) (capZombieNumber cap + 1)) sl + and cte_wp_at' (\cte. cteCap cte = NullCap) (capZombiePtr cap + 2^cteSizeBits * (of_nat (capZombieNumber cap)))\ + updateCap sl cap + \\rv. invs'\" + apply (wp make_zombie_invs') + apply (clarsimp simp: cte_wp_at_ctes_of isFinal_Zombie isCap_simps final_matters'_def) + apply (rule context_conjI) + apply (drule ctes_of_valid', clarsimp) + apply (clarsimp simp: valid_cap'_def capAligned_def) + apply clarsimp + apply (rule ccontr, erule notE, rule imageI) + apply (drule word_le_minus_one_leq) + apply (rule ccontr, simp add: linorder_not_less mult.commute mult.left_commute shiftl_t2n) + done + +lemma cte_wp_at_cteCap_norm: + "(cte_wp_at' (\c. P (cteCap c)) p s) = (\cap. cte_wp_at' (\c. cteCap c = cap) p s \ P cap)" + by (auto simp add: cte_wp_at'_def) + +lemma cte_wp_at_conj_eq': + "cte_wp_at' (\c. P c \ Q c) p s = (cte_wp_at' P p s \ cte_wp_at' Q p s)" + by (auto simp add: cte_wp_at'_def) + +lemma cte_wp_at_disj_eq': + "cte_wp_at' (\c. 
P c \ Q c) p s = (cte_wp_at' P p s \ cte_wp_at' Q p s)" + by (auto simp add: cte_wp_at'_def) + +lemma valid_Zombie_cte_at': + "\ s \' Zombie p zt m; n < zombieCTEs zt \ \ cte_at' (p + (of_nat n * 2^cteSizeBits)) s" + apply (clarsimp simp: valid_cap'_def split: zombie_type.split_asm) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (subgoal_tac "tcb_cte_cases (of_nat n * 2^cteSizeBits) \ None") + apply clarsimp + apply (erule(2) cte_wp_at_tcbI') + apply fastforce + apply simp + apply (thin_tac "a < word_bits" for a) + apply ((clarsimp | erule less_handy_casesE | fastforce simp: objBits_defs)+)[1] + apply (drule spec[where x="of_nat n"]) + apply (subst(asm) less_mask_eq) + apply (rule order_less_le_trans) + apply (erule of_nat_mono_maybe [rotated]) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def) + apply simp + apply simp + apply (clarsimp simp: mult.commute mult.left_commute real_cte_at') + done + +lemma cteSwap_cte_wp_cteCap: + "\\s. p \ sl \ + (p = p' \ cte_at' p' s \ P cap') \ + (p \ p' \ cte_wp_at' (\c. P (cteCap c)) p s)\ + cteSwap cap p' cap' sl + \\rv. cte_wp_at' (\c. P (cteCap c)) p\" + apply (simp add: cteSwap_def) + apply (rule hoare_pre) + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' + hoare_vcg_all_lift) + apply simp + apply (wp hoare_drop_imps)[1] + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ + apply simp + apply (clarsimp simp: o_def) + done + +lemma capSwap_cte_wp_cteCap: + "\\s. p \ sl \ + (p = p' \ cte_wp_at' (\c. P (cteCap c)) sl s) \ + (p \ p' \ cte_wp_at' (\c. P (cteCap c)) p s)\ + capSwapForDelete p' sl + \\rv. cte_wp_at' (\c. P (cteCap c)) p\" + apply(simp add: capSwapForDelete_def) + apply(wp) + apply(rule cteSwap_cte_wp_cteCap) + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ + apply(clarsimp) + apply(rule conjI) + apply(simp add: cte_at_cte_wp_atD) + apply(clarsimp simp: cte_wp_at_cteCap_norm) + apply(unfold cte_at'_def cte_wp_at'_def) + apply(clarsimp) + apply(clarsimp) + done + +lemma cteSwap_cteCaps_of [wp]: + "\\s. P ((cteCaps_of s) ( a := Some cap2, b := Some cap1 ))\ + cteSwap cap1 a cap2 b + \\rv s. P (cteCaps_of s)\" + apply (simp add: cteSwap_def) + apply (wp getCTE_cteCap_wp | simp)+ + apply (clarsimp split: option.split) + apply (erule rsubst[where P=P], intro ext) + apply (clarsimp simp: modify_map_def split: if_split_asm) + done + +lemma capSwap_cteCaps_of [wp]: + notes if_cong [cong] + shows + "\\s. P ((cteCaps_of s) \ (id ( a := b, b := a )))\ + capSwapForDelete a b + \\rv s. P (cteCaps_of s)\" + apply(simp add: capSwapForDelete_def) + apply(wp getCTE_wp getCTE_cteCap_wp) + apply(clarsimp) + apply(rule conjI) + prefer 2 + apply(clarsimp simp: o_def) + apply(clarsimp simp: cte_wp_at_ctes_of o_def) + apply(erule rsubst [where P=P]) + apply(rule ext) + apply(clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + done + +lemma cte_wp_final_cteCaps_of: + "(cte_wp_at' (\c. isFinal (cteCap c) p (cteCaps_of s)) p s) = + (\cap. cteCaps_of s p = Some cap \ isFinal cap p (cteCaps_of s))" + by (auto simp add: cteCaps_of_def cte_wp_at_ctes_of) + +lemma updateCap_cap_to': + "\\s. ex_cte_cap_to' p s \ + cte_wp_at' (\cte. p \ cte_refs' (cteCap cte) (irq_node' s) - cte_refs' cap (irq_node' s)) sl s\ + updateCap sl cap + \\rv. 
ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_cteCap) + apply wp + apply clarsimp + apply (rule_tac x=p' in exI) + apply (clarsimp simp: modify_map_def cte_wp_at_ctes_of cteCaps_of_def) + done + +lemma cteDeleteOne_cap_to'[wp]: + "\ex_cte_cap_wp_to' P p\ cteDeleteOne slot \\rv. ex_cte_cap_wp_to' P p\" + apply (simp add: ex_cte_cap_wp_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node'[OF cteDeleteOne_irq_node']) + apply (wp hoare_vcg_ex_lift cteDeleteOne_cte_wp_at_preserved) + apply (case_tac cap, simp_all add: finaliseCap_def Let_def isCap_simps)[1] + apply simp + done + +lemmas setNotification_cap_to'[wp] + = ex_cte_cap_to'_pres [OF setNotification_cte_wp_at' set_ntfn_ksInterrupt] + +lemmas setEndpoint_cap_to'[wp] + = ex_cte_cap_to'_pres [OF setEndpoint_cte_wp_at' setEndpoint_ksInterruptState] + +lemmas setThreadState_cap_to'[wp] + = ex_cte_cap_to'_pres [OF setThreadState_cte_wp_at' setThreadState_ksInterruptState] + +crunch cap_to'[wp]: cancelSignal "ex_cte_cap_wp_to' P p" + (simp: crunch_simps wp: crunch_wps) + +lemma cancelIPC_cap_to'[wp]: + "\ex_cte_cap_wp_to' P p\ cancelIPC t \\rv. ex_cte_cap_wp_to' P p\" + apply (simp add: cancelIPC_def Let_def) + apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (case_tac state, simp_all add: getThreadReplySlot_def locateSlot_conv) + apply (wp ex_cte_cap_to'_pres [OF threadSet_cte_wp_at'] + | simp add: o_def if_apply_def2 + | wpcw | wp (once) hoare_drop_imps)+ + done + +lemma emptySlot_deletes [wp]: + "\\\ emptySlot p opt \\rv s. cte_wp_at' (\c. cteCap c = NullCap) p s\" + apply (simp add: emptySlot_def case_Null_If) + apply (subst tree_cte_cteCap_eq [unfolded o_def]) + apply (wp getCTE_cteCap_wp opt_return_pres_lift) + apply (clarsimp split: option.splits simp: modify_map_def) + done + +lemma capCylicZombieD[dest!]: + "capCyclicZombie cap slot \ \zb n. cap = Zombie slot zb n" + by (clarsimp simp: capCyclicZombie_def split: capability.split_asm) + +crunches finaliseCap + for typ_at'[wp]: "\s. P (typ_at' T p s)" + (wp: getASID_wp crunch_wps simp: crunch_simps) + +lemma finaliseSlot_abort_cases': + "s \ \\\ + finaliseSlot' sl ex + \\rv s. fst rv \ (\ ex \ cte_wp_at' (\cte. isZombie (cteCap cte) + \ capZombiePtr (cteCap cte) = sl) sl s)\,\\\\" +proof (induct rule: finalise_spec_induct) + case (1 slot exp) + show ?case + apply (subst finaliseSlot'_simps_ext) + apply (simp only: split_def) + apply (rule hoare_pre_spec_validE) + apply (wp | simp)+ + apply ((wp "1.hyps" updateCap_cte_wp_at_cases)+, + (assumption | rule refl | simp only: split_def fst_conv snd_conv)+) + apply (wp | simp)+ + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift[where Q="\rv. cte_at' slot"]) + apply (wp typ_at_lifts [OF finaliseCap_typ_at'])[1] + apply (rule finaliseCap_cases) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp getCTE_wp isFinalCapability_inv | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done +qed + +lemmas finaliseSlot_abort_cases + = use_spec(2) [OF finaliseSlot_abort_cases', + folded validE_R_def finaliseSlot_def] + +crunch it [wp]: capSwapForDelete "\s. P (ksIdleThread s)" + +lemma cteDelete_delete_cases: + "\\\ + cteDelete slot e + \\rv. cte_wp_at' (\c. 
cteCap c = NullCap + \ \ e \ isZombie (cteCap c) + \ capZombiePtr (cteCap c) = slot) slot\, -" + apply (simp add: cteDelete_def whenE_def split_def) + apply wp + apply (rule hoare_strengthen_post [OF emptySlot_deletes]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply wp+ + apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + done + +lemmas cteDelete_deletes = cteDelete_delete_cases[where e=True, simplified] + +lemma cteSwap_cap_to'[wp]: + "\ex_cte_cap_to' p\ capSwapForDelete c1 c2 \\rv. ex_cte_cap_to' p\" + apply (simp add: cteSwap_def capSwapForDelete_def ex_cte_cap_to'_cteCap) + apply (wp getCTE_cteCap_wp | simp add: o_def)+ + apply (clarsimp split: option.splits) + apply (rule_tac x="(id (c1 := c2, c2 := c1)) p'" in exI) + apply (clarsimp simp: modify_map_def | rule conjI)+ + done + +lemma zombieCTEs_le: + "zombieCTEs zb \ 2 ^ zBits zb" + by (cases zb, simp_all add: objBits_defs) + +lemma valid_cap'_handy_bits: + "s \' Zombie r zb n \ n \ 2 ^ (zBits zb)" + "s \' Zombie r zb n \ n < 2 ^ word_bits" + "\ s \' Zombie r zb n; n \ 0 \ \ of_nat n - 1 < (2 ^ (zBits zb) :: machine_word)" + "s \' Zombie r zb n \ zBits zb < word_bits" + apply (insert zombieCTEs_le[where zb=zb], + simp_all add: valid_cap'_def) + apply (clarsimp elim!: order_le_less_trans) + apply (clarsimp simp: word_less_nat_alt) + apply (subgoal_tac "n \ unats (len_of TYPE (machine_word_len))") + apply (subst unat_minus_one) + apply (drule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply (simp add: unats_def) + apply simp + apply (simp add: word_unat.Abs_inverse) + apply (simp only: unats_def mem_simps) + apply (erule order_le_less_trans) + apply (erule order_le_less_trans) + apply (rule power_strict_increasing) + apply (simp only: word_bits_len_of) + apply simp + done + +lemma ex_Zombie_to: + "\ ctes_of s p = Some cte; cteCap cte = Zombie p' b n; + n \ 0; valid_objs' s \ + \ ex_cte_cap_to' p' s" + apply (simp add: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (intro exI, rule conjI, assumption) + apply (simp add: image_def) + apply (rule bexI[where x=0]) + apply simp + apply simp + apply (frule(1) ctes_of_valid') + apply (drule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply (simp only: word_bits_len_of) + apply (erule valid_cap'_handy_bits) + apply simp + done + +lemma ex_Zombie_to2: + "\ ctes_of s p = Some cte; cteCap cte = Zombie p' b n; + n \ 0; valid_objs' s \ + \ ex_cte_cap_to' (p' + (2^cteSizeBits * of_nat n - 2^cteSizeBits)) s" + apply (simp add: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (intro exI, rule conjI, assumption) + apply (simp add: image_def shiftl_t2n) + apply (rule bexI[where x="of_nat n - 1"]) + apply (fastforce simp: objBits_defs) + apply (subgoal_tac "n \ unats (len_of TYPE(machine_word_len))") + apply (simp add: word_less_nat_alt) + apply (subst unat_minus_one) + apply (simp add: word_neq_0_conv) + apply (drule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply (simp add: unats_def) + apply simp + apply (simp add: word_unat.Abs_inverse) + apply (simp only: unats_def mem_simps word_bits_len_of) + apply (drule(1) ctes_of_valid', simp) + apply (erule valid_cap'_handy_bits) + done + +declare word_to_1_set[simp] + +lemmas finalise_spec_induct2 = finaliseSlot'.induct[where P= + "\sl exp s. P sl (finaliseSlot' sl exp) (\P. exp \ P) s" for P] + +lemma cteSwap_sch_act_simple[wp]: + "\sch_act_simple\ cteSwap cap1 slot1 cap2 slot2 \\_. 
sch_act_simple\" + by (simp add: cteSwap_def sch_act_simple_def, wp) + +crunch sch_act_simple[wp]: capSwapForDelete sch_act_simple + +lemma updateCap_sch_act_simple[wp]: + "\sch_act_simple\ updateCap slot newCap \\_. sch_act_simple\" + by (simp add: sch_act_simple_def, wp) + +definition + "no_cte_prop P = (if \sl cte. \P\ setCTE sl cte \\_. P\ then P else \)" + +lemma no_cte_prop_top: + "no_cte_prop \ = \" + by (simp add: no_cte_prop_def) + +definition + "finalise_prop_stuff P + = ((\s f. P (ksWorkUnitsCompleted_update f s) = P s) + \ irq_state_independent_H P + \ (\s f. P (gsUntypedZeroRanges_update f s) = P s) + \ (\s f. P (ksInterruptState_update f s) = P s) + \ (\s f. P (ksMachineState_update (irq_state_update f) s) = P s) + \ (\s f. P (ksMachineState_update (irq_masks_update f) s) = P s))" + +lemma setCTE_no_cte_prop: + "\no_cte_prop P\ setCTE sl cte \\_. no_cte_prop P\" + by (simp add: no_cte_prop_def hoare_vcg_prop) + +lemma setInterruptState_no_cte_prop: + "\no_cte_prop P and K (finalise_prop_stuff P)\ setInterruptState st \\_. no_cte_prop P\" + apply (simp add: setInterruptState_def, wp) + apply (clarsimp simp: finalise_prop_stuff_def no_cte_prop_def) + done + +lemma dmo_maskInterrupt_no_cte_prop: + "\no_cte_prop P and K (finalise_prop_stuff P)\ + doMachineOp (maskInterrupt m irq) \\_. no_cte_prop P\" + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: no_cte_prop_def finalise_prop_stuff_def) + done + +lemma updateTrackedFreeIndex_no_cte_prop[wp]: + "\no_cte_prop P and K (finalise_prop_stuff P)\ + updateTrackedFreeIndex ptr idx \\_. no_cte_prop P\" + apply (simp add: updateTrackedFreeIndex_def getSlotCap_def) + apply (wp getCTE_wp') + apply (clarsimp simp: no_cte_prop_def finalise_prop_stuff_def) + done + +crunches emptySlot, capSwapForDelete + for no_cte_prop[wp]: "no_cte_prop P" + (ignore: doMachineOp wp: dmo_maskInterrupt_no_cte_prop) + +lemma reduceZombie_invs'': + assumes fin: + "\s'' rv. \\ (isZombie cap \ capZombieNumber cap = 0); \ (isZombie cap \ \ exposed); isZombie cap \ exposed; + (Inr rv, s'') + \ fst ((withoutPreemption $ locateSlotCap cap (fromIntegral (capZombieNumber cap - 1))) st)\ + \ s'' \ \\s. no_cte_prop Q s \ invs' s \ sch_act_simple s + \ cte_wp_at' (\cte. isZombie (cteCap cte)) slot s + \ ex_cte_cap_to' rv s\ + finaliseSlot rv False + \\rva s. no_cte_prop Q s \ invs' s \ sch_act_simple s + \ (fst rva \ cte_wp_at' (\cte. removeable' rv s (cteCap cte)) rv s) + \ (snd rva \ NullCap \ post_cap_delete_pre' (snd rva) rv (cteCaps_of s))\, + \\rv s. no_cte_prop Q s \ invs' s \ sch_act_simple s\" + assumes stuff: + "finalise_prop_stuff Q" + shows + "st \ \\s. + no_cte_prop Q s \ invs' s \ sch_act_simple s + \ (exposed \ ex_cte_cap_to' slot s) + \ cte_wp_at' (\cte. cteCap cte = cap) slot s + \ (exposed \ p = slot \ + cte_wp_at' (\cte. (P and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie p zb n + \ P cp \ (isZombie cp \ capZombiePtr cp \ p))) p s)\ + reduceZombie cap slot exposed + \\rv s. + no_cte_prop Q s \ invs' s \ sch_act_simple s + \ (exposed \ ex_cte_cap_to' slot s) + \ (exposed \ p = slot \ + cte_wp_at' (\cte. (P and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie p zb n + \ P cp \ (isZombie cp \ capZombiePtr cp \ p))) p s)\, + \\rv s. 
no_cte_prop Q s \ invs' s \ sch_act_simple s\" + apply (unfold reduceZombie_def cteDelete_def Let_def + split_def fst_conv snd_conv haskell_fail_def + case_Zombie_assert_fold) + apply (rule hoare_pre_spec_validE) + apply (wp hoare_vcg_disj_lift | simp)+ + apply (wp capSwap_cte_wp_cteCap getCTE_wp' | simp)+ + apply (wp shrink_zombie_invs')[1] + apply (wp | simp)+ + apply (rule getCTE_wp) + apply (wp | simp)+ + apply (rule_tac Q="\cte s. rv = capZombiePtr cap + + of_nat (capZombieNumber cap) * 2^cteSizeBits - 2^cteSizeBits + \ cte_wp_at' (\c. c = cte) slot s \ invs' s + \ no_cte_prop Q s \ sch_act_simple s" + in hoare_post_imp) + apply (clarsimp simp: cte_wp_at_ctes_of mult.commute mult.left_commute dest!: isCapDs) + apply (simp add: field_simps) + apply (wp getCTE_cte_wp_at)+ + apply simp + apply wp + apply (rule spec_strengthen_postE) + apply (rule_tac Q="\fc s. rv = capZombiePtr cap + + of_nat (capZombieNumber cap) * 2^cteSizeBits - 2^cteSizeBits" + in spec_valid_conj_liftE1) + apply wp[1] + apply (rule fin, assumption+) + apply (clarsimp simp: stuff) + apply (simp add: locateSlot_conv) + apply ((wp | simp)+)[2] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp dest!: isCapDs) + apply (rule conjI) + apply (erule(1) ex_Zombie_to) + apply clarsimp + apply clarsimp + apply clarsimp + apply (clarsimp dest!: isCapDs) + apply (fastforce dest!: ex_Zombie_to2 simp: cte_level_bits_def objBits_defs) + done + +lemmas preemptionPoint_invR = + valid_validE_R [OF preemptionPoint_inv] + +lemmas preemptionPoint_invE = + valid_validE_E [OF preemptionPoint_inv] + +lemma finaliseSlot_invs': + assumes finaliseCap: + "\cap final sl. \no_cte_prop Pr and invs' and sch_act_simple + and cte_wp_at' (\cte. cteCap cte = cap) sl\ finaliseCap cap final False \\rv. no_cte_prop Pr\" + and stuff: "finalise_prop_stuff Pr" + shows + "st \ \\s. + no_cte_prop Pr s \ invs' s \ sch_act_simple s + \ (exposed \ ex_cte_cap_to' slot s) + \ (exposed \ p = slot \ + cte_wp_at' (\cte. (P and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie p zb n + \ P cp \ (isZombie cp \ capZombiePtr cp \ p))) p s)\ + finaliseSlot' slot exposed + \\rv s. + no_cte_prop Pr s \ invs' s \ sch_act_simple s + \ (exposed \ p = slot \ + cte_wp_at' (\cte. (P and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie p zb n + \ P cp \ (isZombie cp \ capZombiePtr cp \ p))) p s) + \ (fst rv \ cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s) + \ (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\, + \\rv s. no_cte_prop Pr s \ invs' s \ sch_act_simple s\" +proof (induct arbitrary: P p rule: finalise_spec_induct2) + case (1 sl exp s Q q) + let ?P = "\cte. (Q and isZombie) (cteCap cte) + \ (\zb n cp. cteCap cte = Zombie q zb n + \ Q cp \ (isZombie cp \ capZombiePtr cp \ q))" + note hyps = "1.hyps"[folded reduceZombie_def[unfolded cteDelete_def finaliseSlot_def]] + have Q: "\x y n. {x :: machine_word} = (\x. y + (x << cteSizeBits)) ` {0 ..< n} \ n = 1" + apply (simp only: shiftl_t2n mult_ac) + apply (drule sym) + apply (case_tac "1 < n") + apply (frule_tac x = "y + 0 * 2^cteSizeBits" in eqset_imp_iff) + apply (frule_tac x = "y + 1 * 2^cteSizeBits" in eqset_imp_iff) + apply (subst(asm) imageI, simp) + apply (erule order_less_trans[rotated], simp) + apply (subst(asm) imageI, simp) + apply simp + apply (simp add: linorder_not_less objBits_defs) + apply (case_tac "n < 1") + apply simp + apply simp + done + have R: "\n. n \ 0 \ {0 .. 
n - 1} = {0 ..< n :: machine_word}" + apply safe + apply simp + apply (erule(1) word_leq_minus_one_le) + apply simp + apply (erule word_le_minus_one_leq) + done + have final_IRQHandler_no_copy: + "\irq sl sl' s. \ isFinal (IRQHandlerCap irq) sl (cteCaps_of s); sl \ sl' \ \ cteCaps_of s sl' \ Some (IRQHandlerCap irq)" + apply (clarsimp simp: isFinal_def sameObjectAs_def2 isCap_simps) + apply fastforce + done + from stuff have stuff': + "finalise_prop_stuff (no_cte_prop Pr)" + by (simp add: no_cte_prop_def finalise_prop_stuff_def) + note stuff'[unfolded finalise_prop_stuff_def, simp] + show ?case + apply (subst finaliseSlot'.simps) + apply (fold reduceZombie_def[unfolded cteDelete_def finaliseSlot_def]) + apply (unfold split_def) + apply (rule hoare_pre_spec_validE) + apply (wp | simp)+ + apply (wp make_zombie_invs' updateCap_cte_wp_at_cases + hoare_vcg_disj_lift)[1] + apply (wp hyps) + apply ((wp preemptionPoint_invE preemptionPoint_invR + | clarsimp simp: sch_act_simple_def + | simp cong: kernel_state.fold_congs machine_state.fold_congs)+)[1] + apply (rule spec_strengthen_postE [OF reduceZombie_invs''[OF _ stuff]]) + prefer 2 + apply fastforce + apply (rule hoare_pre_spec_validE, + rule spec_strengthen_postE) + apply (unfold finaliseSlot_def)[1] + apply (rule hyps[where P="\" and p=sl], (assumption | rule refl)+) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp, simp) + apply (wp make_zombie_invs' updateCap_ctes_of_wp updateCap_cap_to' + hoare_vcg_disj_lift updateCap_cte_wp_at_cases)+ + apply simp + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fin s. invs' s \ sch_act_simple s \ s \' (fst fin) + \ (exp \ ex_cte_cap_to' sl s) + \ no_cte_prop Pr s + \ cte_wp_at' (\cte. cteCap cte = cteCap rv) sl s + \ (q = sl \ exp \ cte_wp_at' (?P) q s)" + in hoare_vcg_conj_lift) + apply (wp hoare_vcg_disj_lift finaliseCap finaliseCap_invs[where sl=sl]) + apply (rule finaliseCap_zombie_cap') + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_cte_refs) + apply (rule finaliseCap_replaceable[where slot=sl]) + apply clarsimp + apply (erule disjE[where P="F \ G" for F G]) + apply (clarsimp simp: capRemovable_def cte_wp_at_ctes_of) + apply (rule conjI, clarsimp) + apply (case_tac b; case_tac "cteCap rv"; simp add: post_cap_delete_pre'_def) + apply (clarsimp simp: final_IRQHandler_no_copy) + apply (drule (1) ctes_of_valid'[OF _ invs_valid_objs']) + apply (clarsimp simp: valid_cap'_def) + apply (clarsimp dest!: isCapDs) + apply (rule conjI) + apply (clarsimp simp: capRemovable_def) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI, clarsimp) + apply (case_tac "cteCap rv", + simp_all add: isCap_simps removeable'_def + fun_eq_iff[where f="cte_refs' cap" for cap] + fun_eq_iff[where f=tcb_cte_cases] + tcb_cte_cases_def + word_neq_0_conv[symmetric])[1] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI, clarsimp) + apply (case_tac "cteCap rv", + simp_all add: isCap_simps removeable'_def + fun_eq_iff[where f="cte_refs' cap" for cap] + fun_eq_iff[where f=tcb_cte_cases] + tcb_cte_cases_def cteSizeBits_def)[1] + apply (frule Q[unfolded cteSizeBits_def, simplified]) + apply clarsimp + apply (simp add: mask_def) + apply (subst(asm) R) + apply (drule valid_capAligned [OF ctes_of_valid']) + apply fastforce + apply (simp add: capAligned_def word_bits_def) + apply (frule Q[unfolded cteSizeBits_def, simplified]) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of capRemovable_def) + apply (subgoal_tac "final_matters' (cteCap rv) \ \ 
isUntypedCap (cteCap rv)") + apply clarsimp + apply (rule conjI) + apply clarsimp + apply clarsimp + apply (case_tac "cteCap rv", + simp_all add: isCap_simps final_matters'_def)[1] + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI, clarsimp simp: removeable'_def) + apply (clarsimp simp: conj_comms) + apply (rule conjI, erule ctes_of_valid', clarsimp) + apply (rule conjI, clarsimp) + apply fastforce + done +qed + +lemma finaliseSlot_invs'': + "\\s. invs' s \ sch_act_simple s \ (\ exposed \ ex_cte_cap_to' slot s)\ + finaliseSlot slot exposed + \\rv s. invs' s \ sch_act_simple s \ (fst rv \ cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s) + \ (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\, + \\rv s. invs' s \ sch_act_simple s\" + unfolding finaliseSlot_def + apply (rule hoare_pre, rule hoare_post_impErr, rule use_spec) + apply (rule finaliseSlot_invs'[where P="\" and Pr="\" and p=slot]) + apply (simp_all add: no_cte_prop_top) + apply wp + apply (simp add: finalise_prop_stuff_def) + apply clarsimp + done + +lemma finaliseSlot_invs: + "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. invs'\" + apply (rule validE_valid, rule hoare_post_impErr) + apply (rule finaliseSlot_invs'') + apply simp+ + done + +lemma finaliseSlot_sch_act_simple: + "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ finaliseSlot slot e \\rv. sch_act_simple\" + apply (rule validE_valid, rule hoare_post_impErr) + apply (rule finaliseSlot_invs'') + apply simp+ + done + +lemma finaliseSlot_removeable: + "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ + finaliseSlot slot e + \\rv s. fst rv \ cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s\,-" + apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule finaliseSlot_invs'') + apply simp+ + done + +lemma finaliseSlot_irqs: + "\\s. invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s)\ + finaliseSlot slot e + \\rv s. (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))\,-" + apply (rule validE_validE_R, rule hoare_post_impErr) + apply (rule finaliseSlot_invs'') + apply simp+ + done + +lemma finaliseSlot_cte_wp_at: + "\ \cap. P cap \ isZombie cap; p \ slot \ \ + \\s. invs' s \ sch_act_simple s \ ex_cte_cap_to' slot s + \ cte_wp_at' (\cte. P (cteCap cte)) p s\ + finaliseSlot slot False + \\rv s. cte_wp_at' (\cte. P (cteCap cte) \ + (\zb n cp. cteCap cte = Zombie p zb n \ + P cp \ capZombiePtr cp \ p)) p s\,-" + unfolding finaliseSlot_def + apply (rule hoare_pre, unfold validE_R_def) + apply (rule hoare_post_impErr, rule use_spec) + apply (rule finaliseSlot_invs'[where P=P and Pr=\ and p=p]) + apply (simp_all add: no_cte_prop_top finalise_prop_stuff_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemmas reduceZombie_invs' + = reduceZombie_invs''[where Q=\, simplified no_cte_prop_top simp_thms + finalise_prop_stuff_def irq_state_independent_H_def, + OF drop_spec_validE TrueI, + OF hoare_weaken_preE, + OF finaliseSlot_invs'', + THEN use_specE'] + +lemma reduceZombie_invs: + "\\s. invs' s \ sch_act_simple s \ (\ exposed \ ex_cte_cap_to' slot s) + \ cte_wp_at' (\cte. cteCap cte = cap) slot s\ + reduceZombie cap slot exposed + \\rv s. 
invs' s\" + apply (rule validE_valid) + apply (rule hoare_post_impErr, rule hoare_pre, rule reduceZombie_invs'[where p=slot]) + apply clarsimp+ + done + +lemma reduceZombie_cap_to: + "\\s. invs' s \ sch_act_simple s \ (\ exposed \ ex_cte_cap_to' slot s) + \ cte_wp_at' (\cte. cteCap cte = cap) slot s\ + reduceZombie cap slot exposed + \\rv s. \ exposed \ ex_cte_cap_to' slot s\, -" + apply (rule validE_validE_R, rule hoare_pre, + rule hoare_post_impErr) + apply (rule reduceZombie_invs'[where p=slot]) + apply clarsimp+ + done + +lemma reduceZombie_sch_act_simple: + "\\s. invs' s \ sch_act_simple s \ (\ exposed \ ex_cte_cap_to' slot s) + \ cte_wp_at' (\cte. cteCap cte = cap) slot s\ + reduceZombie cap slot exposed + \\rv. sch_act_simple\" + apply (rule validE_valid, rule hoare_pre, + rule hoare_post_impErr) + apply (rule reduceZombie_invs'[where p=slot]) + apply clarsimp+ + done + +lemma cteDelete_invs': + "\invs' and sch_act_simple and K ex\ cteDelete ptr ex \\rv. invs'\" + apply (rule hoare_gen_asm) + apply (simp add: cteDelete_def whenE_def split_def) + apply (rule hoare_pre, wp finaliseSlot_invs) + apply (rule hoare_post_imp_R) + apply (unfold validE_R_def) + apply (rule use_spec) + apply (rule spec_valid_conj_liftE1) + apply (rule finaliseSlot_removeable) + apply (rule spec_valid_conj_liftE1) + apply (rule finaliseSlot_irqs) + apply (rule finaliseSlot_abort_cases'[folded finaliseSlot_def]) + apply simp + apply simp + done + +declare cases_simp_conj[simp] + +crunch typ_at'[wp]: capSwapForDelete "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +lemma cteDelete_typ_at' [wp]: + "\\s. P (typ_at' T p s)\ cteDelete slot exposed \\_ s. P (typ_at' T p s)\" + by (wp cteDelete_preservation | simp | fastforce)+ + +lemmas cteDelete_typ_at'_lifts [wp] = typ_at_lifts [OF cteDelete_typ_at'] + +lemma cteDelete_cte_at: + "\\\ cteDelete slot bool \\rv. cte_at' slot\" + apply (rule_tac Q="\s. cte_at' slot s \ \ cte_at' slot s" + in hoare_pre(1)) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_disj_lift) + apply (rule typ_at_lifts, rule cteDelete_typ_at') + apply (simp add: cteDelete_def finaliseSlot_def split_def) + apply (rule validE_valid, rule seqE) + apply (subst finaliseSlot'_simps_ext) + apply (rule seqE) + apply simp + apply (rule getCTE_sp) + apply (rule hoare_pre, rule hoare_FalseE) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule hoare_FalseE) + apply auto + done + +lemma cteDelete_cte_wp_at_invs: + "\ \cap. P cap \ isZombie cap \ \ + \\s. invs' s \ sch_act_simple s \ ex_cte_cap_to' slot s \ + cte_wp_at' (\cte. P (cteCap cte)) p s\ + cteDelete slot False + \\rv. cte_at' slot and invs' and sch_act_simple + and cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte = NullCap \ + (\zb n cp. cteCap cte = capability.Zombie p zb n \ P cp + \ (capZombiePtr cp \ p \ p = slot))) p\, -" + apply (rule hoare_pre) + apply (wp cteDelete_cte_at) + prefer 2 + apply (erule_tac Q="invs' s \ R" for s R in conjI[rotated]) + apply simp + apply (simp only: cteDelete_def withoutPreemption_def fun_app_def split_def) + apply (cases "p = slot") + apply (cases "\cp. P cp") + apply (simp add: whenE_def) + apply wp + apply (rule hoare_strengthen_post [OF emptySlot_deletes]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply wp + apply (simp add: imp_conjR conj_comms) + apply (rule_tac Q="\rv s. invs' s \ sch_act_simple s \ + (fst rv \ + cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s) \ + (fst rv \ + (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))) \ + (\ fst rv \ + cte_wp_at' (\cte. 
P (cteCap cte) \ + cteCap cte = NullCap \ + (\zb n. cteCap cte = Zombie slot zb n)) + slot s)" + and E="\rv. \" in hoare_post_impErr) + apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple + hoare_drop_imps(2)[OF finaliseSlot_irqs]) + apply (rule hoare_post_imp_R, rule finaliseSlot_abort_cases) + apply (clarsimp simp: cte_wp_at_ctes_of dest!: isCapDs) + apply simp + apply simp + apply simp + apply (simp add: cte_wp_at_ctes_of validE_R_def) + apply (simp add: whenE_def) + apply (wp emptySlot_cte_wp_cap_other) + apply (rule_tac Q'="\rv s. invs' s \ sch_act_simple s \ + (fst rv \ + cte_wp_at' (\cte. removeable' slot s (cteCap cte)) slot s) \ + (fst rv \ + (snd rv \ NullCap \ post_cap_delete_pre' (snd rv) slot (cteCaps_of s))) \ + cte_wp_at' (\cte. P (cteCap cte) \ + cteCap cte = NullCap \ + (\zb n. cteCap cte = Zombie p zb n) \ + (\cp. P cp \ capZombiePtr cp \ p)) + p s" + in hoare_post_imp_R) + apply (wp finaliseSlot_invs finaliseSlot_removeable finaliseSlot_sch_act_simple + hoare_drop_imps(2)[OF finaliseSlot_irqs]) + apply (rule hoare_post_imp_R [OF finaliseSlot_cte_wp_at[where p=p and P=P]]) + apply simp+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + apply simp + done + + +lemma cteDelete_sch_act_simple: + "\invs' and sch_act_simple and (\s. \ exposed \ ex_cte_cap_to' slot s)\ + cteDelete slot exposed \\rv. sch_act_simple\" + apply (simp add: cteDelete_def whenE_def split_def) + apply (wp hoare_drop_imps | simp)+ + apply (rule_tac hoare_post_impErr [where Q="\rv. sch_act_simple" + and E="\rv. sch_act_simple"]) + apply (rule valid_validE) + apply (wp finaliseSlot_sch_act_simple) + apply simp+ + done + +crunch st_tcb_at'[wp]: "Arch.finaliseCap", unbindMaybeNotification, prepareThreadDelete "st_tcb_at' P t" + (simp: crunch_simps pteAtIndex_def + wp: crunch_wps getObject_inv loadObject_default_inv) +end + + +lemma finaliseCap2_st_tcb_at': + assumes x[simp]: "\st. simple' st \ P st" + shows "\st_tcb_at' P t\ + finaliseCap cap final flag + \\rv. st_tcb_at' P t\" + apply (simp add: finaliseCap_def Let_def + getThreadCSpaceRoot deletingIRQHandler_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply ((wp cancelAllIPC_st_tcb_at cancelAllSignals_st_tcb_at + prepareThreadDelete_st_tcb_at' + suspend_st_tcb_at' cteDeleteOne_st_tcb_at getCTE_wp' + | simp add: isCap_simps getSlotCap_def getIRQSlot_def + locateSlot_conv getInterruptState_def + split del: if_split + | wpc))+ + done + +crunch st_tcb_at'[wp]: capSwapForDelete "st_tcb_at' P t" + +lemma cteDelete_st_tcb_at': + assumes x[simp]: "\st. simple' st \ P st" + shows "\st_tcb_at' P t\ + cteDelete slot ex + \\rv. st_tcb_at' P t\" + apply (rule cteDelete_preservation) + apply (rule finaliseCap2_st_tcb_at' [OF x]) + apply assumption + apply wp+ + apply auto + done + +definition + capToRPO :: "capability \ machine_word option \ nat" +where + "capToRPO cap \ case cap of + NullCap \ (None, 0) + | Zombie p zt n \ (Some p, 2) + | _ \ (None, 3)" + +lemma emptySlot_rvk_prog: + "\\s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\ + emptySlot sl opt + \\rv s. 
revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\" + apply (simp add: emptySlot_def case_Null_If) + apply (wp getCTE_cteCap_wp opt_return_pres_lift) + apply (clarsimp simp: o_def split: option.split) + apply (erule rpo_trans) + apply (rule rpo_delta[where S="{sl}"], simp_all) + apply (simp add: modify_map_def) + apply (simp add: Int_insert_left dom_def modify_map_def) + apply (clarsimp simp: capToRPO_def split: capability.split) + done + +lemma rvk_prog_modify_map: + "\ \x. Some x = m p \ + capToRPO (f x) = capToRPO x + \ rpo_measure p (Some (capToRPO (f x))) + < rpo_measure p (Some (capToRPO x)) \ + \ revoke_progress_ord (option_map capToRPO \ m) (option_map capToRPO \ (modify_map m p f))" + apply (cases "m p") + apply (simp add: modify_map_def fun_upd_idem) + apply (simp add: revoke_progress_ord_def) + apply simp + apply (erule disjE) + apply (simp add: modify_map_def fun_upd_idem) + apply (simp add: revoke_progress_ord_def) + apply (rule rpo_delta[where S="{p}"], + simp_all add: modify_map_def dom_def) + done + +lemma capSwap_rvk_prog: + "\\s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s) + \ cte_wp_at' (\cte. \n. (capToRPO (cteCap cte)) = (Some p1, Suc n)) p2 s + \ cte_wp_at' (\cte. fst (capToRPO (cteCap cte)) \ Some p1) p1 s\ + capSwapForDelete p1 p2 + \\rv s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\" + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (cases "p1 = p2") + apply simp + apply (erule rpo_trans) + apply (rule rpo_delta[where S="{p1, p2}"], simp_all) + apply (simp add: Int_insert_left dom_def) + apply (case_tac "capToRPO (cteCap ctea)") + apply simp + apply arith + done + +lemmas setObject_ASID_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF setObject_ASID_ctes_of'] +lemmas cancelAllIPC_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF cancelAllIPC_ctes_of] +lemmas cancelAllSignals_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF cancelAllSignals_ctes_of] +lemmas setEndpoint_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ep_ctes_of] +lemmas setNotification_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ntfn_ctes_of] + +lemmas emptySlot_rvk_prog' = emptySlot_rvk_prog[unfolded o_def] +lemmas threadSet_ctesCaps_of = cteCaps_of_ctes_of_lift[OF threadSet_ctes_of] + +lemmas storePTE_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF storePTE_ctes] + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma vcpuSwitch_rvk_prog': + "vcpuSwitch v \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma vcpuFinalise_rvk_prog': + "vcpuFinalise v \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma dissociateVCPUTCB_rvk_prog': + "dissociateVCPUTCB v t \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma vcpuUpdate_rvk_prog': + "vcpuUpdate p f \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma loadVMID_rvk_prog': + "loadVMID p \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +lemma archThreadSet_rvk_prog': + "archThreadSet f p \\s. revoke_progress_ord m (\x. map_option capToRPO (cteCaps_of s x))\" + by (wpsimp simp: cteCaps_of_def) + +crunch rvk_prog': finaliseCap + "\s. revoke_progress_ord m (\x. 
option_map capToRPO (cteCaps_of s x))" + (wp: crunch_wps emptySlot_rvk_prog' threadSet_ctesCaps_of + getObject_inv loadObject_default_inv + simp: crunch_simps unless_def o_def pteAtIndex_def setBoundNotification_def + ignore: setCTE threadSet) + +lemmas finalise_induct3 = finaliseSlot'.induct[where P= + "\sl exp s. P sl (finaliseSlot' sl exp) s" for P] + +lemma finaliseSlot_rvk_prog: + "s \ \\s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\ + finaliseSlot' slot e + \\rv s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\,\\\\" +proof (induct rule: finalise_induct3) + case (1 sl ex st) + show ?case + apply (subst finaliseSlot'.simps) + apply (unfold split_def) + apply (rule hoare_pre_spec_validE) + apply wp + apply ((wp | simp)+)[1] + apply (wp "1.hyps") + apply (unfold Let_def split_def fst_conv + snd_conv haskell_fail_def + case_Zombie_assert_fold) + apply (wp capSwap_rvk_prog | simp only: withoutPreemption_def)+ + apply (wp preemptionPoint_inv)[1] + apply force + apply force + apply (wp capSwap_rvk_prog | simp only: withoutPreemption_def)+ + apply (wp getCTE_wp | simp)+ + apply (rule hoare_strengthen_post [OF emptySlot_rvk_prog[where m=m]]) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def o_def + dest!: isCapDs) + apply (erule rpo_trans) + apply (rule rvk_prog_modify_map[unfolded o_def]) + apply (clarsimp simp: capToRPO_def) + apply (rule spec_strengthen_postE, + rule "1.hyps", (assumption | rule refl)+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule rpo_trans) + apply (rule rvk_prog_modify_map[unfolded o_def]) + apply (clarsimp simp: cteCaps_of_def capToRPO_def dest!: isCapDs) + apply ((wp | simp add: locateSlot_conv)+)[2] + apply (rule drop_spec_validE) + apply simp + apply (rule_tac Q="\rv s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s) + \ cte_wp_at' (\cte. cteCap cte = fst rvb) sl s" + in hoare_post_imp) + apply (clarsimp simp: o_def cte_wp_at_ctes_of capToRPO_def + dest!: isCapDs) + apply (simp split: capability.split_asm) + apply (wp updateCap_cte_wp_at_cases | simp)+ + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fc s. cte_wp_at' (\cte. cteCap cte = cteCap rv) sl s + \ revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)" + in hoare_vcg_conj_lift) + apply (wp finaliseCap_rvk_prog'[folded o_def])[1] + apply (rule finaliseCap_cases) + apply (clarsimp simp: o_def cte_wp_at_ctes_of cteCaps_of_def) + apply (strengthen imp_consequent, simp) + apply (erule rpo_trans) + apply (rule rvk_prog_modify_map[unfolded o_def]) + apply (erule disjE, simp add: capRemovable_def) + apply (auto dest!: isCapDs simp: capToRPO_def split: if_split if_split_asm)[1] + apply (wp isFinalCapability_inv getCTE_wp | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of o_def) + done +qed + +lemma cteDelete_rvk_prog: + "\\s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\ + cteDelete slot e + \\rv s. revoke_progress_ord m (option_map capToRPO \ cteCaps_of s)\,-" + including no_pre + apply (simp add: cteDelete_def whenE_def split_def) + apply (wp emptySlot_rvk_prog) + apply (simp only: cases_simp) + apply (simp add: finaliseSlot_def) + apply (rule use_spec, rule finaliseSlot_rvk_prog) + done + +text \Proving correspondence between the delete functions.\ + +definition + "spec_corres s r P P' f f' \ corres r (P and ((=) s)) P' f f'" + +lemma use_spec_corres': + assumes x: "\s. 
Q s \ spec_corres s r P P' f f'" + shows "corres r (P and Q) P' f f'" + apply (clarsimp simp: corres_underlying_def) + apply (frule x) + apply (clarsimp simp: spec_corres_def corres_underlying_def) + apply (erule(1) my_BallE, simp)+ + done + +lemmas use_spec_corres = use_spec_corres'[where Q="\", simplified] + +lemma drop_spec_corres: + "corres r P P' f f' \ spec_corres s r P P' f f'" + unfolding spec_corres_def + apply (erule corres_guard_imp) + apply simp + apply assumption + done + +lemma spec_corres_split: + assumes x: "spec_corres s r' P P' f f'" + assumes y: "\rv rv' s'. \ (rv, s') \ fst (f s); r' rv rv' \ \ + spec_corres s' r (R rv) (R' rv') (g rv) (g' rv')" + assumes z: "\Q\ f \R\" "\Q'\ f' \R'\" + shows "spec_corres s r (P and Q) (P' and Q') (f >>= g) (f' >>= g')" +proof - + have w: "\rv rv'. r' rv rv' \ corres r (R rv and (\s'. (rv, s') \ fst (f s))) (R' rv') (g rv) (g' rv')" + apply (rule use_spec_corres') + apply (erule(1) y) + done + show ?thesis + unfolding spec_corres_def + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule x[unfolded spec_corres_def]) + apply (erule w) + apply (wp z) + apply (rule univ_wp) + apply (rule z) + apply simp + apply assumption + done +qed + +lemma spec_corres_splitE: + assumes x: "spec_corres s (e \ r') P P' f f'" + assumes y: "\rv rv' s'. \ (Inr rv, s') \ fst (f s); r' rv rv' \ \ + spec_corres s' (e \ r) (R rv) (R' rv') (g rv) (g' rv')" + assumes z: "\Q\ f \R\,-" "\Q'\ f' \R'\,-" + shows "spec_corres s (e \ r) (P and Q) (P' and Q') (f >>=E g) (f' >>=E g')" +proof - + note w = z[unfolded validE_R_def validE_def] + show ?thesis + unfolding bindE_def + apply (rule spec_corres_split [OF x _ w(1) w(2)]) + apply (case_tac rv) + apply (clarsimp simp: lift_def spec_corres_def) + apply (clarsimp simp: lift_def) + apply (erule(1) y) + done +qed + +lemmas spec_corres_split' = spec_corres_split [OF drop_spec_corres] +lemmas spec_corres_splitE' = spec_corres_splitE [OF drop_spec_corres] + +lemma spec_corres_guard_imp: + assumes x: "spec_corres s r Q Q' f f'" + assumes y: "P s \ Q s" "\s'. P' s' \ Q' s'" + shows "spec_corres s r P P' f f'" + unfolding spec_corres_def + apply (rule corres_guard_imp) + apply (rule x[unfolded spec_corres_def]) + apply (clarsimp elim!: y) + apply (erule y) + done + +lemma spec_corres_returns[simp]: + "spec_corres s r P P' (return x) (return y) = (\s'. (P s \ P' s' \ (s, s') \ state_relation) \ r x y)" + "spec_corres s r' P P' (returnOk x) (returnOk y) = (\s'. 
(P s \ P' s' \ (s, s') \ state_relation) \ r' (Inr x) (Inr y))" + by (simp add: spec_corres_def returnOk_def)+ + +lemma cte_map_replicate: + "cte_map (ptr, replicate bits False) = ptr" + by (simp add: cte_map_def) + +lemma spec_corres_locate_Zombie: + "\ P s \ valid_cap (cap.Zombie ptr bits (Suc n)) s; + spec_corres s r P P' f (f' (cte_map (ptr, nat_to_cref (zombie_cte_bits bits) n))) \ + \ spec_corres s r P P' f (locateSlotCap (Zombie ptr (zbits_map bits) (Suc n)) (of_nat n) >>= f')" + unfolding spec_corres_def + apply (simp add: locateSlot_conv cte_level_bits_def stateAssert_def bind_assoc) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_assume_pre, clarsimp) + apply (frule cte_at_nat_to_cref_zbits, rule lessI) + apply (subst(asm) cte_map_nat_to_cref) + apply (drule valid_Zombie_n_less_cte_bits) + apply simp + apply (clarsimp simp: valid_cap_def cap_aligned_def word_bits_def + split: option.split_asm) + apply (simp add: mult.commute cte_level_bits_def) + apply (clarsimp simp: isCap_simps valid_cap_def) + apply (simp only: assert_def, subst if_P) + apply (cases bits, simp_all add: zbits_map_def) + apply (clarsimp simp: cap_table_at_gsCNodes isCap_simps + zbits_map_def) + apply (rule word_of_nat_less) + apply (simp add: cap_aligned_def) + apply (erule corres_guard_imp, simp_all) + apply wp+ + done + +lemma spec_corres_req: + "\ \s'. \ P s; P' s'; (s, s') \ state_relation \ \ F; + F \ spec_corres s r P P' f f' \ + \ spec_corres s r P P' f f'" + unfolding spec_corres_def + apply (rule corres_assume_pre, erule meta_mp) + apply simp + done + +lemma zombie_alignment_oddity: + "\ cte_wp_at (\c. c = cap.Zombie (cte_map slot) zb n) slot s; + invs s \ \ (cte_map slot, replicate (zombie_cte_bits zb) False) = slot" + apply (frule cte_wp_at_valid_objs_valid_cap, clarsimp+) + apply (rule cte_map_inj_eq) + apply (simp only: cte_map_replicate) + apply (erule cte_at_replicate_zbits) + apply (erule cte_wp_at_weakenE, simp) + apply clarsimp+ + done + +primrec + rec_del_concrete :: "rec_del_call \ (bool \ capability) kernel_p set" +where + "rec_del_concrete (CTEDeleteCall ptr ex) + = {liftME (\x. (True, NullCap)) (cteDelete (cte_map ptr) ex)}" +| "rec_del_concrete (FinaliseSlotCall ptr ex) + = {finaliseSlot (cte_map ptr) ex}" +| "rec_del_concrete (ReduceZombieCall cap slot ex) + = (if red_zombie_will_fail cap then {} else + (\cap. liftME (\x. (True, NullCap)) (reduceZombie cap (cte_map slot) ex)) ` {cap'. 
cap_relation cap cap'})" + +lemma rec_del_concrete_empty: + "red_zombie_will_fail cap \ rec_del_concrete (ReduceZombieCall cap slot ex) = {}" + by simp + +lemmas rec_del_concrete_unfold + = rec_del_concrete.simps red_zombie_will_fail.simps + if_True if_False ball_simps simp_thms + +lemma cap_relation_removables: + "\ cap_relation cap cap'; isNullCap cap' \ isZombie cap'; + s \ cap; cte_at slot s; invs s \ + \ cap_removeable cap slot = capRemovable cap' (cte_map slot) + \ cap_cyclic_zombie cap slot = capCyclicZombie cap' (cte_map slot)" + apply (clarsimp simp: capRemovable_def isCap_simps + capCyclicZombie_def cap_cyclic_zombie_def + split: cap_relation_split_asm arch_cap.split_asm) + apply (rule iffD1 [OF conj_commute], rule context_conjI) + apply (rule iffI) + apply (clarsimp simp: cte_map_replicate) + apply clarsimp + apply (frule(1) cte_map_inj_eq [rotated, OF _ cte_at_replicate_zbits]) + apply clarsimp+ + apply (simp add: cte_map_replicate) + apply simp + apply simp + done + +lemma spec_corres_add_asm: + "spec_corres s r P Q f g \ spec_corres s r (P and F) Q f g" + unfolding spec_corres_def + apply (erule corres_guard_imp) + apply simp+ + done + +lemma spec_corres_gen_asm2: + "(F \ spec_corres s r Q P' f g) \ spec_corres s r Q (P' and (\s. F)) f g" + unfolding spec_corres_def + by (auto intro: corres_gen_asm2) + +crunch typ_at'[wp]: reduceZombie "\s. P (typ_at' T p s)" + (simp: crunch_simps wp: crunch_wps) + +lemmas reduceZombie_typ_ats[wp] = typ_at_lifts [OF reduceZombie_typ_at'] + +lemma spec_corres_if: + "\ G = G'; G \ spec_corres s r P P' a c; \ G \ spec_corres s r Q Q' b d\ + \ spec_corres s r (\x. (G \ P x) \ (\ G \ Q x)) (\x. (G' \ P' x) \ (\ G' \ Q' x)) + (if G then a else b) (if G' then c else d)" + by simp + +lemma spec_corres_liftME2: + "spec_corres s (f \ r) P P' m (liftME fn m') + = spec_corres s (f \ (\x. r x \ fn)) P P' m m'" + by (simp add: spec_corres_def) + + +lemma rec_del_ReduceZombie_emptyable: + "\invs + and (cte_wp_at ((=) cap) slot and is_final_cap' cap + and (\y. is_zombie cap)) and + (\s. \ ex \ ex_cte_cap_wp_to (\cp. cap_irqs cp = {}) slot s) and + emptyable slot and + (\s. \ cap_removeable cap slot \ (\t\obj_refs cap. halted_if_tcb t s))\ + rec_del (ReduceZombieCall cap slot ex) \\rv. emptyable slot\, -" + by (rule rec_del_emptyable [where args="ReduceZombieCall cap slot ex", simplified]) + +crunch sch_act_simple[wp]: cteDelete sch_act_simple + +lemmas preemption_point_valid_list = preemption_point_inv'[where P="valid_list", simplified] + +lemma finaliseSlot_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ finaliseSlot ptr exposed \\_ s. P (typ_at' T p s)\" + by (rule finaliseSlot_preservation, (wp | simp)+) + +lemmas finaliseSlot_typ_ats[wp] = typ_at_lifts[OF finaliseSlot_typ_at'] + +lemmas rec_del_valid_list_irq_state_independent[wp] = + rec_del_preservation[OF cap_swap_for_delete_valid_list set_cap_valid_list empty_slot_valid_list finalise_cap_valid_list preemption_point_valid_list] + +lemma rec_del_corres: + "\C \ rec_del_concrete args. + spec_corres s (dc \ (case args of + FinaliseSlotCall _ _ \ (\r r'. fst r = fst r' + \ cap_relation (snd r) (snd r') ) + | _ \ dc)) + (einvs and simple_sched_action + and valid_rec_del_call args + and cte_at (slot_rdcall args) + and emptyable (slot_rdcall args) + and (\s. \ exposed_rdcall args \ ex_cte_cap_wp_to (\cp. cap_irqs cp = {}) (slot_rdcall args) s) + and (\s. case args of ReduceZombieCall cap sl ex \ + \t\obj_refs cap. 
halted_if_tcb t s + | _ \ True)) + (invs' and sch_act_simple and cte_at' (cte_map (slot_rdcall args)) and + (\s. \ exposed_rdcall args \ ex_cte_cap_to' (cte_map (slot_rdcall args)) s) + and (\s. case args of ReduceZombieCall cap sl ex \ + \cp'. cap_relation cap cp' + \ ((cte_wp_at' (\cte. cteCap cte = cp') (cte_map sl)) + and (\s. \ capRemovable cp' (cte_map sl) + \ (\ ex \ \ capCyclicZombie cp' (cte_map sl)))) s + | _ \ True)) + (rec_del args) C" +proof (induct rule: rec_del.induct, + simp_all only: rec_del_fails rec_del_concrete_empty + red_zombie_will_fail.simps ball_simps(5)) + case (1 slot exposed) + show ?case + apply (clarsimp simp: cteDelete_def liftME_def bindE_assoc + split_def) + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_splitE) + apply (rule "1.hyps"[simplified rec_del_concrete_unfold dc_def]) + apply (rule drop_spec_corres) + apply (simp(no_asm) add: dc_def[symmetric] liftME_def[symmetric] + whenE_liftE) + apply (rule corres_when, simp) + apply simp + apply (rule emptySlot_corres) + apply (wp rec_del_invs rec_del_valid_list rec_del_cte_at finaliseSlot_invs hoare_drop_imps + preemption_point_inv' + | simp)+ + done +next + case (2 slot exposed) + have prove_imp: + "\P Q. \ P \ Q \ \ (P \ Q) = True" + by simp + show ?case + apply (simp only: rec_del_concrete_unfold finaliseSlot_def) + apply (subst rec_del_simps_ext) + apply (subst finaliseSlot'_simps_ext) + apply (fold reduceZombie_def[unfolded cteDelete_def finaliseSlot_def]) + apply (unfold fun_app_def unlessE_whenE K_bind_def split_def) + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_splitE') + apply simp + apply (rule get_cap_corres) + apply (rule spec_corres_if) + apply auto[1] + apply (rule drop_spec_corres, rule corres_trivial, + simp add: returnOk_def) + apply (rule spec_corres_splitE') + apply simp + apply (rule isFinalCapability_corres[where ptr=slot]) + apply (rule spec_corres_splitE') + apply simp + apply (rule finaliseCap_corres[where sl=slot]) + apply simp + apply simp + apply simp + + apply (rule_tac F="isZombie (fst rv'b) \ isNullCap (fst rv'b)" + in spec_corres_gen_asm2) + apply (rule spec_corres_req[rotated]) + apply (rule_tac F="\s. invs s \ cte_at slot s \ s \ fst rvb" + in spec_corres_add_asm) + apply (rule spec_corres_if) + apply (erule conjunct1) + apply (rule drop_spec_corres, rule corres_trivial, + simp add: returnOk_def) + apply (rule spec_corres_if) + apply (erule conjunct2) + apply (rule drop_spec_corres, + simp add: liftME_def[symmetric] o_def dc_def[symmetric]) + apply (rule updateCap_corres) + apply simp + apply (simp(no_asm_use) add: cap_cyclic_zombie_def split: cap.split_asm) + apply (simp add: is_cap_simps) + apply (rule spec_corres_splitE') + apply simp + apply (rule updateCap_corres, erule conjunct1) + apply (case_tac "fst rvb", auto simp: isCap_simps is_cap_simps)[1] + apply (rule spec_corres_splitE) + apply (rule iffD1 [OF spec_corres_liftME2[where fn="\v. 
(True, NullCap)"]]) + apply (rule bspec [OF "2.hyps"(1), unfolded fun_app_def], assumption+) + apply (case_tac "fst rvb", simp_all add: isCap_simps is_cap_simps)[1] + apply (rename_tac nat) + apply (case_tac nat, simp_all)[1] + apply clarsimp + apply (rule spec_corres_splitE'[OF preemptionPoint_corres]) + apply (rule "2.hyps"(2)[unfolded fun_app_def rec_del_concrete_unfold + finaliseSlot_def], + assumption+) + apply (wp preemption_point_inv')[1] + apply clarsimp+ + apply (wp preemptionPoint_invR) + apply simp + apply clarsimp + apply simp + apply (wp rec_del_invs rec_del_cte_at reduce_zombie_cap_somewhere + rec_del_ReduceZombie_emptyable + reduceZombie_invs reduce_zombie_cap_to | simp)+ + apply (wp reduceZombie_cap_to reduceZombie_sch_act_simple) + apply simp + apply (wp replace_cap_invs final_cap_same_objrefs + set_cap_cte_wp_at set_cap_cte_cap_wp_to + hoare_vcg_const_Ball_lift hoare_weak_lift_imp + | simp add: conj_comms + | erule finalise_cap_not_reply_master [simplified])+ + apply (elim conjE, strengthen exI[mk_strg I], + strengthen asm_rl[where psi="(cap_relation cap cap')" for cap cap', mk_strg I E]) + apply (wp make_zombie_invs' updateCap_cap_to' + updateCap_cte_wp_at_cases + hoare_vcg_ex_lift hoare_weak_lift_imp) + apply clarsimp + apply (drule_tac cap=a in cap_relation_removables, + clarsimp, assumption+) + apply (clarsimp simp: conj_comms) + apply (wp | simp)+ + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fin s. einvs s \ simple_sched_action s + \ replaceable s slot (fst fin) rv + \ cte_wp_at ((=) rv) slot s \ s \ fst fin + \ emptyable slot s + \ (\t\obj_refs (fst fin). halted_if_tcb t s)" + in hoare_vcg_conj_lift) + apply (wp finalise_cap_invs finalise_cap_replaceable + finalise_cap_makes_halted + hoare_vcg_disj_lift hoare_vcg_ex_lift)[1] + apply (rule finalise_cap_cases[where slot=slot]) + apply clarsimp + apply (frule if_unsafe_then_capD, clarsimp, clarsimp) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule disjE[where P="c = cap.NullCap \ P" for c P]) + apply clarsimp + apply (clarsimp simp: conj_comms dest!: is_cap_simps [THEN iffD1]) + apply (frule trans [OF _ appropriate_Zombie, OF sym]) + apply (case_tac rv, simp_all add: fst_cte_ptrs_def is_cap_simps + is_final_cap'_def)[1] + apply (wp | simp)+ + apply (rule hoare_strengthen_post) + apply (rule_tac Q="\fin s. invs' s \ sch_act_simple s \ s \' fst fin + \ (exposed \ ex_cte_cap_to' (cte_map slot) s) + \ cte_wp_at' (\cte. 
cteCap cte = cteCap rv') (cte_map slot) s" + in hoare_vcg_conj_lift) + apply (wp hoare_vcg_disj_lift finaliseCap_invs[where sl="cte_map slot"])[1] + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_replaceable[where slot="cte_map slot"]) + apply (rule finaliseCap_cte_refs) + apply clarsimp + apply (erule disjE[where P="F \ G" for F G]) + apply (clarsimp simp: capRemovable_def cte_wp_at_ctes_of) + apply (clarsimp dest!: isCapDs simp: cte_wp_at_ctes_of) + apply (case_tac "cteCap rv'", + auto simp add: isCap_simps is_cap_simps final_matters'_def)[1] + apply (wp isFinalCapability_inv hoare_weak_lift_imp + | simp add: is_final_cap_def conj_comms cte_wp_at_eq_simp)+ + apply (rule isFinal[where x="cte_map slot"]) + apply (wp get_cap_wp| simp add: conj_comms)+ + apply (wp getCTE_wp') + apply clarsimp + apply (frule cte_wp_at_valid_objs_valid_cap[where P="(=) cap" for cap]) + apply fastforce + apply (fastforce simp: cte_wp_at_caps_of_state) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid', clarsimp) + apply ((clarsimp | rule conjI)+)[1] + done + +next + case (3 ptr bits n slot) + show ?case + apply simp + apply (rule drop_spec_corres) + apply (simp add: reduceZombie_def case_Zombie_assert_fold) + apply (rule stronger_corres_guard_imp[rotated]) + apply assumption + apply (rule conjI) + apply clarsimp + apply (drule cte_wp_valid_cap, clarsimp) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (drule cte_at_replicate_zbits) + apply (drule cte_at_get_cap_wp, clarsimp) + apply (rule cte_wp_at_weakenE') + apply (erule(1) pspace_relation_cte_wp_at[OF state_relation_pspace_relation]) + apply clarsimp+ + apply (rule TrueI) + apply assumption + apply (rule_tac F="(ptr, replicate (zombie_cte_bits bits) False) \ slot" in corres_req) + apply (clarsimp simp: capCyclicZombie_def cte_map_replicate) + apply (rule_tac F="ptr \ cte_map slot" in corres_req) + apply (elim conjE exE) + apply (frule cte_wp_valid_cap, clarsimp) + apply (drule cte_map_inj) + apply (erule cte_at_replicate_zbits) + apply (erule cte_wp_at_weakenE, simp) + apply clarsimp+ + apply (simp add: cte_map_replicate) + apply (simp add: liftM_def liftME_def[symmetric]) + apply (simp add: liftE_bindE) + apply (rule corres_symb_exec_r [OF _ getCTE_sp]) + apply (rule_tac F="isZombie (cteCap x) \ capZombiePtr (cteCap x) \ ptr" + in corres_req) + apply (clarsimp simp: state_relation_def dest!: isCapDs) + apply (drule pspace_relation_cte_wp_atI') + apply (subst(asm) eq_commute, assumption) + apply clarsimp + apply clarsimp + apply (case_tac c, simp_all)[1] + apply (clarsimp simp: cte_wp_at_def) + apply (drule(1) zombies_finalD2, clarsimp+) + apply (fold dc_def) + apply (rule corres_guard_imp, rule capSwapForDelete_corres) + apply (simp add: cte_map_replicate) + apply simp + apply clarsimp + apply (rule conjI, clarsimp)+ + apply (rule conjI, rule cte_at_replicate_zbits, erule cte_wp_valid_cap) + apply clarsimp + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule tcb_valid_nonspecial_cap, fastforce) + apply (clarsimp simp: ran_tcb_cap_cases is_cap_simps is_nondevice_page_cap_simps + split: Structures_A.thread_state.split) + apply (simp add: ran_tcb_cap_cases is_cap_simps is_nondevice_page_cap_simps) + apply fastforce + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_map_replicate) + done +next + note if_cong [cong] option.case_cong [cong] + case (4 ptr bits n slot) + let ?target = "(ptr, nat_to_cref (zombie_cte_bits bits) n)" + note hyps = "4.hyps"[simplified rec_del_concrete_unfold 
spec_corres_liftME2] + show ?case + apply (simp only: rec_del_concrete_unfold cap_relation.simps) + apply (simp add: reduceZombie_def Let_def + liftE_bindE + del: inf_apply) + apply (subst rec_del_simps_ext) + apply (rule_tac F="ptr + 2 ^ cte_level_bits * of_nat n + = cte_map ?target" + in spec_corres_req) + apply clarsimp + apply (drule cte_wp_valid_cap, clarsimp) + apply (subst cte_map_nat_to_cref) + apply (drule valid_Zombie_n_less_cte_bits, simp) + apply (clarsimp simp: valid_cap_def cap_aligned_def word_bits_def + split: option.split_asm) + apply (simp add: cte_level_bits_def) + apply (simp add: spec_corres_liftME2 pred_conj_assoc) + apply (rule spec_corres_locate_Zombie) + apply (auto dest: cte_wp_valid_cap)[1] + apply (rule_tac F="n < 2 ^ (word_bits - cte_level_bits)" in spec_corres_req) + apply clarsimp + apply (drule cte_wp_valid_cap, clarsimp) + apply (frule valid_Zombie_n_less_cte_bits) + apply (drule Suc_le_lessD) + apply (erule order_less_le_trans) + apply (rule power_increasing) + apply (clarsimp simp: valid_cap_def cap_aligned_def + split: option.split_asm) + apply (simp add: cte_level_bits_def word_bits_def) + apply simp + apply simp + apply (rule spec_corres_gen_asm2) + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_splitE) + apply (rule hyps) + apply (simp add: in_monad) + apply (rule drop_spec_corres) + apply (simp add: liftE_bindE del: rec_del.simps) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac F="cteCap ourCTE = Zombie ptr (zbits_map bits) (Suc n) + \ cteCap ourCTE = NullCap + \ (\zb n cp. cteCap ourCTE = Zombie (cte_map slot) zb n + \ cp = Zombie ptr (zbits_map bits) (Suc n) + \ capZombiePtr cp \ cte_map slot)" + in corres_gen_asm2) + apply (rule_tac P="invs and cte_wp_at (\c. c = new_cap) slot + and cte_wp_at (\c. c = cap.NullCap \ \ False \ is_zombie c + \ ?target \ fst_cte_ptrs c) ?target" + and P'="invs' and sch_act_simple + and cte_wp_at' (\c. c = ourCTE) (cte_map slot) + and cte_at' (cte_map ?target)" + in corres_inst) + apply (erule disjE) + apply (case_tac new_cap, simp_all split del: if_split)[1] + apply (simp add: liftME_def[symmetric]) + apply (rule stronger_corres_guard_imp) + apply (rule corres_symb_exec_r) + apply (rule_tac F="cteCap endCTE = capability.NullCap" + in corres_gen_asm2, simp) + apply (rule updateCap_corres) + apply simp + apply (simp add: is_cap_simps) + apply (rule_tac R="\rv. 
cte_at' (cte_map ?target)" in hoare_post_add) + apply (wp, (wp getCTE_wp)+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule no_fail_pre, wp, simp) + apply clarsimp + apply (frule zombies_finalD, clarsimp) + apply (clarsimp simp: is_cap_simps) + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule cte_wp_valid_cap[unfolded cte_wp_at_eq_simp], clarsimp) + apply (drule cte_wp_at_norm[where p="?target"], clarsimp) + apply (erule disjE) + apply (drule(1) pspace_relation_cte_wp_at + [OF state_relation_pspace_relation], + clarsimp+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: is_cap_simps fst_cte_ptrs_def + cte_wp_at_ctes_of) + apply (frule cte_at_cref_len [rotated, OF cte_at_replicate_zbits]) + apply (fastforce simp add: cte_wp_at_caps_of_state) + apply clarsimp + apply (drule(1) nat_to_cref_replicate_Zombie) + apply simp + apply (clarsimp simp: capRemovable_def cte_wp_at_def) + apply (drule(1) zombies_finalD2, clarsimp+) + apply (simp add: is_cap_simps) + apply (erule disjE) + apply (case_tac new_cap, simp_all split del: if_split)[1] + apply (simp add: assertE_def returnOk_def) + apply (elim exE conjE) + apply (case_tac new_cap, simp_all)[1] + apply (clarsimp simp add: is_zombie_def) + apply (simp add: assertE_def liftME_def[symmetric] + split del: if_split) + apply (rule corres_req[rotated], subst if_P, assumption) + apply (simp add: returnOk_def) + apply (clarsimp simp: zombie_alignment_oddity cte_map_replicate) + apply (wp get_cap_cte_wp_at getCTE_wp' rec_del_cte_at + rec_del_invs rec_del_delete_cases)+ + apply (rule hoare_post_imp_R) + apply (rule_tac P="\cp. cp = Zombie ptr (zbits_map bits) (Suc n)" + in cteDelete_cte_wp_at_invs[where p="cte_map slot"]) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of | rule conjI)+ + apply (clarsimp simp: capRemovable_def shiftl_t2n[symmetric]) + apply (drule arg_cong[where f="\x. 
x >> cte_level_bits"], + subst(asm) shiftl_shiftr_id) + apply (clarsimp simp: cte_level_bits_def word_bits_def) + apply (rule order_less_le_trans) + apply (erule of_nat_mono_maybe [rotated]) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def cte_level_bits_def) + apply simp + apply (simp add: word_bits_def) + apply simp + apply (erule(1) notE [rotated, OF _ of_nat_neq_0]) + apply (erule order_less_le_trans) + apply (rule power_increasing) + apply (simp add: word_bits_def cte_level_bits_def) + apply simp + apply clarsimp + apply (frule cte_wp_valid_cap, clarsimp) + apply (rule conjI, erule cte_at_nat_to_cref_zbits) + apply simp + apply (simp add: halted_emptyable) + apply (erule(1) zombie_is_cap_toE) + apply simp + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid', clarsimp+) + apply (frule valid_Zombie_cte_at'[where n=n]) + apply (clarsimp simp: valid_cap'_def) + apply (intro conjI) + apply (fastforce simp: cte_wp_at_ctes_of cte_level_bits_def objBits_defs + mult.commute mult.left_commute) + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="cte_map slot" in exI) + apply (clarsimp simp: image_def) + apply (rule_tac x="of_nat n" in bexI) + apply (fastforce simp: cte_level_bits_def objBits_defs mult.commute mult.left_commute shiftl_t2n) + apply simp + apply (subst field_simps, rule plus_one_helper2) + apply simp + apply (frule of_nat_mono_maybe[rotated, where 'a=machine_word_len]) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def cte_level_bits_def) + apply simp + apply clarsimp + apply (drule_tac f="\x. x - 1" and y=0 in arg_cong) + apply (clarsimp simp: word_bits_def cte_level_bits_def) + done +qed + +lemma cteDelete_corres: + "corres (dc \ dc) + (einvs and simple_sched_action and cte_at ptr and emptyable ptr) + (invs' and sch_act_simple and cte_at' (cte_map ptr)) + (cap_delete ptr) (cteDelete (cte_map ptr) True)" + unfolding cap_delete_def + using rec_del_corres[where args="CTEDeleteCall ptr True"] + apply (simp add: spec_corres_liftME2 liftME_def[symmetric]) + apply (erule use_spec_corres) + done + + +text \The revoke functions, and their properties, are + slightly easier to deal with than the delete + function. However, their termination arguments + are complex, requiring that the delete functions + reduce the number of non-null capabilities.\ + +definition + cteRevoke_recset :: "((machine_word \ kernel_state) \ (machine_word \ kernel_state)) set" +where + "cteRevoke_recset \ measure (\(sl, s). (\mp. \x \ dom mp. rpo_measure x (mp x)) + (option_map capToRPO \ cteCaps_of s))" + +lemma wf_cteRevoke_recset: + "wf cteRevoke_recset" + by (simp add: cteRevoke_recset_def) + +termination cteRevoke + apply (rule cteRevoke.termination) + apply (rule wf_cteRevoke_recset) + apply (clarsimp simp add: cteRevoke_recset_def in_monad + dest!: in_getCTE in_preempt') + apply (frule use_validE_R [OF _ cteDelete_rvk_prog]) + apply (rule rpo_sym) + apply (frule use_validE_R [OF _ cteDelete_deletes]) + apply simp + apply (simp add: revoke_progress_ord_def) + apply (erule disjE) + apply (drule_tac f="\f. f (mdbNext (cteMDBNode rv))" in arg_cong) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def capToRPO_def) + apply (simp split: capability.split_asm) + apply (case_tac rvb, clarsimp) + apply assumption + done + +lemma cteRevoke_preservation': + assumes x: "\ptr. \P\ cteDelete ptr True \\rv. P\" + assumes y: "\f s. 
P (ksWorkUnitsCompleted_update f s) = P s" + assumes irq: "irq_state_independent_H P" + shows "s \ \P\ cteRevoke ptr \\rv. P\,\\rv. P\" +proof (induct rule: cteRevoke.induct) + case (1 p s') + show ?case + apply (subst cteRevoke.simps) + apply (wp "1.hyps") + apply (wp x y preemptionPoint_inv hoare_drop_imps irq | clarsimp)+ + done +qed + +lemmas cteRevoke_preservation = + validE_valid [OF use_spec(2) [OF cteRevoke_preservation']] + +lemma cteRevoke_typ_at': + "\\s. P (typ_at' T p s)\ cteRevoke ptr \\rv s. P (typ_at' T p s)\" + by (wp cteRevoke_preservation | clarsimp)+ + +lemma cteRevoke_invs': + "\invs' and sch_act_simple\ cteRevoke ptr \\rv. invs'\" + apply (rule_tac Q="\rv. invs' and sch_act_simple" in hoare_strengthen_post) + apply (wp cteRevoke_preservation cteDelete_invs' cteDelete_sch_act_simple)+ + apply simp_all + done + +declare cteRevoke.simps[simp del] + +lemma spec_corres_symb_exec_l_Ex: + assumes x: "\rv. (rv, s) \ fst (f s) \ spec_corres s r (Q rv) P' (g rv) h" + shows "spec_corres s r (\s. \rv. Q rv s \ (rv, s) \ fst (f s)) P' + (do rv \ f; g rv od) h" +proof - + have y: "\rv. corres r (\s'. s' = s \ Q rv s \ (rv, s) \ fst (f s)) P' (g rv) h" + apply (rule corres_req) + defer + apply (rule corres_guard_imp, + erule x[unfolded spec_corres_def]) + apply simp+ + done + show ?thesis + unfolding spec_corres_def + apply (rule corres_guard_imp, + rule corres_symb_exec_l_Ex, + rule y) + apply simp+ + done +qed + +lemma spec_corres_symb_exec_l_Ex2: + assumes y: "P s \ \rv. (rv, s) \ fst (f s)" + assumes x: "\rv. (rv, s) \ fst (f s) \ + spec_corres s r (\s. \s'. (rv, s) \ fst (f s') \ P s') P' (g rv) h" + shows "spec_corres s r P P' (do rv \ f; g rv od) h" + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_symb_exec_l_Ex) + apply (erule x) + apply (frule y) + apply fastforce + apply assumption + done + +lemma spec_corres_symb_exec_r_All: + assumes nf: "\rv. no_fail (Q' rv) g" + assumes x: "\rv. spec_corres s r P (Q' rv) f (h rv)" + shows "spec_corres s r P (\s. (\p \ fst (g s). snd p = s \ Q' (fst p) s) \ (\rv. Q' rv s)) + f (do rv \ g; h rv od)" + unfolding spec_corres_def + apply (rule corres_guard_imp, + rule corres_symb_exec_r_All, + rule nf, + rule x[unfolded spec_corres_def]) + apply simp+ + done + +lemma spec_corres_symb_exec_r_Ex: + assumes y: "\s. P' s \ \p \ fst (g s). snd p = s" + assumes z: "\s. P' s \ \p \ fst (g s). snd p = s" + assumes nf: "no_fail P' g" + assumes x: "\rv. spec_corres s r P (\s. \s'. 
(rv, s) \ fst (g s') \ P' s') f (h rv)" + shows "spec_corres s r P P' f (do rv \ g; h rv od)" + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_symb_exec_r_All) + prefer 2 + apply (rule x) + apply (insert nf)[1] + apply (clarsimp simp: no_fail_def) + apply (frule y) + apply (drule(1) bspec) + apply fastforce + apply assumption + apply (frule y) + apply (rule conjI) + apply clarsimp + apply (drule(1) bspec) + apply fastforce + apply (frule z) + apply fastforce + done + +lemma in_getCTE_cte_wp_at': + "(rv, s') \ fst (getCTE p s) = (s = s' \ cte_wp_at' ((=) rv) p s)" + apply (rule iffI) + apply (clarsimp dest!: in_getCTE simp: cte_wp_at'_def) + apply (clarsimp simp: cte_wp_at'_def getCTE_def) + done + +lemma state_relation_cap_relation: + "\ (s, s') \ state_relation; cte_wp_at ((=) cap) p s; + cte_wp_at' ((=) cte) (cte_map p) s'; + valid_objs s; pspace_distinct' s'; pspace_aligned' s' \ + \ cap_relation cap (cteCap cte)" + apply (cases p, clarsimp simp: state_relation_def) + apply (drule(3) pspace_relation_cte_wp_at) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma descendants_of_empty_state_relation: + "\ (s, s') \ state_relation; cte_at p s \ \ + (descendants_of p (cdt s) = {}) = (descendants_of' (cte_map p) (ctes_of s') = {})" + apply (clarsimp simp only: state_relation_def cdt_relation_def swp_def) + apply (drule spec, drule(1) mp) + apply (fastforce) + done + +lemma subtree_first_step: + "\ ctes_of s p = Some cte; ctes_of s \ p \ p' \ + \ mdbNext (cteMDBNode cte) \ nullPointer \ + (\cte'. ctes_of s (mdbNext (cteMDBNode cte)) = Some cte' + \ isMDBParentOf cte cte')" + apply (erule subtree.induct) + apply (clarsimp simp: mdb_next_unfold nullPointer_def parentOf_def) + apply clarsimp + done + +lemma cap_revoke_mdb_stuff1: + "\ (s, s') \ state_relation; cte_wp_at ((=) cap) p s; + cte_wp_at' ((=) cte) (cte_map p) s'; invs s; invs' s'; + cap \ cap.NullCap; cteCap cte \ NullCap \ + \ (descendants_of p (cdt s) = {}) + = (\ (mdbNext (cteMDBNode cte) \ nullPointer + \ cte_wp_at' (isMDBParentOf cte) (mdbNext (cteMDBNode cte)) s'))" + apply (subst descendants_of_empty_state_relation) + apply assumption + apply (clarsimp elim!: cte_wp_at_weakenE) + apply (simp add: descendants_of'_def) + apply safe + apply (drule spec[where x="mdbNext (cteMDBNode cte)"]) + apply (erule notE, rule subtree.direct_parent) + apply (clarsimp simp: mdb_next_unfold cte_wp_at_ctes_of) + apply (simp add: nullPointer_def) + apply (clarsimp simp: parentOf_def cte_wp_at_ctes_of) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) subtree_first_step) + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) subtree_first_step) + apply clarsimp + done + +lemma select_bind_spec_corres': + "\P sa \ x \ S; spec_corres sa r P P' (f x) g\ +\ spec_corres sa r P P' (select S >>= f) g" + apply (clarsimp simp add: spec_corres_def + corres_underlying_def bind_def + select_def + | drule(1) bspec | erule rev_bexI | rule conjI)+ + done + +lemma cap_revoke_mdb_stuff4: + "\ (s, s') \ state_relation; cte_wp_at ((=) cap) p s; + cte_wp_at' ((=) cte) (cte_map p) s'; invs s; valid_list s; invs' s'; + cap \ cap.NullCap; cteCap cte \ NullCap; + descendants_of p (cdt s) \ {} \ + \ \p'. 
mdbNext (cteMDBNode cte) = cte_map p' + \ next_child p (cdt_list s) = Some p'" + apply(subgoal_tac "descendants_of p (cdt s) \ {}") + prefer 2 + apply simp + apply (subst(asm) cap_revoke_mdb_stuff1) + apply assumption+ + apply (clarsimp simp: cte_wp_at_ctes_of state_relation_def) + apply (drule(1) pspace_relation_cte_wp_atI[where x="mdbNext c" for c]) + apply clarsimp + apply clarsimp + apply (intro exI, rule conjI [OF refl]) + apply(simp add: cdt_list_relation_def) + apply(erule_tac x="fst p" in allE, erule_tac x="snd p" in allE) + apply(case_tac "cte", simp) + apply(case_tac "next_slot p (cdt_list s) (cdt s)") + apply(simp add: next_slot_def empty_list_empty_desc next_child_None_empty_desc) + apply(frule cte_at_next_slot') + apply(erule invs_mdb) + apply(simp add: invs_def valid_state_def finite_depth) + apply(assumption) + apply(simp add: next_slot_def empty_list_empty_desc) + apply(frule invs_valid_pspace, simp add: valid_pspace_def) + apply(rule cte_map_inj_eq) + apply(simp add: cte_wp_at_def)+ + done + +lemma cteRevoke_corres': + "spec_corres s (dc \ dc) + (einvs and simple_sched_action and cte_at ptr) + (invs' and sch_act_simple and cte_at' (cte_map ptr)) + (cap_revoke ptr) (\s. cteRevoke (cte_map ptr) s)" +proof (induct rule: cap_revoke.induct) + case (1 slot s') + show ?case + apply (subst cap_revoke.simps) + apply (subst cteRevoke.simps[abs_def]) + apply (simp add: liftE_bindE next_revoke_cap_def select_ext_def bind_assoc) + apply (rule spec_corres_symb_exec_l_Ex2) + apply (clarsimp simp: cte_wp_at_def) + apply (rule spec_corres_symb_exec_l_Ex2) + apply (simp add: in_monad) + apply (rule spec_corres_symb_exec_r_Ex) + apply (clarsimp elim!: use_valid [OF _ getCTE_inv]) + apply (clarsimp simp: cte_at'_def getCTE_def) + apply (rule no_fail_pre, wp) + apply clarsimp + apply (simp add: in_monad in_get_cap_cte_wp_at + in_getCTE_cte_wp_at') + apply (rule_tac F="cap_relation cap (cteCap cte)" + in spec_corres_req) + apply (clarsimp | erule(2) state_relation_cap_relation)+ + apply (case_tac "cap = cap.NullCap") + apply (simp add: whenE_def) + apply (case_tac "cteCap cte = NullCap") + apply (simp add: whenE_def) + apply (case_tac "descendants_of slot (cdt s') = {}") + apply (case_tac "mdbNext (cteMDBNode cte) = nullPointer") + apply (simp add: whenE_def) + apply (simp add: whenE_def[where P=True]) + apply (rule spec_corres_symb_exec_r_Ex) + apply (clarsimp elim!: use_valid [OF _ getCTE_inv]) + apply clarsimp + apply (subgoal_tac "cte_at' (mdbNext (cteMDBNode cte)) s") + apply (clarsimp simp: getCTE_def cte_at'_def) + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def nullPointer_def) + apply (erule (2) valid_dlistEn) + apply simp + apply (rule no_fail_pre, wp) + apply clarsimp + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def nullPointer_def) + apply (erule (2) valid_dlistEn) + apply simp + apply (rule_tac F="\ isMDBParentOf cte nextCTE" + in spec_corres_req) + apply (clarsimp simp: in_getCTE_cte_wp_at') + apply (subst(asm) cap_revoke_mdb_stuff1, assumption+) + apply (clarsimp simp: cte_wp_at'_def) + apply (simp add: whenE_def) + apply (rule_tac F="mdbNext (cteMDBNode cte) \ nullPointer" + in spec_corres_req) + apply clarsimp + apply (subst(asm) cap_revoke_mdb_stuff1, assumption+) + apply (clarsimp simp: cte_wp_at'_def) + apply (simp add: whenE_def[where P=True]) + apply (rule spec_corres_symb_exec_r_Ex) + apply (clarsimp elim!: use_valid [OF _ getCTE_inv]) + apply (subgoal_tac "cte_at' 
(mdbNext (cteMDBNode cte)) s") + apply (clarsimp simp: getCTE_def cte_at'_def) + apply clarsimp + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def nullPointer_def) + apply (erule (2) valid_dlistEn) + apply simp + apply (rule no_fail_pre, wp) + apply clarsimp + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def nullPointer_def) + apply (erule (2) valid_dlistEn) + apply simp + apply (simp add: in_monad in_get_cap_cte_wp_at + in_getCTE_cte_wp_at') + apply(case_tac "next_child slot (cdt_list s')") + apply(rule_tac F="False" in spec_corres_req) + apply(clarsimp) + apply(frule next_child_NoneD) + apply(simp add: empty_list_empty_desc) + apply(simp) + apply (rule_tac F="valid_list s'" in spec_corres_req,simp) + apply (frule next_child_child_set, assumption) + apply simp + apply (rule spec_corres_symb_exec_l_Ex2) + apply (simp add: in_monad) + apply (rule spec_corres_symb_exec_l_Ex2) + apply (simp add: in_monad) + apply (drule next_childD, simp) + apply (simp add: child_descendant) + apply (rule spec_corres_symb_exec_l_Ex2) + apply (clarsimp simp: in_monad) + apply (drule next_childD, simp) + apply (clarsimp) + apply (drule child_descendant) + apply (drule descendants_of_cte_at, erule invs_mdb) + apply (clarsimp simp: cte_wp_at_def) + apply (simp add: in_monad) + apply(case_tac "capa = cap.NullCap") + apply(rule_tac F="False" in spec_corres_req) + apply(clarsimp) + apply(drule next_childD, simp) + apply(clarsimp) + apply(drule child_descendant) + apply(drule cap_revoke_mdb_stuff3) + apply(erule invs_mdb) + apply(clarsimp simp: cte_wp_at_def) + apply(simp) + apply (simp) + apply (rule_tac F="isMDBParentOf cte nextCTE" + in spec_corres_req) + apply clarsimp + apply(frule cap_revoke_mdb_stuff1, (simp add: in_get_cap_cte_wp_at)+) + apply (clarsimp simp: cte_wp_at'_def) + + apply (rule spec_corres_req) + apply clarsimp + apply (rule cap_revoke_mdb_stuff4, (simp add: in_get_cap_cte_wp_at)+) + apply (clarsimp simp: whenE_def) + apply (rule spec_corres_guard_imp) + apply (rule spec_corres_splitE' [OF cteDelete_corres]) + apply (rule spec_corres_splitE' [OF preemptionPoint_corres]) + apply (rule "1.hyps", + (simp add: cte_wp_at_def in_monad select_def next_revoke_cap_def select_ext_def + | assumption | rule conjI refl)+)[1] + apply (wp cap_delete_cte_at cteDelete_invs' cteDelete_sch_act_simple + preemptionPoint_invR preemption_point_inv' | clarsimp)+ + apply (clarsimp simp: cte_wp_at_cte_at) + apply(drule next_childD, simp) + apply(clarsimp, drule child_descendant) + apply (fastforce simp: emptyable_def dest: reply_slot_not_descendant) + apply (clarsimp elim!: cte_wp_at_weakenE') + done +qed + +lemmas cteRevoke_corres = use_spec_corres [OF cteRevoke_corres'] + +lemma arch_recycleCap_improve_cases: + "\ \ isFrameCap cap; \ isPageTableCap cap; \ isVCPUCap cap; \ isASIDControlCap cap \ + \ (if isASIDPoolCap cap then v else undefined) = v" + by (cases cap, simp_all add: isCap_simps) + +crunch typ_at'[wp]: invokeCNode "\s. P (typ_at' T p s)" + (ignore: finaliseSlot + simp: crunch_simps filterM_mapM unless_def + arch_recycleCap_improve_cases + wp: crunch_wps undefined_valid finaliseSlot_preservation) + +lemmas invokeCNode_typ_ats [wp] = typ_at_lifts [OF invokeCNode_typ_at'] + +crunch st_tcb_at'[wp]: cteMove "st_tcb_at' P t" + (wp: crunch_wps) + +lemma threadSet_st_tcb_at2: + assumes x: "\tcb. P (tcbState tcb) \ P (tcbState (f tcb))" + shows "\st_tcb_at' P t\ threadSet f t' \\rv. 
st_tcb_at' P t\" + including no_pre + apply (simp add: threadSet_def pred_tcb_at'_def) + apply (wp setObject_tcb_strongest) + apply (rule hoare_strengthen_post, rule getObject_tcb_sp) + apply (clarsimp simp: obj_at'_def x) + done + +crunch st_tcb_at_simplish[wp]: "cancelBadgedSends" "st_tcb_at' (\st. P st \ simple' st) t" + (wp: crunch_wps threadSet_st_tcb_at2 + simp: crunch_simps filterM_mapM makeObject_tcb unless_def) + +lemma cancelBadgedSends_st_tcb_at': + assumes x: "\st. simple' st \ P st" + shows "\st_tcb_at' P t\ cancelBadgedSends a b \\_. st_tcb_at' P t\" + apply (rule hoare_chain) + apply (rule cancelBadgedSends_st_tcb_at_simplish[where P=P and t=t]) + apply (auto simp: x elim!: pred_tcb'_weakenE) + done + +lemmas cteRevoke_st_tcb_at' + = cteRevoke_preservation [OF cteDelete_st_tcb_at'] +lemmas cteRevoke_st_tcb_at_simplish + = cteRevoke_st_tcb_at'[where P="\st. Q st \ simple' st", + simplified] for Q + +lemmas finaliseSlot_st_tcb_at' + = finaliseSlot_preservation [OF finaliseCap2_st_tcb_at' + emptySlot_pred_tcb_at' + capSwapForDelete_st_tcb_at' + updateCap_pred_tcb_at'] +lemmas finaliseSlot_st_tcb_at_simplish + = finaliseSlot_st_tcb_at'[where P="\st. Q st \ simple' st", + simplified] for Q + +lemma updateCap_valid_objs [wp]: + "\\s. valid_objs' s \ s \' cap\ + updateCap ptr cap + \\r. valid_objs'\" + unfolding updateCap_def + apply (wp setCTE_valid_objs getCTE_wp) + apply clarsimp + apply (erule cte_at_cte_wp_atD) + done + +end + +lemma (in mdb_move) [intro!]: + shows "mdb_chain_0 m" using valid + by (auto simp: valid_mdb_ctes_def) + +lemma (in mdb_move) m'_badged: + "m' p = Some (CTE cap node) + \ if p = dest then mdbFirstBadged node = mdbFirstBadged src_node \ cap = cap' + else if p = src then \ mdbFirstBadged node \ cap = NullCap + else \node'. m p = Some (CTE cap node') \ mdbFirstBadged node = mdbFirstBadged node'" + using src dest neq + apply (clarsimp simp: m'_def n_def modify_map_cases nullMDBNode_def) + apply (rule conjI, clarsimp) + apply clarsimp + apply auto + done + +lemma (in mdb_move) m'_next: + "m' \ p \ p' \ + if p = src then p' = 0 + else if p = dest then m \ src \ p' + else if p' = dest then m \ p \ src + else m \ p \ p'" + using src dest src_0 dest_0 dlist neq src_neq_prev + apply (simp add: m'_def n_def) + apply (simp add: mdb_next_unfold) + apply (elim exE conjE) + apply (case_tac z) + apply (rename_tac cap node) + apply simp + apply (simp add: modify_map_cases) + apply (cases "mdbPrev src_node = p") + apply clarsimp + apply (erule_tac p=src in valid_dlistEp, assumption) + apply clarsimp + apply clarsimp + apply simp + apply (cases "p=src", simp) + apply clarsimp + apply (case_tac "mdbNext node = p") + apply clarsimp + apply clarsimp + apply (erule_tac p=p in valid_dlistEn, assumption) + apply clarsimp + apply (clarsimp simp: prev) + done + +lemma (in mdb_move) sameRegionAs_parent_eq: + "sameRegionAs cap cap' = sameRegionAs cap src_cap" + using parency unfolding weak_derived'_def + by (simp add: sameRegionAs_def2) + +lemma (in mdb_move) m'_cap: + "m' p = Some (CTE c node) \ + if p = src then c = NullCap + else if p = dest then c = cap' + else \node'. m p = Some (CTE c node')" + using src dest neq + apply (simp add: m'_def n_def) + apply (auto simp add: modify_map_if split: if_split_asm) + done + +context mdb_move +begin + +interpretation Arch . 
(*FIXME: arch_split*) + +lemma m_to_src: + "m \ p \ src = (p \ 0 \ p = mdbPrev src_node)" + apply (insert src) + apply (rule iffI) + apply (clarsimp simp add: mdb_next_unfold) + apply (rule conjI, clarsimp) + apply (case_tac z) + apply clarsimp + apply (erule_tac p=p in dlistEn, clarsimp) + apply clarsimp + apply (clarsimp simp add: mdb_next_unfold) + apply (erule dlistEp, clarsimp) + apply clarsimp + done + +lemma m_from_prev_src: + "m \ mdbPrev src_node \ p = (mdbPrev src_node \ 0 \ p = src)" + apply (insert src) + apply (rule iffI) + apply (clarsimp simp: mdb_next_unfold) + apply (rule conjI, clarsimp) + apply (erule dlistEp, clarsimp) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + apply (erule dlistEp, clarsimp) + apply clarsimp + done + +lemma m'_nextD: + "m' \ p \ p' \ + (if p = src then p' = 0 + else if p = dest then m \ src \ p' + else if p = mdbPrev src_node then p' = dest \ p \ 0 + else m \ p \ p')" + using src dest src_0 dest_0 dlist neq src_neq_prev + apply (simp add: m'_def n_def) + apply (simp add: mdb_next_unfold) + apply (elim exE conjE) + apply (case_tac z) + apply simp + apply (simp add: modify_map_cases) + apply (cases "mdbPrev src_node = p") + apply clarsimp + apply simp + apply (cases "p=src", simp) + apply clarsimp + done + + +lemmas prev_src = prev_p_next + +lemma m'_next_eq: + notes if_cong [cong] + shows + "m' \ p \ p' = + (if p = src then p' = 0 + else if p = dest then m \ src \ p' + else if p = mdbPrev src_node then p' = dest \ p \ 0 + else m \ p \ p')" + apply (insert src dest) + apply (rule iffI) + apply (drule m'_nextD, simp) + apply (cases "p=0") + apply (clarsimp simp: mdb_next_unfold split: if_split_asm) + apply (simp split: if_split_asm) + apply (simp add: mdb_next_unfold m'_def n_def modify_map_cases) + apply (simp add: mdb_next_unfold m'_def n_def modify_map_cases neq) + apply (simp add: mdb_next_unfold m'_def n_def modify_map_cases neq) + apply clarsimp + apply (drule prev_src) + apply (clarsimp simp: mdb_next_unfold) + apply (case_tac z) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold m'_def n_def modify_map_cases) + apply (cases "mdbNext src_node = p") + apply (clarsimp) + apply (case_tac z) + apply clarsimp + apply clarsimp + done + +declare dest_0 [simp] + +lemma m'_swp_eq: + "m' \ p \ p' = m \ s_d_swap p src dest \ s_d_swap p' src dest" + by (auto simp add: m'_next_eq s_d_swap_def m_to_src m_from_prev_src) + +lemma m'_tranclD: + "m' \ p \\<^sup>+ p' \ m \ s_d_swap p src dest \\<^sup>+ s_d_swap p' src dest" + apply (erule trancl.induct) + apply (fastforce simp: m'_swp_eq) + apply (fastforce simp: m'_swp_eq intro: trancl_trans) + done + +lemma m_tranclD: + "m \ p \\<^sup>+ p' \ m' \ s_d_swap p src dest \\<^sup>+ s_d_swap p' src dest" + apply (erule trancl.induct) + apply (fastforce simp: m'_swp_eq) + apply (fastforce simp: m'_swp_eq intro: trancl_trans) + done + +lemma m'_trancl_eq: + "m' \ p \\<^sup>+ p' = m \ s_d_swap p src dest \\<^sup>+ s_d_swap p' src dest" + by (auto dest: m_tranclD m'_tranclD) + +lemma m'_rtrancl_eq: + "m' \ p \\<^sup>* p' = m \ s_d_swap p src dest \\<^sup>* s_d_swap p' src dest" + by (auto simp: rtrancl_eq_or_trancl m'_trancl_eq s_d_swap_def) + +lemma m_cap: + "m p = Some (CTE c node) \ + if p = src then \node'. c = src_cap \ m' dest = Some (CTE cap' node') + else if p = dest then \node'. c = NullCap \ m' src = Some (CTE NullCap node') + else \node'. 
m' p = Some (CTE c node')" + apply (auto simp: src dest) + apply (auto simp: m'_def n_def src dest modify_map_if neq) + done + +lemma sameRegion_cap'_src [simp]: + "sameRegionAs cap' c = sameRegionAs src_cap c" + using parency unfolding weak_derived'_def + apply (case_tac "isReplyCap src_cap"; clarsimp) + apply (clarsimp simp: capMasterCap_def split: capability.splits arch_capability.splits + ; fastforce simp: sameRegionAs_def AARCH64_H.sameRegionAs_def isCap_simps split: if_split_asm)+ + done + +lemma chunked': + "mdb_chunked m'" + using chunked + apply (clarsimp simp: mdb_chunked_def) + apply (drule m'_cap)+ + apply (clarsimp simp: m'_trancl_eq sameRegion_cap'_src split: if_split_asm) + apply (erule_tac x=src in allE) + apply (erule_tac x="s_d_swap p' src dest" in allE) + apply (clarsimp simp: src s_d_swap_other) + apply (rule conjI) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def split: if_split_asm) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + apply (simp add: s_d_swap_other) + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (clarsimp simp: src sameRegionAs_parent_eq) + apply (rule conjI) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + apply (simp add: s_d_swap_other) + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + apply (clarsimp simp: is_chunk_def m'_rtrancl_eq m'_trancl_eq s_d_swap_other) + apply (erule_tac x="s_d_swap p'' src dest" in allE) + apply clarsimp + apply (drule_tac p="s_d_swap p'' src dest" in m_cap) + apply (clarsimp simp: s_d_swap_def sameRegionAs_parent_eq split: if_split_asm) + done + +lemma isUntypedCap': + "isUntypedCap cap' = isUntypedCap src_cap" + using parency unfolding weak_derived'_def + by (clarsimp simp: weak_derived'_def dest!: capMaster_isUntyped) + +lemma capRange': + "capRange cap' = capRange src_cap" + using parency unfolding weak_derived'_def + by (clarsimp simp: weak_derived'_def dest!: capMaster_capRange) + +lemma untypedRange': + "untypedRange cap' = untypedRange src_cap" + using parency unfolding weak_derived'_def + by (clarsimp simp: weak_derived'_def dest!: capMaster_untypedRange) + +lemmas ut' = isUntypedCap' capRange' untypedRange' + +lemma m'_revocable: + "m' p = Some (CTE c node) \ + if p = src then \mdbRevocable node + else if p = 
dest then mdbRevocable node = mdbRevocable src_node + else \node'. m p = Some (CTE c node') \ mdbRevocable node = mdbRevocable node'" + apply (insert src dest neq) + apply (frule m'_cap) + apply (clarsimp simp: m'_def n_def modify_map_if nullMDBNode_def split: if_split_asm) + done + +lemma cteMove_valid_mdb_helper: + "(isUntypedCap cap' \ cap' = src_cap) \valid_mdb_ctes m'" +proof + note sameRegion_cap'_src [simp del] + note dest_0 [simp del] src_0 [simp del] + note src_next [simp del] + note rtrancl0 [simp del] + + show "valid_dlist m'" by (rule dlist') + show "no_0 m'" by (rule no_0') + + have chain: "mdb_chain_0 m" .. + + have mp: "cte_mdb_prop m dest (\m. mdbPrev m = nullPointer \ mdbNext m = nullPointer)" using dest prev nxt + unfolding cte_mdb_prop_def + by (simp add: nullPointer_def) + hence nsd: "\ m \ mdbNext src_node \\<^sup>* dest" using dlist + by (auto elim: next_rtrancl_tranclE dest: null_mdb_no_trancl [OF _ no_0]) + + have sd: "mdbNext src_node \ 0 \ mdbNext src_node \ dom m" + proof - + assume T: "mdbNext src_node \ 0" + have "m \ src \ mdbNext src_node" by (rule m_p_next) + moreover have "m \ src \\<^sup>+ 0" using chain src unfolding mdb_chain_0_def by (clarsimp simp: dom_def) + ultimately have "m \ mdbNext src_node \\<^sup>+ 0" using T + by (auto elim: tranclE2' simp: next_unfold') + thus "mdbNext src_node \ dom m" + by - (erule tranclE2', (clarsimp simp: next_unfold')+) + qed + + let ?m = "(modify_map + (modify_map (modify_map m (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (%_. dest)))) src + (cteMDBNode_update (mdbNext_update (%_. (mdbNext nullMDBNode))))) + dest (cteMDBNode_update (mdbNext_update (%_. (mdbNext src_node)))))" + + let ?goal = "mdb_chain_0 ?m" + { + assume "mdbPrev src_node = 0" and T: "mdbNext src_node = 0" + hence ms: "m (mdbPrev src_node) = None" using no_0 by (simp add: no_0_def) + hence ?goal using T + by (auto simp: modify_map_None [where m = m, OF ms] nullPointer_def + intro!: mdb_chain_0_modify_map_0) + } moreover + { + assume "mdbPrev src_node \ 0" and "mdbNext src_node = 0" + hence ?goal + apply - + apply (simp add: nullMDBNode_def nullPointer_def) + apply (subst modify_map_addr_com [where y = dest], simp add: neq)+ + apply (rule mdb_chain_0_modify_map_0) + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_0 [OF chain no_0]) + apply clarsimp + apply (clarsimp simp: dest) + apply (subst next_update_is_modify [symmetric], rule dest) + apply simp + apply (subst next_update_lhs_rtrancl) + apply simp + apply (rule no_0_lhs_tranclI [OF no_0 dest_0]) + apply simp + apply (rule no_0_lhs_tranclI [OF no_0]) + apply simp + apply clarsimp + done + } moreover + { + assume "mdbPrev src_node = 0" and T: "mdbNext src_node \ 0" + hence ms: "m (mdbPrev src_node) = None" using no_0 by (simp add: no_0_def) + hence ?goal using T + apply (simp add: modify_map_None nullPointer_def) + apply (subst modify_map_addr_com [OF neq]) + apply (rule mdb_chain_0_modify_map_0) + apply (rule mdb_chain_0_modify_map_next [OF chain no_0 sd, OF T nsd]) + apply clarsimp + done + } moreover + { + assume U: "mdbPrev src_node \ 0" and T: "mdbNext src_node \ 0" + hence ?goal using dlist + apply - + apply (simp add: nullPointer_def) + apply (subst modify_map_addr_com [where y = dest], simp add: neq)+ + apply (rule mdb_chain_0_modify_map_0) + apply (rule mdb_chain_0_modify_map_next) + apply (rule mdb_chain_0_modify_map_next [OF chain no_0 sd nsd, OF T]) + apply clarsimp + apply (clarsimp simp: dest) + apply (subst next_update_is_modify [symmetric], rule 
dest) + apply simp + apply (subst next_update_lhs_rtrancl) + apply simp + apply (rule nsd) + apply simp + apply (rule no_next_prev_rtrancl [OF valid], rule src, rule U) + apply clarsimp + done + } + ultimately have ?goal + apply (cases "mdbPrev src_node = 0") + apply (cases "mdbNext src_node = 0") + apply auto[2] + apply (cases "mdbNext src_node = 0") + apply auto + done + + thus "mdb_chain_0 m'" + unfolding m'_def n_def + apply - + apply (rule mdb_chain_0_modify_map_prev) + apply (subst modify_map_addr_com [OF src_neq_prev]) + apply (subst modify_map_addr_com [OF prev_neq_dest2]) + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_addr_com [OF neq_sym])+ + apply (rule mdb_chain_0_modify_map_replace) + apply (subst modify_map_com [ where g = "(cteCap_update (%_. cap'))"], + case_tac x, simp)+ + apply (rule mdb_chain_0_modify_map_inv) + apply (subst modify_map_com [ where g = "(cteCap_update (%_. capability.NullCap))"], + case_tac x, simp)+ + apply (erule mdb_chain_0_modify_map_inv) + apply simp + apply simp + done + + from valid + have "valid_badges m" .. + thus "valid_badges m'" using src dest parency + apply (clarsimp simp: valid_badges_def2) + apply (drule m'_badged)+ + apply (drule m'_next) + apply (clarsimp simp add: weak_derived'_def split: if_split_asm) + apply (erule_tac x=src in allE, erule_tac x=p' in allE, + erule allE, erule impE, erule exI) + apply clarsimp + apply (erule_tac x=p in allE, erule_tac x=src in allE, + erule allE, erule impE, erule exI) + apply clarsimp + by fastforce + + from valid + have "caps_contained' m" by (simp add: valid_mdb_ctes_def) + with src dest neq parency + show "caps_contained' m'" + apply (clarsimp simp: caps_contained'_def) + apply (drule m'_cap)+ + apply (clarsimp split: if_split_asm) + apply (clarsimp dest!: capRange_untyped) + apply (erule_tac x=src in allE, erule_tac x=p' in allE) + apply (clarsimp simp add: weak_derived'_def) + apply (drule capMaster_untypedRange) + apply clarsimp + apply blast + apply (erule_tac x=p in allE, erule_tac x=src in allE) + apply (clarsimp simp: weak_derived'_def) + apply (frule capMaster_isUntyped) + apply (drule capMaster_capRange) + apply clarsimp + apply blast + by fastforce + + show "mdb_chunked m'" by (rule chunked') + + from untyped_mdb + show "untyped_mdb' m'" + apply (simp add: untyped_mdb'_def) + apply clarsimp + apply (drule m'_cap)+ + apply (clarsimp simp: descendants split: if_split_asm) + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply (simp add: src ut') + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (simp add: src ut') + done + + assume isUntypedCap_eq:"isUntypedCap cap' \ cap' = src_cap" + from untyped_inc + show "untyped_inc' m'" + using isUntypedCap_eq + apply (simp add: untyped_inc'_def) + apply clarsimp + apply (drule m'_cap)+ + apply (clarsimp simp: descendants split: if_split_asm) + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply (clarsimp simp add: src ut') + apply (intro conjI impI) + apply clarsimp+ + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (clarsimp simp add: src ut') + apply (intro conjI impI) + apply clarsimp+ + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + done + + note if_cong [cong] + + from not_null parency + have "src_cap \ NullCap \ cap' \ NullCap" + by (clarsimp simp: weak_derived'_def) + moreover + from valid + have "valid_nullcaps m" .. 
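+  (* Combining the two facts above: weak_derived' keeps the moved cap non-null whenever
+     the source cap was, so a null cap in m' sits either at the cleared source slot
+     (which now carries nullMDBNode) or at a slot that already held a null cap in m. *)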
+ ultimately + show vn': "valid_nullcaps m'" + apply (clarsimp simp: valid_nullcaps_def) + apply (frule m'_cap) + apply (insert src dest) + apply (frule spec, erule allE, erule (1) impE) + apply (clarsimp split: if_split_asm) + apply (simp add: n_def m'_def) + apply (simp add: modify_map_if) + apply (simp add: n_def m'_def) + apply (simp add: modify_map_if) + apply (clarsimp split: if_split_asm) + apply (erule disjE) + apply clarsimp + apply (erule allE, erule allE, erule (1) impE) + apply clarsimp + apply (insert dlist) + apply (erule_tac p=src in valid_dlistEn, assumption) + apply clarsimp + apply (clarsimp simp: nullMDBNode_def nullPointer_def) + apply (erule allE, erule allE, erule (1) impE) + apply clarsimp + apply (erule_tac p=src in valid_dlistEp, assumption) + apply clarsimp + apply (clarsimp simp: nullMDBNode_def nullPointer_def) + done + + from valid + have "ut_revocable' m" .. + thus "ut_revocable' m'" using src dest parency + apply (clarsimp simp: ut_revocable'_def) + apply (frule m'_cap) + apply (frule m'_revocable) + apply (clarsimp split: if_split_asm) + apply (subgoal_tac "isUntypedCap src_cap") + apply simp + apply (clarsimp simp: weak_derived'_def dest!: capMaster_isUntyped) + done + + from src + have src': "m' src = Some (CTE NullCap nullMDBNode)" + by (simp add: m'_def n_def modify_map_if) + with dlist' no_0' + have no_prev_of_src': "\p. \m' \ p \ src" + apply clarsimp + apply (frule (3) vdlist_nextD) + apply (simp add: mdb_prev_def mdb_next_unfold nullPointer_def) + done + + from valid + have "class_links m" .. + thus "class_links m'" using src dest parency + apply (clarsimp simp: class_links_def weak_derived'_def) + apply (case_tac cte) + apply (case_tac cte') + apply clarsimp + apply (case_tac "p'=src") + apply (simp add: no_prev_of_src') + apply (drule m'_next) + apply (drule m'_cap)+ + apply (clarsimp split: if_split_asm) + apply (fastforce dest!: capMaster_capClass) + apply (fastforce dest!: capMaster_capClass) + apply fastforce + done + + show "irq_control m'" using src dest parency + apply (clarsimp simp: irq_control_def) + apply (frule m'_revocable) + apply (drule m'_cap) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp add: weak_derived'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule m'_cap) + apply (clarsimp split: if_split_asm) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule m'_cap) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: weak_derived'_def) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + apply (erule (1) irq_controlD, rule irq_control) + done + + have distz: "distinct_zombies m" + using valid by (simp add: valid_mdb_ctes_def) + + thus "distinct_zombies m'" + apply (simp add: m'_def distinct_zombies_nonCTE_modify_map) + apply (simp add: n_def distinct_zombies_nonCTE_modify_map + modify_map_apply src dest neq) + apply (erule distinct_zombies_switchE, rule dest, rule src) + apply simp + apply (cut_tac parency) + apply (clarsimp simp: weak_derived'_def) + done + + have "reply_masters_rvk_fb m" using valid .. 
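+  (* Transfer to m': by weak_derived' the moved cap is a reply master exactly when the
+     source cap was, and (per m'_badged and m'_revocable above) the destination node
+     inherits the first-badged and revocable flags of the source node. *)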
+ thus "reply_masters_rvk_fb m'" using neq parency + apply (simp add: m'_def n_def reply_masters_rvk_fb_def + ball_ran_modify_map_eq) + apply (simp add: modify_map_apply m_p dest) + apply (intro ball_ran_fun_updI, simp_all) + apply (frule bspec, rule ranI, rule m_p) + apply (clarsimp simp: weak_derived'_def) + apply (drule master_eqE[where F=isReplyCap], simp add: isCap_Master) + apply (simp add: isCap_simps)+ + done + +qed + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cteMove_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ cte_wp_at' (\c. weak_derived' (cteCap c) cap) src s + \ cte_wp_at' (\c. cteCap c \ NullCap) src s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteMove cap src dest + \\rv. if_live_then_nonz_cap'\" + unfolding cteMove_def + apply simp + apply wp + apply (simp only: if_live_then_nonz_cap'_def imp_conv_disj + ex_nonz_cap_to'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_vcg_ex_lift updateCap_cte_wp_at_cases + getCTE_wp hoare_weak_lift_imp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule(1) if_live_then_nonz_capE') + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (drule_tac x="(id (src := dest, dest := src)) cref" in spec) + apply (clarsimp dest!: weak_derived_zobj split: if_split_asm) + done + +lemma cteMove_valid_pspace' [wp]: + "\\x. valid_pspace' x \ + cte_wp_at' (\c. weak_derived' (cteCap c) capability) word1 x \ + cte_wp_at' (\c. isUntypedCap (cteCap c) \ capability = cteCap c) word1 x \ + cte_wp_at' (\c. cteCap c \ NullCap) word1 x \ + x \' capability \ + cte_wp_at' (\c. cteCap c = capability.NullCap) word2 x\ + cteMove capability word1 word2 + \\y. valid_pspace'\" + unfolding cteMove_def + apply (simp add: pred_conj_def valid_pspace'_def valid_mdb'_def) + apply (wp sch_act_wf_lift valid_queues_lift + cur_tcb_lift updateCap_no_0 updateCap_ctes_of_wp getCTE_wp | simp)+ + apply (clarsimp simp: invs'_def valid_state'_def)+ + apply (clarsimp dest!: cte_at_cte_wp_atD) + apply (rule_tac x = cte in exI) + apply clarsimp + apply (clarsimp dest!: cte_at_cte_wp_atD) + apply (rule_tac x = ctea in exI) + apply (clarsimp simp: isCap_simps) + apply rule + apply (clarsimp elim!: valid_mdb_ctesE) + apply (case_tac ctea) + apply (case_tac cte) + apply (rule_tac old_dest_node = "cteMDBNode cte" and src_cap = "cteCap ctea" in + mdb_move.cteMove_valid_mdb_helper) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of weak_derived'_def isCap_simps simp del: not_ex) + apply unfold_locales + apply (simp_all add: valid_mdb'_def cte_wp_at_ctes_of nullPointer_def weak_derived'_def) + apply clarsimp + done + +lemma cteMove_ifunsafe': + "\if_unsafe_then_cap' + and cte_wp_at' (\c. cteCap c = capability.NullCap) dest + and ex_cte_cap_to' dest + and cte_wp_at' (\c. weak_derived' (cteCap c) cap) src\ + cteMove cap src dest + \\rv. if_unsafe_then_cap'\" + apply (rule hoare_pre) + apply (simp add: ifunsafe'_def3 cteMove_def o_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (subgoal_tac "ex_cte_cap_to' cref s") + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="(id (dest := src, src := dest)) crefb" + in exI) + apply (auto simp: modify_map_def dest!: weak_derived_cte_refs + split: if_split_asm)[1] + apply (case_tac "cref = dest") + apply simp + apply (rule if_unsafe_then_capD'[where P="\cte. 
cteCap cte \ NullCap"]) + apply (clarsimp simp add: cte_wp_at_ctes_of modify_map_def + split: if_split_asm) + apply simp+ + done + +lemma cteMove_idle'[wp]: + "\\s. valid_idle' s\ + cteMove cap src dest + \\rv. valid_idle'\" + apply (simp add: cteMove_def) + apply (wp updateCap_idle' | simp)+ + apply (wp getCTE_wp') + apply (clarsimp simp: valid_idle'_def cte_wp_at_ctes_of weak_derived'_def) + done + +crunch ksInterrupt[wp]: cteMove "\s. P (ksInterruptState s)" + (wp: crunch_wps) + +crunch ksArch[wp]: cteMove "\s. P (ksArchState s)" + (wp: crunch_wps) + +lemma cteMove_irq_handlers' [wp]: + "\\s. valid_irq_handlers' s + \ cte_wp_at' (\c. weak_derived' (cteCap c) cap) src s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteMove cap src dest + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def irq_issued'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=ksInterruptState, OF cteMove_ksInterrupt]) + apply (simp add: cteMove_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of ran_def) + apply (subst(asm) imp_ex, subst(asm) all_comm) + apply (drule_tac x="(id (src := dest, dest := src)) a" in spec) + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (auto simp: cteCaps_of_def weak_derived'_def) + done + +lemmas cteMove_valid_irq_node'[wp] + = valid_irq_node_lift[OF cteMove_ksInterrupt cteMove_typ_at'] + +crunch valid_arch_state'[wp]: cteMove "valid_arch_state'" + (wp: crunch_wps) + +crunch global_refs_noop[wp]: cteMove "\s. P (global_refs' s)" + (wp: crunch_wps) +crunch gsMaxObjectSize[wp]: cteMove "\s. P (gsMaxObjectSize s)" + (wp: crunch_wps) + +lemma cteMove_global_refs' [wp]: + "\\s. valid_global_refs' s + \ cte_wp_at' (\c. weak_derived' (cteCap c) cap) src s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteMove cap src dest + \\rv. valid_global_refs'\" + apply (rule hoare_name_pre_state, clarsimp simp: valid_global_refs'_def) + apply (frule_tac p=src and cte="the (ctes_of s src)" in cte_at_valid_cap_sizes_0) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (simp add: valid_refs'_cteCaps valid_cap_sizes_cteCaps) + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=global_refs', OF cteMove_global_refs_noop]) + apply (rule hoare_use_eq [where f=gsMaxObjectSize], wp) + apply (simp add: cteMove_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of ran_def all_conj_distrib[symmetric] + imp_conjR[symmetric]) + apply (subst(asm) imp_ex, subst(asm) all_comm) + apply (drule_tac x="(id (dest := src, src := dest)) a" in spec) + apply (clarsimp simp: modify_map_def cteCaps_of_def + split: if_split_asm dest!: weak_derived_capRange_capBits) + apply auto? + done + +lemma cteMove_urz [wp]: + "\\s. untyped_ranges_zero' s + \ valid_pspace' s + \ cte_wp_at' (\c. weak_derived' (cteCap c) cap) src s + \ cte_wp_at' (\c. isUntypedCap (cteCap c) \ cap = cteCap c) src s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteMove cap src dest + \\rv. untyped_ranges_zero'\" + apply (clarsimp simp: cteMove_def) + apply (rule hoare_pre) + apply (wp untyped_ranges_zero_lift getCTE_wp' | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of + split del: if_split) + apply (erule untyped_ranges_zero_delta[where xs="[src, dest]"], + (clarsimp simp: modify_map_def)+) + apply (clarsimp simp: ran_restrict_map_insert modify_map_def + cteCaps_of_def untypedZeroRange_def[where ?x0.0=NullCap]) + apply (drule weak_derived_untypedZeroRange[OF weak_derived_sym'], clarsimp) + apply auto + done + +lemma cteMove_invs' [wp]: + "\\x. 
invs' x \ ex_cte_cap_to' word2 x \ + cte_wp_at' (\c. weak_derived' (cteCap c) capability) word1 x \ + cte_wp_at' (\c. isUntypedCap (cteCap c) \ capability = cteCap c) word1 x \ + cte_wp_at' (\c. (cteCap c) \ NullCap) word1 x \ + x \' capability \ + cte_wp_at' (\c. cteCap c = capability.NullCap) word2 x\ + cteMove capability word1 word2 + \\y. invs'\" + apply (simp add: invs'_def valid_state'_def pred_conj_def) + apply (rule hoare_pre) + apply ((rule hoare_vcg_conj_lift, (wp cteMove_ifunsafe')[1]) + | rule hoare_vcg_conj_lift[rotated])+ + apply (unfold cteMove_def) + apply (wp cur_tcb_lift valid_queues_lift haskell_assert_inv + sch_act_wf_lift ct_idle_or_in_cur_domain'_lift2 tcb_in_cur_domain'_lift)+ + apply clarsimp + done + +lemma cteMove_cte_wp_at: + "\\s. cte_at' ptr s \ (if p = ptr then (Q capability.NullCap) else (if p' = ptr then Q cap else cte_wp_at' (Q \ cteCap) ptr s))\ + cteMove cap p p' + \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" + unfolding cteMove_def + apply (fold o_def) + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma cteMove_ex: + "\ex_cte_cap_to' ptr and + cte_wp_at' (weak_derived' cap o cteCap) p and + cte_wp_at' ((=) NullCap o cteCap) p' and + K (p \ p') \ + cteMove cap p p' + \\_. ex_cte_cap_to' ptr\" + unfolding ex_cte_cap_to'_def + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF cteMove_ksInterrupt]) + apply (wp hoare_vcg_ex_lift cteMove_cte_wp_at) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac "cref = p") + apply simp + apply (rule_tac x=p' in exI) + apply (clarsimp simp: weak_derived'_def dest!: capMaster_same_refs) + apply (rule_tac x=cref in exI) + apply clarsimp + done + +lemmas cteMove_typ_at_lifts [wp] = typ_at_lifts [OF cteMove_typ_at'] + +lemmas finalise_slot_corres' + = rec_del_corres[where args="FinaliseSlotCall slot exp", + simplified rec_del_concrete.simps, + simplified, folded finalise_slot_def] for slot exp +lemmas finalise_slot_corres = use_spec_corres [OF finalise_slot_corres'] + +lemma corres_disj_abs: + "\ corres rv P R f g; corres rv Q R f g \ + \ corres rv (\s. P s \ Q s) R f g" + by (auto simp: corres_underlying_def) + +crunch ksMachine[wp]: updateCap "\s. P (ksMachineState s)" + +lemma cap_relation_same: + "\ cap_relation cap cap'; cap_relation cap cap'' \ + \ cap' = cap''" + by (clarsimp split: cap_relation_split_asm + arch_cap.split_asm) + +crunch gsUserPages[wp]: updateCap "\s. P (gsUserPages s)" +crunch gsCNodes[wp]: updateCap "\s. P (gsCNodes s)" +crunch ksWorkUnitsCompleted[wp]: updateCap "\s. P (ksWorkUnitsCompleted s)" +crunch ksDomSchedule[wp]: updateCap "\s. P (ksDomSchedule s)" +crunch ksDomScheduleIdx[wp]: updateCap "\s. P (ksDomScheduleIdx s)" +crunch ksDomainTime[wp]: updateCap "\s. P (ksDomainTime s)" + +lemma corres_null_cap_update: + "cap_relation cap cap' \ + corres dc (invs and cte_wp_at ((=) cap) slot) + (invs' and cte_at' (cte_map slot)) + (set_cap cap slot) (updateCap (cte_map slot) cap')" + apply (rule corres_caps_decomposition[rotated]) + apply (wp updateCap_ctes_of_wp)+ + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_apply + fun_upd_def[symmetric]) + apply (frule state_relation_pspace_relation) + apply (frule(1) pspace_relation_ctes_ofI, clarsimp+) + apply (drule(1) cap_relation_same) + apply (case_tac cte) + apply (clarsimp simp: cte_wp_at_caps_of_state fun_upd_idem) + apply (clarsimp simp: state_relation_def) + apply (erule_tac P="\caps. 
cdt_relation caps m ctes" for m ctes in rsubst) + apply (rule ext, clarsimp simp: cte_wp_at_caps_of_state eq_commute) + apply(clarsimp simp: cdt_list_relation_def state_relation_def) + apply(case_tac "next_slot (a, b) (cdt_list s) (cdt s)") + apply(simp) + apply(clarsimp) + apply(erule_tac x=a in allE, erule_tac x=b in allE) + apply(simp) + apply(clarsimp simp: modify_map_def split: if_split_asm) + apply(case_tac z) + apply(clarsimp) + apply (simp add: state_relation_def) + apply (simp add: state_relation_def) + apply (clarsimp simp: state_relation_def fun_upd_def[symmetric] + cte_wp_at_caps_of_state fun_upd_idem) + apply (clarsimp simp: state_relation_def) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap pt_types_of_heap_eq o_def) + apply (subst return_bind[where x="()", symmetric], subst updateCap_def, + rule corres_split_forwards') + apply (rule corres_guard_imp, rule getCTE_symb_exec_r, simp+) + prefer 3 + apply clarsimp + apply (rule setCTE_corres) + apply (wp | simp)+ + apply (fastforce elim!: cte_wp_at_weakenE) + apply wp + apply fastforce + done + +declare corres_False' [simp] + +lemma invokeCNode_corres: + "cnodeinv_relation ci ci' \ + corres (dc \ dc) + (einvs and simple_sched_action and valid_cnode_inv ci) + (invs' and sch_act_simple and valid_cnode_inv' ci') + (invoke_cnode ci) (invokeCNode ci')" + apply (simp add: invoke_cnode_def invokeCNode_def) + apply (cases ci, simp_all) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule cteInsert_corres) + apply simp+ + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def + elim!: cte_wp_at_cte_at) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (erule cteMove_corres) + apply (clarsimp simp: cte_wp_at_caps_of_state real_cte_tcb_valid) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule cteRevoke_corres) + apply (rule corres_guard_imp [OF cteDelete_corres]) + apply (clarsimp simp: cte_at_typ cap_table_at_typ halted_emptyable) + apply simp + apply (rename_tac cap1 cap2 p1 p2 p3) + apply (elim conjE exE) + apply (intro impI conjI) + apply simp + apply (rule corres_guard_imp) + apply (rule_tac F="wellformed_cap cap1 \ wellformed_cap cap2" + in corres_gen_asm) + apply (erule (1) cteSwap_corres [OF refl refl], simp+) + apply (simp add: invs_def valid_state_def valid_pspace_def + real_cte_tcb_valid valid_cap_def2) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + cte_wp_at_ctes_of weak_derived'_def) + apply (simp split del: if_split) + apply (rule_tac F = "cte_map p1 \ cte_map p3" in corres_req) + apply clarsimp + apply (drule (2) cte_map_inj_eq [OF _ cte_wp_at_cte_at cte_wp_at_cte_at]) + apply clarsimp + apply clarsimp + apply clarsimp + apply simp + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (erule cteMove_corres) + apply (erule cteMove_corres) + apply wp + apply (simp add: cte_wp_at_caps_of_state) + apply (wp cap_move_caps_of_state cteMove_cte_wp_at [simplified o_def])+ + apply (simp add: real_cte_tcb_valid invs_def valid_state_def valid_pspace_def) + apply (elim conjE exE) + apply (drule(3) real_cte_weak_derived_not_reply_masterD)+ + apply (clarsimp simp: cte_wp_at_caps_of_state + ex_cte_cap_to_cnode_always_appropriate_strg + cte_wp_at_conj) + apply (simp add: cte_wp_at_ctes_of) + apply (elim conjE exE) + apply (intro impI conjI) + 
apply fastforce + apply (fastforce simp: weak_derived'_def) + apply simp + apply (erule weak_derived_sym') + apply clarsimp + apply simp + apply clarsimp + apply simp + apply clarsimp + apply clarsimp + apply (rename_tac prod) + apply (simp add: getThreadCallerSlot_def locateSlot_conv objBits_simps) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (subgoal_tac "thread + 2^cte_level_bits * tcbCallerSlot = cte_map (thread, tcb_cnode_index 3)") + prefer 2 + apply (simp add: cte_map_def tcb_cnode_index_def tcbCallerSlot_def cte_level_bits_def) + apply (rule corres_split[OF getSlotCap_corres], simp) + apply (rule_tac P="\s. (is_reply_cap cap \ cap = cap.NullCap) \ + (is_reply_cap cap \ + (einvs and cte_at (threada, tcb_cnode_index 3) and + cte_wp_at (\c. c = cap.NullCap) prod and + real_cte_at prod and valid_cap cap and + K ((threada, tcb_cnode_index 3) \ prod)) s)" and + P'="\s. (isReplyCap rv' \ \ capReplyMaster rv') \ (invs' and + cte_wp_at' + (\c. weak_derived' rv' (cteCap c) \ + cteCap c \ capability.NullCap) + (cte_map (threada, tcb_cnode_index 3)) and + cte_wp_at' (\c. cteCap c = capability.NullCap) (cte_map prod)) s" in corres_inst) + apply (case_tac cap, simp_all add: isCap_simps is_cap_simps split: bool.split)[1] + apply clarsimp + apply (rule corres_guard_imp) + apply (rule cteMove_corres) + apply (simp add: real_cte_tcb_valid)+ + apply (wp get_cap_wp) + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp)+ + apply clarsimp + apply (rule conjI) + apply (rule tcb_at_cte_at) + apply fastforce + apply (simp add: tcb_cap_cases_def) + apply (clarsimp simp: cte_wp_at_cte_at) + apply (rule conjI) + apply (frule tcb_at_invs) + apply (frule_tac ref="tcb_cnode_index 3" and Q="is_reply_cap or (=) cap.NullCap" + in tcb_cap_wp_at) + apply (clarsimp split: Structures_A.thread_state.split_asm)+ + apply (clarsimp simp: cte_wp_at_def is_cap_simps all_rights_def) + apply clarsimp + apply (rule conjI, simp add: cte_wp_valid_cap invs_valid_objs) + apply (clarsimp simp: cte_wp_at_def is_cap_simps all_rights_def) + apply clarsimp + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply clarsimp + apply (case_tac "has_cancel_send_rights x7", + frule has_cancel_send_rights_ep_cap, + simp add: is_cap_simps) + apply (clarsimp simp: when_def unless_def isCap_simps) + apply (rule corres_guard_imp) + apply (rule cancelBadgedSends_corres) + apply (simp add: valid_cap_def) + apply (simp add: valid_cap'_def) + apply (clarsimp) + done + +lemma updateCap_noop_irq_handlers: + "\valid_irq_handlers' and cte_wp_at' (\cte. cteCap cte = cap) slot\ + updateCap slot cap + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def irq_issued'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq[where f=ksInterruptState, OF updateCap_ksInterruptState]) + apply wp + apply (simp, subst(asm) tree_cte_cteCap_eq[unfolded o_def]) + apply (simp split: option.split_asm + add: modify_map_apply fun_upd_idem) + done + +crunch ct_idle_or_in_cur_domain'[wp]: updateCap ct_idle_or_in_cur_domain' + (rule: ct_idle_or_in_cur_domain'_lift2) + +lemma updateCap_noop_invs: + "\invs' and cte_wp_at' (\cte. cteCap cte = cap) slot\ + updateCap slot cap + \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def + valid_pspace'_def valid_mdb'_def) + apply (rule hoare_pre) + apply (wp updateCap_ctes_of_wp updateCap_iflive' + updateCap_ifunsafe' updateCap_idle' + valid_irq_node_lift + updateCap_noop_irq_handlers sch_act_wf_lift + untyped_ranges_zero_lift) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_apply) + apply (strengthen untyped_ranges_zero_delta[where xs=Nil, mk_strg I E]) + apply (case_tac cte) + apply (clarsimp simp: fun_upd_idem cteCaps_of_def modify_map_apply + valid_mdb'_def) + apply (frule(1) ctes_of_valid') + apply (frule(1) valid_global_refsD_with_objSize) + apply clarsimp + apply (rule_tac P="(=) cte" for cte in if_unsafe_then_capD') + apply (simp add: cte_wp_at_ctes_of) + apply assumption + apply clarsimp + done + +lemmas make_zombie_or_noop_or_arch_invs + = hoare_vcg_disj_lift [OF updateCap_noop_invs + hoare_vcg_disj_lift [OF make_zombie_invs' arch_update_updateCap_invs], + simplified] + +lemma invokeCNode_invs' [wp]: + "\invs' and sch_act_simple and valid_cnode_inv' cinv\ + invokeCNode cinv \\y. invs'\" + unfolding invokeCNode_def + apply (cases cinv) + apply (wp cteRevoke_invs' cteInsert_invs | simp split del: if_split)+ + apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def isCap_simps badge_derived'_def) + apply (erule(1) valid_irq_handlers_ctes_ofD) + apply (clarsimp simp: invs'_def valid_state'_def) + defer + apply (wp cteRevoke_invs' | simp)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (erule weak_derived_sym') + defer + apply (simp add: getSlotCap_def getThreadCallerSlot_def locateSlot_conv) + apply (rule hoare_pre) + apply (wp haskell_fail_wp getCTE_wp|wpc)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac ctea) + apply clarsimp + apply (erule ctes_of_valid_cap') + apply fastforce + apply ((wp cteDelete_invs'|simp split del: if_split)+) + apply (wp cteMove_ex cteMove_cte_wp_at)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (fastforce simp: isCap_simps weak_derived'_def) + apply (rule hoare_pre) + apply simp + apply (wp | wpc | simp add: unless_def)+ + done + +declare withoutPreemption_lift [wp] + +crunch irq_states' [wp]: capSwapForDelete valid_irq_states' + +lemma setVCPU_valid_irq_states' [wp]: + "setObject p (vcpu::vcpu) \valid_irq_states'\" + by (wp valid_irq_states_lift') + +crunches writeVCPUHardwareReg, readVCPUHardwareReg + for irq_masks[wp]: "\s. P (irq_masks s)" + +crunches vcpuUpdate, vcpuWriteReg, vcpuSaveReg, vcpuRestoreReg, vcpuReadReg + for irq_states'[wp]: valid_irq_states' + and ksInterrupt[wp]: "\s. 
P (ksInterruptState s)" + (ignore: getObject setObject) + +lemma saveVirtTimer_irq_states'[wp]: + "saveVirtTimer vcpu_ptr \valid_irq_states'\" + unfolding saveVirtTimer_def + by (wpsimp simp: read_cntpct_def + wp: doMachineOp_irq_states') + +lemma restoreVirtTimer_irq_states'[wp]: + "restoreVirtTimer vcpu_ptr \valid_irq_states'\" + unfolding restoreVirtTimer_def isIRQActive_def + by (simp add: liftM_bind) + (wpsimp wp: maskInterrupt_irq_states' getIRQState_wp hoare_vcg_imp_lift' doMachineOp_irq_states' + simp: if_apply_def2 read_cntpct_def) + +crunches + vcpuDisable, vcpuEnable, vcpuRestore, vcpuRestoreReg, vcpuSaveReg, + vcpuUpdate, vgicUpdateLR, vcpuSave + for irq_states' [wp]: valid_irq_states' + (wp: crunch_wps maskInterrupt_irq_states'[where b=True, simplified] no_irq no_irq_mapM_x + simp: crunch_simps no_irq_isb no_irq_dsb + set_gic_vcpu_ctrl_hcr_def setSCTLR_def setHCR_def get_gic_vcpu_ctrl_hcr_def + getSCTLR_def get_gic_vcpu_ctrl_lr_def get_gic_vcpu_ctrl_apr_def + get_gic_vcpu_ctrl_vmcr_def + set_gic_vcpu_ctrl_vmcr_def set_gic_vcpu_ctrl_apr_def uncurry_def + set_gic_vcpu_ctrl_lr_def + ignore: saveVirtTimer) + +crunch irq_states' [wp]: finaliseCap valid_irq_states' + (wp: crunch_wps unless_wp getASID_wp no_irq_setVSpaceRoot + simp: crunch_simps o_def pteAtIndex_def) + +lemma finaliseSlot_IRQInactive': + "s \ \valid_irq_states'\ finaliseSlot' a b + \\_. valid_irq_states'\, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" +proof (induct rule: finalise_spec_induct) + case (1 sl exp s) + show ?case + apply (rule hoare_pre_spec_validE) + apply (subst finaliseSlot'_simps_ext) + apply (simp only: split_def) + apply (wp "1.hyps") + apply (unfold Let_def split_def fst_conv snd_conv + case_Zombie_assert_fold haskell_fail_def) + apply (wp getCTE_wp' preemptionPoint_invR| simp add: o_def irq_state_independent_HI)+ + apply (rule hoare_post_imp [where Q="\_. valid_irq_states'"]) + apply simp + apply wp[1] + apply (rule spec_strengthen_postE) + apply (rule "1.hyps", (assumption|rule refl)+) + apply simp + apply (wp hoare_drop_imps hoare_vcg_all_lift | simp add: locateSlot_conv)+ + done +qed + +lemma finaliseSlot_IRQInactive: + "\valid_irq_states'\ finaliseSlot a b + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (unfold validE_E_def) + apply (rule hoare_post_impErr) + apply (rule use_spec(2) [OF finaliseSlot_IRQInactive', folded finaliseSlot_def]) + apply (rule TrueI) + apply assumption + done + +lemma finaliseSlot_irq_states': + "\valid_irq_states'\ finaliseSlot a b \\rv. valid_irq_states'\" + by (wp finaliseSlot_preservation | clarsimp)+ + +lemma cteDelete_IRQInactive: + "\valid_irq_states'\ cteDelete x y + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: cteDelete_def split_def) + apply (wp whenE_wp) + apply (rule hoare_post_impErr) + apply (rule validE_E_validE) + apply (rule finaliseSlot_IRQInactive) + apply simp + apply simp + apply assumption + done + +lemma cteDelete_irq_states': + "\valid_irq_states'\ cteDelete x y + \\rv. valid_irq_states'\" + apply (simp add: cteDelete_def split_def) + apply (wp whenE_wp) + apply (rule hoare_post_impErr) + apply (rule hoare_valid_validE) + apply (rule finaliseSlot_irq_states') + apply simp + apply simp + apply assumption + done + +lemma preemptionPoint_IRQInactive_spec: + "s \ \valid_irq_states'\ preemptionPoint + \\_. valid_irq_states'\, \\rv s. 
intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply wp + apply (rule hoare_pre, wp preemptionPoint_invR) + apply clarsimp+ + done + +lemma cteRevoke_IRQInactive': + "s \ \valid_irq_states'\ cteRevoke x + \\_. \\, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" +proof (induct rule: cteRevoke.induct) + case (1 p s') + show ?case + apply (subst cteRevoke.simps) + apply (wp "1.hyps" unlessE_wp whenE_wp preemptionPoint_IRQInactive_spec + cteDelete_IRQInactive cteDelete_irq_states' getCTE_wp')+ + apply clarsimp + done +qed + +lemma cteRevoke_IRQInactive: + "\valid_irq_states'\ cteRevoke x + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (unfold validE_E_def) + apply (rule use_spec) + apply (rule cteRevoke_IRQInactive') + done + +lemma inv_cnode_IRQInactive: + "\valid_irq_states'\ invokeCNode cnode_inv + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: invokeCNode_def) + apply (rule hoare_pre) + apply (wp cteRevoke_IRQInactive finaliseSlot_IRQInactive + cteDelete_IRQInactive + whenE_wp + | wpc + | simp add: split_def)+ + done + +end + +end \ No newline at end of file diff --git a/proof/refine/AARCH64/CSpace1_R.thy b/proof/refine/AARCH64/CSpace1_R.thy new file mode 100644 index 0000000000..d9c1fb277c --- /dev/null +++ b/proof/refine/AARCH64/CSpace1_R.thy @@ -0,0 +1,7177 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + CSpace refinement +*) + +theory CSpace1_R +imports + CSpace_I +begin + +context Arch begin global_naming AARCH64_A (*FIXME: arch_split*) + +lemmas final_matters_def = final_matters_def[simplified final_matters_arch_def] + +declare final_matters_simps[simp del] + +lemmas final_matters_simps[simp] + = final_matters_def[split_simps cap.split arch_cap.split] + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma isMDBParentOf_CTE1: + "isMDBParentOf (CTE cap node) cte = + (\cap' node'. cte = CTE cap' node' \ sameRegionAs cap cap' + \ mdbRevocable node + \ (isEndpointCap cap \ capEPBadge cap \ 0 \ + capEPBadge cap = capEPBadge cap' \ \ mdbFirstBadged node') + \ (isNotificationCap cap \ capNtfnBadge cap \ 0 \ + capNtfnBadge cap = capNtfnBadge cap' \ \ mdbFirstBadged node'))" + apply (simp add: isMDBParentOf_def Let_def split: cte.splits split del: if_split) + apply (clarsimp simp: Let_def) + apply (fastforce simp: isCap_simps) + done + +lemma isMDBParentOf_CTE: + "isMDBParentOf (CTE cap node) cte = + (\cap' node'. 
cte = CTE cap' node' \ sameRegionAs cap cap' + \ mdbRevocable node + \ (capBadge cap, capBadge cap') \ capBadge_ordering (mdbFirstBadged node'))" + apply (simp add: isMDBParentOf_CTE1) + apply (intro arg_cong[where f=Ex] ext conj_cong refl) + apply (cases cap, simp_all add: isCap_simps) + apply (auto elim!: sameRegionAsE simp: isCap_simps) + done + +lemma isMDBParentOf_trans: + "\ isMDBParentOf a b; isMDBParentOf b c \ \ isMDBParentOf a c" + apply (cases a) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (frule(1) sameRegionAs_trans, simp) + apply (erule(1) capBadge_ordering_trans) + done + +lemma parentOf_trans: + "\ s \ a parentOf b; s \ b parentOf c \ \ s \ a parentOf c" + by (auto simp: parentOf_def elim: isMDBParentOf_trans) + +lemma subtree_parent: + "s \ a \ b \ s \ a parentOf b" + by (erule subtree.induct) auto + +lemma leadsto_is_prev: + "\ m \ p \ c; m c = Some (CTE cap node); + valid_dlist m; no_0 m \ \ + p = mdbPrev node" + by (fastforce simp add: next_unfold' valid_dlist_def Let_def no_0_def) + +lemma subtree_target_Some: + "m \ a \ b \ m b \ None" + by (erule subtree.cases) (auto simp: parentOf_def) + +lemma subtree_prev_loop: + "\ m p = Some (CTE cap node); no_loops m; valid_dlist m; no_0 m \ \ + m \ p \ mdbPrev node = False" + apply clarsimp + apply (frule subtree_target_Some) + apply (drule subtree_mdb_next) + apply (subgoal_tac "m \ p \\<^sup>+ p") + apply (simp add: no_loops_def) + apply (erule trancl_into_trancl) + apply (clarsimp simp: mdb_next_unfold) + apply (fastforce simp: next_unfold' valid_dlist_def no_0_def Let_def) + done + +lemma subtree_trans_lemma: + assumes "s \ b \ c" + shows "s \ a \ b \ s \ a \ c" + using assms +proof induct + case direct_parent + thus ?case + by (blast intro: trans_parent parentOf_trans subtree_parent) +next + case (trans_parent y z) + have IH: "s \ a \ b \ s \ a \ y" by fact+ + have step: "s \ y \ z" "z \ 0" "s \ b parentOf z" by fact+ + + have "s \ a \ b" by fact+ + hence "s \ a \ y" and "s \ a parentOf b" by (auto intro: IH subtree_parent) + moreover + with step + have "s \ a parentOf z" by - (rule parentOf_trans) + ultimately + show ?case using step by - (rule subtree.trans_parent) +qed + +lemma subtree_trans: "\ s \ a \ b; s \ b \ c \ \ s \ a \ c" + by (rule subtree_trans_lemma) + +lemma same_arch_region_as_relation: + "\acap_relation c d; acap_relation c' d'\ \ + arch_same_region_as c c' = + sameRegionAs (ArchObjectCap d) (ArchObjectCap d')" + by (cases c; cases c') + (auto simp: AARCH64_H.sameRegionAs_def sameRegionAs_def Let_def isCap_simps mask_def + add_diff_eq) + +lemma is_phyiscal_relation: + "cap_relation c c' \ is_physical c = isPhysicalCap c'" + by (auto simp: is_physical_def arch_is_physical_def + split: cap.splits arch_cap.splits) + +lemma obj_ref_of_relation: + "\ cap_relation c c'; capClass c' = PhysicalClass \ \ + obj_ref_of c = capUntypedPtr c'" + by (cases c; simp) (rename_tac arch_cap, case_tac arch_cap, auto) + +lemma obj_size_relation: + "\ cap_relation c c'; capClass c' = PhysicalClass \ \ + obj_size c = capUntypedSize c'" + apply (cases c, simp_all add: objBits_simps' zbits_map_def + cte_level_bits_def + split: option.splits sum.splits) + apply (rename_tac arch_cap) + apply (case_tac arch_cap; simp add: objBits_def AARCH64_H.capUntypedSize_def bit_simps') + done + +lemma same_region_as_relation: + "\ cap_relation c d; cap_relation c' d' \ \ + same_region_as c c' = sameRegionAs d d'" + apply (cases c) + apply clarsimp + apply (clarsimp simp: sameRegionAs_def isCap_simps Let_def is_phyiscal_relation) + apply 
(auto simp: obj_ref_of_relation obj_size_relation cong: conj_cong)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def bits_of_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps Let_def)[1] + apply simp + apply (cases c') + apply (clarsimp simp: same_arch_region_as_relation| + clarsimp simp: sameRegionAs_def isCap_simps Let_def)+ + done + +lemma can_be_is: + "\ cap_relation c (cteCap cte); cap_relation c' (cteCap cte'); + mdbRevocable (cteMDBNode cte) = r; + mdbFirstBadged (cteMDBNode cte') = r' \ \ + should_be_parent_of c r c' r' = isMDBParentOf cte cte'" + unfolding should_be_parent_of_def isMDBParentOf_def + apply (cases cte) + apply (rename_tac cap mdbnode) + apply (cases cte') + apply (rename_tac cap' mdbnode') + apply (clarsimp split del: if_split) + apply (case_tac "mdbRevocable mdbnode") + prefer 2 + apply simp + apply (clarsimp split del: if_split) + apply (case_tac "RetypeDecls_H.sameRegionAs cap cap'") + prefer 2 + apply (simp add: same_region_as_relation) + apply (simp add: same_region_as_relation split del: if_split) + apply (cases c, simp_all add: isCap_simps) + apply (cases c', auto simp: sameRegionAs_def Let_def isCap_simps)[1] + apply (cases c', auto simp: sameRegionAs_def isCap_simps is_cap_simps)[1] + apply (auto simp: Let_def)[1] + done + +lemma no_fail_getCTE [wp]: + "no_fail (cte_at' p) (getCTE p)" + apply (simp add: getCTE_def getObject_def split_def + loadObject_cte alignCheck_def unless_def + alignError_def is_aligned_mask[symmetric] + cong: kernel_object.case_cong) + apply (rule no_fail_pre, (wp | wpc)+) + apply (clarsimp simp: cte_wp_at'_def getObject_def + loadObject_cte split_def in_monad + dest!: in_singleton + split del: if_split) + apply (clarsimp simp: in_monad typeError_def objBits_simps + magnitudeCheck_def + split: kernel_object.split_asm if_split_asm option.split_asm + split del: if_split) + apply simp+ + done + +lemma tcb_cases_related: + "tcb_cap_cases ref = Some (getF, setF, restr) \ + \getF' setF'. (\x. tcb_cte_cases (cte_map (x, ref) - x) = Some (getF', setF')) + \ (\tcb tcb'. tcb_relation tcb tcb' \ cap_relation (getF tcb) (cteCap (getF' tcb')))" + by (simp add: tcb_cap_cases_def tcb_cnode_index_def to_bl_1 + cte_map_def' tcb_relation_def + split: if_split_asm) + +lemma pspace_relation_cte_wp_at: + "\ pspace_relation (kheap s) (ksPSpace s'); + cte_wp_at ((=) c) (cref, oref) s; pspace_aligned' s'; + pspace_distinct' s' \ + \ cte_wp_at' (\cte. 
cap_relation c (cteCap cte)) (cte_map (cref, oref)) s'" + apply (simp add: cte_wp_at_cases) + apply (erule disjE) + apply clarsimp + apply (drule(1) pspace_relation_absD) + apply (simp add: unpleasant_helper) + apply (drule spec, drule mp, erule domI) + apply (clarsimp simp: cte_relation_def) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=cte]) + apply simp + apply (drule ko_at_imp_cte_wp_at') + apply (clarsimp elim!: cte_wp_at_weakenE') + apply clarsimp + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: other_obj_relation_def) + apply (simp split: kernel_object.split_asm) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb]) + apply simp + apply (drule tcb_cases_related) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule(2) cte_wp_at_tcbI') + apply fastforce + apply simp + done + +lemma pspace_relation_ctes_ofI: + "\ pspace_relation (kheap s) (ksPSpace s'); + cte_wp_at ((=) c) slot s; pspace_aligned' s'; + pspace_distinct' s' \ + \ \cte. ctes_of s' (cte_map slot) = Some cte \ cap_relation c (cteCap cte)" + apply (cases slot, clarsimp) + apply (drule(3) pspace_relation_cte_wp_at) + apply (simp add: cte_wp_at_ctes_of) + done + +lemma get_cap_corres_P: + "corres (\x y. cap_relation x (cteCap y) \ P x) + (cte_wp_at P cslot_ptr) + (pspace_aligned' and pspace_distinct') + (get_cap cslot_ptr) (getCTE (cte_map cslot_ptr))" + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp) + apply clarsimp + apply (drule cte_wp_at_norm) + apply (clarsimp simp: state_relation_def) + apply (drule (3) pspace_relation_ctes_ofI) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (cases cslot_ptr) + apply (rename_tac oref cref) + apply (clarsimp simp: cte_wp_at_def) + apply (frule in_inv_by_hoareD[OF getCTE_inv]) + apply (drule use_valid [where P="\", OF _ getCTE_sp TrueI]) + apply (clarsimp simp: state_relation_def) + apply (drule pspace_relation_ctes_ofI) + apply (simp add: cte_wp_at_def) + apply assumption+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemmas get_cap_corres = get_cap_corres_P[where P="\", simplified] + +lemma cap_relation_masks: + "cap_relation c c' \ cap_relation + (cap_rights_update (cap_rights c \ rmask) c) + (RetypeDecls_H.maskCapRights (rights_mask_map rmask) c')" + apply (case_tac c, simp_all add: isCap_defs maskCapRights_def Let_def + rights_mask_map_def maskVMRights_def + AllowSend_def AllowRecv_def + cap_rights_update_def + split del: if_split) + apply (clarsimp simp add: isCap_defs) + by (rule ArchAcc_R.arch_cap_rights_update + [simplified, simplified rights_mask_map_def]) + +lemma getCTE_wp: + "\\s. cte_at' p s \ (\cte. cte_wp_at' ((=) cte) p s \ Q cte s)\ getCTE p \Q\" + apply (clarsimp simp add: getCTE_def valid_def cte_wp_at'_def) + apply (drule getObject_cte_det) + apply clarsimp + done + +lemma getCTE_ctes_of: + "\\s. ctes_of s p \ None \ P (the (ctes_of s p)) (ctes_of s)\ getCTE p \\rv s. P rv (ctes_of s)\" + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma getCTE_wp': + "\\s. \cte. 
cte_wp_at' ((=) cte) p s \ Q cte s\ getCTE p \Q\" + apply (clarsimp simp add: getCTE_def valid_def cte_wp_at'_def) + apply (drule getObject_cte_det) + apply clarsimp + done + +lemma getSlotCap_corres: + "cte_ptr' = cte_map cte_ptr \ + corres cap_relation + (cte_at cte_ptr) + (pspace_distinct' and pspace_aligned') + (get_cap cte_ptr) + (getSlotCap cte_ptr')" + apply (simp add: getSlotCap_def) + apply (subst bind_return [symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_trivial, simp) + apply (wp | simp)+ + done + +lemma maskCapRights [simp]: + "cap_relation c c' \ + cap_relation (mask_cap msk c) (maskCapRights (rights_mask_map msk) c')" + by (simp add: mask_cap_def cap_relation_masks) + +lemma maskCap_valid [simp]: + "s \' RetypeDecls_H.maskCapRights R cap = s \' cap" + by (clarsimp simp: valid_cap'_def maskCapRights_def isCap_simps + capAligned_def AARCH64_H.maskCapRights_def + split: capability.split arch_capability.split) + +lemma getSlotCap_valid_cap: + "\valid_objs'\ getSlotCap t \\r. valid_cap' r and cte_at' t\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_valid_cap | simp)+ + done + +lemmas getSlotCap_valid_cap1 [wp] = getSlotCap_valid_cap [THEN hoare_conjD1] +lemmas getSlotCap_valid_cap2 [wp] = getSlotCap_valid_cap [THEN hoare_conjD2] + +lemma resolveAddressBits_real_cte_at': + "\ valid_objs' and valid_cap' cap \ + resolveAddressBits cap addr depth + \\rv. real_cte_at' (fst rv)\, -" +proof (induct rule: resolveAddressBits.induct) + case (1 cap addr depth) + show ?case + apply (clarsimp simp: validE_def validE_R_def valid_def split: sum.split) + apply (subst (asm) resolveAddressBits.simps) + apply (simp only: Let_def split: if_split_asm) + prefer 2 + apply (simp add: in_monad) + apply (simp only: in_bindE_R K_bind_def) + apply (elim exE conjE) + apply (simp only: split: if_split_asm) + apply (clarsimp simp: in_monad locateSlot_conv stateAssert_def) + apply (cases cap) + apply (simp_all add: isCap_defs)[12] + apply (clarsimp simp add: valid_cap'_def objBits_simps' cte_level_bits_def + split: option.split_asm) + apply (simp only: in_bindE_R K_bind_def) + apply (elim exE conjE) + apply (simp only: cap_case_CNodeCap split: if_split_asm) + apply (drule_tac cap=nextCap in isCapDs(4), elim exE) + apply (simp only: in_bindE_R K_bind_def) + apply (frule (12) 1 [OF refl], (assumption | rule refl)+) + apply (clarsimp simp: in_monad locateSlot_conv objBits_simps stateAssert_def) + apply (cases cap) + apply (simp_all add: isCap_defs)[12] + apply (frule in_inv_by_hoareD [OF getSlotCap_inv]) + apply simp + apply (frule (1) post_by_hoare [OF getSlotCap_valid_cap]) + apply (simp add: valid_def validE_def validE_R_def) + apply (erule allE, erule impE, blast) + apply (drule (1) bspec) + apply simp + apply (clarsimp simp: in_monad locateSlot_conv objBits_simps stateAssert_def) + apply (cases cap) + apply (simp_all add: isCap_defs)[12] + apply (frule in_inv_by_hoareD [OF getSlotCap_inv]) + apply (clarsimp simp: valid_cap'_def cte_level_bits_def objBits_defs) + done +qed + +lemma resolveAddressBits_cte_at': + "\ valid_objs' and valid_cap' cap \ + resolveAddressBits cap addr depth + \\rv. cte_at' (fst rv)\, \\rv s. 
True\" + apply (fold validE_R_def) + apply (rule hoare_post_imp_R) + apply (rule resolveAddressBits_real_cte_at') + apply (erule real_cte_at') + done + +declare AllowSend_def[simp] +declare AllowRecv_def[simp] + +lemma cap_map_update_data: + assumes "cap_relation c c'" + shows "cap_relation (update_cap_data p x c) (updateCapData p x c')" +proof - + note simps = update_cap_data_def updateCapData_def word_size + isCap_defs is_cap_defs Let_def badge_bits_def + cap_rights_update_def badge_update_def + { fix x :: machine_word + define y where "y \ (x >> 6) && mask 58" (* guard_bits *) + define z where "z \ unat (x && mask 6)" (* cnode_guard_size_bits *) + have "of_bl (to_bl (y && mask z)) = (of_bl (replicate (64-z) False @ drop (64-z) (to_bl y))::machine_word)" + by (simp add: bl_and_mask) + then + have "y && mask z = of_bl (drop (64 - z) (to_bl y))" + apply simp + apply (subst test_bit_eq_iff [symmetric]) + apply (rule ext) + apply (clarsimp simp: test_bit_of_bl nth_append) + done + } note x = this + from assms + show ?thesis + apply (cases c) + apply (simp_all add: simps)[5] + defer + apply (simp_all add: simps)[4] + apply (clarsimp simp: simps the_arch_cap_def) + apply (rename_tac arch_cap) + apply (case_tac arch_cap; simp add: simps arch_update_cap_data_def + AARCH64_H.updateCapData_def) + \ \CNodeCap\ + apply (simp add: simps word_bits_def the_cnode_cap_def andCapRights_def + rightsFromWord_def data_to_rights_def nth_ucast cteRightsBits_def cteGuardBits_def) + apply (insert x) + apply (subgoal_tac "unat (x && mask 6) < unat (2^6::machine_word)") + prefer 2 + apply (fold word_less_nat_alt)[1] + apply (rule and_mask_less_size) + apply (simp add: word_size) + apply (simp add: word_bw_assocs cnode_padding_bits_def cnode_guard_size_bits_def) + done +qed + + +lemma cte_map_shift: + assumes bl: "to_bl cref' = zs @ cref" + assumes pre: "guard \ cref" + assumes len: "cbits + length guard \ length cref" + assumes aligned: "is_aligned ptr (5 + cbits)" (* cte_level_bits *) + assumes cbits: "cbits \ word_bits - cte_level_bits" + shows + "ptr + 32 * ((cref' >> length cref - (cbits + length guard)) && mask cbits) = \ \2^cte_level_bits\ + cte_map (ptr, take cbits (drop (length guard) cref))" +proof - + let ?l = "length cref - (cbits + length guard)" + from pre obtain xs where + xs: "cref = guard @ xs" by (auto simp: prefix_def less_eq_list_def) + hence len_c: "length cref = length guard + length xs" by simp + with len have len_x: "cbits \ length xs" by simp + + from bl xs + have cref': "to_bl cref' = zs @ guard @ xs" by simp + hence "length (to_bl cref') = length \" by simp + hence 64: "64 = length zs + length guard + length xs" by simp + + have len_conv [simp]: "size ptr = word_bits" + by (simp add: word_size word_bits_def) + + have "to_bl ((cref' >> ?l) && mask cbits) = (replicate (64 - cbits) False) @ + drop (64 - cbits) (to_bl (cref' >> ?l))" + by (simp add: bl_shiftl word_size bl_and_mask + cte_level_bits_def word_bits_def) + also + from len_c len_x cref' 64 + have "\ = (replicate (64 - cbits) False) @ take cbits xs" + by (simp add: bl_shiftr word_size add.commute add.left_commute) + also + from len_x len_c 64 + have "\ = to_bl (of_bl (take cbits (drop (length guard) cref)) :: machine_word)" + by (simp add: xs word_rev_tf takefill_alt rev_take rev_drop) + + finally + show ?thesis + by (simp add: cte_map_def') +qed + +lemma cte_map_shift': + "\ to_bl cref' = zs @ cref; guard \ cref; length cref = cbits + length guard; + is_aligned ptr (5 + cbits); cbits \ word_bits - cte_level_bits \ \ + ptr + 32 * 
(cref' && mask cbits) = cte_map (ptr, drop (length guard) cref)" + by (auto dest: cte_map_shift) + +lemma cap_relation_Null2 [simp]: + "cap_relation c NullCap = (c = cap.NullCap)" + by (cases c) auto + +lemmas cnode_cap_case_if = cap_case_CNodeCap + +lemma corres_stateAssert_assume_stronger: + "\ corres_underlying sr nf nf' r P Q f (g ()); + \s s'. \ (s, s') \ sr; P s; Q s' \ \ P' s' \ \ + corres_underlying sr nf nf' r P Q f (stateAssert P' [] >>= g)" + apply (clarsimp simp: bind_assoc stateAssert_def) + apply (rule corres_symb_exec_r [OF _ get_sp]) + apply (rule_tac F="P' x" in corres_req) + apply clarsimp + apply (auto elim: corres_guard_imp)[1] + apply wp+ + done + +lemma cap_table_at_gsCNodes: + "cap_table_at bits ptr s \ (s, s') \ state_relation + \ gsCNodes s' ptr = Some bits" + apply (clarsimp simp: state_relation_def ghost_relation_def + obj_at_def is_cap_table) + apply blast + done + +lemma rab_corres': + "\ cap_relation (fst a) c'; drop (64-bits) (to_bl cref') = snd a; + bits = length (snd a) \ \ + corres (lfr \ (\(cte, bits) (cte', bits'). + cte' = cte_map cte \ length bits = bits')) + (valid_objs and pspace_aligned and valid_cap (fst a)) + (valid_objs' and pspace_distinct' and pspace_aligned' and valid_cap' c') + (resolve_address_bits a) + (resolveAddressBits c' cref' bits)" +unfolding resolve_address_bits_def +proof (induct a arbitrary: c' cref' bits rule: resolve_address_bits'.induct) + case (1 z cap cref) + show ?case + proof (cases "isCNodeCap c'") + case True + with "1.prems" + obtain ptr guard cbits where caps: + "cap = cap.CNodeCap ptr cbits guard" + "c' = CNodeCap ptr cbits (of_bl guard) (length guard)" + apply (cases cap, simp_all add: isCap_defs) + apply auto + done + with "1.prems" + have IH: "\vd vc c' f' cref' bits. + \ cbits + length guard \ 0; \ length cref < cbits + length guard; guard \ cref; + vc = drop (cbits + length guard) cref; vc \ []; vd \ cap.NullCap; + cap_relation vd c'; bits = length vc; is_cnode_cap vd; + drop (64 - bits) (to_bl cref') = vc \ + \ corres (lfr \ (\(cte, bits) (cte', bits'). + cte' = cte_map cte \ length bits = bits')) + (valid_objs and pspace_aligned and (\s. s \ fst (vd,vc))) + (valid_objs' and pspace_distinct' and pspace_aligned' and (\s. 
s \' c')) + (resolve_address_bits' z (vd, vc)) + (CSpace_H.resolveAddressBits c' cref' bits)" + apply - + apply (rule "1.hyps" [of _ cbits guard, OF caps(1)]) + prefer 7 + apply (clarsimp simp: in_monad) + apply (rule get_cap_success) + apply (auto simp: in_monad intro!: get_cap_success) (* takes time *) + done + note if_split [split del] + { assume "cbits + length guard = 0 \ cbits = 0 \ guard = []" + hence ?thesis + apply (simp add: caps isCap_defs + resolveAddressBits.simps resolve_address_bits'.simps) + apply (rule corres_fail) + apply (clarsimp simp: valid_cap_def) + done + } + moreover + { assume "cbits + length guard \ 0 \ \(cbits = 0 \ guard = [])" + hence [simp]: "((cbits + length guard = 0) = False) \ + ((cbits = 0 \ guard = []) = False) \ + (0 < cbits \ guard \ []) " by simp + from "1.prems" + have ?thesis + apply - + apply (rule corres_assume_pre) + apply (subgoal_tac "is_aligned ptr (5 + cbits) \ cbits \ word_bits - cte_level_bits") (*cte_level_bits *) + prefer 2 + apply (clarsimp simp: caps) + apply (erule valid_CNodeCapE) + apply fastforce + apply fastforce + apply (fastforce simp: word_bits_def cte_level_bits_def) + apply (thin_tac "t \ state_relation" for t) + apply (erule conjE) + apply (subst resolveAddressBits.simps) + apply (subst resolve_address_bits'.simps) + apply (simp add: caps isCap_defs Let_def) + apply (simp add: linorder_not_less drop_postfix_eq) + apply (simp add: liftE_bindE[where a="locateSlotCap a b" for a b]) + apply (simp add: locateSlot_conv) + apply (rule corres_stateAssert_assume_stronger[rotated]) + apply (clarsimp simp: valid_cap_def cap_table_at_gsCNodes isCap_simps) + apply (rule and_mask_less_size, simp add: word_bits_def word_size cte_level_bits_def) + apply (erule exE) + apply (cases "guard \ cref") + prefer 2 + apply (clarsimp simp: guard_mask_shift lookup_failure_map_def unlessE_whenE) + apply (clarsimp simp: guard_mask_shift unlessE_whenE) + apply (cases "length cref < cbits + length guard") + apply (simp add: lookup_failure_map_def) + apply simp + apply (cases "length cref = cbits + length guard") + apply clarsimp + apply (rule corres_noopE) + prefer 2 + apply wp + apply wp + apply (clarsimp simp: objBits_simps cte_level_bits_def) + apply (erule (2) valid_CNodeCapE) + apply (erule (3) cte_map_shift') + apply simp + apply simp + apply (subgoal_tac "cbits + length guard < length cref"; simp) + apply (rule corres_initial_splitE) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule getSlotCap_corres) + apply (simp add: objBits_simps cte_level_bits_def) + apply (erule (1) cte_map_shift) + apply simp + apply assumption + apply (simp add: cte_level_bits_def) + apply clarsimp + apply (clarsimp simp: valid_cap_def) + apply (erule cap_table_at_cte_at) + apply simp + apply clarsimp + apply (case_tac "is_cnode_cap rv") + prefer 2 + apply (simp add: cnode_cap_case_if) + apply (rule corres_noopE) + prefer 2 + apply (rule no_fail_pre, rule no_fail_returnOK) + apply (rule TrueI) + prefer 2 + apply (simp add: unlessE_whenE cnode_cap_case_if) + apply (rule IH, (simp_all)[9]) + apply clarsimp + apply (drule postfix_dropD) + apply clarsimp + apply (subgoal_tac "64 + (cbits + length guard) - length cref = (cbits + length guard) + (64 - length cref)") + prefer 2 + apply (drule len_drop_lemma) + apply simp + apply arith + apply simp + apply (subst drop_drop [symmetric]) + apply simp + apply wp + apply (clarsimp simp: objBits_simps cte_level_bits_def) + apply (erule (1) cte_map_shift) + apply simp + apply assumption + apply (simp add: cte_level_bits_def) 
+ apply (wp get_cap_wp) + apply clarsimp + apply (erule (1) cte_wp_valid_cap) + apply wpsimp + done + } + ultimately + show ?thesis by fast + next + case False + with "1.prems" + show ?thesis + by (cases cap) + (auto simp: resolve_address_bits'.simps resolveAddressBits.simps + isCap_defs lookup_failure_map_def) + qed +qed + +lemma getThreadCSpaceRoot: + "getThreadCSpaceRoot t = return t" + by (simp add: getThreadCSpaceRoot_def locateSlot_conv + tcbCTableSlot_def) + +lemma getThreadVSpaceRoot: + "getThreadVSpaceRoot t = return (t+2^cteSizeBits)" (*2^cte_level_bits*) + by (simp add: getThreadVSpaceRoot_def locateSlot_conv objBits_simps' + tcbVTableSlot_def shiftl_t2n cte_level_bits_def) + +lemma getSlotCap_tcb_corres: + "corres (\t c. cap_relation (tcb_ctable t) c) + (tcb_at t and valid_objs and pspace_aligned) + (pspace_distinct' and pspace_aligned') + (gets_the (get_tcb t)) + (getSlotCap t)" + (is "corres ?r ?P ?Q ?f ?g") + using get_cap_corres [where cslot_ptr = "(t, tcb_cnode_index 0)"] + apply (simp add: getSlotCap_def liftM_def[symmetric]) + apply (drule corres_guard_imp [where P="?P" and P'="?Q"]) + apply (clarsimp simp: cte_at_cases tcb_at_def + dest!: get_tcb_SomeD) + apply simp + apply (subst(asm) corres_cong [OF refl refl gets_the_tcb_get_cap[symmetric] refl refl]) + apply simp + apply (simp add: o_def cte_map_def tcb_cnode_index_def) + done + +lemma lookupSlotForThread_corres: + "corres (lfr \ (\(cref, bits) cref'. cref' = cte_map cref)) + (valid_objs and pspace_aligned and tcb_at t) + (valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' t) + (lookup_slot_for_thread t (to_bl cptr)) + (lookupSlotForThread t cptr)" + apply (unfold lookup_slot_for_thread_def lookupSlotForThread_def) + apply (simp add: const_def) + apply (simp add: getThreadCSpaceRoot) + apply (fold returnOk_liftE) + apply simp + apply (rule corres_initial_splitE) + apply (subst corres_liftE_rel_sum) + apply (rule corres_guard_imp) + apply (rule getSlotCap_tcb_corres) + apply simp + apply simp + apply (subst bindE_returnOk[symmetric]) + apply (rule corres_initial_splitE) + apply (rule rab_corres') + apply simp + apply (simp add: word_size) + apply simp + apply (clarsimp simp: word_size) + prefer 4 + apply wp + apply clarsimp + apply (erule (1) objs_valid_tcb_ctable) + prefer 4 + apply wp + apply clarsimp + apply simp + prefer 2 + apply (rule hoare_vcg_precond_impE) + apply (rule resolve_address_bits_cte_at [unfolded validE_R_def]) + apply clarsimp + prefer 2 + apply (rule hoare_vcg_precond_impE) + apply (rule resolveAddressBits_cte_at') + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (simp add: returnOk_def split_def) + done + +lemmas rab_cte_at' [wp] = resolveAddressBits_cte_at' [folded validE_R_def] + +lemma lookupSlot_cte_at_wp[wp]: + "\valid_objs'\ lookupSlotForThread t addr \\rv. cte_at' rv\, \\r. \\" + apply (simp add: lookupSlotForThread_def) + apply (simp add: getThreadCSpaceRoot_def locateSlot_conv tcbCTableSlot_def) + apply (wp | simp add: split_def)+ + done + +lemma lookupSlot_inv[wp]: + "\P\ lookupSlotForThread t addr \\_. 
P\" + apply (simp add: lookupSlotForThread_def) + apply (simp add: getThreadCSpaceRoot_def locateSlot_conv tcbCTableSlot_def) + apply (wp | simp add: split_def)+ + done + +lemma lookupCap_corres: + "corres (lfr \ cap_relation) + (valid_objs and pspace_aligned and tcb_at t) + (valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' t) + (lookup_cap t (to_bl ref)) (lookupCap t ref)" + apply (simp add: lookup_cap_def lookupCap_def bindE_assoc + lookupCapAndSlot_def liftME_def split_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF lookupSlotForThread_corres]) + apply (simp add: split_def getSlotCap_def liftM_def[symmetric] o_def) + apply (rule get_cap_corres) + apply (rule hoare_pre, wp lookup_slot_cte_at_wp|simp)+ + done + +lemma setObject_cte_obj_at_tcb': + assumes x: "\tcb f. P (tcbCTable_update f tcb) = P tcb" + "\tcb f. P (tcbVTable_update f tcb) = P tcb" + "\tcb f. P (tcbReply_update f tcb) = P tcb" + "\tcb f. P (tcbCaller_update f tcb) = P tcb" + "\tcb f. P (tcbIPCBufferFrame_update f tcb) = P tcb" + shows + "\\s. P' (obj_at' (P :: tcb \ bool) p s)\ + setObject c (cte::cte) + \\_ s. P' (obj_at' P p s)\" + apply (clarsimp simp: setObject_def in_monad split_def + valid_def lookupAround2_char1 + obj_at'_def ps_clear_upd) + apply (clarsimp elim!: rsubst[where P=P']) + apply (clarsimp simp: updateObject_cte in_monad objBits_simps + tcbCTableSlot_def tcbVTableSlot_def x + typeError_def + split: if_split_asm + Structures_H.kernel_object.split_asm) + done + +lemma setCTE_typ_at': + "\\s. P (typ_at' T p s)\ setCTE c cte \\_ s. P (typ_at' T p s)\" + by (clarsimp simp add: setCTE_def) (wp setObject_typ_at') + +lemmas setObject_typ_at [wp] = setObject_typ_at' [where P=id, simplified] + +lemma setCTE_typ_at [wp]: + "\typ_at' T p\ setCTE c cte \\_. typ_at' T p\" + by (clarsimp simp add: setCTE_def) wp + +lemmas setCTE_typ_ats [wp] = typ_at_lifts [OF setCTE_typ_at'] + +lemma setObject_cte_ksCurDomain[wp]: + "\\s. P (ksCurDomain s)\ setObject ptr (cte::cte) \\_ s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_cte_inv | simp)+ + done + +lemma setCTE_tcb_in_cur_domain': + "\tcb_in_cur_domain' t\ + setCTE c cte + \\_. tcb_in_cur_domain' t\" + unfolding tcb_in_cur_domain'_def setCTE_def + apply (rule_tac f="\s. ksCurDomain s" in hoare_lift_Pf) + apply (wp setObject_cte_obj_at_tcb' | simp)+ + done + +lemma setCTE_ctes_of_wp [wp]: + "\\s. P ((ctes_of s) (p \ cte))\ + setCTE p cte + \\rv s. P (ctes_of s)\" + by (simp add: setCTE_def ctes_of_setObject_cte) + +lemma setCTE_weak_cte_wp_at: + "\\s. (if p = ptr then P (cteCap cte) else cte_wp_at' (\c. P (cteCap c)) p s)\ + setCTE ptr cte + \\uu s. cte_wp_at'(\c. P (cteCap c)) p s\" + apply (simp add: cte_wp_at_ctes_of) + apply wp + apply clarsimp + done + +lemma updateMDB_weak_cte_wp_at: + "\cte_wp_at' (\c. P (cteCap c)) p\ + updateMDB m f + \\uu. cte_wp_at'(\c. P (cteCap c)) p\" + unfolding updateMDB_def + apply simp + apply safe + apply (wp setCTE_weak_cte_wp_at getCTE_wp) + apply (auto simp: cte_wp_at'_def) + done + +lemma cte_wp_at_extract': + "\ cte_wp_at' (\c. 
c = x) p s; cte_wp_at' P p s \ \ P x" + by (clarsimp simp: cte_wp_at_ctes_of) + +lemmas setCTE_valid_objs = setCTE_valid_objs' + +lemma capFreeIndex_update_valid_cap': + "\fa \ fb; fb \ 2 ^ bits; is_aligned (of_nat fb :: machine_word) minUntypedSizeBits; + s \' capability.UntypedCap d v bits fa\ + \ s \' capability.UntypedCap d v bits fb" + apply (clarsimp simp:valid_cap'_def capAligned_def valid_untyped'_def ko_wp_at'_def) + apply (intro conjI impI allI) + apply (elim allE) + apply (erule(1) impE)+ + apply (erule disjE) + apply simp_all + apply (rule disjI1) + apply clarsimp + apply (erule disjoint_subset2[rotated]) + apply (clarsimp) + apply (rule word_plus_mono_right) + apply (rule of_nat_mono_maybe_le[THEN iffD1]) + apply (subst word_bits_def[symmetric]) + apply (erule less_le_trans[OF _ power_increasing]) + apply simp + apply simp + apply (subst word_bits_def[symmetric]) + apply (erule le_less_trans) + apply (erule less_le_trans[OF _ power_increasing]) + apply simp+ + apply (erule is_aligned_no_wrap') + apply (rule word_of_nat_less) + apply simp + apply (erule allE)+ + apply (erule(1) impE)+ + apply simp + done + +lemma maxFreeIndex_update_valid_cap'[simp]: + "s \' capability.UntypedCap d v0a v1a fa \ + s \' capability.UntypedCap d v0a v1a (maxFreeIndex v1a)" + apply (rule capFreeIndex_update_valid_cap'[rotated -1]) + apply assumption + apply (clarsimp simp: valid_cap'_def capAligned_def ko_wp_at'_def + maxFreeIndex_def shiftL_nat)+ + apply (erule is_aligned_weaken[OF is_aligned_triv]) + done + +lemma ctes_of_valid_cap'': + "\ ctes_of s p = Some r; valid_objs' s\ \ s \' (cteCap r)" + apply (rule cte_wp_at_valid_objs_valid_cap'[where P="(=) r", simplified]) + apply (simp add: cte_wp_at_ctes_of) + apply assumption + done + +lemma cap_insert_objs' [wp]: + "\valid_objs' + and valid_cap' cap\ + cteInsert cap src dest \\rv. valid_objs'\" + including no_pre + apply (simp add: cteInsert_def updateCap_def setUntypedCapAsFull_def bind_assoc split del: if_split) + apply (wp setCTE_valid_objs) + apply simp + apply wp+ + apply (clarsimp simp: updateCap_def) + apply (wp|simp)+ + apply (rule hoare_drop_imp)+ + apply wp+ + apply (rule hoare_strengthen_post[OF getCTE_sp]) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps + dest!: ctes_of_valid_cap'') + done + +lemma cteInsert_weak_cte_wp_at: + "\\s. if p = dest then P cap else p \ src \ + cte_wp_at' (\c. P (cteCap c)) p s\ + cteInsert cap src dest + \\uu. cte_wp_at'(\c. P (cteCap c)) p\" + unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def + apply (simp add: bind_assoc split del: if_split) + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ + apply (wp getCTE_ctes_wp)+ + apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ +done + +lemma setCTE_valid_cap: + "\valid_cap' c\ setCTE ptr cte \\r. valid_cap' c\" + by (rule typ_at_lifts, rule setCTE_typ_at') + +lemma updateMDB_valid_cap: + "\valid_cap' c\ updateMDB ptr f \\_. valid_cap' c\" + unfolding updateMDB_def + apply simp + apply rule + apply (wp setCTE_valid_cap) + done + +lemma set_is_modify: + "m p = Some cte \ + m (p \ cteMDBNode_update (\_. (f (cteMDBNode cte))) cte) = + m (p \ cteMDBNode_update f cte)" + apply (case_tac cte) + apply (rule ext) + apply clarsimp + done + +lemma updateMDB_ctes_of_wp: + "\\s. (p \ 0 \ P (modify_map (ctes_of s) p (cteMDBNode_update f))) \ + (p = 0 \ P (ctes_of s))\ + updateMDB p f + \\rv s. 
P (ctes_of s)\" + apply (simp add: updateMDB_def) + apply safe + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (simp add: modify_map_def set_is_modify) + done + +lemma updateMDB_ctes_of_no_0 [wp]: + "\\s. no_0 (ctes_of s) \ + P (modify_map (ctes_of s) p (cteMDBNode_update f))\ + updateMDB p f + \\rv s. P (ctes_of s)\" + by (wp updateMDB_ctes_of_wp) clarsimp + +lemma updateMDB_no_0 [wp]: + "\\s. no_0 (ctes_of s)\ + updateMDB p f + \\rv s. no_0 (ctes_of s)\" + by wp simp + +lemma isMDBParentOf_next_update [simp]: + "isMDBParentOf (cteMDBNode_update (mdbNext_update f) cte) cte' = + isMDBParentOf cte cte'" + "isMDBParentOf cte (cteMDBNode_update (mdbNext_update f) cte') = + isMDBParentOf cte cte'" + apply (cases cte) + apply (cases cte') + apply (simp add: isMDBParentOf_def) + apply (cases cte) + apply (cases cte') + apply (clarsimp simp: isMDBParentOf_def) + done + +lemma isMDBParentOf_next_update_cte [simp]: + "isMDBParentOf (CTE cap (mdbNext_update f node)) cte' = + isMDBParentOf (CTE cap node) cte'" + "isMDBParentOf cte (CTE cap (mdbNext_update f node)) = + isMDBParentOf cte (CTE cap node)" + apply (cases cte') + apply (simp add: isMDBParentOf_def) + apply (cases cte) + apply (clarsimp simp: isMDBParentOf_def) + done + +lemma valid_mdbD1': + "\ ctes_of s p = Some cte; mdbNext (cteMDBNode cte) \ 0; valid_mdb' s \ \ + \c. ctes_of s (mdbNext (cteMDBNode cte)) = Some c \ mdbPrev (cteMDBNode c) = p" + by (clarsimp simp add: valid_mdb'_def valid_mdb_ctes_def valid_dlist_def Let_def) + +lemma valid_mdbD2': + "\ ctes_of s p = Some cte; mdbPrev (cteMDBNode cte) \ 0; valid_mdb' s \ \ + \c. ctes_of s (mdbPrev (cteMDBNode cte)) = Some c \ mdbNext (cteMDBNode c) = p" + by (clarsimp simp add: valid_mdb'_def valid_mdb_ctes_def valid_dlist_def Let_def) + +lemma prev_next_update: + "cteMDBNode_update (mdbNext_update f) (cteMDBNode_update (mdbPrev_update f') x) = + cteMDBNode_update (mdbPrev_update f') (cteMDBNode_update (mdbNext_update f) x)" + apply (cases x) + apply (rename_tac cap mdbnode) + apply (case_tac mdbnode) + apply simp + done + +lemmas modify_map_prev_next_up [simp] = + modify_map_com [where f="cteMDBNode_update (mdbNext_update f)" and + g="cteMDBNode_update (mdbPrev_update f')" for f f', + OF prev_next_update] + +lemma update_prev_next_trancl: + assumes nxt: "m \ x \\<^sup>+ y" + shows "(modify_map m ptr (cteMDBNode_update (mdbPrev_update z))) \ x \\<^sup>+ y" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some cte) + let ?m = "m(ptr \ cteMDBNode_update (mdbPrev_update z) cte)" + + from nxt have "?m \ x \\<^sup>+ y" + proof induct + case (base y) + thus ?case using Some + by - (rule r_into_trancl, clarsimp simp: next_unfold') + next + case (step q r) + show ?case + proof (rule trancl_into_trancl) + show "?m \ q \ r" using step(2) Some + by (simp only: mdb_next_update, clarsimp simp: next_unfold') + qed fact+ + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma update_prev_next_trancl2: + assumes nxt: "(modify_map m ptr (cteMDBNode_update (mdbPrev_update z))) \ x \\<^sup>+ y" + shows "m \ x \\<^sup>+ y" +proof (cases "m ptr") + case None + thus ?thesis using nxt + by (simp add: modify_map_def) +next + case (Some cte) + let ?m = "m(ptr \ cteMDBNode_update (mdbPrev_update z) cte)" + + from nxt have "m \ x \\<^sup>+ y" + proof induct + case (base y) + thus ?case using Some + by (fastforce simp: modify_map_def mdb_next_update next_unfold' split: if_split_asm) + next 
+ case (step q r) + show ?case + proof + show "m \ q \ r" using step(2) Some + by (auto simp: modify_map_def mdb_next_update next_unfold' split: if_split_asm) + qed fact+ + qed + thus ?thesis using Some + by (simp add: modify_map_def) +qed + +lemma next_update_lhs: + "(m(p \ cte) \ p \ x) = (x = mdbNext (cteMDBNode cte))" + by (auto simp: mdb_next_update) + +lemma next_update_lhs_trancl: + assumes np: "\ m \ mdbNext (cteMDBNode cte) \\<^sup>* p" + shows "(m(p \ cte) \ p \\<^sup>+ x) = (m \ mdbNext (cteMDBNode cte) \\<^sup>* x)" +proof + assume "m(p \ cte) \ p \\<^sup>+ x" + thus "m \ mdbNext (cteMDBNode cte) \\<^sup>* x" + proof (cases rule: tranclE2') + case base + thus ?thesis + by (simp add: next_update_lhs) + next + case (trancl q) + hence "m(p \ cte) \ mdbNext (cteMDBNode cte) \\<^sup>+ x" + by (simp add: next_update_lhs) + thus ?thesis + by (rule trancl_into_rtrancl [OF mdb_trancl_update_other]) fact+ + qed +next + assume "m \ mdbNext (cteMDBNode cte) \\<^sup>* x" + hence "m(p \ cte) \ mdbNext (cteMDBNode cte) \\<^sup>* x" + by (rule mdb_rtrancl_other_update) fact+ + moreover + have "m(p \ cte) \ p \ mdbNext (cteMDBNode cte)" by (simp add: next_update_lhs) + ultimately show "m(p \ cte) \ p \\<^sup>+ x" by simp +qed + +lemma next_update_lhs_rtrancl: + assumes np: "\ m \ mdbNext (cteMDBNode cte) \\<^sup>* p" + shows "(m(p \ cte) \ p \\<^sup>* x) = (p = x \ m \ mdbNext (cteMDBNode cte) \\<^sup>* x)" + apply rule + apply (erule next_rtrancl_tranclE) + apply (auto simp add: next_update_lhs_trancl [OF np, symmetric]) + done + +definition + cte_mdb_prop :: "(machine_word \ cte) \ machine_word \ (mdbnode \ bool) \ bool" +where + "cte_mdb_prop m p P \ (\cte. m p = Some cte \ P (cteMDBNode cte))" + +lemma cte_mdb_prop_no_0: + "\ no_0 m; cte_mdb_prop m p P \ \ p \ 0" + unfolding cte_mdb_prop_def no_0_def + by auto + +lemma mdb_chain_0_modify_map_prev: + "mdb_chain_0 m \ mdb_chain_0 (modify_map m ptr (cteMDBNode_update (mdbPrev_update f)))" + unfolding mdb_chain_0_def + apply rule + apply (rule update_prev_next_trancl) + apply (clarsimp simp: modify_map_def dom_def split: option.splits if_split_asm) + done + +lemma mdb_chain_0_modify_map_next: + assumes chain: "mdb_chain_0 m" + and no0: "no_0 m" + and dom: "target \ dom m" + and npath: "\ m \ target \\<^sup>* ptr" + shows + "mdb_chain_0 (modify_map m ptr (cteMDBNode_update (mdbNext_update (\_. 
target))))" + (is "mdb_chain_0 ?M") + unfolding mdb_chain_0_def +proof + fix x + assume "x \ dom ?M" + hence xd: "x \ dom m" + by (clarsimp simp: modify_map_def dom_def split: if_split_asm) + hence x0: "m \ x \\<^sup>+ 0" using chain unfolding mdb_chain_0_def by simp + + from dom have t0: "m \ target \\<^sup>+ 0" + using chain unfolding mdb_chain_0_def by simp + + show "?M \ x \\<^sup>+ 0" + proof (cases "m ptr") + case None + thus ?thesis + by (simp add: modify_map_def) (rule x0) + next + case (Some cte) + show ?thesis + proof (cases "m \ x \\<^sup>* ptr") + case False + thus ?thesis + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule mdb_trancl_other_update [OF x0]) + done + next + case True + hence "?M \ x \\<^sup>* ptr" + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule next_rtrancl_tranclE) + apply simp + apply (rule trancl_into_rtrancl) + apply (erule no_loops_upd_last [OF mdb_chain_0_no_loops [OF chain no0]]) + done + moreover have "?M \ ptr \ target" + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (simp add: mdb_next_update) + done + moreover have "?M \ target \\<^sup>+ 0" using t0 + apply (subst next_update_is_modify [symmetric, OF _ refl]) + apply (rule Some) + apply (erule mdb_trancl_other_update [OF _ npath]) + done + ultimately show ?thesis by simp + qed + qed +qed + +lemma mdb_chain_0D: + "\ mdb_chain_0 m; p \ dom m \ \ m \ p \\<^sup>+ 0" + unfolding mdb_chain_0_def by auto + +lemma mdb_chain_0_nextD: + "\ mdb_chain_0 m; m p = Some cte \ \ m \ mdbNext (cteMDBNode cte) \\<^sup>* 0" + apply (drule mdb_chain_0D) + apply (erule domI) + apply (erule tranclE2) + apply (simp add: next_unfold') + apply (simp add: next_unfold') + done + +lemma null_mdb_no_next: + "\ valid_dlist m; no_0 m; + cte_mdb_prop m target (\m. mdbPrev m = nullPointer \ mdbNext m = nullPointer) \ + \ \ m \ x \ target" + unfolding cte_mdb_prop_def + by (auto elim: valid_dlistE elim!: valid_mdb_ctesE + simp: nullPointer_def no_0_def next_unfold') + +lemma null_mdb_no_trancl: + "\ valid_dlist m; no_0 m; + cte_mdb_prop m target (\m. mdbPrev m = nullPointer \ mdbNext m = nullPointer) \ + \ \ m \ x \\<^sup>+ target" + by (auto dest: null_mdb_no_next elim: tranclE) + +lemma null_mdb_no_next2: + "\ no_0 m; x \ 0; + cte_mdb_prop m target (\m. 
mdbPrev m = nullPointer \ mdbNext m = nullPointer) \ + \ \ m \ target \ x" + unfolding cte_mdb_prop_def + by (auto elim!: valid_mdb_ctesE simp: nullPointer_def no_0_def next_unfold') + +definition + "capASID cap \ case cap of + ArchObjectCap (FrameCap _ _ _ _ (Some (asid, _))) \ Some asid + | ArchObjectCap (PageTableCap _ _ (Some (asid, _))) \ Some asid + | _ \ None" + +lemmas capASID_simps [simp] = + capASID_def [split_simps capability.split arch_capability.split option.split prod.split] + +definition + "cap_asid_base' cap \ case cap of + ArchObjectCap (ASIDPoolCap _ asid) \ Some asid + | _ \ None" + +lemmas cap_asid_base'_simps [simp] = + cap_asid_base'_def [split_simps capability.split arch_capability.split option.split prod.split] + +definition + "cap_vptr' cap \ case cap of + ArchObjectCap (FrameCap _ _ _ _ (Some (_, vptr))) \ Some vptr + | ArchObjectCap (PageTableCap _ _ (Some (_, vptr))) \ Some vptr + | _ \ None" + +lemmas cap_vptr'_simps [simp] = + cap_vptr'_def [split_simps capability.split arch_capability.split option.split prod.split] + +definition + "weak_derived' cap cap' \ + capMasterCap cap = capMasterCap cap' \ + capBadge cap = capBadge cap' \ + capASID cap = capASID cap' \ + cap_asid_base' cap = cap_asid_base' cap' \ + cap_vptr' cap = cap_vptr' cap' \ + \ \check all fields of ReplyCap except capReplyCanGrant\ + (isReplyCap cap \ capTCBPtr cap = capTCBPtr cap' \ + capReplyMaster cap = capReplyMaster cap')" + +lemma capASID_update [simp]: + "capASID (RetypeDecls_H.updateCapData P x c) = capASID c" + unfolding capASID_def + apply (cases c, simp_all add: updateCapData_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: updateCapData_def + AARCH64_H.updateCapData_def + isCap_simps Let_def) + done + +lemma cap_vptr_update' [simp]: + "cap_vptr' (RetypeDecls_H.updateCapData P x c) = cap_vptr' c" + unfolding capASID_def + apply (cases c, simp_all add: updateCapData_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: updateCapData_def + AARCH64_H.updateCapData_def + isCap_simps Let_def) + done + +lemma cap_asid_base_update' [simp]: + "cap_asid_base' (RetypeDecls_H.updateCapData P x c) = cap_asid_base' c" + unfolding cap_asid_base'_def + apply (cases c, simp_all add: updateCapData_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: updateCapData_def + AARCH64_H.updateCapData_def + isCap_simps Let_def) + done + +lemma updateCapData_Master: + "updateCapData P d cap \ NullCap \ + capMasterCap (updateCapData P d cap) = capMasterCap cap" + apply (cases cap, simp_all add: updateCapData_def isCap_simps Let_def + split: if_split_asm) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, simp_all add: AARCH64_H.updateCapData_def) + done + +lemma updateCapData_Reply: + "isReplyCap (updateCapData P x c) = isReplyCap c" + apply (cases "updateCapData P x c = NullCap") + apply (clarsimp simp: isCap_simps) + apply (simp add: updateCapData_def isCap_simps Let_def) + apply (drule updateCapData_Master) + apply (rule master_eqI, rule isCap_Master) + apply simp + done + +lemma weak_derived_updateCapData: + "\ (updateCapData P x c) \ NullCap; weak_derived' c c'; + capBadge (updateCapData P x c) = capBadge c' \ + \ weak_derived' (updateCapData P x c) c'" + apply (clarsimp simp add: weak_derived'_def updateCapData_Master) + apply (clarsimp elim: impE dest!: iffD1[OF updateCapData_Reply]) + apply (clarsimp 
simp: isCap_simps) + apply (clarsimp simp: Let_def isCap_simps updateCapData_def) + done + +lemma maskCapRights_Reply[simp]: + "isReplyCap (maskCapRights r c) = isReplyCap c" + apply (insert capMasterCap_maskCapRights) + apply (rule master_eqI, rule isCap_Master) + apply simp + done + +lemma capASID_mask [simp]: + "capASID (maskCapRights x c) = capASID c" + unfolding capASID_def + apply (cases c, simp_all add: maskCapRights_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: maskCapRights_def AARCH64_H.maskCapRights_def isCap_simps Let_def) + done + +lemma cap_vptr_mask' [simp]: + "cap_vptr' (maskCapRights x c) = cap_vptr' c" + unfolding cap_vptr'_def + apply (cases c, simp_all add: maskCapRights_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: maskCapRights_def AARCH64_H.maskCapRights_def isCap_simps Let_def) + done + +lemma cap_asid_base_mask' [simp]: + "cap_asid_base' (maskCapRights x c) = cap_asid_base' c" + unfolding cap_vptr'_def + apply (cases c, simp_all add: maskCapRights_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: maskCapRights_def AARCH64_H.maskCapRights_def isCap_simps Let_def) + done + +lemmas cteInsert_valid_objs = cap_insert_objs' + +lemma subtree_not_Null: + assumes null: "m p = Some (CTE capability.NullCap node)" + assumes sub: "m \ c \ p" + shows "False" using sub null + by induct (auto simp: parentOf_def) + +lemma Null_not_subtree: + assumes null: "m c = Some (CTE capability.NullCap node)" + assumes sub: "m \ c \ p" + shows "False" using sub null + by induct (auto simp: parentOf_def) + +lemma subtree_Null_update: + assumes "no_0 m" "valid_dlist m" + assumes null: "m p = Some (CTE NullCap node)" + assumes node: "mdbPrev node = 0" + assumes init: "mdbNext (cteMDBNode cte) = 0" + shows "m \ c \ c' = m (p \ cte) \ c \ c'" +proof + assume "m \ c \ c'" + thus "m (p \ cte) \ c \ c'" using null init + proof induct + case direct_parent + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp add: mdb_next_unfold parentOf_def) + apply assumption + apply (simp add: parentOf_def) + apply (rule conjI) + apply clarsimp + apply clarsimp + done + next + case (trans_parent y z) + have "m \ c \ y" "m \ y \ z" "z \ 0" "m \ c parentOf z" by fact+ + with trans_parent.prems + show ?case + apply - + apply (rule subtree.trans_parent) + apply (erule (1) trans_parent.hyps) + apply (clarsimp simp: mdb_next_unfold parentOf_def) + apply (drule (1) subtree_not_Null) + apply simp + apply assumption + apply (fastforce simp: parentOf_def) + done + qed +next + assume m: "m (p \ cte) \ c \ c'" + thus "m \ c \ c'" using assms m + proof induct + case (direct_parent x) + thus ?case + apply - + apply (cases "c=p") + apply (clarsimp simp: mdb_next_unfold) + apply (rule subtree.direct_parent) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (cases "p\x") + apply (clarsimp simp: parentOf_def split: if_split_asm) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + apply (case_tac z) + apply clarsimp + apply (clarsimp simp: no_0_def valid_dlist_def Let_def) + apply (erule_tac x=c in allE) + apply clarsimp + done + next + case (trans_parent x y) + have "m(p \ cte) \ c \ x" "m(p \ cte) \ x \ y" + "y \ 0" "m(p \ cte) \ c parentOf y" by fact+ + with trans_parent.prems + show ?case + apply - + apply (cases "p=x") + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + 
apply (frule (5) trans_parent.hyps) + apply (rule subtree.trans_parent) + apply assumption + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (clarsimp simp: parentOf_def simp del: fun_upd_apply) + apply (cases "p=y") + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + apply (clarsimp simp: valid_dlist_def Let_def) + apply (erule_tac x=x in allE) + apply (clarsimp simp: no_0_def) + apply (case_tac "p\c") + apply clarsimp + apply clarsimp + apply (erule (1) Null_not_subtree) + done + qed +qed + + +corollary descendants_of_Null_update': + assumes "no_0 m" "valid_dlist m" + assumes "m p = Some (CTE NullCap node)" + assumes "mdbPrev node = 0" + assumes "mdbNext (cteMDBNode cte) = 0" + shows "descendants_of' c (m (p \ cte)) = descendants_of' c m" using assms + by (simp add: descendants_of'_def subtree_Null_update [symmetric]) + +lemma ps_clear_32: + "\ ps_clear p tcbBlockSizeBits s; is_aligned p tcbBlockSizeBits \ + \ ksPSpace s (p + 2^cteSizeBits) = None" + apply (simp add: ps_clear_def) + apply (drule equals0D[where a="p + 2^cteSizeBits"]) + apply (simp add: dom_def field_simps objBits_defs) + apply (erule impE) + apply (rule word_plus_mono_right) + apply (simp add: mask_def) + apply (erule is_aligned_no_wrap') + apply (simp add: mask_def) + apply (erule mp) + apply (erule is_aligned_no_wrap') + apply simp + done + +lemma cte_at_cte_map_in_obj_bits: + "\ cte_at p s; pspace_aligned s; valid_objs s \ + \ cte_map p \ {fst p .. fst p + 2 ^ (obj_bits (the (kheap s (fst p)))) - 1} + \ kheap s (fst p) \ None" + apply (simp add: cte_at_cases) + apply (elim disjE conjE exE) + apply (clarsimp simp: well_formed_cnode_n_def) + apply (drule(1) pspace_alignedD[rotated]) + apply (erule(1) valid_objsE) + apply (frule arg_cong[where f="\S. snd p \ S"]) + apply (simp(no_asm_use) add: domIff) + apply (clarsimp simp: cte_map_def split_def + well_formed_cnode_n_def length_set_helper ex_with_length + valid_obj_def valid_cs_size_def valid_cs_def) + apply (subgoal_tac "of_bl (snd p) * 2^cte_level_bits < 2 ^ (cte_level_bits + length (snd p))") + apply (rule conjI) + apply (erule is_aligned_no_wrap') + apply (simp add: shiftl_t2n mult_ac) + apply (subst add_diff_eq[symmetric]) + apply (rule word_plus_mono_right) + apply (simp add: shiftl_t2n mult_ac) + apply (erule is_aligned_no_wrap') + apply (rule word_power_less_1) + apply (simp add: cte_level_bits_def word_bits_def) + apply (simp add: power_add) + apply (subst mult.commute, rule word_mult_less_mono1) + apply (rule of_bl_length) + apply (simp add: word_bits_def) + apply (simp add: cte_level_bits_def) + apply (simp add: cte_level_bits_def word_bits_def) + apply (drule power_strict_increasing [where a="2 :: nat"]) + apply simp + apply simp + apply (clarsimp simp: cte_map_def split_def field_simps) + apply (subgoal_tac "of_bl (snd p) * 2^cte_level_bits < (2^tcb_bits :: machine_word)") + apply (drule(1) pspace_alignedD[rotated]) + apply (rule conjI) + apply (erule is_aligned_no_wrap') + apply (simp add: word_bits_conv shiftl_t2n mult_ac) + apply simp + apply (rule word_plus_mono_right) + apply (simp add: shiftl_t2n mult_ac) + apply (drule word_le_minus_one_leq) + apply simp + apply (erule is_aligned_no_wrap') + apply simp + apply (simp add: tcb_cap_cases_def tcb_cnode_index_def to_bl_1 cte_level_bits_def + split: if_split_asm) + done + +lemma cte_map_inj: + assumes neq: "p \ p'" + assumes c1: "cte_at p s" + assumes c2: "cte_at p' s" + assumes vo: "valid_objs s" + assumes al: "pspace_aligned s" + assumes pd: "pspace_distinct s" + shows "cte_map 
p \ cte_map p'" + using cte_at_cte_map_in_obj_bits [OF c1 al vo] + cte_at_cte_map_in_obj_bits [OF c2 al vo] + pd + apply (clarsimp simp: pspace_distinct_def + simp del: atLeastAtMost_iff Int_atLeastAtMost) + apply (elim allE, drule mp) + apply (erule conjI)+ + defer + apply (simp add: field_simps + del: atLeastAtMost_iff Int_atLeastAtMost) + apply blast + apply (clarsimp simp: cte_map_def split_def) + apply (thin_tac "b \ a" for b a)+ + apply (rule notE[OF neq]) + apply (insert cte_at_length_limit [OF c1 vo]) + apply (simp add: shiftl_t2n[where n=5, simplified, simplified mult.commute, symmetric] + word_bits_def cte_level_bits_def prod_eq_iff) + apply (insert cte_at_cref_len[where p="fst p" and c="snd p" and c'="snd p'", simplified, OF c1]) + apply (simp add: c2 prod_eqI) + apply (subst rev_is_rev_conv[symmetric]) + apply (rule nth_equalityI) + apply simp + apply clarsimp + apply (drule_tac x="i + 5" in word_eqD) + apply (simp add: nth_shiftl test_bit_of_bl nth_rev) + done + +lemma cte_map_inj_ps: + assumes "p \ p'" + assumes "cte_at p s" + assumes "cte_at p' s" + assumes "valid_pspace s" + shows "cte_map p \ cte_map p'" using assms + apply - + apply (rule cte_map_inj) + apply (auto simp: valid_pspace_def) + done + +lemma cte_map_inj_eq: + "\cte_map p = cte_map p'; + cte_at p s; cte_at p' s; + valid_objs s; pspace_aligned s; pspace_distinct s\ + \ p = p'" + apply (rule classical) + apply (drule (5) cte_map_inj) + apply simp + done + +lemma tcb_cases_related2: + "tcb_cte_cases (v - x) = Some (getF, setF) \ + \getF' setF' restr. tcb_cap_cases (tcb_cnode_index (unat ((v - x) >> cte_level_bits))) = Some (getF', setF', restr) + \ cte_map (x, tcb_cnode_index (unat ((v - x) >> cte_level_bits))) = v + \ (\tcb tcb'. tcb_relation tcb tcb' \ cap_relation (getF' tcb) (cteCap (getF tcb'))) + \ (\tcb tcb' cap cte. tcb_relation tcb tcb' \ cap_relation cap (cteCap cte) + \ tcb_relation (setF' (\x. cap) tcb) (setF (\x. cte) tcb'))" + apply (clarsimp simp: tcb_cte_cases_def tcb_relation_def cte_level_bits_def cteSizeBits_def + tcb_cap_cases_simps[simplified] + split: if_split_asm) + apply (simp_all add: tcb_cnode_index_def cte_level_bits_def cte_map_def field_simps to_bl_1) + done + +lemma other_obj_relation_KOCTE[simp]: + "\ other_obj_relation ko (KOCTE cte)" + by (simp add: other_obj_relation_def + split: Structures_A.kernel_object.splits + AARCH64_A.arch_kernel_obj.splits) + +lemma cte_map_pulls_tcb_to_abstract: + "\ y = cte_map z; pspace_relation (kheap s) (ksPSpace s'); + ksPSpace s' x = Some (KOTCB tcb); + pspace_aligned s; pspace_distinct s; valid_objs s; + cte_at z s; (y - x) \ dom tcb_cte_cases \ + \ \tcb'. 
kheap s x = Some (TCB tcb') \ tcb_relation tcb' tcb + \ (z = (x, tcb_cnode_index (unat ((y - x) >> cte_level_bits))))" + apply (rule pspace_dom_relatedE, assumption+) + apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + apply (clarsimp simp: other_obj_relation_def + split: Structures_A.kernel_object.split_asm + AARCH64_A.arch_kernel_obj.split_asm) + apply (drule tcb_cases_related2) + apply clarsimp + apply (frule(1) cte_wp_at_tcbI [OF _ _ TrueI, where t="(a, b)" for a b, simplified]) + apply (erule(5) cte_map_inj_eq [OF sym]) + done + +lemma pspace_relation_update_tcbs: + "\ pspace_relation s s'; s x = Some (TCB otcb); s' x = Some (KOTCB otcb'); + tcb_relation tcb tcb' \ + \ pspace_relation (s(x \ TCB tcb)) (s'(x \ KOTCB tcb'))" + apply (simp add: pspace_relation_def pspace_dom_update + dom_fun_upd2 + del: dom_fun_upd) + apply (erule conjE) + apply (rule ballI, drule(1) bspec) + apply (rule conjI, simp add: other_obj_relation_def) + apply (clarsimp split: Structures_A.kernel_object.split_asm) + apply (drule bspec, fastforce) + apply clarsimp + apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + done + +lemma cte_map_pulls_cte_to_abstract: + "\ y = cte_map z; pspace_relation (kheap s) (ksPSpace s'); + ksPSpace s' y = Some (KOCTE cte); + valid_objs s; pspace_aligned s; pspace_distinct s; cte_at z s \ + \ \sz cs cap. kheap s (fst z) = Some (CNode sz cs) \ cs (snd z) = Some cap + \ cap_relation cap (cteCap cte)" + apply (rule pspace_dom_relatedE, assumption+) + apply (erule(1) obj_relation_cutsE, simp_all) + apply clarsimp + apply (frule(1) cte_map_inj_eq[OF sym], simp_all) + apply (rule cte_wp_at_cteI, (fastforce split: if_split_asm)+) + done + +lemma pspace_relation_update_ctes: + assumes ps: "pspace_relation s s'" + and octe: "s' z = Some (KOCTE octe)" + and s'': "\x. s'' x = (case (s x) of None \ None | Some ko \ + (case ko of CNode sz cs \ + Some (CNode sz (\y. if y \ dom cs \ cte_map (x, y) = z + then Some cap else cs y)) + | _ \ Some ko))" + and rel: "cap_relation cap (cteCap cte')" + shows "pspace_relation s'' (s'(z \ KOCTE cte'))" +proof - + have funny_update_no_dom: + "\fun P v. dom (\y. if y \ dom fun \ P y then Some v else fun y) + = dom fun" + by (rule set_eqI, simp add: domIff) + + have funny_update_well_formed_cnode: + "\sz fun P v. + well_formed_cnode_n sz (\y. if y \ dom fun \ P y then Some v else fun y) + = well_formed_cnode_n sz fun" + by (simp add: well_formed_cnode_n_def funny_update_no_dom) + + have obj_relation_cuts1: + "\ko x. obj_relation_cuts (the (case ko of CNode sz cs \ + Some (CNode sz (\y. if y \ dom cs \ cte_map (x, y) = z + then Some cap else cs y)) + | _ \ Some ko)) x + = obj_relation_cuts ko x" + by (simp split: Structures_A.kernel_object.split + add: funny_update_well_formed_cnode funny_update_no_dom) + + have domEq[simp]: + "dom s'' = dom s" + using s'' + apply (intro set_eqI) + apply (simp add: domIff split: option.split Structures_A.kernel_object.split) + done + + have obj_relation_cuts2: + "\x. 
x \ dom s'' \ obj_relation_cuts (the (s'' x)) x = obj_relation_cuts (the (s x)) x" + using s'' + by (clarsimp simp add: obj_relation_cuts1 dest!: domD) + + show ?thesis using ps octe + apply (clarsimp simp add: pspace_relation_def dom_fun_upd2 + simp del: dom_fun_upd split del: if_split) + apply (rule conjI) + apply (erule subst[where t="dom s'"]) + apply (simp add: pspace_dom_def obj_relation_cuts2) + apply (simp add: obj_relation_cuts2) + apply (rule ballI, drule(1) bspec)+ + apply clarsimp + apply (intro conjI impI) + apply (simp add: s'') + apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] + apply (clarsimp simp: cte_relation_def rel) + apply (rule obj_relation_cutsE, assumption+, simp_all add: s'') + apply (clarsimp simp: cte_relation_def) + apply (clarsimp simp: is_other_obj_relation_type other_obj_relation_def + split: Structures_A.kernel_object.split_asm) + done +qed + +definition pspace_relations where + "pspace_relations ekh kh kh' \ pspace_relation kh kh' \ ekheap_relation ekh kh'" + +lemma set_cap_not_quite_corres_prequel: + assumes cr: + "pspace_relations (ekheap s) (kheap s) (ksPSpace s')" + "(x,t') \ fst (setCTE p' c' s')" + "valid_objs s" "pspace_aligned s" "pspace_distinct s" "cte_at p s" + "pspace_aligned' s'" "pspace_distinct' s'" + assumes c: "cap_relation c (cteCap c')" + assumes p: "p' = cte_map p" + shows "\t. ((),t) \ fst (set_cap c p s) \ + pspace_relations (ekheap t) (kheap t) (ksPSpace t')" + using cr + apply (clarsimp simp: setCTE_def setObject_def in_monad split_def) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (elim disjE exE conjE) + apply (clarsimp simp: lookupAround2_char1 pspace_relations_def) + apply (frule(5) cte_map_pulls_tcb_to_abstract[OF p]) + apply (simp add: domI) + apply (frule tcb_cases_related2) + apply (clarsimp simp: set_cap_def2 split_def bind_def get_object_def + simpler_gets_def assert_def fail_def return_def + set_object_def get_def put_def) + apply (rule conjI) + apply (erule(2) pspace_relation_update_tcbs) + apply (simp add: c) + apply (clarsimp simp: ekheap_relation_def pspace_relation_def) + apply (drule bspec, erule domI) + apply (clarsimp simp: etcb_relation_def tcb_cte_cases_def split: if_split_asm) + apply (clarsimp simp: pspace_relations_def) + apply (frule(5) cte_map_pulls_cte_to_abstract[OF p]) + apply (clarsimp simp: set_cap_def split_def bind_def get_object_def + simpler_gets_def assert_def a_type_def fail_def return_def + set_object_def get_def put_def domI) + apply (erule(1) valid_objsE) + apply (clarsimp simp: valid_obj_def valid_cs_def valid_cs_size_def exI) + apply (rule conjI, clarsimp) + apply (rule conjI) + apply (erule(1) pspace_relation_update_ctes[where cap=c]) + apply clarsimp + apply (intro conjI impI) + apply (rule ext, clarsimp simp add: domI p) + apply (drule cte_map_inj_eq [OF _ _ cr(6) cr(3-5)]) + apply (simp add: cte_at_cases domI) + apply (simp add: prod_eq_iff) + apply (insert p)[1] + apply (clarsimp split: option.split Structures_A.kernel_object.split + intro!: ext) + apply (drule cte_map_inj_eq [OF _ _ cr(6) cr(3-5)]) + apply (simp add: cte_at_cases domI well_formed_cnode_invsI[OF cr(3)]) + apply clarsimp + apply (simp add: c) + apply (clarsimp simp: ekheap_relation_def pspace_relation_def) + apply (drule bspec, erule domI) + apply (clarsimp simp: etcb_relation_def tcb_cte_cases_def split: if_split_asm) + apply (simp add: wf_cs_insert) + done + +lemma setCTE_pspace_only: + "(rv, s') \ fst (setCTE p v s) \ \ps'. s' = ksPSpace_update (\s. 
ps') s" + apply (clarsimp simp: setCTE_def setObject_def in_monad split_def + dest!: in_inv_by_hoareD [OF updateObject_cte_inv]) + apply (rule exI, rule refl) + done + +lemma set_cap_not_quite_corres: + assumes cr: + "pspace_relations (ekheap s) (kheap s) (ksPSpace s')" + "cur_thread s = ksCurThread s'" + "idle_thread s = ksIdleThread s'" + "machine_state s = ksMachineState s'" + "work_units_completed s = ksWorkUnitsCompleted s'" + "domain_index s = ksDomScheduleIdx s'" + "domain_list s = ksDomSchedule s'" + "cur_domain s = ksCurDomain s'" + "domain_time s = ksDomainTime s'" + "(x,t') \ fst (updateCap p' c' s')" + "valid_objs s" "pspace_aligned s" "pspace_distinct s" "cte_at p s" + "pspace_aligned' s'" "pspace_distinct' s'" + "interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s')" + "(arch_state s, ksArchState s') \ arch_state_relation" + assumes c: "cap_relation c c'" + assumes p: "p' = cte_map p" + shows "\t. ((),t) \ fst (set_cap c p s) \ + pspace_relations (ekheap t) (kheap t) (ksPSpace t') \ + cdt t = cdt s \ + cdt_list t = cdt_list s \ + ekheap t = ekheap s \ + scheduler_action t = scheduler_action s \ + ready_queues t = ready_queues s \ + is_original_cap t = is_original_cap s \ + interrupt_state_relation (interrupt_irq_node t) (interrupt_states t) + (ksInterruptState t') \ + (arch_state t, ksArchState t') \ arch_state_relation \ + cur_thread t = ksCurThread t' \ + idle_thread t = ksIdleThread t' \ + machine_state t = ksMachineState t' \ + work_units_completed t = ksWorkUnitsCompleted t' \ + domain_index t = ksDomScheduleIdx t' \ + domain_list t = ksDomSchedule t' \ + cur_domain t = ksCurDomain t' \ + domain_time t = ksDomainTime t'" + using cr + apply (clarsimp simp: updateCap_def in_monad) + apply (drule use_valid [OF _ getCTE_sp[where P="\s. s2 = s" for s2], OF _ refl]) + apply clarsimp + apply (drule(7) set_cap_not_quite_corres_prequel) + apply simp + apply (rule c) + apply (rule p) + apply (erule exEI) + apply clarsimp + apply (frule setCTE_pspace_only) + apply (clarsimp simp: set_cap_def split_def in_monad set_object_def + get_object_def + split: Structures_A.kernel_object.split_asm if_split_asm) + done + +lemma descendants_of_eq': + assumes "cte_at p s" + assumes "cte_at src s" + assumes "cdt_relation (swp cte_at s) (cdt s) m'" + assumes "valid_mdb s" + assumes "valid_objs s" "pspace_aligned s" "pspace_distinct s" + shows "(cte_map src \ descendants_of' (cte_map p) m') = (src \ descendants_of p (cdt s))" + using assms + apply (simp add: cdt_relation_def del: split_paired_All) + apply (rule iffI) + prefer 2 + apply (auto simp del: split_paired_All)[1] + apply (erule_tac x=p in allE) + apply simp + apply (drule sym) + apply clarsimp + apply (frule (1) descendants_of_cte_at) + apply (drule (5) cte_map_inj_eq) + apply simp + done + +lemma updateCap_stuff: + assumes "(x, s'') \ fst (updateCap p cap s')" + shows "(ctes_of s'' = modify_map (ctes_of s') p (cteCap_update (K cap))) \ + gsUserPages s'' = gsUserPages s' \ + gsCNodes s'' = gsCNodes s' \ + ksMachineState s'' = ksMachineState s' \ + ksWorkUnitsCompleted s'' = ksWorkUnitsCompleted s' \ + ksCurThread s'' = ksCurThread s' \ + ksIdleThread s'' = ksIdleThread s' \ + ksReadyQueues s'' = ksReadyQueues s' \ + ksSchedulerAction s'' = ksSchedulerAction s' \ + (ksArchState s'' = ksArchState s') \ + (pspace_aligned' s' \ pspace_aligned' s'') \ + (pspace_distinct' s' \ pspace_distinct' s'')" using assms + apply (clarsimp simp: updateCap_def in_monad) + apply (drule use_valid [where P="\s. 
s2 = s" for s2, OF _ getCTE_sp refl]) + apply (rule conjI) + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_apply) + apply (frule setCTE_pspace_only) + apply (clarsimp simp: setCTE_def) + apply (intro conjI impI) + apply (erule(1) use_valid [OF _ setObject_aligned]) + apply (erule(1) use_valid [OF _ setObject_distinct]) + done + +(* FIXME: move *) +lemma pspace_relation_cte_wp_atI': + "\ pspace_relation (kheap s) (ksPSpace s'); + cte_wp_at' ((=) cte) x s'; valid_objs s \ + \ \c slot. cte_wp_at ((=) c) slot s \ cap_relation c (cteCap cte) \ x = cte_map slot" + apply (simp add: cte_wp_at_cases') + apply (elim disjE conjE exE) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm)[1] + apply (intro exI, rule conjI[OF _ conjI [OF _ refl]]) + apply (simp add: cte_wp_at_cases domI well_formed_cnode_invsI) + apply (simp split: if_split_asm) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE, simp_all split: if_split_asm) + apply (simp add: other_obj_relation_def + split: Structures_A.kernel_object.split_asm + AARCH64_A.arch_kernel_obj.split_asm) + apply (subgoal_tac "n = x - y", clarsimp) + apply (drule tcb_cases_related2, clarsimp) + apply (intro exI, rule conjI) + apply (erule(1) cte_wp_at_tcbI[where t="(a, b)" for a b, simplified]) + apply fastforce + apply simp + apply clarsimp + done + +lemma pspace_relation_cte_wp_atI: + "\ pspace_relation (kheap s) (ksPSpace s'); + ctes_of (s' :: kernel_state) x = Some cte; valid_objs s \ + \ \c slot. cte_wp_at ((=) c) slot s \ cap_relation c (cteCap cte) \ x = cte_map slot" + apply (erule pspace_relation_cte_wp_atI'[where x=x]) + apply (simp add: cte_wp_at_ctes_of) + apply assumption + done + +lemma sameRegion_corres: + "\ sameRegionAs c' d'; cap_relation c c'; cap_relation d d' \ + \ same_region_as c d" + by (simp add: same_region_as_relation) + +lemma is_final_cap_unique: + assumes cte: "ctes_of s' (cte_map slot) = Some cte" + assumes fin: "cte_wp_at (\c. 
final_matters c \ is_final_cap' c s) slot s" + assumes psr: "pspace_relation (kheap s) (ksPSpace s')" + assumes cte': "ctes_of s' x = Some cte'" + assumes neq: "x \ cte_map slot" + assumes region: "sameRegionAs (cteCap cte) (cteCap cte')" + assumes valid: "pspace_aligned s" "valid_objs s" "pspace_aligned' s'" "pspace_distinct' s'" + shows "False" +proof - + from fin obtain c where + c: "cte_wp_at ((=) c) slot s" and + final: "is_final_cap' c s" and + fm: "final_matters c" + by (auto simp add: cte_wp_at_cases) + with valid psr cte + have cr: "cap_relation c (cteCap cte)" + by (auto dest!: pspace_relation_ctes_ofI) + from cte' psr valid + obtain slot' c' where + c': "cte_wp_at ((=) c') slot' s" and + cr': "cap_relation c' (cteCap cte')" and + x: "x = cte_map slot'" + by (auto dest!: pspace_relation_cte_wp_atI) + with neq + have s: "slot \ slot'" by clarsimp + from region cr cr' + have reg: "same_region_as c c'" by (rule sameRegion_corres) + hence fm': "final_matters c'" using fm + apply - + apply (rule ccontr) + apply (simp add: final_matters_def split: cap.split_asm arch_cap.split_asm) + done + hence ref: "obj_refs c = obj_refs c'" using fm reg + apply (simp add: final_matters_def split: cap.split_asm arch_cap.split_asm) + done + have irq: "cap_irqs c = cap_irqs c'" using reg fm fm' + by (simp add: final_matters_def split: cap.split_asm) + have arch_ref: "arch_gen_refs c = arch_gen_refs c'" using fm reg + by (clarsimp simp: final_matters_def is_cap_simps + split: cap.split_asm arch_cap.split_asm) + + from final have refs_non_empty: "obj_refs c \ {} \ cap_irqs c \ {} \ arch_gen_refs c \ {}" + by (clarsimp simp add: is_final_cap'_def gen_obj_refs_def) + + define S where "S \ {cref. \cap'. fst (get_cap cref s) = {(cap', s)} \ + (gen_obj_refs c \ gen_obj_refs cap' \ {})}" + + have "is_final_cap' c s = (\cref. S = {cref})" + by (simp add: is_final_cap'_def S_def) + moreover + from c refs_non_empty + have "slot \ S" by (simp add: S_def cte_wp_at_def gen_obj_refs_def) + moreover + from c' refs_non_empty ref irq arch_ref + have "slot' \ S" by (simp add: S_def cte_wp_at_def gen_obj_refs_def) + ultimately + show False using s final by auto +qed + +lemma obj_refs_relation_Master: + "cap_relation cap cap' \ + obj_refs cap = (if isUntypedCap (capMasterCap cap') then {} + else if capClass (capMasterCap cap') = PhysicalClass + then {capUntypedPtr (capMasterCap cap')} + else {})" + by (simp add: isCap_simps + split: cap_relation_split_asm arch_cap.split_asm) + +lemma cap_irqs_relation_Master: + "cap_relation cap cap' \ + cap_irqs cap = (case capMasterCap cap' of IRQHandlerCap irq \ {irq} | _ \ {})" + by (simp split: cap_relation_split_asm arch_cap.split_asm) + +lemma arch_gen_refs_relation_Master: + "cap_relation cap cap' \ arch_gen_refs cap = {}" + by (simp split: cap_relation_split_asm arch_cap.split_asm) + +lemma is_final_cap_unique_sym: + assumes cte: "ctes_of s' (cte_map slot) = Some cte" + assumes fin: "cte_wp_at (\c. 
is_final_cap' c s) slot s" + assumes psr: "pspace_relation (kheap s) (ksPSpace s')" + assumes cte': "ctes_of s' x = Some cte'" + assumes neq: "x \ cte_map slot" + assumes master: "capMasterCap (cteCap cte') = capMasterCap (cteCap cte)" + assumes valid: "pspace_aligned s" "valid_objs s" "pspace_aligned' s'" "pspace_distinct' s'" + shows "False" +proof - + from fin obtain c where + c: "cte_wp_at ((=) c) slot s" and + final: "is_final_cap' c s" + by (auto simp add: cte_wp_at_cases) + with valid psr cte + have cr: "cap_relation c (cteCap cte)" + by (auto dest!: pspace_relation_ctes_ofI) + from cte' psr valid + obtain slot' c' where + c': "cte_wp_at ((=) c') slot' s" and + cr': "cap_relation c' (cteCap cte')" and + x: "x = cte_map slot'" + by (auto dest!: pspace_relation_cte_wp_atI) + with neq + have s: "slot \ slot'" by clarsimp + have irq: "cap_irqs c = cap_irqs c'" + using master cr cr' + by (simp add: cap_irqs_relation_Master) + have ref: "obj_refs c = obj_refs c'" + using master cr cr' + by (simp add: obj_refs_relation_Master) + have arch_ref: "arch_gen_refs c = arch_gen_refs c'" + using master cr cr' + by (clarsimp simp: arch_gen_refs_relation_Master) + + from final have refs_non_empty: "obj_refs c \ {} \ cap_irqs c \ {} \ arch_gen_refs c \ {}" + by (clarsimp simp add: is_final_cap'_def gen_obj_refs_def) + + define S where "S \ {cref. \cap'. fst (get_cap cref s) = {(cap', s)} \ + (gen_obj_refs c \ gen_obj_refs cap' \ {})}" + + have "is_final_cap' c s = (\cref. S = {cref})" + by (simp add: is_final_cap'_def S_def) + moreover + from c refs_non_empty + have "slot \ S" by (simp add: S_def cte_wp_at_def gen_obj_refs_def) + moreover + from c' refs_non_empty ref irq arch_ref + have "slot' \ S" by (simp add: S_def cte_wp_at_def gen_obj_refs_def) + ultimately + show False using s final by auto +qed + +lemma isMDBParent_sameRegion: + "isMDBParentOf cte cte' \ sameRegionAs (cteCap cte) (cteCap cte')" + by (simp add: isMDBParentOf_def split: cte.split_asm if_split_asm) + +lemma no_loops_no_subtree: + "no_loops m \ m \ x \ x = False" + apply clarsimp + apply (drule subtree_mdb_next) + apply (simp add: no_loops_def) + done + +definition + "caps_contained2 m \ + \c c' cap n cap' n'. + m c = Some (CTE cap n) \ m c' = Some (CTE cap' n') \ + (isCNodeCap cap' \ isThreadCap cap') \ + capUntypedPtr cap' \ untypedRange cap \ + capUntypedPtr cap' + capUntypedSize cap' - 1 \ untypedRange cap" + +lemma capUntypedPtr_capRange: + "\ ctes_of s p = Some (CTE cap node); + capClass cap = PhysicalClass; + valid_objs' s \ \ + capUntypedPtr cap \ capRange cap" + apply (erule capAligned_capUntypedPtr[rotated]) + apply (drule (1) ctes_of_valid_cap') + apply (erule valid_capAligned) + done + +lemma descendants_of_update_ztc: + assumes c: "\x. \ m \ x \ slot; \ P \ \ + \cte'. m x = Some cte' + \ capMasterCap (cteCap cte') \ capMasterCap (cteCap cte) + \ sameRegionAs (cteCap cte') (cteCap cte)" + assumes m: "m slot = Some cte" + assumes z: "isZombie cap \ isCNodeCap cap \ isThreadCap cap" + defines "cap' \ cteCap cte" + assumes F: "\x cte'. \ m x = Some cte'; x \ slot; P \ + \ isUntypedCap (cteCap cte') \ capClass (cteCap cte') \ PhysicalClass + \ capUntypedPtr (cteCap cte') \ capUntypedPtr cap'" + assumes pu: "capRange cap' = capRange cap \ capUntypedPtr cap' = capUntypedPtr cap" + assumes a: "capAligned cap'" + assumes t: "isZombie cap' \ isCNodeCap cap' \ isThreadCap cap'" + assumes n: "no_loops m" + defines "m' \ m(slot \ cteCap_update (\_. 
cap) cte)" + shows "((c \ slot \ P) \ descendants_of' c m \ descendants_of' c m') + \ (P \ descendants_of' c m' \ descendants_of' c m)" +proof (simp add: descendants_of'_def subset_iff, + simp only: all_simps(6)[symmetric], intro conjI allI) + note isMDBParentOf_CTE[simp] + + have utp: "capUntypedPtr cap' \ capRange cap'" + using t a + by (auto elim!: capAligned_capUntypedPtr simp: isCap_simps) + + have ztc_parent: "\cap cap'. isZombie cap \ isCNodeCap cap \ isThreadCap cap + \ sameRegionAs cap cap' + \ capUntypedPtr cap = capUntypedPtr cap' + \ capClass cap' = PhysicalClass \ \ isUntypedCap cap'" + by (auto simp: isCap_simps sameRegionAs_def3) + + have ztc_child: "\cap cap'. isZombie cap \ isCNodeCap cap \ isThreadCap cap + \ sameRegionAs cap' cap + \ capClass cap' = PhysicalClass \ + (isUntypedCap cap' \ capUntypedPtr cap' = capUntypedPtr cap)" + by (auto simp: isCap_simps sameRegionAs_def3) + + have notparent: "\x cte'. \ m x = Some cte'; x \ slot; P \ + \ \ isMDBParentOf cte cte'" + using t utp + apply clarsimp + apply (drule_tac cte'=cte' in F, simp+) + apply (simp add: cap'_def) + apply (cases cte, case_tac cte', clarsimp) + apply (frule(1) ztc_parent, clarsimp) + done + + have notparent2: "\x cte'. \ m x = Some cte'; x \ slot; P \ + \ \ isMDBParentOf (cteCap_update (\_. cap) cte) cte'" + using z utp + apply clarsimp + apply (drule_tac cte'=cte' in F, simp+) + apply (cases cte, case_tac cte', clarsimp) + apply (frule(1) ztc_parent) + apply (clarsimp simp: pu) + done + + fix x + { assume cx: "m \ c \ x" and cP: "c \ slot \ P" + hence c_neq_x [simp]: "c \ x" + by (clarsimp simp: n no_loops_no_subtree) + from cx c_neq_x cP m + have s_neq_c [simp]: "c \ slot" + apply (clarsimp simp del: c_neq_x) + apply (drule subtree_parent) + apply (clarsimp simp: parentOf_def notparent) + done + + have parent: "\x cte'. \ m x = Some cte'; isMDBParentOf cte' cte; m \ x \ slot; x \ slot \ + \ isMDBParentOf cte' (cteCap_update (\_. cap) cte)" + using t z pu + apply - + apply (cases P) + apply (frule(2) F) + apply (clarsimp simp: cap'_def) + apply (case_tac cte') + apply (rename_tac capability mdbnode) + apply (case_tac cte) + apply clarsimp + apply (frule(1) ztc_child) + apply (case_tac "isUntypedCap capability") + apply (simp add: isCap_simps) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply simp + apply (frule(1) c, clarsimp) + apply (clarsimp simp: cap'_def) + apply (case_tac cte') + apply (case_tac cte) + apply clarsimp + apply (erule sameRegionAsE) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps)+ + done + + from cx + have "m' \ c \ x" + proof induct + case (direct_parent c') + hence "m \ c \ c'" by (rule subtree.direct_parent) + with direct_parent m + show ?case + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp add: mdb_next_unfold m'_def m) + apply assumption + apply (clarsimp simp: parentOf_def) + apply (clarsimp simp add: m'_def) + apply (erule(2) parent) + apply simp + done + next + case (trans_parent c' c'') + moreover + from trans_parent + have "m \ c \ c''" by - (rule subtree.trans_parent) + ultimately + show ?case using z m pu t + apply - + apply (erule subtree.trans_parent) + apply (clarsimp simp: mdb_next_unfold m'_def m) + apply assumption + apply (clarsimp simp: parentOf_def m'_def) + apply (erule(2) parent) + apply simp + done + qed + } + thus "(c = slot \ P) \ m \ c \ x \ m' \ c \ x" + by blast + + { assume subcx: "m' \ c \ x" and P: "P" + + have mdb_next_eq: "\x y. 
m' \ x \ y = m \ x \ y" + by (simp add: mdb_next_unfold m'_def m) + have mdb_next_eq_trans: "\x y. m' \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + apply (rule arg_cong[where f="\S. v \ S\<^sup>+" for v]) + apply (simp add: set_eq_iff mdb_next_eq) + done + + have subtree_neq: "\x y. m' \ x \ y \ x \ y" + apply clarsimp + apply (drule subtree_mdb_next) + apply (clarsimp simp: mdb_next_eq_trans n no_loops_trancl_simp) + done + + have parent2: "\x cte'. \ m x = Some cte'; isMDBParentOf cte' (cteCap_update (\_. cap) cte); + x \ slot \ + \ isMDBParentOf cte' cte" + using t z pu P + apply (drule_tac cte'=cte' in F, simp, simp) + apply (simp add: cap'_def) + apply (cases cte) + apply (case_tac cte') + apply (rename_tac cap' node') + apply (clarsimp) + apply (frule(1) ztc_child) + apply (case_tac "isUntypedCap cap'") + apply (simp add:isCap_simps) + apply (clarsimp simp: isCap_simps sameRegionAs_def3) + apply clarsimp + done + + from subcx have "m \ c \ x" + proof induct + case (direct_parent c') + thus ?case + using subtree_neq [OF subtree.direct_parent [OF direct_parent(1-3)]] + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp: mdb_next_unfold m'_def m split: if_split_asm) + apply assumption + apply (insert z m t pu) + apply (simp add: cap'_def) + apply (simp add: m'_def parentOf_def split: if_split_asm) + apply (clarsimp simp: parent2) + apply (clarsimp simp add: notparent2 [OF _ _ P]) + done + next + case (trans_parent c' c'') + thus ?case + using subtree_neq [OF subtree.trans_parent [OF trans_parent(1, 3-5)]] + apply - + apply (erule subtree.trans_parent) + apply (clarsimp simp: mdb_next_unfold m'_def m split: if_split_asm) + apply assumption + apply (insert z m t pu) + apply (simp add: cap'_def) + apply (simp add: m'_def parentOf_def split: if_split_asm) + apply (clarsimp simp: parent2) + apply (clarsimp simp: notparent2 [OF _ _ P]) + done + qed + } + thus "P \ m' \ c \ x \ m \ c \ x" + by simp +qed + +lemma use_update_ztc_one: + "((c \ slot \ True) \ descendants_of' c m \ descendants_of' c m') + \ (True \ descendants_of' c m' \ descendants_of' c m) + \ descendants_of' c m = descendants_of' c m'" + by clarsimp + +lemma use_update_ztc_two: + "((c \ slot \ False) \ descendants_of' c m \ descendants_of' c m') + \ (False \ descendants_of' c m' \ descendants_of' c m) + \ descendants_of' slot m = {} + \ descendants_of' c m \ descendants_of' c m'" + by auto + +lemmas cte_wp_at'_obj_at' = cte_wp_at_obj_cases' + +lemma cte_at'_obj_at': + "cte_at' addr s = (obj_at' (\_ :: cte. True) addr s + \ (\n \ dom tcb_cte_cases. 
tcb_at' (addr - n) s))" + by (simp add: cte_wp_at'_obj_at') + +lemma ctes_of_valid: + "\ cte_wp_at' ((=) cte) p s; valid_objs' s \ + \ s \' cteCap cte" + apply (simp add: cte_wp_at'_obj_at') + apply (erule disjE) + apply (subgoal_tac "ko_at' cte p s") + apply (drule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def valid_cte'_def) + apply (simp add: obj_at'_def cte_level_bits_def objBits_simps) + apply clarsimp + apply (drule obj_at_ko_at') + apply clarsimp + apply (drule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def valid_tcb'_def) + apply (fastforce) + done + +lemma no_fail_setCTE [wp]: + "no_fail (cte_at' p) (setCTE p c)" + apply (clarsimp simp: setCTE_def setObject_def split_def unless_def + updateObject_cte alignCheck_def alignError_def + typeError_def is_aligned_mask[symmetric] + cong: kernel_object.case_cong) + apply (wp|wpc)+ + apply (clarsimp simp: cte_wp_at'_def getObject_def split_def + in_monad loadObject_cte + dest!: in_singleton + split del: if_split) + apply (clarsimp simp: typeError_def alignCheck_def alignError_def + in_monad is_aligned_mask[symmetric] objBits_simps + magnitudeCheck_def + split: kernel_object.split_asm if_split_asm option.splits + split del: if_split) + apply simp_all + done + +lemma no_fail_updateCap [wp]: + "no_fail (cte_at' p) (updateCap p cap')" + apply (simp add: updateCap_def) + apply (rule no_fail_pre, wp) + apply simp + done + +lemma capRange_cap_relation: + "\ cap_relation cap cap'; cap_relation cap cap' \ capClass cap' = PhysicalClass \ + \ capRange cap' = {obj_ref_of cap .. obj_ref_of cap + obj_size cap - 1}" + by (simp add: capRange_def objBits_simps' cte_level_bits_def + asid_low_bits_def zbits_map_def bit_simps + split: cap_relation_split_asm arch_cap.split_asm + option.split sum.split) + +lemma cap_relation_untyped_ptr_obj_refs: + "cap_relation cap cap' \ capClass cap' = PhysicalClass \ \ isUntypedCap cap' + \ capUntypedPtr cap' \ obj_refs cap" + by (clarsimp simp add: isCap_simps + simp del: not_ex + split: cap_relation_split_asm arch_cap.split_asm) + +lemma obj_refs_cap_relation_untyped_ptr: + "\ cap_relation cap cap'; obj_refs cap \ {} \ \ capUntypedPtr cap' \ obj_refs cap" + by (clarsimp split: cap_relation_split_asm arch_cap.split_asm) + +lemma is_final_untyped_ptrs: + "\ ctes_of (s' :: kernel_state) (cte_map slot) = Some cte; ctes_of s' y = Some cte'; cte_map slot \ y; + pspace_relation (kheap s) (ksPSpace s'); valid_objs s; pspace_aligned s; pspace_distinct s; + cte_wp_at (\cap. 
is_final_cap' cap s \ obj_refs cap \ {}) slot s \ + \ capClass (cteCap cte') \ PhysicalClass + \ isUntypedCap (cteCap cte') + \ capUntypedPtr (cteCap cte) \ capUntypedPtr (cteCap cte')" + apply clarsimp + apply (drule(2) pspace_relation_cte_wp_atI[rotated])+ + apply clarsimp + apply (drule_tac s=s in cte_map_inj_eq, + (clarsimp elim!: cte_wp_at_weakenE[OF _ TrueI])+) + apply (clarsimp simp: cte_wp_at_def) + apply (erule(3) final_cap_duplicate [where r="ObjRef (capUntypedPtr (cteCap cte))", + OF _ _ distinct_lemma[where f=cte_map]]) + apply (rule obj_ref_is_gen_obj_ref) + apply (erule(1) obj_refs_cap_relation_untyped_ptr) + apply (rule obj_ref_is_gen_obj_ref) + apply (erule(1) obj_refs_cap_relation_untyped_ptr) + apply (rule obj_ref_is_gen_obj_ref) + apply (drule(2) cap_relation_untyped_ptr_obj_refs)+ + apply simp + done + +lemma capClass_ztc_relation: + "\ is_zombie c \ is_cnode_cap c \ is_thread_cap c; + cap_relation c c' \ \ capClass c' = PhysicalClass" + by (auto simp: is_cap_simps) + +lemma pspace_relationsD: + "\pspace_relation kh kh'; ekheap_relation ekh kh'\ \ pspace_relations ekh kh kh'" + by (simp add: pspace_relations_def) + +lemma updateCap_corres: + "\cap_relation cap cap'; + is_zombie cap \ is_cnode_cap cap \ is_thread_cap cap \ + \ corres dc (\s. invs s \ + cte_wp_at (\c. (is_zombie c \ is_cnode_cap c \ + is_thread_cap c) \ + is_final_cap' c s \ + obj_ref_of c = obj_ref_of cap \ + obj_size c = obj_size cap) slot s) + invs' + (set_cap cap slot) (updateCap (cte_map slot) cap')" + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp) + apply clarsimp + apply (drule cte_wp_at_norm) + apply (clarsimp simp: state_relation_def) + apply (drule (1) pspace_relation_ctes_ofI) + apply fastforce + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp add: state_relation_def) + apply (drule(1) pspace_relationsD) + apply (frule (3) set_cap_not_quite_corres; fastforce?) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply clarsimp + apply (rule bexI) + prefer 2 + apply simp + apply (clarsimp simp: in_set_cap_cte_at_swp pspace_relations_def) + apply (drule updateCap_stuff) + apply simp + apply (rule conjI) + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (rule conjI) + prefer 2 + apply (rule conjI) + apply (unfold cdt_list_relation_def)[1] + apply (intro allI impI) + apply (erule_tac x=c in allE) + apply (auto elim!: modify_map_casesE)[1] + apply (unfold revokable_relation_def)[1] + apply (drule set_cap_caps_of_state_monad) + apply (simp add: cte_wp_at_caps_of_state del: split_paired_All) + apply (intro allI impI) + apply (erule_tac x=c in allE) + apply (erule impE[where P="\y. 
v = Some y" for v]) + apply (clarsimp simp: null_filter_def is_zombie_def split: if_split_asm) + apply (auto elim!: modify_map_casesE del: disjE)[1] (* slow *) + apply (case_tac "ctes_of b (cte_map slot)") + apply (simp add: modify_map_None) + apply (simp add: modify_map_apply) + apply (simp add: cdt_relation_def del: split_paired_All) + apply (intro allI impI) + apply (rule use_update_ztc_one [OF descendants_of_update_ztc]) + apply simp + apply assumption + apply (auto simp: is_cap_simps isCap_simps)[1] + apply (frule(3) is_final_untyped_ptrs [OF _ _ not_sym], clarsimp+) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (simp add: is_cap_simps, elim disjE exE, simp_all)[1] + apply (simp add: eq_commute) + apply (drule cte_wp_at_norm, clarsimp) + apply (drule(1) pspace_relation_ctes_ofI, clarsimp+) + apply (drule(1) capClass_ztc_relation)+ + apply (simp add: capRange_cap_relation obj_ref_of_relation[symmetric]) + apply (rule valid_capAligned, rule ctes_of_valid) + apply (simp add: cte_wp_at_ctes_of) + apply clarsimp + apply (drule cte_wp_at_norm, clarsimp) + apply (drule(1) pspace_relation_ctes_ofI, clarsimp+) + apply (simp add: is_cap_simps, elim disjE exE, simp_all add: isCap_simps)[1] + apply clarsimp + done + +lemma exst_set_cap: + "(x,s') \ fst (set_cap p c s) \ exst s' = exst s" + by (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + +lemma updateMDB_eqs: + assumes "(x, s'') \ fst (updateMDB p f s')" + shows "ksMachineState s'' = ksMachineState s' \ + ksWorkUnitsCompleted s'' = ksWorkUnitsCompleted s' \ + ksCurThread s'' = ksCurThread s' \ + ksIdleThread s'' = ksIdleThread s' \ + ksReadyQueues s'' = ksReadyQueues s' \ + ksInterruptState s'' = ksInterruptState s' \ + ksArchState s'' = ksArchState s' \ + ksSchedulerAction s'' = ksSchedulerAction s' \ + gsUserPages s'' = gsUserPages s' \ + gsCNodes s'' = gsCNodes s' \ + ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ + ksDomSchedule s'' = ksDomSchedule s' \ + ksCurDomain s'' = ksCurDomain s' \ + ksDomainTime s'' = ksDomainTime s'" using assms + apply (clarsimp simp: updateMDB_def Let_def in_monad split: if_split_asm) + apply (drule in_inv_by_hoareD [OF getCTE_inv]) + apply (clarsimp simp: setCTE_def setObject_def in_monad split_def) + apply (drule in_inv_by_hoareD [OF updateObject_cte_inv]) + apply simp + done + +lemma updateMDB_pspace_relation: + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "pspace_relation (kheap s) (ksPSpace s')" + assumes "pspace_aligned' s'" "pspace_distinct' s'" + shows "pspace_relation (kheap s) (ksPSpace s'')" using assms + apply (clarsimp simp: updateMDB_def Let_def in_monad split: if_split_asm) + apply (drule_tac P="(=) s'" in use_valid [OF _ getCTE_sp], rule refl) + apply clarsimp + apply (clarsimp simp: setCTE_def setObject_def in_monad + split_def) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (elim disjE conjE exE) + apply (clarsimp simp: cte_wp_at_cases' lookupAround2_char1) + apply (erule disjE) + apply (clarsimp simp: tcb_ctes_clear cte_level_bits_def objBits_defs) + apply clarsimp + apply (rule pspace_dom_relatedE, assumption+) + apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] + apply (clarsimp split: Structures_A.kernel_object.split_asm + AARCH64_A.arch_kernel_obj.split_asm + simp: other_obj_relation_def) + apply (frule(1) tcb_cte_cases_aligned_helpers(1)) + apply (frule(1) tcb_cte_cases_aligned_helpers(2)) + apply (clarsimp simp del: 
diff_neg_mask) + apply (subst map_upd_triv[symmetric, where t="kheap s"], assumption) + apply (erule(2) pspace_relation_update_tcbs) + apply (case_tac tcba) + apply (simp add: tcb_cte_cases_def tcb_relation_def del: diff_neg_mask + split: if_split_asm) + apply (clarsimp simp: cte_wp_at_cases') + apply (erule disjE) + apply (rule pspace_dom_relatedE, assumption+) + apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] + apply (clarsimp simp: cte_relation_def) + apply (simp add: pspace_relation_def dom_fun_upd2 + del: dom_fun_upd) + apply (erule conjE) + apply (rule ballI, drule(1) bspec) + apply (rule ballI, drule(1) bspec) + apply clarsimp + apply (rule obj_relation_cutsE, assumption+, simp_all split: if_split_asm)[1] + apply (clarsimp simp: cte_relation_def) + apply clarsimp + apply (drule_tac y=p in tcb_ctes_clear[rotated], assumption+) + apply fastforce + apply fastforce + done + +lemma updateMDB_ekheap_relation: + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "ekheap_relation (ekheap s) (ksPSpace s')" + shows "ekheap_relation (ekheap s) (ksPSpace s'')" using assms + apply (clarsimp simp: updateMDB_def Let_def setCTE_def setObject_def in_monad ekheap_relation_def etcb_relation_def split_def split: if_split_asm) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (drule_tac P="(=) s'" in use_valid [OF _ getCTE_sp], rule refl) + apply (drule bspec, erule domI) + apply (clarsimp simp: tcb_cte_cases_def lookupAround2_char1 split: if_split_asm) + done + +lemma updateMDB_pspace_relations: + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "pspace_relations (ekheap s) (kheap s) (ksPSpace s')" + assumes "pspace_aligned' s'" "pspace_distinct' s'" + shows "pspace_relations (ekheap s) (kheap s) (ksPSpace s'')" using assms + by (simp add: pspace_relations_def updateMDB_pspace_relation updateMDB_ekheap_relation) + +lemma updateMDB_ctes_of: + assumes "(x, s') \ fst (updateMDB p f s)" + assumes "no_0 (ctes_of s)" + shows "ctes_of s' = modify_map (ctes_of s) p (cteMDBNode_update f)" + using assms + apply (clarsimp simp: valid_def) + apply (drule use_valid) + apply (rule updateMDB_ctes_of_no_0) + prefer 2 + apply assumption + apply simp + done + +crunch aligned[wp]: updateMDB "pspace_aligned'" +crunch pdistinct[wp]: updateMDB "pspace_distinct'" + +lemma updateMDB_the_lot: + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "pspace_relations (ekheap s) (kheap s) (ksPSpace s')" + assumes "pspace_aligned' s'" "pspace_distinct' s'" "no_0 (ctes_of s')" + shows "ctes_of s'' = modify_map (ctes_of s') p (cteMDBNode_update f) \ + ksMachineState s'' = ksMachineState s' \ + ksWorkUnitsCompleted s'' = ksWorkUnitsCompleted s' \ + ksCurThread s'' = ksCurThread s' \ + ksIdleThread s'' = ksIdleThread s' \ + ksReadyQueues s'' = ksReadyQueues s' \ + ksSchedulerAction s'' = ksSchedulerAction s' \ + ksInterruptState s'' = ksInterruptState s' \ + ksArchState s'' = ksArchState s' \ + gsUserPages s'' = gsUserPages s' \ + gsCNodes s'' = gsCNodes s' \ + pspace_relations (ekheap s) (kheap s) (ksPSpace s'') \ + pspace_aligned' s'' \ pspace_distinct' s'' \ + no_0 (ctes_of s'') \ + ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ + ksDomSchedule s'' = ksDomSchedule s' \ + ksCurDomain s'' = ksCurDomain s' \ + ksDomainTime s'' = ksDomainTime s'" +using assms + apply (simp add: updateMDB_eqs updateMDB_pspace_relations split del: if_split) + apply (frule (1) updateMDB_ctes_of) + apply clarsimp + apply (rule conjI) + apply (erule use_valid) + apply wp + apply simp + apply 
(erule use_valid) + apply wp + apply simp + done + +lemma is_cap_revocable_eq: + "\ cap_relation c c'; cap_relation src_cap src_cap'; sameRegionAs src_cap' c'; + is_untyped_cap src_cap \ \ is_ep_cap c \ \ is_ntfn_cap c\ + \ is_cap_revocable c src_cap = isCapRevocable c' src_cap'" + apply (clarsimp simp: isCap_simps objBits_simps bit_simps arch_is_cap_revocable_def + bits_of_def is_cap_revocable_def Retype_H.isCapRevocable_def + sameRegionAs_def3 isCapRevocable_def + split: cap_relation_split_asm arch_cap.split_asm) + done + +lemma isMDBParentOf_prev_update [simp]: + "isMDBParentOf (cteMDBNode_update (mdbPrev_update f) cte) cte' = + isMDBParentOf cte cte'" + "isMDBParentOf cte (cteMDBNode_update (mdbPrev_update f) cte') = + isMDBParentOf cte cte'" + apply (cases cte) + apply (cases cte') + apply (simp add: isMDBParentOf_def) + apply (cases cte) + apply (cases cte') + apply (clarsimp simp: isMDBParentOf_def) + done + +lemma prev_update_subtree [simp]: + "modify_map m' x (cteMDBNode_update (mdbPrev_update f)) \ a \ b = m' \ a \ b" + (is "?m' = ?m") +proof + assume "?m" + thus ?m' + proof induct + case (direct_parent c) + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp add: mdb_next_unfold modify_map_def) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def) + apply fastforce + done + next + case (trans_parent c c') + thus ?case + apply - + apply (rule subtree.trans_parent) + apply (rule trans_parent.hyps) + apply (clarsimp simp add: mdb_next_unfold modify_map_def) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def) + apply fastforce + done + qed +next + assume "?m'" + thus ?m + proof induct + case (direct_parent c) + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (clarsimp simp add: mdb_next_unfold modify_map_def split: if_split_asm) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def split: if_split_asm) + done + next + case (trans_parent c c') + thus ?case + apply - + apply (rule subtree.trans_parent) + apply (rule trans_parent.hyps) + apply (clarsimp simp add: mdb_next_unfold modify_map_def split: if_split_asm) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def split: if_split_asm) + done + qed +qed + +lemma prev_update_modify_mdb_relation: + "cdt_relation c m (modify_map m' x (cteMDBNode_update (mdbPrev_update f))) + = cdt_relation c m m'" + by (fastforce simp: cdt_relation_def descendants_of'_def) + +lemma subtree_prev_0: + assumes s: "m \ a \ b" + assumes n: "m b = Some cte" "mdbPrev (cteMDBNode cte) = 0" + assumes d: "valid_dlist m" + assumes 0: "no_0 m" + shows "False" using s n +proof induct + case (direct_parent c) + have "m \ a \ c" by fact+ + then obtain cte' where a: "m a = Some cte'" and "mdbNext (cteMDBNode cte') = c" + by (auto simp add: mdb_next_unfold) + moreover + have "m c = Some cte" by fact+ + moreover + have "c \ 0" by fact+ + ultimately + have "mdbPrev (cteMDBNode cte) = a" using d + by (fastforce simp add: valid_dlist_def Let_def) + moreover + have "mdbPrev (cteMDBNode cte) = 0" by fact+ + moreover + from a have "a \ 0" using assms by auto + ultimately + show False by simp +next + case (trans_parent c' c) + have "m \ c' \ c" by fact+ + then obtain cte' where c': "m c' = Some cte'" and "mdbNext (cteMDBNode cte') = c" + by (auto simp add: mdb_next_unfold) + moreover + have "m c = Some cte" by fact+ + moreover + have "c \ 0" by fact+ + ultimately + have "mdbPrev (cteMDBNode cte) = c'" using d + by (fastforce simp add: 
valid_dlist_def Let_def) + moreover + have "mdbPrev (cteMDBNode cte) = 0" by fact+ + moreover + from c' have "c' \ 0" using assms by auto + ultimately + show False by simp +qed + +lemma subtree_next_0: + assumes s: "m \ a \ b" + assumes n: "m a = Some cte" "mdbNext (cteMDBNode cte) = 0" + shows "False" using s n + by induct (auto simp: mdb_next_unfold) + +definition + "isArchCap P cap \ case cap of ArchObjectCap acap \ P acap | _ \ False" + +lemma isArchCap_simps[simp]: + "isArchCap P (capability.ThreadCap xc) = False" + "isArchCap P capability.NullCap = False" + "isArchCap P capability.DomainCap = False" + "isArchCap P (capability.NotificationCap xca xba xaa xd) = False" + "isArchCap P (capability.EndpointCap xda xcb xbb xab xe xi) = False" + "isArchCap P (capability.IRQHandlerCap xf) = False" + "isArchCap P (capability.Zombie xbc xac xg) = False" + "isArchCap P (capability.ArchObjectCap xh) = P xh" + "isArchCap P (capability.ReplyCap xad xi xia) = False" + "isArchCap P (capability.UntypedCap d xae xj f) = False" + "isArchCap P (capability.CNodeCap xfa xea xdb xcc) = False" + "isArchCap P capability.IRQControlCap = False" + by (simp add: isArchCap_def)+ + +definition + "badge_derived' cap' cap \ + capMasterCap cap = capMasterCap cap' \ + (capBadge cap, capBadge cap') \ capBadge_ordering False" + +definition vs_cap_ref_arch' :: "arch_capability \ (asid \ vspace_ref) option" where + "vs_cap_ref_arch' acap \ + case acap of + ASIDPoolCap _ asid \ Some (asid, 0) + | ASIDControlCap \ None + | FrameCap _ _ _ _ m \ m + | PageTableCap _ _ m \ m" + +lemmas vs_cap_ref_arch'_simps[simp] = vs_cap_ref_arch'_def[split_simps arch_capability.split] + +definition + "vs_cap_ref' = arch_cap'_fun_lift vs_cap_ref_arch' None" + +lemmas vs_cap_ref'_simps[simp] = + vs_cap_ref'_def[THEN fun_cong, unfolded arch_cap'_fun_lift_def, split_simps capability.split] + +definition + "is_derived' m p cap' cap \ + cap' \ NullCap \ + \ isZombie cap \ + \ isIRQControlCap cap' \ + badge_derived' cap' cap \ + (isUntypedCap cap \ descendants_of' p m = {}) \ + (isReplyCap cap = isReplyCap cap') \ + (isReplyCap cap \ capReplyMaster cap) \ + (isReplyCap cap' \ \ capReplyMaster cap') \ + (vs_cap_ref' cap = vs_cap_ref' cap' \ isArchFrameCap cap) \ + (isArchCap isPageTableCap cap \ capASID cap = capASID cap' \ capASID cap \ None)" + +lemma zbits_map_eq[simp]: + "(zbits_map zbits = zbits_map zbits') = (zbits = zbits')" + by (simp add: zbits_map_def split: option.split sum.split) + +lemma master_cap_relation: + "\ cap_relation c c'; cap_relation d d' \ \ + (capMasterCap c' = capMasterCap d') = + (cap_master_cap c = cap_master_cap d)" + by (auto simp add: cap_master_cap_def capMasterCap_def split: cap.splits arch_cap.splits) + +lemma cap_badge_relation: + "\ cap_relation c c'; cap_relation d d' \ \ + (capBadge c' = capBadge d') = + (cap_badge c = cap_badge d)" + by (auto simp add: cap_badge_def split: cap.splits arch_cap.splits) + +lemma capBadge_ordering_relation: + "\ cap_relation c c'; cap_relation d d' \ \ + ((capBadge c', capBadge d') \ capBadge_ordering f) = + ((cap_badge c, cap_badge d) \ capBadge_ordering f)" + apply (cases c) + by (auto simp add: cap_badge_def capBadge_ordering_def split: cap.splits) + +lemma is_reply_cap_relation: + "cap_relation c c' \ is_reply_cap c = (isReplyCap c' \ \ capReplyMaster c')" + by (cases c, auto simp: is_cap_simps isCap_simps) + +lemma is_reply_master_relation: + "cap_relation c c' \ + is_master_reply_cap c = (isReplyCap c' \ capReplyMaster c')" + by (cases c, auto simp add: is_cap_simps 
isCap_simps) + +lemma cap_asid_cap_relation: + "cap_relation c c' \ capASID c' = map_option ucast (cap_asid c)" + by (auto simp: capASID_def cap_asid_def split: cap.splits arch_cap.splits option.splits) + +lemma isArchCapE[elim!]: + "\ isArchCap P cap; \arch_cap. cap = ArchObjectCap arch_cap \ P arch_cap \ Q \ \ Q" + by (cases cap, simp_all) + +lemma is_derived_eq: + "\ cap_relation c c'; cap_relation d d'; + cdt_relation (swp cte_at s) (cdt s) (ctes_of s'); cte_at p s \ \ + is_derived (cdt s) p c d = is_derived' (ctes_of s') (cte_map p) c' d'" + unfolding cdt_relation_def + apply (erule allE, erule impE, simp) + apply (clarsimp simp: is_derived_def is_derived'_def badge_derived'_def) + apply (rule conjI) + apply (clarsimp simp: is_cap_simps isCap_simps) + apply (cases c, auto simp: isCap_simps cap_master_cap_def capMasterCap_def)[1] + apply (case_tac "isIRQControlCap d'") + apply (frule(1) master_cap_relation) + apply (clarsimp simp: isCap_simps cap_master_cap_def + is_zombie_def is_reply_cap_def is_master_reply_cap_def + split: cap_relation_split_asm arch_cap.split_asm)[1] + apply (frule(1) master_cap_relation) + apply (frule(1) cap_badge_relation) + apply (frule cap_asid_cap_relation) + apply (frule(1) capBadge_ordering_relation) + apply (case_tac d) + apply (simp_all add: isCap_simps is_cap_simps cap_master_cap_def + capMasterCap_def + split: cap_relation_split_asm arch_cap.split_asm) + apply fastforce + apply (auto simp: up_ucast_inj_eq split:arch_cap.splits arch_capability.splits option.splits) + done +end + +locale masterCap = + fixes cap cap' + assumes master: "capMasterCap cap = capMasterCap cap'" +begin +interpretation Arch . (*FIXME: arch_split*) + +lemma isZombie [simp]: + "isZombie cap' = isZombie cap" using master + by (simp add: capMasterCap_def isZombie_def split: capability.splits) + +lemma isUntypedCap [simp]: + "isUntypedCap cap' = isUntypedCap cap" using master + by (simp add: capMasterCap_def isUntypedCap_def split: capability.splits) + +lemma isArchFrameCap [simp]: + "isArchFrameCap cap' = isArchFrameCap cap" using master + by (simp add: capMasterCap_def isArchFrameCap_def + split: capability.splits arch_capability.splits) + +lemma isIRQHandlerCap [simp]: + "isIRQHandlerCap cap' = isIRQHandlerCap cap" using master + by (simp add: capMasterCap_def isIRQHandlerCap_def split: capability.splits) + +lemma isEndpointCap [simp]: + "isEndpointCap cap' = isEndpointCap cap" using master + by (simp add: capMasterCap_def isEndpointCap_def split: capability.splits) + +lemma isNotificationCap [simp]: + "isNotificationCap cap' = isNotificationCap cap" using master + by (simp add: capMasterCap_def isNotificationCap_def split: capability.splits) + +lemma isIRQControlCap [simp]: + "isIRQControlCap cap' = isIRQControlCap cap" using master + by (simp add: capMasterCap_def isIRQControlCap_def split: capability.splits) + +lemma isReplyCap [simp]: + "isReplyCap cap' = isReplyCap cap" using master + by (simp add: capMasterCap_def isReplyCap_def split: capability.splits) + +lemma capRange [simp]: + "capRange cap' = capRange cap" using master + by (simp add: capRange_def capMasterCap_def split: capability.splits arch_capability.splits) + +lemma isDomain1: + "(cap' = DomainCap) = (cap = DomainCap)" using master + by (simp add: capMasterCap_def split: capability.splits) + +lemma isDomain2: + "(DomainCap = cap') = (DomainCap = cap)" using master + by (simp add: capMasterCap_def split: capability.splits) + +lemma isNull1: + "(cap' = NullCap) = (cap = NullCap)" using master + by (simp add: 
capMasterCap_def split: capability.splits) + +lemma isNull2: + "(NullCap = cap') = (NullCap = cap)" using master + by (simp add: capMasterCap_def split: capability.splits) + +lemmas isNull [simp] = isNull1 isNull2 + +lemmas isDomain [simp] = isDomain1 isDomain2 + +lemma sameRegionAs1: + "sameRegionAs c cap' = sameRegionAs c cap" using master + by (simp add: sameRegionAs_def3) + +lemma sameRegionAs2: + "sameRegionAs cap' c = sameRegionAs cap c" using master + by (simp add: sameRegionAs_def3) + +lemmas sameRegionAs [simp] = sameRegionAs1 sameRegionAs2 + +lemma isMDBParentOf1: + assumes "\isReplyCap cap" + assumes "\isEndpointCap cap" + assumes "\isNotificationCap cap" + shows "isMDBParentOf c (CTE cap' m) = isMDBParentOf c (CTE cap m)" +proof - + from assms + have c': + "\isReplyCap cap'" "\isEndpointCap cap'" + "\isNotificationCap cap'" by auto + note isReplyCap [simp del] isEndpointCap [simp del] isNotificationCap [simp del] + from c' assms + show ?thesis + apply (cases c, clarsimp) + apply (simp add: isMDBParentOf_CTE) + apply (rule iffI) + apply clarsimp + apply (clarsimp simp: capBadge_ordering_def capBadge_def isCap_simps sameRegionAs_def3 + split: if_split_asm) + apply clarsimp + apply (clarsimp simp: capBadge_ordering_def capBadge_def isCap_simps sameRegionAs_def3 + split: if_split_asm) + done +qed + +lemma isMDBParentOf2: + assumes "\isReplyCap cap" + assumes "\isEndpointCap cap" + assumes "\isNotificationCap cap" + shows "isMDBParentOf (CTE cap' m) c = isMDBParentOf (CTE cap m) c" +proof - + from assms + have c': + "\isReplyCap cap'" "\isEndpointCap cap'" + "\isNotificationCap cap'" by auto + note isReplyCap [simp del] isEndpointCap [simp del] isNotificationCap [simp del] + from c' assms + show ?thesis + apply (cases c, clarsimp) + apply (simp add: isMDBParentOf_CTE) + apply (auto simp: capBadge_ordering_def capBadge_def isCap_simps sameRegionAs_def3 + split: if_split_asm)[1] + done +qed + +lemmas isMDBParentOf = isMDBParentOf1 isMDBParentOf2 + +end + + +lemma same_master_descendants: + assumes slot: "m slot = Some cte" + assumes master: "capMasterCap (cteCap cte) = capMasterCap cap'" + assumes c': "\isReplyCap cap'" "\isEndpointCap cap'" "\isNotificationCap cap'" + defines "m' \ m(slot \ cteCap_update (\_. 
cap') cte)" + shows "descendants_of' p m' = descendants_of' p m" +proof (rule set_eqI, simp add: descendants_of'_def) + obtain cap n where cte: "cte = CTE cap n" by (cases cte) + then + interpret masterCap cap cap' + using master by (simp add: masterCap_def) + + from c' + have c: "\isReplyCap cap" + "\isEndpointCap cap" + "\isNotificationCap cap" by auto + + note parent [simp] = isMDBParentOf [OF c] + + { fix a b + from slot + have "m' \ a \ b = m \ a \ b" + by (simp add: m'_def mdb_next_unfold) + } note this [simp] + + { fix a b + from slot cte + have "m' \ a parentOf b = m \ a parentOf b" + by (simp add: m'_def parentOf_def) + } note this [simp] + + fix x + { assume "m \ p \ x" + hence "m' \ p \ x" + proof induct + case (direct_parent c') + thus ?case + by (auto intro: subtree.direct_parent) + next + case trans_parent + thus ?case + by (auto elim: subtree.trans_parent) + qed + } + moreover { + assume "m' \ p \ x" + hence "m \ p \ x" + proof induct + case (direct_parent c') + thus ?case + by (auto intro: subtree.direct_parent) + next + case trans_parent + thus ?case + by (auto elim: subtree.trans_parent) + qed + } + ultimately + show "m' \ p \ x = m \ p \ x" by blast +qed + +lemma is_ep_cap_relation: + "cap_relation c c' \ isEndpointCap c' = is_ep_cap c" + apply (simp add: isCap_simps is_cap_simps) + apply (cases c, auto) + done + +lemma is_ntfn_cap_relation: + "cap_relation c c' \ isNotificationCap c' = is_ntfn_cap c" + apply (simp add: isCap_simps is_cap_simps) + apply (cases c, auto) + done + +(* Just for convenience like free_index_update *) +definition freeIndex_update where + "freeIndex_update c' g \ case c' of capability.UntypedCap d ref sz f \ capability.UntypedCap d ref sz (g f) | _ \ c'" + +lemma freeIndex_update_not_untyped[simp]: "\isUntypedCap c \ freeIndex_update c g = c" + by (case_tac c,simp_all add:freeIndex_update_def isCap_simps) + +locale mdb_insert = + mdb_ptr_src?: mdb_ptr m _ _ src src_cap src_node + + mdb_ptr_dest?: mdb_ptr m _ _ dest dest_cap dest_node + for m src src_cap src_node dest dest_cap dest_node + + + fixes c' :: capability + + assumes dest_cap: "dest_cap = NullCap" + assumes dest_prev: "mdbPrev dest_node = 0" + assumes dest_next: "mdbNext dest_node = 0" + + assumes valid_badges: "valid_badges m" + assumes ut_rev: "ut_revocable' m" + + fixes n + + defines "n \ + modify_map + (modify_map + (modify_map m dest (cteCap_update (\_. c'))) + dest + (cteMDBNode_update + (\m. mdbFirstBadged_update (\a. isCapRevocable c' src_cap) + (mdbRevocable_update (\a. isCapRevocable c' src_cap) + (mdbPrev_update (\a. src) src_node))))) + src + (cteMDBNode_update (mdbNext_update (\a. dest)))" + + assumes neq: "src \ dest" + +locale mdb_insert_der = mdb_insert + + assumes partial_is_derived': "is_derived' m src c' src_cap" + + +context mdb_insert +begin + +lemmas src = mdb_ptr_src.m_p +lemmas dest = mdb_ptr_dest.m_p + + +lemma no_0_n [intro!]: "no_0 n" by (auto simp: n_def) +lemmas n_0_simps [iff] = no_0_simps [OF no_0_n] + +lemmas neqs [simp] = neq neq [symmetric] + +definition + "new_dest \ CTE c' (mdbFirstBadged_update (\a. isCapRevocable c' src_cap) + (mdbRevocable_update (\a. isCapRevocable c' src_cap) + (mdbPrev_update (\a. src) src_node)))" + +definition + "new_src \ CTE src_cap (mdbNext_update (\a. 
dest) src_node)" + +lemma n: "n = m (dest \ new_dest, src \ new_src)" + using src dest + by (simp add: n_def modify_map_apply new_dest_def new_src_def) + +lemma dest_no_parent [iff]: + "m \ dest \ x = False" using dest dest_next + by (auto dest: subtree_next_0) + +lemma dest_no_child [iff]: + "m \ x \ dest = False" using dest dest_prev + by (auto dest: subtree_prev_0) + +lemma dest_no_descendants: "descendants_of' dest m = {}" + by (simp add: descendants_of'_def) + +lemma descendants_not_dest: "dest \ descendants_of' p m \ False" + by (simp add: descendants_of'_def) + +lemma src_next: "m \ src \ mdbNext src_node" + by (simp add: src mdb_next_unfold) + +lemma src_next_rtrancl_conv [simp]: + "m \ mdbNext src_node \\<^sup>* dest = m \ src \\<^sup>+ dest" + apply (rule iffI) + apply (insert src_next) + apply (erule (1) rtrancl_into_trancl2) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma dest_no_next [iff]: + "m \ x \ dest = False" using dest dest_prev dlist + apply clarsimp + apply (simp add: mdb_next_unfold) + apply (elim exE conjE) + apply (case_tac z) + apply simp + apply (rule dlistEn [where p=x], assumption) + apply clarsimp + apply clarsimp + done + +lemma dest_no_next_trans [iff]: + "m \ x \\<^sup>+ dest = False" + by (clarsimp dest!: tranclD2) + +lemma dest_no_prev [iff]: + "m \ dest \ p = (p = 0)" using dest dest_next + by (simp add: mdb_next_unfold) + +lemma dest_no_prev_trancl [iff]: + "m \ dest \\<^sup>+ p = (p = 0)" + apply (rule iffI) + apply (drule tranclD) + apply (clarsimp simp: dest_next) + apply simp + apply (insert chain dest) + apply (simp add: mdb_chain_0_def) + apply auto + done + +lemma chain_n: + "mdb_chain_0 n" +proof - + from chain + have "m \ mdbNext src_node \\<^sup>* 0" using dlist src + apply (cases "mdbNext src_node = 0") + apply simp + apply (erule dlistEn, simp) + apply (auto simp: mdb_chain_0_def) + done + moreover + have "\m \ mdbNext src_node \\<^sup>* src" + using src_next + apply clarsimp + apply (drule (1) rtrancl_into_trancl2) + apply simp + done + moreover + have "\ m \ 0 \\<^sup>* dest" using no_0 dest + by (auto elim!: next_rtrancl_tranclE dest!: no_0_lhs_trancl) + moreover + have "\ m \ 0 \\<^sup>* src" using no_0 src + by (auto elim!: next_rtrancl_tranclE dest!: no_0_lhs_trancl) + moreover + note chain + ultimately + show "mdb_chain_0 n" using no_0 src dest + apply (simp add: n new_src_def new_dest_def) + apply (auto intro!: mdb_chain_0_update no_0_update simp: next_update_lhs_rtrancl) + done +qed + +lemma no_loops_n: "no_loops n" using chain_n no_0_n + by (rule mdb_chain_0_no_loops) + +lemma irrefl_trancl_simp [iff]: + "n \ x \\<^sup>+ x = False" + using no_loops_n by (rule no_loops_trancl_simp) + +lemma n_direct_eq: + "n \ p \ p' = (if p = src then p' = dest else + if p = dest then m \ src \ p' + else m \ p \ p')" + using src dest dest_prev + by (auto simp: mdb_next_update n new_src_def new_dest_def + src_next mdb_next_unfold) + +lemma n_dest: + "n dest = Some new_dest" + by (simp add: n) + +end + +lemma revokable_plus_orderD: + "\ isCapRevocable new old; (capBadge old, capBadge new) \ capBadge_ordering P; + capMasterCap old = capMasterCap new \ + \ (isUntypedCap new \ (\x. capBadge old = Some 0 \ capBadge new = Some x \ x \ 0))" + by (clarsimp simp: Retype_H.isCapRevocable_def AARCH64_H.isCapRevocable_def isCap_simps + AARCH64_H.arch_capability.simps + split: if_split_asm capability.split_asm AARCH64_H.arch_capability.split_asm) + +lemma valid_badges_def2: + "valid_badges m = + (\p p' cap node cap' node'. 
+ m p = Some (CTE cap node) \ + m p' = Some (CTE cap' node') \ + m \ p \ p' \ + capMasterCap cap = capMasterCap cap' \ + capBadge cap \ None \ + capBadge cap \ capBadge cap' \ + capBadge cap' \ Some 0 \ + mdbFirstBadged node')" + apply (simp add: valid_badges_def) + apply (intro arg_cong[where f=All] ext imp_cong [OF refl]) + apply (case_tac cap, simp_all add: isCap_simps cong: weak_imp_cong) + apply (fastforce simp: sameRegionAs_def3 isCap_simps)+ + done + +lemma sameRegionAs_update_untyped: + "RetypeDecls_H.sameRegionAs (capability.UntypedCap d a b c) = + RetypeDecls_H.sameRegionAs (capability.UntypedCap d a b c')" + apply (rule ext) + apply (case_tac x) + apply (clarsimp simp:sameRegionAs_def isCap_simps)+ + done + +lemma sameRegionAs_update_untyped': + "RetypeDecls_H.sameRegionAs cap (capability.UntypedCap d a b f) = + RetypeDecls_H.sameRegionAs cap (capability.UntypedCap d a b f')" + apply (case_tac cap) + apply (clarsimp simp:sameRegionAs_def isCap_simps)+ + done + +(*The newly inserted cap should never have children.*) +lemma (in mdb_insert_der) dest_no_parent_n: + "n \ dest \ p = False" + using src partial_is_derived' valid_badges ut_rev + apply clarsimp + apply (erule subtree.induct) + prefer 2 + apply simp + apply (clarsimp simp: parentOf_def mdb_next_unfold n_dest new_dest_def n) + apply (cases "mdbNext src_node = dest") + apply (subgoal_tac "m \ src \ dest") + apply simp + apply (subst mdb_next_unfold) + apply (simp add: src) + apply (case_tac "isUntypedCap src_cap") + apply (clarsimp simp: isCap_simps isMDBParentOf_CTE is_derived'_def + badge_derived'_def freeIndex_update_def capMasterCap_def + split: capability.splits) + apply (simp add: ut_revocable'_def) + apply (drule spec[where x=src], simp add: isCap_simps) + apply (simp add: descendants_of'_def) + apply (drule spec[where x="mdbNext src_node"]) + apply (erule notE, rule direct_parent) + apply (simp add: mdb_next_unfold) + apply simp + apply (simp add: parentOf_def src isMDBParentOf_CTE isCap_simps + cong: sameRegionAs_update_untyped) + apply (clarsimp simp: isMDBParentOf_CTE is_derived'_def badge_derived'_def) + apply (drule(2) revokable_plus_orderD) + apply (erule sameRegionAsE, simp_all) + apply (simp add: valid_badges_def2) + apply (erule_tac x=src in allE) + apply (erule_tac x="mdbNext src_node" in allE) + apply (clarsimp simp: src mdb_next_unfold) + apply (case_tac "capBadge cap'", simp_all) + apply (clarsimp simp add: isCap_simps capMasterCap_def + simp del: not_ex + split: capability.splits) + apply (clarsimp simp: isCap_simps)+ + done + +locale mdb_insert_child = mdb_insert_der + + assumes child: + "isMDBParentOf + (CTE src_cap src_node) + (CTE c' (mdbFirstBadged_update (\a. isCapRevocable c' src_cap) + (mdbRevocable_update (\a. isCapRevocable c' src_cap) + (mdbPrev_update (\a. 
src) src_node))))" + +context mdb_insert_child +begin + +lemma new_child [simp]: + "isMDBParentOf new_src new_dest" + by (simp add: new_src_def new_dest_def) (rule child) + +lemma n_dest_child: + "n \ src \ dest" + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def src dest n) + done + +lemma parent_m_n: + assumes "m \ p \ p'" + shows "if p' = src then n \ p \ dest \ n \ p \ p' else n \ p \ p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply (cases "p = src") + apply simp + apply (rule conjI, clarsimp) + apply clarsimp + apply (rule subtree.trans_parent [where c'=dest]) + apply (rule n_dest_child) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (clarsimp simp: new_src_def src) + apply simp + apply (subgoal_tac "n \ p \ c") + prefer 2 + apply (rule subtree.direct_parent) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: new_src_def src) + apply clarsimp + apply (erule subtree_trans) + apply (rule n_dest_child) + done +next + case (trans_parent c d) + thus ?case + apply - + apply (cases "c = dest", simp) + apply (cases "d = dest", simp) + apply (cases "c = src") + apply clarsimp + apply (erule subtree.trans_parent [where c'=dest]) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (rule conjI, clarsimp) + apply (clarsimp simp: new_src_def src) + apply clarsimp + apply (subgoal_tac "n \ p \ d") + apply clarsimp + apply (erule subtree_trans, rule n_dest_child) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: src new_src_def) + done +qed + +lemma n_to_dest [simp]: + "n \ p \ dest = (p = src)" + by (simp add: n_direct_eq) + +lemma parent_n_m: + assumes "n \ p \ p'" + shows "if p' = dest then p \ src \ m \ p \ src else m \ p \ p'" +proof - + from assms have [simp]: "p \ dest" by (clarsimp simp: dest_no_parent_n) + from assms + show ?thesis + proof induct + case (direct_parent c) + thus ?case + apply simp + apply (rule conjI) + apply clarsimp + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply simp + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + done + next + case (trans_parent c d) + thus ?case + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp split: if_split_asm) + apply (simp add: n_direct_eq) + apply (cases "p=src") + apply simp + apply (rule subtree.direct_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply clarsimp + apply (erule subtree.trans_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + done + qed +qed + +lemma descendants: + "descendants_of' p n = + (if src \ descendants_of' p m \ p = src + then descendants_of' p m \ {dest} else descendants_of' p m)" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (fastforce dest!: parent_n_m dest: parent_m_n simp: n_dest_child split: if_split_asm) + done + +end + +locale mdb_insert_sib = mdb_insert_der + + assumes no_child: + "\isMDBParentOf + (CTE src_cap src_node) + (CTE 
c' (mdbFirstBadged_update (\a. isCapRevocable c' src_cap) + (mdbRevocable_update (\a. isCapRevocable c' src_cap) + (mdbPrev_update (\a. src) src_node))))" +begin +interpretation Arch . (*FIXME: arch_split*) + +(* If dest is inserted as sibling, src can not have had children. + If it had had children, then dest_node which is just a derived copy + of src_node would be a child as well. *) +lemma src_no_mdb_parent: + "isMDBParentOf (CTE src_cap src_node) cte = False" + using no_child partial_is_derived' + apply clarsimp + apply (case_tac cte) + apply (clarsimp simp: isMDBParentOf_CTE is_derived'_def badge_derived'_def) + apply (erule sameRegionAsE) + apply (clarsimp simp add: sameRegionAs_def3) + subgoal by (cases src_cap; auto simp: capMasterCap_def Retype_H.isCapRevocable_def AARCH64_H.isCapRevocable_def + freeIndex_update_def isCap_simps + split: capability.split_asm arch_capability.split_asm) (* long *) + apply (clarsimp simp: isCap_simps sameRegionAs_def3 capMasterCap_def freeIndex_update_def + split:capability.splits arch_capability.splits) + apply (clarsimp simp: isCap_simps sameRegionAs_def3 freeIndex_update_def + capRange_def split:capability.splits + simp del: Int_atLeastAtMost atLeastAtMost_iff) + apply auto[1] + apply (clarsimp simp: isCap_simps sameRegionAs_def3)+ + done + +lemma src_no_parent: + "m \ src \ p = False" + by (clarsimp dest!: subtree_parent simp: src_no_mdb_parent parentOf_def src) + +lemma parent_preserved: + "isMDBParentOf cte (CTE src_cap src_node) \ isMDBParentOf cte new_dest" + using partial_is_derived' + apply (cases cte) + apply (case_tac "isUntypedCap src_cap") + apply (clarsimp simp:isCap_simps isMDBParentOf_CTE freeIndex_update_def new_dest_def) + apply (clarsimp simp:is_derived'_def isCap_simps badge_derived'_def capMasterCap_def split:capability.splits) + apply (clarsimp simp:sameRegionAs_def2 capMasterCap_def isCap_simps split:capability.splits) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (clarsimp simp: new_dest_def) + apply (rename_tac cap node) + apply (clarsimp simp: is_derived'_def badge_derived'_def) + apply (rule conjI) + apply (simp add: sameRegionAs_def2) + apply (cases "isCapRevocable c' src_cap") + apply simp + apply (drule(2) revokable_plus_orderD) + apply (erule disjE) + apply (clarsimp simp: isCap_simps) + by ((fastforce elim: capBadge_ordering_trans simp: isCap_simps)+) + +lemma src_no_parent_n [simp]: + "n \ src \ p = False" + apply clarsimp + apply (erule subtree.induct) + apply (simp add: n_direct_eq) + apply (clarsimp simp: parentOf_def n src dest new_src_def + new_dest_def no_child) + apply simp + done + +lemma parent_n: + "n \ p \ p' \ if p' = dest then m \ p \ src else m \ p \ p'" + apply (cases "p=dest", simp add: dest_no_parent_n) + apply (cases "p=src", simp) + apply (erule subtree.induct) + apply simp + apply (rule conjI) + apply (clarsimp simp: n_direct_eq) + apply clarsimp + apply (rule direct_parent) + apply (simp add: n_direct_eq) + apply assumption + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply simp + apply (rule conjI) + apply (clarsimp simp: n_direct_eq split: if_split_asm) + apply (clarsimp simp: n_direct_eq split: if_split_asm) + apply (erule trans_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply (erule trans_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + done + +lemma parent_m: + "m \ p \ p' \ n \ p \ p' \ (p' = src \ n \ p \ dest)" + apply (cases 
"p=src", simp add: src_no_parent) + apply (erule subtree.induct) + apply (rule conjI) + apply (rule direct_parent) + apply (clarsimp simp: n_direct_eq) + apply assumption + apply (fastforce simp add: parentOf_def n src new_src_def) + apply clarsimp + apply (rule trans_parent [where c'=src]) + apply (rule direct_parent) + apply (simp add: n_direct_eq) + apply (rule notI, simp) + apply simp + apply (simp add: parentOf_def n src new_src_def) + apply (clarsimp simp: dest dest_cap) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def dest src n) + apply (rule conjI, clarsimp simp: dest dest_cap) + apply (clarsimp intro!: parent_preserved) + apply clarsimp + apply (case_tac "c'=src") + apply simp + apply (erule trans_parent [where c'=dest]) + apply (clarsimp simp: n_direct_eq) + apply clarsimp + apply (fastforce simp: parentOf_def dest src n) + apply clarsimp + apply (rule conjI) + apply (erule trans_parent) + apply (simp add: n_direct_eq) + apply clarsimp + apply assumption + apply (fastforce simp: parentOf_def dest src n new_src_def) + apply clarsimp + apply (rule trans_parent [where c'=src]) + apply (erule trans_parent) + apply (simp add: n_direct_eq) + apply clarsimp + apply simp + apply (fastforce simp: parentOf_def dest src n new_src_def) + apply (simp add: n_direct_eq) + apply simp + apply (fastforce simp: parentOf_def dest src n new_src_def + intro!: parent_preserved) + done + +lemma parent_n_eq: + "n \ p \ p' = (if p' = dest then m \ p \ src else m \ p \ p')" + apply (rule iffI) + apply (erule parent_n) + apply (simp split: if_split_asm) + apply (drule parent_m, simp) + apply (drule parent_m, clarsimp) + done + +lemma descendants: + "descendants_of' p n = + descendants_of' p m \ (if src \ descendants_of' p m then {dest} else {})" + by (rule set_eqI) (simp add: descendants_of'_def parent_n_eq) + +end +context begin interpretation Arch . (*FIXME: arch_split*) +lemma mdb_None: + assumes F: "\p'. cte_map p \ descendants_of' p' m' \ False" + assumes R: "cdt_relation (swp cte_at s) (cdt s) m'" + assumes "valid_mdb s" + shows "cdt s p = None" + apply (simp add: descendants_of_None [symmetric]) + apply clarsimp + apply (frule descendants_of_cte_at2, rule assms) + apply (insert R) + apply (simp add: cdt_relation_def) + apply (erule allE, erule allE, erule (1) impE) + apply (rule_tac p'="cte_map (a,b)" in F) + apply (drule sym) + apply simp + done + +declare if_split [split del] + +lemma derived_sameRegionAs: + "\ is_derived' m p cap' cap; s \' cap' \ + \ sameRegionAs cap cap'" + unfolding is_derived'_def badge_derived'_def + apply (simp add: sameRegionAs_def3) + apply (cases "isUntypedCap cap \ isArchFrameCap cap") + apply (rule disjI2, rule disjI1) + apply (erule disjE) + apply (clarsimp simp: isCap_simps valid_cap'_def capAligned_def + is_aligned_no_overflow capRange_def + split: capability.splits arch_capability.splits option.splits) + apply (clarsimp simp: isCap_simps valid_cap'_def capAligned_def + is_aligned_no_overflow capRange_def + split: capability.splits arch_capability.splits option.splits) + apply (clarsimp simp: isCap_simps valid_cap'_def + is_aligned_no_overflow capRange_def vs_cap_ref_arch'_def + split: capability.splits arch_capability.splits option.splits) + done + +lemma no_fail_updateMDB [wp]: + "no_fail (\s. p \ 0 \ cte_at' p s) (updateMDB p f)" + apply (simp add: updateMDB_def) + apply (rule no_fail_pre, wp) + apply (simp split: if_split) + done + +lemma updateMDB_cte_at' [wp]: + "\cte_at' p\ + updateMDB x y + \\_. 
cte_at' p\" + by (wpsimp wp: updateMDB_weak_cte_wp_at) + +lemma updateCap_cte_at' [wp]: + "\cte_at' p\ updateCap c p' \\_. cte_at' p\" + unfolding updateCap_def by wp + +lemma nullMDBNode_pointers[simp]: + "mdbPrev nullMDBNode = nullPointer" + "mdbNext nullMDBNode = nullPointer" + by (simp add: nullMDBNode_def)+ + +lemma maxFreeIndex_eq[simp]: "maxFreeIndex nat1 = max_free_index nat1" + by (clarsimp simp:maxFreeIndex_def max_free_index_def shiftL_nat) + +definition maskedAsFull :: "capability \ capability \ capability" +where "maskedAsFull srcCap newCap \ + if isUntypedCap srcCap \ isUntypedCap newCap \ + capPtr srcCap = capPtr newCap \ capBlockSize srcCap = capBlockSize newCap + then capFreeIndex_update (\_. maxFreeIndex (capBlockSize srcCap)) srcCap + else srcCap" + +lemma is_derived_maskedAsFull[simp]: + "is_derived' m slot c (maskedAsFull src_cap' a) = + is_derived' m slot c src_cap'" + apply (clarsimp simp: maskedAsFull_def isCap_simps split:if_splits) + apply (case_tac c) + apply (clarsimp simp:is_derived'_def isCap_simps badge_derived'_def)+ + done + + +lemma maskedAsFull_revokable: + "is_derived' x y c' src_cap' \ + isCapRevocable c' (maskedAsFull src_cap' a) = isCapRevocable c' src_cap'" + apply (case_tac src_cap') + apply (simp_all add:maskedAsFull_def isCap_simps) + apply (case_tac c') + apply (simp_all add:maskedAsFull_def is_derived'_def isCap_simps) + apply (simp_all add:badge_derived'_def capMasterCap_simps split:arch_capability.splits) + apply (clarsimp split:if_splits simp:Retype_H.isCapRevocable_def AARCH64_H.isCapRevocable_def isCap_simps)+ + done + +lemma parentOf_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ cteMDBNode cte = cteMDBNode cte'" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows "(m \ p parentOf x) \ (m' \ p parentOf x)" + apply (clarsimp simp:parentOf_def) + apply (frule iffD1[OF dom,OF domI]) + apply (frule iffD1[OF dom[where x = p],OF domI]) + apply clarsimp + apply (frule_tac x1 = p in conjunct1[OF sameRegion]) + apply assumption + apply (frule_tac x1 = x in conjunct2[OF sameRegion]) + apply assumption + apply (drule_tac x = "cteCap y" in fun_cong) + apply (drule_tac x = "cteCap cte'" in fun_cong) + apply (drule_tac x = p in misc) + apply assumption + apply (drule_tac x = x in misc) + apply assumption + apply ((simp only: isMDBParentOf_def split_def split: cte.splits if_split_asm); clarsimp) + by (clarsimp simp: sameRegionAs_def isCap_simps Let_def split: if_split_asm)+ (* long *) + +lemma parentOf_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes misc:"\x cte cte'. 
\m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ cteMDBNode cte = cteMDBNode cte'" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows "(m \ p parentOf x) = (m' \ p parentOf x)" + apply (rule iffI) + apply (rule parentOf_preserve_oneway[OF dom sameRegion misc node]) + apply (assumption)+ + apply (rule parentOf_preserve_oneway) + apply (auto simp:dom sameRegion misc node) +done + +lemma updateUntypedCap_descendants_of: + "\m src = Some cte; isUntypedCap (cteCap cte)\ + \ descendants_of' slot (m(src \ cteCap_update + (\_. (capFreeIndex_update (\_. idx) (cteCap cte))) cte)) = + descendants_of' slot m" + apply (rule set_eqI) + + apply (clarsimp simp:descendants_of'_def subtree_def) + apply (rule_tac x = x in fun_cong) + apply (rule_tac f = lfp in arg_cong) + apply (rule ext)+ + apply (cut_tac x = xa in parentOf_preserve + [where m = "m(src \ cteCap_update (\_. capFreeIndex_update (\_. idx) (cteCap cte)) cte)" + and m' = m and p = slot]) + apply (clarsimp,rule iffI,fastforce+) + apply (clarsimp simp:isCap_simps split:if_splits) + apply (clarsimp simp:sameRegionAs_def isCap_simps split:if_splits) + apply (rule ext) + apply (clarsimp simp:sameRegionAs_def isCap_simps split:if_splits)+ + apply (simp add:mdb_next_def split:if_splits) + apply (simp del:fun_upd_apply) + apply (subgoal_tac "\p. m(src \ cteCap_update (\_. capFreeIndex_update (\_. idx) (cteCap cte)) cte) \ p \ xa + = m \ p \ xa") + apply simp + apply (clarsimp simp:mdb_next_rel_def mdb_next_def split:if_splits) + done + +lemma setCTE_UntypedCap_corres: + "\cap_relation cap (cteCap cte); is_untyped_cap cap; idx' = idx\ + \ corres dc (cte_wp_at ((=) cap) src and valid_objs and + pspace_aligned and pspace_distinct) + (cte_wp_at' ((=) cte) (cte_map src) and + pspace_distinct' and pspace_aligned') + (set_cap (free_index_update (\_. idx) cap) src) + (setCTE (cte_map src) (cteCap_update + (\cap. (capFreeIndex_update (\_. 
idx') (cteCap cte))) cte))" + apply (case_tac cte) + apply (clarsimp simp:is_cap_simps) + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply clarsimp + apply (clarsimp simp add: state_relation_def split_def) + apply (drule (1) pspace_relationsD) + apply (frule_tac c = "cap.UntypedCap dev r bits idx" + in set_cap_not_quite_corres_prequel) + apply assumption+ + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption+ + apply simp+ + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply (clarsimp simp: pspace_relations_def) + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + apply (frule setCTE_pspace_only) + apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + apply (rule conjI) + apply (frule setCTE_pspace_only) + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (rule conjI) + prefer 2 + apply (rule conjI) + apply (frule mdb_set_cap, frule exst_set_cap) + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (clarsimp simp: cdt_list_relation_def cte_wp_at_ctes_of split: if_split_asm) + apply (rule conjI) + prefer 2 + apply (frule setCTE_pspace_only) + apply clarsimp + apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + apply (frule set_cap_caps_of_state_monad) + apply (drule is_original_cap_set_cap) + apply clarsimp + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (clarsimp simp: revokable_relation_def simp del: fun_upd_apply) + apply (clarsimp split: if_split_asm) + apply (frule cte_map_inj_eq) + prefer 2 + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (simp add: null_filter_def split: if_split_asm) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule caps_of_state_cte_at) + apply fastforce + apply fastforce + apply fastforce + apply clarsimp + apply (simp add: null_filter_def split: if_split_asm) + apply (erule_tac x=aa in allE, erule_tac x=bb in allE) + apply (case_tac cte) + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps isCap_simps cte_wp_at_ctes_of) + apply (simp add: null_filter_def cte_wp_at_caps_of_state split: if_split_asm) + apply (erule_tac x=aa in allE, erule_tac x=bb in allE) + apply (clarsimp) + apply (clarsimp simp: cdt_relation_def) + apply (frule set_cap_caps_of_state_monad) + apply (frule mdb_set_cap) + apply clarsimp + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (frule cte_wp_at_norm) + apply (clarsimp simp:cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (drule_tac slot = "cte_map (aa,bb)" in updateUntypedCap_descendants_of) + apply (clarsimp simp:isCap_simps) + apply (drule_tac x = aa in spec) + apply (drule_tac x = bb in spec) + apply (erule impE) + apply (clarsimp simp: cte_wp_at_caps_of_state split:if_splits) + apply auto + done + +lemma getCTE_get: + "\cte_wp_at' P src\ getCTE src \\rv s. 
P rv\" + apply (wp getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma setUntypedCapAsFull_corres: + "\cap_relation c c'; cap_relation src_cap (cteCap srcCTE)\ + \ corres dc (cte_wp_at ((=) src_cap) src and valid_objs and + pspace_aligned and pspace_distinct) + (cte_wp_at' ((=) srcCTE) (cte_map src) and + pspace_aligned' and pspace_distinct') + (set_untyped_cap_as_full src_cap c src) + (setUntypedCapAsFull (cteCap srcCTE) c' (cte_map src))" + apply (clarsimp simp:set_untyped_cap_as_full_def setUntypedCapAsFull_def + split:if_splits) + apply (intro conjI impI) + apply (clarsimp simp del:capFreeIndex_update.simps simp:updateCap_def) + apply (rule corres_guard_imp) + apply (rule corres_symb_exec_r) + apply (rule_tac F="cte = srcCTE" in corres_gen_asm2) + apply (simp) + apply (rule setCTE_UntypedCap_corres) + apply simp+ + apply (clarsimp simp:free_index_update_def isCap_simps is_cap_simps) + apply (subst identity_eq) + apply (wp getCTE_sp getCTE_get)+ + apply (clarsimp simp:cte_wp_at_ctes_of)+ + apply (clarsimp simp:is_cap_simps isCap_simps)+ + apply (case_tac c,simp_all) + apply (case_tac src_cap,simp_all) + done + +(* FIXME: SELFOUR-421 move *) +lemma isUntypedCap_simps[simp]: + "isUntypedCap (capability.UntypedCap uu uv uw ux) = True" + "isUntypedCap (capability.NullCap) = False" + "isUntypedCap (capability.EndpointCap v va vb vc vd ve) = False" + "isUntypedCap (capability.NotificationCap v va vb vc) = False" + "isUntypedCap (capability.ReplyCap v1 v2 v3) = False" + "isUntypedCap (capability.CNodeCap x1 x2 x3 x4) = False" + "isUntypedCap (capability.ThreadCap v) = False" + "isUntypedCap (capability.DomainCap) = False" + "isUntypedCap (capability.IRQControlCap) = False" + "isUntypedCap (capability.IRQHandlerCap y1) = False" + "isUntypedCap (capability.Zombie v va1 vb1) = False" + "isUntypedCap (capability.ArchObjectCap z) = False" + by (simp_all add: isUntypedCap_def split: capability.splits) + +lemma cap_relation_masked_as_full: + "\cap_relation src_cap src_cap';cap_relation c c'\ \ + cap_relation (masked_as_full src_cap c) (maskedAsFull src_cap' c')" + apply (clarsimp simp: masked_as_full_def maskedAsFull_def + split: if_splits) + apply (case_tac src_cap; clarsimp) + by (case_tac c; clarsimp) + +lemma setUntypedCapAsFull_pspace_distinct[wp]: + "\pspace_distinct' and cte_wp_at' ((=) srcCTE) slot\ + setUntypedCapAsFull (cteCap srcCTE) c slot \\r. pspace_distinct'\" + apply (clarsimp simp: setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (clarsimp simp:valid_def) + apply (drule updateCap_stuff) + apply simp + apply (wp|clarsimp)+ +done + +lemma setUntypedCapAsFull_pspace_aligned[wp]: + "\pspace_aligned' and cte_wp_at' ((=) srcCTE) slot\ + setUntypedCapAsFull (cteCap srcCTE) c slot + \\r. pspace_aligned'\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (clarsimp simp:valid_def) + apply (drule updateCap_stuff) + apply simp + apply (wp|clarsimp)+ +done + +(* wp rules about setFreeIndex and setUntypedCapAsFull *) +lemma setUntypedCapAsFull_ctes_of: + "\\s. src \ dest \ P (ctes_of s dest) \ + src = dest \ P (Some (CTE (maskedAsFull (cteCap srcCTE) cap) + (cteMDBNode srcCTE))) \ + cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. 
P (ctes_of s dest)\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (simp add:updateCap_def) + apply (wp getCTE_wp) + apply (clarsimp split:if_splits simp:cte_wp_at_ctes_of if_distrib) + apply (case_tac "src = dest") + apply (case_tac srcCTE) + apply (clarsimp simp:maskedAsFull_def) + apply clarsimp + apply wp + apply (case_tac srcCTE) + apply (fastforce simp:maskedAsFull_def cte_wp_at_ctes_of split: if_splits) + done + +lemma setUntypedCapAsFull_ctes_of_no_0: + "\\s. no_0 ((ctes_of s)(a:=b)) \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. no_0 ((ctes_of s)(a:=b)) \" + apply (clarsimp simp:no_0_def split:if_splits) + apply (wp hoare_vcg_imp_lift setUntypedCapAsFull_ctes_of[where dest = 0]) + apply (auto simp:cte_wp_at_ctes_of) + done + +lemma valid_dlist_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte'" + shows "valid_dlist m \ valid_dlist m'" + apply (clarsimp simp:valid_dlist_def Let_def) + apply (frule domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (elim allE impE) + apply assumption + apply (intro conjI impI) + apply clarsimp + apply (frule(1) misc) + apply (clarsimp) + apply (frule_tac b = cte' in domI[where m = m]) + apply (drule iffD1[OF dom]) + apply clarsimp + apply (drule(1) misc)+ + apply simp + apply clarsimp + apply (frule(1) misc) + apply (clarsimp) + apply (frule_tac b = cte' in domI[where m = m]) + apply (drule iffD1[OF dom]) + apply clarsimp + apply (drule(1) misc)+ + apply simp +done + +lemma valid_dlist_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte'" + shows "valid_dlist m = valid_dlist m'" + apply (rule iffI) + apply (rule valid_dlist_preserve_oneway[OF dom misc]) + apply simp+ + apply (rule valid_dlist_preserve_oneway) + apply (simp add:dom misc)+ +done + +lemma ut_revocable_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte')" + shows "ut_revocable' m \ ut_revocable' m'" + apply (clarsimp simp:ut_revocable'_def Let_def) + apply (drule_tac x = p in spec) + apply (frule domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (case_tac r) + apply clarsimp + apply (elim allE impE) + apply (frule(1) misc) + apply (clarsimp) + apply (drule(1) misc)+ + apply simp +done + +lemma ut_revocable_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte')" + shows "ut_revocable' m = ut_revocable' m'" + apply (rule iffI) + apply (rule ut_revocable_preserve_oneway[OF dom misc]) + apply (assumption)+ + apply (rule ut_revocable_preserve_oneway[OF dom[symmetric]]) + apply (simp add:misc)+ +done + +lemma class_links_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ capClass (cteCap cte) = capClass (cteCap cte')" + assumes node:"\p. 
mdb_next m p = mdb_next m' p" + shows "class_links m \ class_links m'" + apply (clarsimp simp:class_links_def Let_def) + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (frule domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (case_tac r) + apply clarsimp + apply (frule_tac b = cte' in domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (elim allE impE) + apply simp + apply (frule(1) misc) + apply (clarsimp simp:mdb_next_rel_def node) + apply (drule(1) misc)+ + apply simp +done + +lemma class_links_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ capClass (cteCap cte) = capClass (cteCap cte')" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows "class_links m = class_links m'" + apply (rule iffI) + apply (rule class_links_preserve_oneway[OF dom misc]) + apply (simp add:node)+ + apply (rule class_links_preserve_oneway) + apply (simp add:dom misc node)+ +done + +lemma distinct_zombies_preserve_oneway: + assumes dom: "\x. (x \ dom m) = (x \ dom m')" + assumes misc: + "\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isZombie (cteCap cte) = isZombie (cteCap cte') \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') \ + isArchFrameCap (cteCap cte) = isArchFrameCap (cteCap cte') \ + capBits (cteCap cte) = capBits (cteCap cte') \ + capUntypedPtr (cteCap cte) = capUntypedPtr (cteCap cte') \ + capClass (cteCap cte) = capClass (cteCap cte')" + assumes node: "\p. mdb_next m p = mdb_next m' p" + shows "distinct_zombies m \ distinct_zombies m'" + apply (clarsimp simp:distinct_zombies_def distinct_zombie_caps_def Let_def) + apply (drule_tac x = ptr in spec) + apply (drule_tac x = ptr' in spec) + apply (frule domI[where m = m'],drule iffD2[OF dom],erule domE) + apply (case_tac r) + apply clarsimp + apply (frule_tac a=ptr' in domI[where m = m'],drule iffD2[OF dom],erule domE) + apply clarsimp + apply (drule(1) misc)+ + apply clarsimp + done + +lemma distinct_zombies_preserve: + assumes dom: "\x. (x \ dom m) = (x \ dom m')" + assumes misc: + "\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isZombie (cteCap cte) = isZombie (cteCap cte') \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') \ + isArchFrameCap (cteCap cte) = isArchFrameCap (cteCap cte') \ + capBits (cteCap cte) = capBits (cteCap cte') \ + capUntypedPtr (cteCap cte) = capUntypedPtr (cteCap cte') \ + capClass (cteCap cte) = capClass (cteCap cte')" + assumes node: "\p. mdb_next m p = mdb_next m' p" + shows "distinct_zombies m = distinct_zombies m'" + apply (rule iffI) + apply (rule distinct_zombies_preserve_oneway[OF dom misc node]) + apply (assumption)+ + apply (rule distinct_zombies_preserve_oneway) + apply (simp add:dom misc node)+ + done + +lemma caps_contained'_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. 
\m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ capRange (cteCap cte) = capRange (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + shows "caps_contained' m \ caps_contained' m'" + apply (clarsimp simp:caps_contained'_def) + apply (frule iffD2[OF dom,OF domI]) + apply (frule_tac x1 = p' in iffD2[OF dom,OF domI]) + apply clarsimp + apply (case_tac y,case_tac ya) + apply (drule_tac x= p in spec) + apply (drule_tac x= p' in spec) + apply (frule_tac x = p in misc) + apply assumption + apply (frule_tac x = p' in misc) + apply assumption + apply (elim allE impE) + apply fastforce+ + done + +lemma caps_contained'_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ capRange (cteCap cte) = capRange (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + shows "caps_contained' m = caps_contained' m'" + apply (rule iffI) + apply (rule caps_contained'_preserve_oneway[OF dom misc]) + apply (assumption)+ + apply (rule caps_contained'_preserve_oneway) + apply (auto simp:dom misc) + done + +lemma is_chunk_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows " \m x =Some (CTE a b);m' x = Some (CTE c d)\ \ is_chunk m a p p' \ is_chunk m' c p p'" + apply (clarsimp simp:is_chunk_def) + apply (drule_tac x= p'' in spec) + apply (subgoal_tac "m \ p \\<^sup>+ p'' = m' \ p \\<^sup>+ p''") + apply (subgoal_tac "m \ p'' \\<^sup>* p' = m' \ p'' \\<^sup>* p'") + apply (frule iffD1[OF dom,OF domI]) + apply (clarsimp) + apply (frule_tac x1 = p'' in iffD1[OF dom,OF domI]) + apply clarsimp + apply (frule_tac x = p'' in sameRegion,assumption) + apply clarsimp + apply (frule_tac x = x in sameRegion,assumption) + apply clarsimp + apply (case_tac y) + apply (drule_tac fun_cong)+ + apply fastforce + apply simp + apply (erule iffD1[OF connect_eqv_singleE',rotated -1]) + apply (clarsimp simp: mdb_next_rel_def node) + apply (rule connect_eqv_singleE) + apply (clarsimp simp: mdb_next_rel_def node) + done + +lemma is_chunk_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows " \m x =Some (CTE a b);m' x = Some (CTE c d)\ \ is_chunk m a p p' = is_chunk m' c p p'" + apply (rule iffI) + apply (rule is_chunk_preserve_oneway[OF dom sameRegion node],assumption+) + apply (rule is_chunk_preserve_oneway) + apply (auto simp:dom sameRegion node) + done + +lemma mdb_chunked_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes node:"\p. 
mdb_next m p = mdb_next m' p" + shows + "mdb_chunked m \ mdb_chunked m'" + apply (clarsimp simp:mdb_chunked_def) + apply (drule_tac x=p in spec) + apply (drule_tac x=p' in spec) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply clarsimp + apply (case_tac ya) + apply (case_tac y) + apply (frule_tac x = p in sameRegion,assumption) + apply (frule_tac x = p' in sameRegion,assumption) + apply clarsimp + apply (erule impE) + apply (drule fun_cong)+ + apply fastforce + apply (subgoal_tac "m \ p \\<^sup>+ p' = m' \ p \\<^sup>+ p'") + apply (subgoal_tac "m \ p' \\<^sup>+ p = m' \ p' \\<^sup>+ p") + apply (frule_tac m = m and + x = p and c = cap and p = p and p'=p' in is_chunk_preserve[rotated -1]) + apply (simp add:dom) + apply (rule sameRegion) + apply simp+ + apply (rule node) + apply assumption + apply (frule_tac x = p' and c = cap' and p = p' and p'=p in is_chunk_preserve[rotated -1]) + apply (rule dom) + apply (rule sameRegion) + apply assumption+ + apply (rule node) + apply assumption + apply clarsimp + apply (rule connect_eqv_singleE) + apply (clarsimp simp:mdb_next_rel_def node) + apply (rule connect_eqv_singleE) + apply (clarsimp simp:mdb_next_rel_def node) + done + +lemma mdb_chunked_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes node:"\p. mdb_next m p = mdb_next m' p" + shows + "mdb_chunked m = mdb_chunked m'" + apply (rule iffI) + apply (erule mdb_chunked_preserve_oneway[rotated -1]) + apply (simp add:dom sameRegion node)+ + apply (erule mdb_chunked_preserve_oneway[rotated -1]) + apply (simp add:dom[symmetric]) + apply (frule sameRegion) + apply assumption + apply simp + apply (simp add:node) + done + +lemma valid_badges_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" + shows "valid_badges m \ valid_badges m'" + apply (clarsimp simp:valid_badges_def) + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply clarsimp + apply (case_tac y,case_tac ya) + apply clarsimp + apply (erule impE) + apply (simp add: mdb_next mdb_next_rel_def) + apply (erule impE) + apply (drule(1) sameRegion)+ + apply clarsimp + apply (drule fun_cong)+ + apply fastforce + apply (drule(1) misc)+ + apply (clarsimp simp:isCap_simps sameRegionAs_def split:if_splits) + done + +lemma valid_badges_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. 
\m x =Some cte;m' x = Some cte'\ \ + isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" + shows "valid_badges m = valid_badges m'" + apply (rule iffI) + apply (rule valid_badges_preserve_oneway[OF dom misc sameRegion mdb_next]) + apply assumption+ + apply (rule valid_badges_preserve_oneway) + apply (simp add:dom misc sameRegion mdb_next)+ + done + +lemma mdb_untyped'_preserve_oneway: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ capRange (cteCap cte) = capRange (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" + shows + "untyped_mdb' m \ untyped_mdb' m'" + apply (clarsimp simp:untyped_mdb'_def) + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply clarsimp + apply (case_tac y,case_tac ya) + apply (frule misc) + apply fastforce + apply clarsimp + apply (frule_tac x = p' in misc) + apply fastforce + apply (frule_tac x = p in misc) + apply assumption + apply clarsimp + apply (clarsimp simp: descendants_of'_def Invariants_H.subtree_def) + apply (erule_tac f1 = "\x. lfp x y" for y in iffD1[OF arg_cong,rotated]) + apply (rule ext)+ + apply (subgoal_tac "\p p'. (m \ p \ p') = (m' \ p \ p')") + apply (thin_tac "P" for P)+ + apply (subgoal_tac "(m \ p parentOf x) = (m' \ p parentOf x)") + apply fastforce + apply (rule parentOf_preserve[OF dom]) + apply (simp add:misc sameRegion mdb_next mdb_next_rel_def)+ + done + + +lemma untyped_mdb'_preserve: + assumes dom:"\x. (x \ dom m) = (x \ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ capRange (cteCap cte) = capRange (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. 
\m x =Some cte;m' x = Some cte'\ \ cteMDBNode cte = cteMDBNode cte' + \ sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" + shows + "untyped_mdb' m = untyped_mdb' m'" + apply (rule iffI) + apply (erule mdb_untyped'_preserve_oneway[rotated -1]) + apply (simp add:dom misc sameRegion range mdb_next)+ + apply (erule mdb_untyped'_preserve_oneway[rotated -1]) + apply (simp add:dom[symmetric]) + apply (frule(1) misc,simp) + apply (frule(1) sameRegion,simp) + apply (simp add:mdb_next[symmetric])+ +done + +lemma irq_control_preserve_oneway: + assumes dom: "\x. (x \ dom m) = (x \ dom m')" + assumes misc: + "\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isIRQControlCap (cteCap cte) = isIRQControlCap (cteCap cte') \ + cteMDBNode cte = cteMDBNode cte'" + shows "irq_control m \ irq_control m'" + apply (clarsimp simp:irq_control_def) + apply (frule iffD2[OF dom,OF domI]) + apply clarsimp + apply (frule(1) misc) + apply (clarsimp simp:isCap_simps) + apply (case_tac y) + apply (elim allE impE) + apply fastforce + apply clarsimp + apply (drule_tac x = p' in spec) + apply (erule impE) + apply (frule_tac x1 = p' in iffD2[OF dom,OF domI]) + apply clarsimp + apply (drule(1) misc)+ + apply (case_tac y) + apply (simp add:isCap_simps)+ + done + +lemma irq_control_preserve: + assumes dom: "\x. (x \ dom m) = (x \ dom m')" + assumes misc: + "\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isIRQControlCap (cteCap cte) = isIRQControlCap (cteCap cte') \ + cteMDBNode cte = cteMDBNode cte'" + shows "irq_control m = irq_control m'" + apply (rule iffI[OF irq_control_preserve_oneway[OF dom misc]]) + apply (assumption)+ + apply (rule irq_control_preserve_oneway) + apply (simp add:dom misc)+ + done + +end + +locale mdb_inv_preserve = + fixes m m' + assumes dom: "\x. (x\ dom m) = (x\ dom m')" + assumes misc:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + isUntypedCap (cteCap cte) = isUntypedCap (cteCap cte') + \ isNullCap (cteCap cte) = isNullCap (cteCap cte') + \ isReplyCap (cteCap cte) = isReplyCap (cteCap cte') + \ (isReplyCap (cteCap cte) \ capReplyMaster (cteCap cte) = capReplyMaster (cteCap cte')) + \ isNotificationCap (cteCap cte) = isNotificationCap (cteCap cte') + \ (isNotificationCap (cteCap cte) \ (capNtfnBadge (cteCap cte) = capNtfnBadge (cteCap cte'))) + \ (isEndpointCap (cteCap cte) = isEndpointCap (cteCap cte')) + \ (isEndpointCap (cteCap cte) \ (capEPBadge (cteCap cte) = capEPBadge (cteCap cte'))) + \ untypedRange (cteCap cte) = untypedRange (cteCap cte') + \ capClass (cteCap cte) = capClass (cteCap cte') + \ isZombie (cteCap cte) = isZombie (cteCap cte') + \ isArchFrameCap (cteCap cte) = isArchFrameCap (cteCap cte') + \ capBits (cteCap cte) = capBits (cteCap cte') + \ RetypeDecls_H.capUntypedPtr (cteCap cte) = RetypeDecls_H.capUntypedPtr (cteCap cte') + \ capRange (cteCap cte) = capRange (cteCap cte') + \ isIRQControlCap (cteCap cte) = isIRQControlCap (cteCap cte') + \ cteMDBNode cte = cteMDBNode cte'" + assumes sameRegion:"\x cte cte'. \m x =Some cte;m' x = Some cte'\ \ + sameRegionAs (cteCap cte) = sameRegionAs (cteCap cte') + \ (\x. sameRegionAs x (cteCap cte)) = (\x. sameRegionAs x (cteCap cte'))" + assumes mdb_next:"\p. mdb_next m p = mdb_next m' p" +begin +interpretation Arch . 
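+(* Within mdb_inv_preserve: the two cte heaps m and m' have the same domain, agree on
+   cteMDBNode and on the cap attributes listed in assumption misc, induce the same
+   sameRegionAs tests, and have identical mdb_next links. The lemmas below use these
+   assumptions to transfer the individual valid_mdb' components from m to m'. *)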
(*FIXME: arch_split*) +lemma preserve_stuff: + "valid_dlist m = valid_dlist m' + \ ut_revocable' m = ut_revocable' m' + \ class_links m = class_links m' + \ distinct_zombies m = distinct_zombies m' + \ caps_contained' m = caps_contained' m' + \ mdb_chunked m = mdb_chunked m' + \ valid_badges m = valid_badges m' + \ untyped_mdb' m = untyped_mdb' m' + \ irq_control m = irq_control m'" + apply (intro conjI) + apply (rule valid_dlist_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule ut_revocable_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule class_links_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule distinct_zombies_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule caps_contained'_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule mdb_chunked_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule valid_badges_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule untyped_mdb'_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + apply (rule irq_control_preserve) + apply (simp add:mdb_inv_preserve_def dom misc sameRegion mdb_next)+ + done + +lemma untyped_inc': + assumes subeq: "\x cte cte'. \m x =Some cte;m' x = Some cte';isUntypedCap (cteCap cte)\ \ + usableUntypedRange (cteCap cte') \ usableUntypedRange (cteCap cte)" + shows "untyped_inc' m \ untyped_inc' m'" + apply (clarsimp simp:untyped_inc'_def) + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply clarsimp + apply (rename_tac cte cte') + apply (case_tac cte) + apply (rename_tac cap node) + apply (case_tac cte') + apply (drule_tac x = cap in spec) + apply clarsimp + apply (frule_tac x = p' in misc) + apply assumption + apply (frule_tac x = p in misc) + apply assumption + apply clarsimp + apply (drule(1) subeq,simp)+ + apply (subgoal_tac "\p p'. (p' \descendants_of' p m) = (p' \ descendants_of' p m')") + apply clarsimp + apply (intro conjI impI) + apply clarsimp + apply (drule(1) disjoint_subset2[rotated],clarsimp+)+ + apply (erule disjE) + apply clarsimp+ + apply (thin_tac "P" for P)+ + apply (clarsimp simp: descendants_of'_def Invariants_H.subtree_def) + apply (rule_tac f = "\x. lfp x c" for c in arg_cong) + apply (subgoal_tac "\p p'. (m \ p \ p') = (m' \ p \ p')") + apply (rule ext)+ + apply clarsimp + apply (subgoal_tac "(m \ pa parentOf x) = (m' \ pa parentOf x)") + apply fastforce + apply (rule parentOf_preserve[OF dom]) + apply (simp add:misc sameRegion mdb_next mdb_next_rel_def)+ + done + +lemma descendants_of: + "descendants_of' p m = descendants_of' p m'" + apply (rule set_eqI) + apply (clarsimp simp:descendants_of'_def Invariants_H.subtree_def) + apply (rule_tac f = "\x. lfp x c" for c in arg_cong) + apply (rule ext)+ + apply (subgoal_tac "\p p'. 
(m \ p \ p') = (m' \ p \ p')") + apply clarsimp + apply (subgoal_tac "(m \ p parentOf xa) = (m' \ p parentOf xa)") + apply fastforce + apply (rule parentOf_preserve[OF dom]) + apply (simp add:misc sameRegion mdb_next mdb_next_rel_def)+ + done + +lemma by_products: + "reply_masters_rvk_fb m = reply_masters_rvk_fb m' + \ no_0 m = no_0 m' \ mdb_chain_0 m = mdb_chain_0 m' + \ valid_nullcaps m = valid_nullcaps m'" +apply (intro conjI) + apply (simp add:ran_dom reply_masters_rvk_fb_def mdb_inv_preserve_def dom misc sameRegion mdb_next) + apply (rule iffI) + apply clarsimp + apply (drule_tac x = y in bspec) + apply (rule iffD2[OF dom]) + apply clarsimp + apply (frule iffD2[OF dom,OF domI],rotate_tac) + apply (clarsimp simp:misc)+ + apply (drule_tac x = y in bspec) + apply (rule iffD1[OF dom]) + apply clarsimp + apply (frule iffD1[OF dom,OF domI],rotate_tac) + apply (clarsimp simp:misc)+ + apply (clarsimp simp:no_0_def) + apply (rule ccontr) + apply (simp add:dom_in) + apply (subst (asm) dom[symmetric]) + apply fastforce + apply (rule iffI) + apply (clarsimp simp:mdb_chain_0_def) + apply (drule_tac x =x in bspec) + apply (rule iffD2[OF dom],clarsimp) + apply (erule_tac iffD1[OF connect_eqv_singleE,rotated]) + apply (cut_tac p = p in mdb_next) + apply (clarsimp simp: mdb_next_rel_def) + apply (clarsimp simp:mdb_chain_0_def) + apply (drule_tac x =x in bspec) + apply (rule iffD1[OF dom],clarsimp) + apply (erule_tac iffD1[OF connect_eqv_singleE,rotated]) + apply (cut_tac p = p in mdb_next) + apply (clarsimp simp: mdb_next_rel_def) + apply (simp add:valid_nullcaps_def) + apply (rule forall_eq,clarsimp)+ + apply (rule iffI) + apply clarsimp + apply (frule iffD2[OF dom,OF domI]) + apply (clarsimp) + apply (case_tac y) + apply (drule misc) + apply assumption + apply (clarsimp simp:isCap_simps) + apply clarsimp + apply (frule iffD1[OF dom,OF domI]) + apply (clarsimp) + apply (case_tac y) + apply (drule misc) + apply assumption + apply (clarsimp simp:isCap_simps) +done + +end + +lemma mdb_inv_preserve_modify_map: + "mdb_inv_preserve m m' \ + mdb_inv_preserve (modify_map m slot (cteMDBNode_update f)) + (modify_map m' slot (cteMDBNode_update f))" + apply (clarsimp simp:mdb_inv_preserve_def split:if_splits) + apply (intro conjI) + apply (clarsimp simp:modify_map_dom) + apply (clarsimp simp:modify_map_def split:if_splits)+ + apply (clarsimp simp:option_map_def o_def split:option.splits if_splits) + apply (drule_tac x = p in spec)+ + apply (intro allI conjI impI) + apply (clarsimp simp:mdb_next_def split:if_splits)+ + done + +lemma mdb_inv_preserve_updateCap: + "\m slot = Some cte;isUntypedCap (cteCap cte)\ \ + mdb_inv_preserve m (modify_map m slot + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap cte))))" + apply (clarsimp simp:mdb_inv_preserve_def modify_map_dom isCap_simps modify_map_def split:if_splits) + apply (intro conjI impI allI) + apply fastforce + apply (simp add:sameRegionAs_update_untyped) + apply (rule ext,simp add:sameRegionAs_update_untyped') + apply (simp add:mdb_next_def split:if_splits) + done + +lemma mdb_inv_preserve_fun_upd: + "mdb_inv_preserve m m' \ mdb_inv_preserve (m(a \ b)) (m'(a \ b))" + by (clarsimp simp:mdb_inv_preserve_def mdb_next_def split:if_splits) + +lemma updateCap_ctes_of_wp: + "\\s. P (modify_map (ctes_of s) ptr (cteCap_update (\_. cap)))\ + updateCap ptr cap + \\r s. P (ctes_of s)\" + by (rule validI, simp add: updateCap_stuff) + +lemma updateCap_cte_wp_at': + "\\s. cte_at' ptr s \ Q (cte_wp_at' (\cte. 
if p = ptr then P' (CTE cap (cteMDBNode cte)) else P' cte) p s)\ + updateCap ptr cap \\rv s. Q (cte_wp_at' P' p s)\" + apply (simp add:updateCap_def cte_wp_at_ctes_of) + apply (wp setCTE_ctes_of_wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, auto split: if_split) + done + +lemma updateCapFreeIndex_mdb_chain_0: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (mdb_chain_0 (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (mdb_chain_0 (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.by_products) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_valid_badges: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (valid_badges (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (valid_badges (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_caps_contained: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (caps_contained' (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (caps_contained' (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_valid_nullcaps: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (valid_nullcaps (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (valid_nullcaps (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.by_products) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_ut_revocable: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (ut_revocable'(Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. 
P (ut_revocable' (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_class_links: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (class_links (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (class_links (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_reply_masters_rvk_fb: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (reply_masters_rvk_fb (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (reply_masters_rvk_fb (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.by_products) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_distinct_zombies: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (distinct_zombies (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (distinct_zombies (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_mdb_chunked: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (mdb_chunked (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (mdb_chunked (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_untyped_mdb': + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (untyped_mdb' (Q (ctes_of s))) \ cte_wp_at' (\c. 
c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (untyped_mdb' (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma updateCapFreeIndex_irq_control: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (irq_control (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (irq_control (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma setUntypedCapAsFull_mdb_chunked: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (mdb_chunked (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (mdb_chunked (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_mdb_chunked) + apply (clarsimp simp:preserve cte_wp_at_ctes_of)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_untyped_mdb': + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (untyped_mdb' (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (untyped_mdb' (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_untyped_mdb') + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_mdb_chain_0: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (mdb_chain_0 (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (mdb_chain_0 (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_mdb_chain_0) + apply (clarsimp simp:preserve cte_wp_at_ctes_of)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_irq_control: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (irq_control (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (irq_control (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_irq_control) + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_valid_badges: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (valid_badges (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. 
P (valid_badges (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_valid_badges) + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_caps_contained: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (caps_contained' (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (caps_contained' (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_caps_contained) + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_valid_nullcaps: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (valid_nullcaps (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (valid_nullcaps (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_valid_nullcaps) + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_ut_revocable: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (ut_revocable' (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (ut_revocable' (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_ut_revocable) + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_class_links: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (class_links(Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (class_links (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_class_links) + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_distinct_zombies: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (distinct_zombies (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (distinct_zombies (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_distinct_zombies) + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma setUntypedCapAsFull_reply_masters_rvk_fb: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (reply_masters_rvk_fb (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ +setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (reply_masters_rvk_fb (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_reply_masters_rvk_fb) + apply (clarsimp simp:cte_wp_at_ctes_of preserve)+ + apply wp + apply clarsimp +done + +lemma modify_map_eq[simp]: + "\m slot = Some srcCTE; cap = cteCap srcCTE\ + \(modify_map m slot (cteCap_update (\_. 
cap))) = m" + apply (rule ext) + apply (case_tac srcCTE) + apply (auto simp:modify_map_def split:if_splits) + done + +lemma setUntypedCapAsFull_ctes: + "\\s. cte_wp_at' (\c. c = srcCTE) src s \ + P (modify_map (ctes_of s) src (cteCap_update (\_. maskedAsFull (cteCap srcCTE) cap))) + \ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P (ctes_of s)\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCap_ctes_of_wp) + apply (clarsimp simp:isCap_simps max_free_index_def maskedAsFull_def) + apply wp + apply (clarsimp simp:isCap_simps cte_wp_at_ctes_of + max_free_index_def maskedAsFull_def split:if_splits) + done + +lemma setUntypedCapAsFull_valid_cap: + "\valid_cap' cap and cte_wp_at' ((=) srcCTE) slot\ + setUntypedCapAsFull (cteCap srcCTE) c slot + \\r. valid_cap' cap\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (clarsimp simp:updateCap_def) + apply (wp|clarsimp)+ +done + +lemma cteCap_update_simps: + "cteCap_update f srcCTE = CTE (f (cteCap srcCTE)) (cteMDBNode srcCTE)" + apply (case_tac srcCTE) + apply auto +done + +lemma setUntypedCapAsFull_cte_wp_at: + "\cte_wp_at' ((=) srcCTE) slot and + (\s. cte_wp_at' (\c. P c) dest s \ dest \ slot \ + dest = slot \ cte_wp_at' (\c. P (CTE (maskedAsFull (cteCap c) c') + (cteMDBNode c))) slot s) \ + setUntypedCapAsFull (cteCap srcCTE) c' slot + \\r s. cte_wp_at' (\c. P c) dest s\" + apply (clarsimp simp:setUntypedCapAsFull_def cte_wp_at_ctes_of split:if_splits) + apply (case_tac "dest = slot") + apply (intro conjI impI) + apply (clarsimp simp:updateCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp:maskedAsFull_def cte_wp_at_ctes_of cteCap_update_simps) + apply wp + apply (case_tac srcCTE) + apply (fastforce simp:maskedAsFull_def cte_wp_at_ctes_of) + apply (intro conjI impI) + apply (clarsimp simp:updateCap_def | wp setCTE_weak_cte_wp_at getCTE_wp)+ + apply (simp add:cte_wp_at'_def) + apply (clarsimp simp:updateCap_def | wp setCTE_weak_cte_wp_at getCTE_wp)+ + done + +lemma mdb_inv_preserve_sym:"mdb_inv_preserve a b \ mdb_inv_preserve b a" + by (simp add:mdb_inv_preserve_def) + + +lemma mdb_inv_preserve_refl[simp]: + "mdb_inv_preserve m m" + by (simp add:mdb_inv_preserve_def) + +lemma setUntypedCapAsFull_mdb[wp]: + "\\s. valid_mdb' s \ cte_wp_at' (\c. c = srcCTE) slot s \ + setUntypedCapAsFull (cteCap srcCTE) cap slot + \\rv s. valid_mdb' s\" + apply (clarsimp simp:valid_mdb'_def) + apply (wp setUntypedCapAsFull_ctes) + apply (subgoal_tac "mdb_inv_preserve (ctes_of s) (modify_map (ctes_of s) slot + (cteCap_update (\_. 
maskedAsFull (cteCap srcCTE) cap)))") + apply (frule mdb_inv_preserve.untyped_inc') + apply (clarsimp simp:modify_map_def max_free_index_def + maskedAsFull_def isCap_simps cte_wp_at_ctes_of + split:if_splits) + apply (clarsimp simp:valid_mdb_ctes_def mdb_inv_preserve.preserve_stuff)+ + apply (clarsimp simp:mdb_inv_preserve.by_products[OF mdb_inv_preserve_sym]) + apply (clarsimp simp:maskedAsFull_def cte_wp_at_ctes_of split:if_splits) + apply (erule(1) mdb_inv_preserve_updateCap) + done + +lemma (in mdb_insert_abs_sib) next_slot_no_parent': + "\valid_list_2 t m; finite_depth m; no_mloop m; m src = None\ + \ next_slot p t (m(dest := None)) = next_slot p t m" + by (insert next_slot_no_parent, simp add: n_def) + +lemma no_parent_next_not_child_None: + "\m p = None; finite_depth m\ \ next_not_child p t m = None" + apply(rule next_not_child_NoneI) + apply(fastforce simp: descendants_of_def cdt_parent_defs dest: tranclD2) + apply(simp add: next_sib_def) + apply(simp) + done + +lemma (in mdb_insert_abs_sib) next_slot': + "\valid_list_2 t m; finite_depth m; no_mloop m; m src = Some src_p; t src = []\ + \ next_slot p (t(src_p := list_insert_after (t src_p) src dest)) + (m(dest := Some src_p)) = + (if p = src then Some dest + else if p = dest then next_slot src t m else next_slot p t m)" + by (insert next_slot, simp add: n_def) + +lemmas valid_list_def = valid_list_2_def + +crunch valid_list[wp]: set_untyped_cap_as_full valid_list + +lemma updateMDB_the_lot': + assumes "(x, s'') \ fst (updateMDB p f s')" + assumes "pspace_relations (ekheap sa) (kheap s) (ksPSpace s')" + assumes "pspace_aligned' s'" "pspace_distinct' s'" "no_0 (ctes_of s')" "ekheap s = ekheap sa" + shows "ctes_of s'' = modify_map (ctes_of s') p (cteMDBNode_update f) \ + ksMachineState s'' = ksMachineState s' \ + ksWorkUnitsCompleted s'' = ksWorkUnitsCompleted s' \ + ksCurThread s'' = ksCurThread s' \ + ksIdleThread s'' = ksIdleThread s' \ + ksReadyQueues s'' = ksReadyQueues s' \ + ksSchedulerAction s'' = ksSchedulerAction s' \ + ksInterruptState s'' = ksInterruptState s' \ + ksArchState s'' = ksArchState s' \ + gsUserPages s'' = gsUserPages s' \ + gsCNodes s'' = gsCNodes s' \ + pspace_relations (ekheap s) (kheap s) (ksPSpace s'') \ + pspace_aligned' s'' \ pspace_distinct' s'' \ + no_0 (ctes_of s'') \ + ksDomScheduleIdx s'' = ksDomScheduleIdx s' \ + ksDomSchedule s'' = ksDomSchedule s' \ + ksCurDomain s'' = ksCurDomain s' \ + ksDomainTime s'' = ksDomainTime s'" + apply (rule updateMDB_the_lot) + using assms + apply (fastforce simp: pspace_relations_def)+ + done + +lemma cte_map_inj_eq': + "\(cte_map p = cte_map p'); + cte_at p s \ cte_at p' s \ + valid_objs s \ pspace_aligned s \ pspace_distinct s\ + \ p = p'" + apply (rule cte_map_inj_eq; fastforce) + done + +context begin interpretation Arch . (*FIXME: arch_split*) +lemma cteInsert_corres: + notes split_paired_All[simp del] split_paired_Ex[simp del] + trans_state_update'[symmetric,simp] + assumes "cap_relation c c'" "src' = cte_map src" "dest' = cte_map dest" + shows "corres dc + (valid_objs and pspace_distinct and pspace_aligned and + valid_mdb and valid_list and K (src\dest) and + cte_wp_at (\c. c=Structures_A.NullCap) dest and + (\s. cte_wp_at (is_derived (cdt s) src c) src s)) + (pspace_distinct' and pspace_aligned' and valid_mdb' and valid_cap' c' and + cte_wp_at' (\c. cteCap c=NullCap) dest') + (cap_insert c src dest) + (cteInsert c' src' dest')" + (is "corres _ (?P and (\s. 
cte_wp_at _ _ s)) (?P' and cte_wp_at' _ _) _ _") + using assms + unfolding cap_insert_def cteInsert_def + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac F="cteCap rv' = NullCap" in corres_gen_asm2) + apply simp + apply (rule_tac P="?P and cte_at dest and + (\s. cte_wp_at (is_derived (cdt s) src c) src s) and + cte_wp_at ((=) src_cap) src" and + Q="?P' and + cte_wp_at' ((=) rv') (cte_map dest) and + cte_wp_at' ((=) srcCTE) (cte_map src)" + in corres_assert_assume) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply (case_tac rv') + apply (simp add: initMDBNode_def) + apply (erule allE)+ + apply (erule (1) impE) + apply (simp add: nullPointer_def) + apply (rule corres_guard_imp) + apply (rule_tac R="\r. ?P and cte_at dest and + (\s. (is_derived (cdt s) src c) src_cap) and + cte_wp_at ((=) (masked_as_full src_cap c)) src" and + R'="\r. ?P' and cte_wp_at' ((=) rv') (cte_map dest) and + cte_wp_at' ((=) (CTE (maskedAsFull (cteCap srcCTE) c') (cteMDBNode srcCTE))) + (cte_map src)" + in corres_split[where r'=dc]) + apply (rule setUntypedCapAsFull_corres; simp) + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre) + apply (wp hoare_weak_lift_imp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (erule_tac valid_dlistEn[where p = "cte_map src"]) + apply (simp+)[3] + apply (clarsimp simp: corres_underlying_def state_relation_def + in_monad valid_mdb'_def valid_mdb_ctes_def) + apply (drule (1) pspace_relationsD) + apply (drule (18) set_cap_not_quite_corres) + apply (rule refl) + apply (elim conjE exE) + apply (rule bind_execI, assumption) + apply (subgoal_tac "mdb_insert_abs (cdt a) src dest") + prefer 2 + apply (erule mdb_insert_abs.intro) + apply (rule mdb_Null_None) + apply (simp add: op_equal) + apply simp + apply (rule mdb_Null_descendants) + apply (simp add: op_equal) + apply simp + apply (subgoal_tac "no_mloop (cdt a)") + prefer 2 + apply (simp add: valid_mdb_def) + apply (clarsimp simp: exec_gets update_cdt_def bind_assoc set_cdt_def + exec_get exec_put set_original_def modify_def + simp del: fun_upd_apply + | (rule bind_execI[where f="cap_insert_ext x y z i p" for x y z i p], clarsimp simp: exec_gets exec_get put_def mdb_insert_abs.cap_insert_ext_det_def2 update_cdt_list_def set_cdt_list_def, rule refl))+ + apply (clarsimp simp: put_def state_relation_def) + apply (drule updateCap_stuff) + apply clarsimp + apply (drule (3) updateMDB_the_lot', simp, simp, elim conjE) + apply (drule (3) updateMDB_the_lot', simp, simp, elim conjE) + apply (drule (3) updateMDB_the_lot', simp, simp, elim conjE) + apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def + prev_update_modify_mdb_relation) + apply (subgoal_tac "cte_map dest \ 0") + prefer 2 + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "cte_map src \ 0") + prefer 2 + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def no_0_def) + apply (thin_tac "ksMachineState t = p" for p t)+ + apply (thin_tac "ksCurThread t = p" for p t)+ + apply (thin_tac "ksIdleThread t = p" for p t)+ + apply (thin_tac "ksReadyQueues t = p" for p t)+ + apply (thin_tac "ksSchedulerAction t = p" for p t)+ + apply (clarsimp simp: pspace_relations_def) + + apply (rule conjI) + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (rule conjI) + defer + apply(rule conjI) + apply (thin_tac 
"ctes_of s = t" for s t)+ + apply (thin_tac "pspace_relation s t" for s t)+ + apply (thin_tac "machine_state t = s" for s t)+ + apply (case_tac "srcCTE") + apply (rename_tac src_cap' src_node) + apply (case_tac "rv'") + apply (rename_tac dest_node) + apply (clarsimp simp: in_set_cap_cte_at_swp) + apply (subgoal_tac "cte_at src a \ is_derived (cdt a) src c src_cap") + prefer 2 + apply (fastforce simp: cte_wp_at_def) + apply (erule conjE) + apply (subgoal_tac "mdb_insert (ctes_of b) (cte_map src) (maskedAsFull src_cap' c') src_node + (cte_map dest) NullCap dest_node") + prefer 2 + apply (rule mdb_insert.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_insert_axioms.intro) + apply (rule refl) + apply assumption + apply assumption + apply assumption + apply assumption + apply (erule (5) cte_map_inj) + apply (frule mdb_insert_der.intro) + apply (rule mdb_insert_der_axioms.intro) + apply (simp add: is_derived_eq) + apply (simp (no_asm_simp) add: cdt_relation_def split: if_split) + apply (subgoal_tac "descendants_of dest (cdt a) = {}") + prefer 2 + apply (drule mdb_insert.dest_no_descendants) + apply (fastforce simp add: cdt_relation_def) + apply (subgoal_tac "mdb_insert_abs (cdt a) src dest") + prefer 2 + apply (erule mdb_insert_abs.intro) + apply (rule mdb_None) + apply (erule(1) mdb_insert.descendants_not_dest) + apply assumption + apply assumption + apply assumption + apply(simp add: cdt_list_relation_def) + apply(subgoal_tac "no_mloop (cdt a) \ finite_depth (cdt a)") + prefer 2 + apply(simp add: finite_depth valid_mdb_def) + apply(intro conjI impI allI) + apply(simp cong: option.case_cong) + apply(simp split: option.split) + apply(subgoal_tac "\aa. 
cdt a src = Some aa \ src \ aa") + prefer 2 + apply(fastforce simp: no_mloop_weaken) + apply(simp add: fun_upd_twist) + apply(rule allI) + apply(case_tac "next_child src (cdt_list (a))") + apply(frule next_child_NoneD) + apply(subst mdb_insert_abs.next_slot) + apply(simp_all)[5] + apply(case_tac "ca=src") + apply(simp) + apply(clarsimp simp: modify_map_def) + apply(fastforce split: if_split_asm) + apply(case_tac "ca = dest") + apply(simp) + apply(rule impI) + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x=src in allE)+ + subgoal by(fastforce) + apply(simp) + apply(rule impI) + apply(subgoal_tac "cte_at ca a") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(clarsimp) + apply(case_tac z) + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + + apply(frule(1) next_childD) + apply(simp add: mdb_insert_abs.next_slot) + apply(case_tac "ca=src") + apply(simp) + apply(clarsimp simp: modify_map_def) + subgoal by(fastforce split: if_split_asm) + apply(case_tac "ca = dest") + apply(simp) + apply(rule impI) + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + apply(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x=src in allE)+ + subgoal by(fastforce) + apply(simp) + apply(rule impI) + apply(subgoal_tac "cte_at ca a") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(clarsimp) + apply(case_tac z) + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + + apply(subgoal_tac "mdb_insert_sib (ctes_of b) (cte_map src) (maskedAsFull src_cap' c') + src_node (cte_map dest) capability.NullCap dest_node c'") + prefer 2 + apply(simp add: mdb_insert_sib_def) + apply(rule mdb_insert_sib_axioms.intro) + apply (subst can_be_is [symmetric]) + apply simp + apply (rule cap_relation_masked_as_full) + apply (simp+)[3] + apply simp + apply simp + apply simp + apply (subst (asm) is_cap_revocable_eq, assumption, assumption) + apply (rule derived_sameRegionAs) + apply (subst is_derived_eq[symmetric]; assumption) + apply assumption + subgoal by (clarsimp simp: cte_wp_at_def is_derived_def is_cap_simps cap_master_cap_simps + 
dest!:cap_master_cap_eqDs) + apply (subgoal_tac "is_original_cap a src = mdbRevocable src_node") + apply (frule(4) iffD1[OF is_derived_eq]) + apply (drule_tac src_cap' = src_cap' in + maskedAsFull_revokable[where a = c',symmetric]) + subgoal by(simp) + apply (simp add: revokable_relation_def) + apply (erule_tac x=src in allE)+ + apply simp + apply (erule impE) + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state split: if_splits) + subgoal by (clarsimp simp: masked_as_full_def is_cap_simps free_index_update_def split: if_splits) + apply(simp) + + apply(subgoal_tac "cdt_list (a) src = []") + prefer 2 + apply(rule ccontr) + apply(simp add: empty_list_empty_desc) + apply(simp add: no_children_empty_desc[symmetric]) + apply(erule exE) + apply(drule_tac p="cte_map caa" in mdb_insert_sib.src_no_parent) + apply(subgoal_tac "cte_map caa\descendants_of' (cte_map src) (ctes_of b)") + subgoal by(simp add: descendants_of'_def) + apply(simp add: cdt_relation_def) + apply(erule_tac x=src in allE) + apply(drule child_descendant)+ + apply(drule_tac x=caa and f=cte_map in imageI) + subgoal by(simp) + + apply(case_tac "cdt a src") + apply(simp) + apply(subst mdb_insert_abs_sib.next_slot_no_parent') + apply(simp add: mdb_insert_abs_sib_def) + apply(simp_all add: fun_upd_idem)[5] + + apply(case_tac "ca=src") + subgoal by(simp add: next_slot_def no_parent_next_not_child_None) + apply(case_tac "ca = dest") + subgoal by(simp add: next_slot_def no_parent_next_not_child_None + mdb_insert_abs.dest empty_list_empty_desc) + apply(case_tac "next_slot ca (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(simp) + apply(subgoal_tac "cte_at ca a") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(clarsimp) + apply(case_tac z) + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + + apply(simp add: fun_upd_idem) + apply(subst mdb_insert_abs_sib.next_slot') + subgoal by(simp add: mdb_insert_abs_sib_def) + apply(simp_all)[5] + apply(case_tac "ca=src") + apply(clarsimp simp: modify_map_def) + subgoal by(fastforce split: if_split_asm) + apply(case_tac "ca = dest") + apply(simp) + apply(case_tac "next_slot src (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(simp) + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + apply(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x=src in allE)+ + subgoal by(fastforce) + apply(simp) + apply(case_tac "next_slot ca (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(simp) + apply(subgoal_tac "cte_at ca a") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule_tac p="cte_map src" in valid_mdbD1') + subgoal by(simp) + subgoal by(simp add: valid_mdb'_def valid_mdb_ctes_def) + subgoal by(clarsimp) + 
apply(clarsimp) + apply(case_tac z) + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(drule cte_map_inj_eq') + apply(simp_all)[2] + apply(erule_tac x="(aa, bb)" in allE)+ + subgoal by(fastforce) + apply (thin_tac "ctes_of t = t'" for t t')+ + apply (clarsimp simp: modify_map_apply) + apply (clarsimp simp: revokable_relation_def split: if_split) + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "mdbRevocable node = isCapRevocable c' (cteCap srcCTE)") + prefer 2 + apply (case_tac rv') + subgoal by (clarsimp simp add: const_def modify_map_def split: if_split_asm) + apply simp + apply (rule is_cap_revocable_eq, assumption, assumption) + apply (rule derived_sameRegionAs) + apply (drule(3) is_derived_eq[THEN iffD1,rotated -1]) + subgoal by (simp add: cte_wp_at_def) + apply assumption + apply assumption + subgoal by (clarsimp simp: cap_master_cap_simps cte_wp_at_def is_derived_def is_cap_simps + split:if_splits dest!:cap_master_cap_eqDs) + apply clarsimp + apply (case_tac srcCTE) + apply (case_tac rv') + apply clarsimp + apply (subgoal_tac "\cap' node'. ctes_of b (cte_map (aa,bb)) = Some (CTE cap' node')") + prefer 2 + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (case_tac z) + subgoal by clarsimp + apply clarsimp + apply (drule set_cap_caps_of_state_monad)+ + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + subgoal by (clarsimp simp: cte_wp_at_caps_of_state null_filter_def split: if_splits) + + apply clarsimp + apply (subgoal_tac "cte_at (aa,bb) a") + prefer 2 + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (subgoal_tac "mdbRevocable node = mdbRevocable node'") + subgoal by clarsimp + apply (subgoal_tac "cte_map (aa,bb) \ cte_map dest") + subgoal by (clarsimp simp: modify_map_def split: if_split_asm) + apply (erule (5) cte_map_inj) + apply (wp set_untyped_cap_full_valid_objs set_untyped_cap_as_full_valid_mdb + set_untyped_cap_as_full_cte_wp_at setUntypedCapAsFull_valid_cap + setUntypedCapAsFull_cte_wp_at | clarsimp simp: cte_wp_at_caps_of_state| wps)+ + apply (case_tac rv',clarsimp simp:cte_wp_at_ctes_of maskedAsFull_def) + apply (wp getCTE_wp' get_cap_wp)+ + apply clarsimp + subgoal by (fastforce elim: cte_wp_at_weakenE) + apply (clarsimp simp: cte_wp_at'_def) + apply (thin_tac "ctes_of s = t" for s t)+ + apply (thin_tac "pspace_relation s t" for s t)+ + apply (thin_tac "machine_state t = s" for s t)+ + apply (case_tac "srcCTE") + apply (rename_tac src_cap' src_node) + apply (case_tac "rv'") + apply (rename_tac dest_node) + apply (clarsimp simp: in_set_cap_cte_at_swp) + apply (subgoal_tac "cte_at src a \ is_derived (cdt a) src c src_cap") + prefer 2 + subgoal by (fastforce simp: cte_wp_at_def) + apply (erule conjE) + apply (subgoal_tac "mdb_insert (ctes_of b) (cte_map src) (maskedAsFull src_cap' c') src_node + (cte_map dest) NullCap dest_node") + prefer 2 + apply (rule mdb_insert.intro) + apply (rule mdb_ptr.intro) + subgoal by (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_ptr.intro) + subgoal by (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_insert_axioms.intro) + apply (rule refl) + apply assumption + apply assumption + apply assumption + apply assumption + apply (erule (5) cte_map_inj) + apply (frule mdb_insert_der.intro) + apply (rule 
mdb_insert_der_axioms.intro) + apply (simp add: is_derived_eq) + apply (simp (no_asm_simp) add: cdt_relation_def split: if_split) + apply (subgoal_tac "descendants_of dest (cdt a) = {}") + prefer 2 + apply (drule mdb_insert.dest_no_descendants) + subgoal by (fastforce simp add: cdt_relation_def simp del: split_paired_All) + apply (subgoal_tac "mdb_insert_abs (cdt a) src dest") + prefer 2 + apply (erule mdb_insert_abs.intro) + apply (rule mdb_None) + apply (erule(1) mdb_insert.descendants_not_dest) + apply assumption + apply assumption + apply assumption + apply (rule conjI) + apply (intro impI allI) + apply (unfold const_def) + apply (frule(4) iffD1[OF is_derived_eq]) + apply (drule_tac src_cap' = src_cap' in + maskedAsFull_revokable[where a = c',symmetric]) + apply simp + apply (subst mdb_insert_child.descendants) + apply (rule mdb_insert_child.intro) + apply simp + apply (rule mdb_insert_child_axioms.intro) + apply (subst can_be_is [symmetric]) + apply simp + apply (rule cap_relation_masked_as_full) + apply (simp+)[3] + apply simp + apply simp + apply (subst (asm) is_cap_revocable_eq, assumption, assumption) + apply (rule derived_sameRegionAs) + apply (subst is_derived_eq[symmetric], assumption, assumption, + assumption, assumption, assumption) + apply assumption + subgoal by (clarsimp simp: cte_wp_at_def is_derived_def is_cap_simps cap_master_cap_simps + dest!:cap_master_cap_eqDs) + apply (subgoal_tac "is_original_cap a src = mdbRevocable src_node") + prefer 2 + apply (simp add: revokable_relation_def del: split_paired_All) + apply (erule_tac x=src in allE) + apply (erule impE) + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state cap_master_cap_simps + split: if_splits dest!:cap_master_cap_eqDs) + subgoal by (clarsimp simp: masked_as_full_def is_cap_simps free_index_update_def split: if_splits) + subgoal by simp + subgoal by clarsimp + apply (subst mdb_insert_abs.descendants_child, assumption) + apply (frule_tac p=ca in in_set_cap_cte_at) + apply (subst descendants_of_eq') + prefer 2 + apply assumption + apply (simp_all)[6] + apply (simp add: cdt_relation_def split: if_split del: split_paired_All) + apply clarsimp + apply (drule (5) cte_map_inj)+ + apply simp + apply clarsimp + apply (subst mdb_insert_abs_sib.descendants, erule mdb_insert_abs_sib.intro) + apply (frule(4) iffD1[OF is_derived_eq]) + apply (drule_tac src_cap' = src_cap' in maskedAsFull_revokable[where a = c',symmetric]) + apply simp + apply (subst mdb_insert_sib.descendants) + apply (rule mdb_insert_sib.intro, assumption) + apply (rule mdb_insert_sib_axioms.intro) + apply (subst can_be_is [symmetric]) + apply simp + apply (rule cap_relation_masked_as_full) + apply (simp+)[3] + apply simp + apply simp + apply simp + apply (subst (asm) is_cap_revocable_eq, assumption, assumption) + apply (rule derived_sameRegionAs) + apply (subst is_derived_eq[symmetric], assumption, assumption, + assumption, assumption, assumption) + apply assumption + subgoal by (clarsimp simp: cte_wp_at_def is_derived_def is_cap_simps cap_master_cap_simps + dest!:cap_master_cap_eqDs) + apply (subgoal_tac "is_original_cap a src = mdbRevocable src_node") + subgoal by simp + apply (simp add: revokable_relation_def del: split_paired_All) + apply (erule_tac x=src in allE) + apply (erule impE) + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state split: if_splits) + subgoal by (clarsimp simp: masked_as_full_def is_cap_simps free_index_update_def split: if_splits) + subgoal by simp + apply (simp split: if_split) + apply (frule_tac p="(aa, 
bb)" in in_set_cap_cte_at) + apply (rule conjI) + apply (clarsimp simp: descendants_of_eq') + subgoal by (simp add: cdt_relation_def del: split_paired_All) + apply (clarsimp simp: descendants_of_eq') + subgoal by (simp add: cdt_relation_def del: split_paired_All) + done + + +declare if_split [split] + +lemma updateCap_no_0: + "\\s. no_0 (ctes_of s)\ updateCap cap ptr \\_ s. no_0 (ctes_of s)\" + apply (simp add: updateCap_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of no_0_def) + done + +lemma revokable_relation_prev [simp]: + "revokable_relation revo cs (modify_map m p (cteMDBNode_update (mdbPrev_update f))) = + revokable_relation revo cs m" + apply (rule iffI) + apply (clarsimp simp add: revokable_relation_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (clarsimp simp add: revokable_relation_def modify_map_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (case_tac z) + apply auto + done + +lemma revokable_relation_next [simp]: + "revokable_relation revo cs (modify_map m p (cteMDBNode_update (mdbNext_update f))) = + revokable_relation revo cs m" + apply (rule iffI) + apply (clarsimp simp add: revokable_relation_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (clarsimp simp add: revokable_relation_def modify_map_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (case_tac z) + apply auto + done + +lemma revokable_relation_cap [simp]: + "revokable_relation revo cs (modify_map m p (cteCap_update f)) = + revokable_relation revo cs m" + apply (rule iffI) + apply (clarsimp simp add: revokable_relation_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (clarsimp simp: modify_map_def split: if_split_asm) + apply (clarsimp simp add: revokable_relation_def modify_map_def) + apply (erule allE, erule allE, erule impE, erule exI) + apply (case_tac z) + apply auto + done + +lemma mdb_cap_update: + "cteMDBNode_update f (cteCap_update f' x) = + cteCap_update f' (cteMDBNode_update f x)" + by (cases x) simp + +lemmas modify_map_mdb_cap = + modify_map_com [where f="cteMDBNode_update f" and + g="cteCap_update f'" for f f', + OF mdb_cap_update] + +lemma prev_leadstoD: + "\ m \ mdbPrev node \ c; m p = Some (CTE cap node); + valid_dlist m; no_0 m \ \ + c = p" + by (fastforce simp add: next_unfold' valid_dlist_def Let_def no_0_def) + +lemma prev_leadstoI: + "\ m p = Some (CTE cap node); mdbPrev node \ 0; valid_dlist m\ + \ m \ mdbPrev node \ p" + by (fastforce simp add: valid_dlist_def Let_def next_unfold') + +lemma mdb_next_modify_prev: + "modify_map m x (cteMDBNode_update (mdbPrev_update f)) \ a \ b = m \ a \ b" + by (auto simp add: next_unfold' modify_map_def) + +lemma mdb_next_modify_revocable: + "modify_map m x (cteMDBNode_update (mdbRevocable_update f)) \ a \ b = m \ a \ b" + by (auto simp add: next_unfold' modify_map_def) + +lemma mdb_next_modify_cap: + "modify_map m x (cteCap_update f) \ a \ b = m \ a \ b" + by (auto simp add: next_unfold' modify_map_def) + +lemmas mdb_next_modify [simp] = + mdb_next_modify_prev + mdb_next_modify_revocable + mdb_next_modify_cap + +lemma in_getCTE: + "(cte, s') \ fst (getCTE p s) \ s' = s \ cte_wp_at' ((=) cte) p s" + apply (frule in_inv_by_hoareD [OF getCTE_inv]) + apply (drule use_valid [OF _ getCTE_cte_wp_at], rule TrueI) + apply (simp add: cte_wp_at'_def) + done + +lemma isMDBParentOf_eq_parent: + "\ isMDBParentOf c cte; + 
weak_derived' (cteCap c) (cteCap c'); + mdbRevocable (cteMDBNode c') = mdbRevocable (cteMDBNode c) \ + \ isMDBParentOf c' cte" + apply (cases c) + apply (cases c') + apply (cases cte) + apply clarsimp + apply (clarsimp simp: weak_derived'_def isMDBParentOf_CTE) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps) + done + +lemma isMDBParentOf_eq_child: + "\ isMDBParentOf cte c; + weak_derived' (cteCap c) (cteCap c'); + mdbFirstBadged (cteMDBNode c') = mdbFirstBadged (cteMDBNode c) \ + \ isMDBParentOf cte c'" + apply (cases c) + apply (cases c') + apply (cases cte) + apply clarsimp + apply (clarsimp simp: weak_derived'_def isMDBParentOf_CTE) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps) + done + +lemma isMDBParentOf_eq: + "\ isMDBParentOf c d; + weak_derived' (cteCap c) (cteCap c'); + mdbRevocable (cteMDBNode c') = mdbRevocable (cteMDBNode c); + weak_derived' (cteCap d) (cteCap d'); + mdbFirstBadged (cteMDBNode d') = mdbFirstBadged (cteMDBNode d) \ + \ isMDBParentOf c' d'" + apply (drule (2) isMDBParentOf_eq_parent) + apply (erule (2) isMDBParentOf_eq_child) + done + +lemma weak_derived_refl' [intro!, simp]: + "weak_derived' c c" + by (simp add: weak_derived'_def) + +lemma weak_derived_sym': + "weak_derived' c d \ weak_derived' d c" + by (clarsimp simp: weak_derived'_def isCap_simps) +end +locale mdb_swap = + mdb_ptr_src?: mdb_ptr m _ _ src src_cap src_node + + mdb_ptr_dest?: mdb_ptr m _ _ dest dest_cap dest_node + for m src src_cap src_node dest dest_cap dest_node + + + assumes neq: "src \ dest" + + fixes scap dcap + + assumes src_derived: "weak_derived' src_cap scap" + assumes dest_derived: "weak_derived' dest_cap dcap" + + fixes n' + defines "n' \ + modify_map + (modify_map + (modify_map + (modify_map m src (cteCap_update (\_. dcap))) + dest + (cteCap_update (\_. scap))) + (mdbPrev src_node) + (cteMDBNode_update (mdbNext_update (\_. dest)))) + (mdbNext src_node) + (cteMDBNode_update (mdbPrev_update (\_. dest)))" + + fixes dest2 + assumes dest2: "n' dest = Some dest2" + + fixes n + defines "n \ + (modify_map + (modify_map + (modify_map + (modify_map n' + src (cteMDBNode_update (const (cteMDBNode dest2)))) + dest (cteMDBNode_update (const src_node))) + (mdbPrev (cteMDBNode dest2)) (cteMDBNode_update (mdbNext_update (\_. src)))) + (mdbNext (cteMDBNode dest2)) (cteMDBNode_update (mdbPrev_update (\_. src))))" + +begin + +lemma no_0_n' [intro!]: "no_0 n'" by (auto simp: n'_def) +lemma no_0_n [intro!]: "no_0 n" by (auto simp: n_def) + +lemmas n_0_simps [iff] = no_0_simps [OF no_0_n] +lemmas n'_0_simps [iff] = no_0_simps [OF no_0_n'] + +lemmas neqs [simp] = neq neq [symmetric] + +lemma src: "m src = Some (CTE src_cap src_node)" .. +lemma dest: "m dest = Some (CTE dest_cap dest_node)" .. + +lemma src_prev: + "\ mdbPrev src_node = p; p \ 0\ \ \cap node. m p = Some (CTE cap node) \ mdbNext node = src" + using src + apply - + apply (erule dlistEp, simp) + apply (case_tac cte') + apply simp + done + +lemma src_next: + "\ mdbNext src_node = p; p \ 0\ \ \cap node. m p = Some (CTE cap node) \ mdbPrev node = src" + using src + apply - + apply (erule dlistEn, simp) + apply (case_tac cte') + apply simp + done + +lemma dest_prev: + "\ mdbPrev dest_node = p; p \ 0\ \ \cap node. m p = Some (CTE cap node) \ mdbNext node = dest" + using dest + apply - + apply (erule dlistEp, simp) + apply (case_tac cte') + apply simp + done + +lemma dest_next: + "\ mdbNext dest_node = p; p \ 0\ \ \cap node. 
m p = Some (CTE cap node) \ mdbPrev node = dest" + using dest + apply - + apply (erule dlistEn, simp) + apply (case_tac cte') + apply simp + done + +lemma next_dest_prev_src [simp]: + "(mdbNext dest_node = src) = (mdbPrev src_node = dest)" + apply (rule iffI) + apply (drule dest_next, simp) + apply (clarsimp simp: src) + apply (drule src_prev, simp) + apply (clarsimp simp: dest) + done + +lemmas next_dest_prev_src_sym [simp] = next_dest_prev_src [THEN x_sym] + +lemma prev_dest_next_src [simp]: + "(mdbPrev dest_node = src) = (mdbNext src_node = dest)" + apply (rule iffI) + apply (drule dest_prev, simp) + apply (clarsimp simp: src) + apply (drule src_next, simp) + apply (clarsimp simp: dest) + done + +lemmas prev_dest_next_src_sym [simp] = prev_dest_next_src [THEN x_sym] + +lemma revokable_n': + "\ n' p = Some (CTE cap node) \ \ + \cap' node'. m p = Some (CTE cap' node') \ mdbRevocable node = mdbRevocable node'" + by (fastforce simp add: n'_def elim!: modify_map_casesE) + +lemma badge_n': + "\ n' p = Some (CTE cap node) \ \ + \cap' node'. m p = Some (CTE cap' node') \ mdbFirstBadged node = mdbFirstBadged node'" + by (fastforce simp add: n'_def elim!: modify_map_casesE) + +lemma cteMDBNode_update_split_asm: + "P (cteMDBNode_update f cte) = (\ (\cap mdb. cte = CTE cap mdb \ \ P (CTE cap (f mdb))))" + by (cases cte, simp) + +lemma revokable: + "n p = Some (CTE cap node) \ + if p = src then mdbRevocable node = mdbRevocable dest_node + else if p = dest then mdbRevocable node = mdbRevocable src_node + else \cap' node'. m p = Some (CTE cap' node') \ + mdbRevocable node = mdbRevocable node'" + apply (drule sym) + apply (insert src dest dest2 [symmetric])[1] + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def const_def split: if_split_asm) + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def const_def split: if_split_asm) + apply (clarsimp simp: n_def) + apply (clarsimp simp add: modify_map_def map_option_case split: if_split_asm option.splits) + apply (auto split: cteMDBNode_update_split_asm elim: revokable_n' revokable_n'[OF sym]) + done + +lemma badge_n: + "n p = Some (CTE cap node) \ + if p = src then mdbFirstBadged node = mdbFirstBadged dest_node + else if p = dest then mdbFirstBadged node = mdbFirstBadged src_node + else \cap' node'. m p = Some (CTE cap' node') \ + mdbFirstBadged node = mdbFirstBadged node'" + apply (drule sym) + apply (insert src dest dest2 [symmetric])[1] + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def const_def split: if_split_asm) + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def const_def split: if_split_asm) + apply (clarsimp simp: n_def) + apply (clarsimp simp add: modify_map_def map_option_case split: if_split_asm option.splits) + apply (auto split: cteMDBNode_update_split_asm elim: badge_n' badge_n'[OF sym]) + done + +lemma n'_cap: + "n' p = Some (CTE cap node) \ + if p = src then cap = dcap + else if p = dest then cap = scap + else \node'. 
m p = Some (CTE cap node')" + apply clarsimp + apply (rule conjI) + apply (fastforce simp add: n'_def modify_map_cases) + apply clarsimp + apply (rule conjI) + apply (fastforce simp add: n'_def modify_map_cases) + apply clarsimp + apply (simp add: n'_def modify_map_cases) + apply fastforce + done + +lemma n_cap: + "n p = Some (CTE cap node) \ + if p = src then cap = dcap + else if p = dest then cap = scap + else \node'. m p = Some (CTE cap node')" + apply clarsimp + apply (rule conjI, clarsimp) + apply (drule sym) + apply (insert src dest dest2 [symmetric])[1] + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def split: if_split_asm) + apply clarsimp + apply (rule conjI, clarsimp) + apply (drule sym) + apply (insert src dest dest2 [symmetric])[1] + apply (simp add: n_def n'_def modify_map_apply) + apply (simp add: modify_map_def split: if_split_asm) + apply clarsimp + apply (insert src dest dest2) + apply (clarsimp simp: n_def modify_map_cases) + apply (auto dest: n'_cap) + done + +lemma dest2_cap [simp]: + "cteCap dest2 = scap" + using dest2 by (cases dest2) (simp add: n'_cap) + +lemma n'_next: + "n' p = Some (CTE cap node) \ + if p = mdbPrev src_node then mdbNext node = dest + else \cap' node'. m p = Some (CTE cap' node') \ mdbNext node = mdbNext node'" + apply (simp add: n'_def) + apply (rule conjI) + apply clarsimp + apply (simp add: modify_map_cases) + apply clarsimp + apply clarsimp + apply (auto simp add: modify_map_cases) + done + +lemma dest2_next: + "mdbNext (cteMDBNode dest2) = + (if dest = mdbPrev src_node then dest else mdbNext dest_node)" + using dest2 dest by (cases dest2) (clarsimp dest!: n'_next) + +lemma n'_prev: + "n' p = Some (CTE cap node) \ + if p = mdbNext src_node then mdbPrev node = dest + else \cap' node'. 
m p = Some (CTE cap' node') \ mdbPrev node = mdbPrev node'" + apply (simp add: n'_def) + apply (rule conjI) + apply clarsimp + apply (simp add: modify_map_cases) + apply clarsimp + apply clarsimp + by (auto simp add: modify_map_cases) + +lemma dest2_prev: + "mdbPrev (cteMDBNode dest2) = + (if dest = mdbNext src_node then dest else mdbPrev dest_node)" + using dest2 dest by (cases dest2) (clarsimp dest!: n'_prev) + +lemma dest2_rev [simp]: + "mdbRevocable (cteMDBNode dest2) = mdbRevocable dest_node" + using dest2 dest by (cases dest2) (clarsimp dest!: revokable_n') + +lemma dest2_bdg [simp]: + "mdbFirstBadged (cteMDBNode dest2) = mdbFirstBadged dest_node" + using dest2 dest by (cases dest2) (clarsimp dest!: badge_n') + +definition + "dest2_node \ MDB (if dest = mdbPrev src_node then dest else mdbNext dest_node) + (if dest = mdbNext src_node then dest else mdbPrev dest_node) + (mdbRevocable dest_node) + (mdbFirstBadged dest_node)" + +lemma dest2_parts [simp]: + "dest2 = CTE scap dest2_node" + unfolding dest2_node_def + apply (subst dest2_prev [symmetric]) + apply (subst dest2_next [symmetric]) + apply (subst dest2_rev [symmetric]) + apply (subst dest2_bdg [symmetric]) + apply (subst dest2_cap [symmetric]) + apply (cases dest2) + apply (rename_tac mdbnode) + apply (case_tac mdbnode) + apply (simp del: dest2_cap) + done + +lemma prev_dest_src [simp]: + "(mdbPrev dest_node = mdbPrev src_node) = (mdbPrev dest_node = 0 \ mdbPrev src_node = 0)" + apply (subst mdb_ptr.p_prev_eq) + apply (rule mdb_ptr_axioms) + apply rule + apply simp + done + +lemmas prev_dest_src_sym [simp] = prev_dest_src [THEN x_sym] + +lemma next_dest_src [simp]: + "(mdbNext dest_node = mdbNext src_node) = (mdbNext dest_node = 0 \ mdbNext src_node = 0)" + apply (subst mdb_ptr.p_next_eq) + apply (rule mdb_ptr_axioms) + apply rule + apply simp + done + +lemmas next_dest_src_sym [simp] = next_dest_src [THEN x_sym] + +definition + s_d_swp :: "machine_word \ machine_word" +where + "s_d_swp p \ s_d_swap p src dest" + +declare s_d_swp_def [simp] + + +lemma n_exists: + "m p = Some (CTE cap node) \ \cap' node'. n p = Some (CTE cap' node')" + apply (simp add: n_def n'_def) + apply (intro modify_map_exists) + apply simp + done + +lemma m_exists: + "n p = Some (CTE cap node) \ \cap' node'. m p = Some (CTE cap' node')" + apply (simp add: n_def n'_def) + apply (drule modify_map_exists_rev, clarsimp)+ + done + +lemma next_src_node [simp]: + "(m (mdbNext src_node) = Some (CTE cap src_node)) = False" + apply clarsimp + apply (subgoal_tac "m \ mdbNext src_node \ mdbNext src_node") + apply simp + apply (simp add: mdb_next_unfold) + done + +lemma mdbNext_update_self [simp]: + "(mdbNext_update (\_. 
x) node = node) = (mdbNext node = x)" + by (cases node) auto + +lemmas p_next_eq_src = mdb_ptr_src.p_next_eq + +lemma next_m_n: + shows "m \ p \ p' = n \ s_d_swp p \ s_d_swp p'" + using src dest + apply (simp add: n_def n'_def modify_map_mdb_cap const_def) + apply (simp add: s_d_swap_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (clarsimp simp: mdb_next_unfold modify_map_cases dest2_node_def + split: if_split_asm) + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: mdb_next_unfold modify_map_cases) + apply (auto simp add: dest2_node_def split: if_split_asm)[1] + apply clarsimp + apply (simp add: mdb_next_unfold modify_map_cases) + apply (simp add: dest2_node_def const_def) + apply (intro impI) + apply (rule conjI, clarsimp) + apply (rule iffI) + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (clarsimp simp: mdb_next_unfold modify_map_cases dest2_node_def) + apply (rule conjI) + apply clarsimp + apply (rule_tac x="CTE dest_cap (mdbNext_update (\_. src) src_node)" + in exI) + apply simp + apply (rule_tac x=dest_node in exI) + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: mdb_next_unfold modify_map_cases dest2_node_def + split: if_split_asm) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold modify_map_cases dest2_node_def) + apply (rule conjI, clarsimp) + apply clarsimp + apply (rule iffI) + apply clarsimp + apply (rule_tac x="CTE dest_cap src_node" in exI) + apply simp + apply (case_tac "mdbPrev src_node = dest") + apply clarsimp + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: mdb_next_unfold modify_map_cases dest2_node_def) + apply (rule conjI, clarsimp) + apply (case_tac "m p", simp) + apply (case_tac a) + apply (rename_tac cap node) + apply clarsimp + apply (frule p_next_eq_src [where p'=p]) + apply simp + apply (case_tac "mdbNext src_node = 0", simp) + apply simp + apply (rule allI) + apply (rule disjCI2) + apply simp + apply (erule disjE) + apply clarsimp + apply (rule disjCI2) + apply (clarsimp del: notI) + apply (rule notI) + apply (erule dlistEn [where p=p]) + apply clarsimp + apply clarsimp + apply clarsimp + apply (case_tac "m p", simp) + apply (case_tac a) + apply (rename_tac cap node) + apply clarsimp + apply (case_tac "mdbPrev dest_node = p") + apply simp + apply (frule dest_prev, clarsimp) + apply (elim exE conjE) + apply simp + apply (case_tac "mdbNext src_node = p") + apply fastforce + apply fastforce + apply (subgoal_tac "dest \ mdbNext node") + prefer 2 + apply (rule notI) + apply (erule dlistEn [where p=p]) + apply clarsimp + apply clarsimp + apply simp + apply (rule allI) + apply (cases "mdbNext src_node = p") + apply simp + apply (subgoal_tac "mdbPrev src_node \ p") + prefer 2 + apply clarsimp + apply simp + apply (subgoal_tac "src \ mdbNext node") + apply clarsimp + apply (rule notI) + apply (erule dlistEn [where p=p]) + apply clarsimp + apply clarsimp + apply simp + apply (subgoal_tac "src \ mdbPrev node") + prefer 2 + apply (rule notI) + apply (erule dlistEp [where p=p]) + apply clarsimp + apply clarsimp + apply (rule disjCI2) + apply simp + apply (erule disjE) + apply clarsimp + apply simp + apply (rule disjCI) + apply simp + apply (erule dlistEn [where p=p]) + apply clarsimp + apply clarsimp + apply clarsimp + apply (rule conjI, clarsimp) + apply (simp add: mdb_next_unfold modify_map_cases dest2_node_def) + apply (case_tac "m p", 
simp) + apply (case_tac a) + apply (rename_tac cap node) + apply simp + apply (rule conjI) + apply (rule impI) + apply simp + apply (rule iffI) + apply simp + apply (rule dlistEn [where p=p], assumption, clarsimp) + apply clarsimp + apply (elim exE conjE) + apply (case_tac "mdbPrev src_node = p") + apply simp + apply (drule src_prev, clarsimp) + apply clarsimp + apply clarsimp + apply (drule p_next_eq_src [where p'=p]) + apply simp + apply clarsimp + apply (rule iffI) + apply simp + apply (subgoal_tac "mdbPrev src_node = p") + prefer 2 + apply (erule dlistEn [where p=p], clarsimp) + apply clarsimp + apply fastforce + apply (elim exE conjE) + apply simp + apply (case_tac "mdbPrev dest_node = p") + apply (frule dest_prev) + apply clarsimp + apply hypsubst_thin + apply clarsimp + apply simp + apply (case_tac "mdbNext src_node = p") + apply simp + apply (elim exE conjE) + apply (frule src_next, clarsimp) + apply simp + apply (case_tac "mdbPrev src_node = p") + apply clarsimp + apply (subgoal_tac "mdbNext (cteMDBNode z) = mdbNext node") + prefer 2 + apply (case_tac nodea) + apply (case_tac z) + apply (rename_tac capability mdbnode) + apply (case_tac mdbnode) + apply clarsimp + apply simp + apply (rule dlistEn [where p=p], assumption, clarsimp) + apply clarsimp + apply simp + apply (case_tac "mdbPrev src_node = p") + apply simp + apply (frule src_prev, clarsimp) + apply simp + apply simp + apply (rule dlistEn [where p=p], assumption, clarsimp) + apply clarsimp + apply clarsimp + apply (simp add: mdb_next_unfold modify_map_cases dest2_node_def) + apply (rule conjI) + apply (rule impI) + apply simp + apply (case_tac "m p", simp) + apply (case_tac a) + apply (rename_tac cap node) + apply simp + apply (case_tac "mdbPrev src_node \ p") + apply simp + apply simp + apply (frule src_prev, clarsimp) + apply simp + apply clarsimp + apply (case_tac "m p", simp) + apply (case_tac a) + apply (rename_tac cap node) + apply simp + apply (case_tac "mdbPrev dest_node = p") + apply simp + apply (frule dest_prev, clarsimp) + apply clarsimp + apply simp + apply (case_tac "mdbPrev src_node = p") + apply simp + apply (frule src_prev, clarsimp) + apply fastforce + apply simp + apply (case_tac "mdbNext src_node = p") + apply simp + apply simp + done + +lemma n_next: + "n p = Some (CTE cap node) \ + if p = dest then + (if mdbNext src_node = dest then mdbNext node = src + else mdbNext node = mdbNext src_node) + else if p = src then + (if mdbNext dest_node = src then mdbNext node = dest + else mdbNext node = mdbNext dest_node) + else if p = mdbPrev src_node then mdbNext node = dest + else if p = mdbPrev dest_node then mdbNext node = src + else \cap' node'. 
m p = Some (CTE cap' node') \ mdbNext node = mdbNext node'" + apply (simp add: n_def del: dest2_parts split del: if_split) + apply (simp only: dest2_next dest2_prev split del: if_split) + apply (simp add: dest2_node_def split del: if_split) + apply (simp add: n'_def const_def cong: if_cong split del: if_split) + apply(case_tac "p=dest") + apply(clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(case_tac "p=src") + apply(simp split del: if_split) + apply(clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(case_tac "p=mdbPrev src_node") + apply(simp split del: if_split) + apply(clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(fastforce) + apply(fastforce) + apply(case_tac "p=mdbPrev dest_node") + apply(simp split del: if_split) + apply(clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(fastforce) + apply(simp split del: if_split) + apply (clarsimp simp: modify_map_cases const_def split: if_split_asm) + apply(fastforce)+ + done + +lemma parent_of_m_n: + "m \ p parentOf c = + n \ s_d_swp p parentOf s_d_swp c" + apply (clarsimp simp add: parentOf_def) + apply (rule iffI) + apply clarsimp + apply (case_tac cte, case_tac cte') + apply (rename_tac cap0 node0 cap1 node1) + apply clarsimp + apply (subgoal_tac "\cap0' node0'. n (s_d_swap c src dest) = Some (CTE cap0' node0')") + prefer 2 + apply (simp add: s_d_swap_def) + apply (fastforce intro: n_exists) + apply (subgoal_tac "\cap1' node1'. n (s_d_swap p src dest) = Some (CTE cap1' node1')") + prefer 2 + apply (simp add: s_d_swap_def) + apply (fastforce intro: n_exists) + apply clarsimp + apply (insert src_derived dest_derived)[1] + apply (erule isMDBParentOf_eq) + apply simp + apply (drule n_cap)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule revokable)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule n_cap)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule badge_n)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply clarsimp + apply (case_tac cte, case_tac cte') + apply (rename_tac cap0 node0 cap1 node1) + apply clarsimp + apply (subgoal_tac "\cap0' node0' cap1' node1'. 
+ m c = Some (CTE cap0' node0') \ + m p = Some (CTE cap1' node1')") + prefer 2 + apply (drule m_exists)+ + apply clarsimp + apply (simp add: s_d_swap_def src dest split: if_split_asm) + apply clarsimp + apply (insert src_derived dest_derived)[1] + apply (erule isMDBParentOf_eq) + apply simp + apply (rule weak_derived_sym') + apply (drule n_cap)+ + apply (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule revokable)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (rule weak_derived_sym') + apply (drule n_cap)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + apply simp + apply (drule badge_n)+ + subgoal by (simp add: s_d_swap_def src dest split: if_split_asm) + done + +lemma parency_m_n: + assumes "m \ p \ p'" + shows "n \ s_d_swp p \ s_d_swp p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (subst (asm) next_m_n, assumption) + apply simp + apply (subst (asm) parent_of_m_n, assumption) + done +next + case (trans_parent c c') + thus ?case + apply - + apply (erule subtree.trans_parent) + apply (subst (asm) next_m_n, assumption) + apply simp + apply (subst (asm) parent_of_m_n, assumption) + done +qed + +lemma parency_n_m: + assumes "n \ p \ p'" + shows "m \ s_d_swp p \ s_d_swp p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply - + apply (rule subtree.direct_parent) + apply (subst next_m_n, simp) + apply simp + apply (subst parent_of_m_n, simp) + done +next + case (trans_parent c c') + thus ?case + apply - + apply (erule subtree.trans_parent) + apply (subst next_m_n, simp) + apply simp + apply (subst parent_of_m_n, simp) + done +qed + +lemma parency: + "n \ p \ p' = m \ s_d_swp p \ s_d_swp p'" + apply (rule iffI) + apply (erule parency_n_m) + apply (drule parency_m_n) + apply simp + done + +lemma descendants: + "descendants_of' p n = + (let swap = \S. 
S - {src,dest} \ + (if src \ S then {dest} else {}) \ + (if dest \ S then {src} else {}) in + swap (descendants_of' (s_d_swp p) m))" + apply (simp add: Let_def parency descendants_of'_def s_d_swap_def) + apply auto + done + +end + +lemma inj_on_descendants_cte_map: + "\ valid_mdb s; + valid_objs s; pspace_distinct s; pspace_aligned s \ \ + inj_on cte_map (descendants_of p (cdt s))" + apply (clarsimp simp add: inj_on_def) + apply (drule (1) descendants_of_cte_at)+ + apply (drule (5) cte_map_inj_eq) + apply simp + done + +lemmas revokable_relation_simps [simp del] = + revokable_relation_cap revokable_relation_next revokable_relation_prev + +declare if_split [split del] + +(* +lemma corres_bind_ext: +"corres_underlying srel nf rrel G G' g (g') \ +corres_underlying srel nf rrel G G' (do do_extended_op (return ()); g od) (g')" + apply (simp add: corres_underlying_def do_extended_op_def return_def gets_def get_def put_def bind_def select_f_def modify_def mk_ef_def wrap_ext_op_det_ext_ext_def wrap_ext_op_unit_def) + done +*) + +(* consider putting in AINVS or up above cteInsert_corres *) +lemma next_slot_eq: + "\next_slot p t' m' = x; t' = t; m' = m\ \ next_slot p t m = x" + by simp + +lemma inj_on_image_set_diff15 : (* for compatibility of assumptions *) + "\inj_on f C; A \ C; B \ C\ \ f ` (A - B) = f ` A - f ` B" +by (rule inj_on_image_set_diff; auto) + +lemma cteSwap_corres: + assumes srcdst: "src' = cte_map src" "dest' = cte_map dest" + assumes scr: "cap_relation scap scap'" + assumes dcr: "cap_relation dcap dcap'" + assumes wf_caps: "wellformed_cap scap" "wellformed_cap dcap" + notes trans_state_update'[symmetric,simp] + shows "corres dc + (valid_objs and pspace_aligned and pspace_distinct and + valid_mdb and valid_list and + (\s. cte_wp_at (weak_derived scap) src s \ + cte_wp_at (weak_derived dcap) dest s \ + src \ dest \ (\cap. tcb_cap_valid cap src s) + \ (\cap. tcb_cap_valid cap dest s))) + (valid_mdb' and pspace_aligned' and pspace_distinct' and + (\s. cte_wp_at' (weak_derived' scap' o cteCap) src' s \ + cte_wp_at' (weak_derived' dcap' o cteCap) dest' s)) + (cap_swap scap src dcap dest) (cteSwap scap' src' dcap' dest')" + (is "corres _ ?P ?P' _ _") using assms including no_pre + supply None_upd_eq[simp del] + apply (unfold cap_swap_def cteSwap_def) + apply (cases "src=dest") + apply (rule corres_assume_pre) + apply simp + apply (rule corres_assume_pre) + apply (subgoal_tac "cte_map src \ cte_map dest") + prefer 2 + apply (erule cte_map_inj) + apply (fastforce simp: cte_wp_at_def) + apply (fastforce simp: cte_wp_at_def) + apply simp + apply simp + apply simp + apply (thin_tac "t : state_relation" for t) + apply (thin_tac "(P and (\s. Q s)) s" for Q P) + apply (thin_tac "(P and (\s. 
Q s)) s'" for Q P) + apply clarsimp + apply (rule corres_symb_exec_r) + prefer 2 + apply (rule getCTE_sp) + defer + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at'_def) + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply (wp hoare_weak_lift_imp getCTE_wp' updateCap_no_0 updateCap_ctes_of_wp| + simp add: cte_wp_at_ctes_of)+ + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def modify_map_exists_eq) + apply (rule conjI) + apply clarsimp + apply (erule (2) valid_dlistEp) + apply simp + apply (rule conjI) + apply clarsimp + apply (erule (2) valid_dlistEn) + apply simp + apply clarsimp + apply (case_tac cte) + apply (rename_tac cap node) + apply (case_tac cte1) + apply (rename_tac src_cap src_node) + apply (case_tac ctea) + apply (rename_tac dest_cap dest_node) + apply clarsimp + apply (rule conjI, clarsimp) + apply (subgoal_tac "mdbPrev node = mdbNext src_node \ + mdbPrev node = mdbPrev dest_node") + apply (erule disjE) + apply simp + apply (erule (1) valid_dlistEn, simp) + apply simp + apply (erule_tac p="cte_map dest" in valid_dlistEp, assumption, simp) + apply simp + apply (auto simp: modify_map_if split: if_split_asm)[1] + apply clarsimp + apply (subgoal_tac "mdbNext node = mdbPrev src_node \ + mdbNext node = mdbNext dest_node") + apply (erule disjE) + apply simp + apply (erule (1) valid_dlistEp, simp) + apply simp + apply (erule_tac p="cte_map dest" in valid_dlistEn, assumption, simp) + apply simp + apply (auto simp: modify_map_if split: if_split_asm)[1] + apply (clarsimp simp: corres_underlying_def in_monad + state_relation_def) + apply (clarsimp simp: valid_mdb'_def) + apply (drule(1) pspace_relationsD) + apply (drule (12) set_cap_not_quite_corres) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption+ + apply (rule refl) + apply (elim exE conjE) + apply (rule bind_execI, assumption) + apply (drule updateCap_stuff, elim conjE, erule(1) impE) + apply (subgoal_tac "valid_objs t \ pspace_distinct t \ pspace_aligned t \ cte_at dest t") + prefer 2 + apply (rule conjI) + apply (erule use_valid, rule set_cap_valid_objs) + apply simp + apply (drule_tac p=dest in cte_wp_at_norm, clarsimp) + apply (drule (1) cte_wp_valid_cap) + apply (erule (2) weak_derived_valid_cap) + apply (fastforce elim: use_valid [OF _ set_cap_aligned] + use_valid [OF _ set_cap_cte_at] + use_valid [OF _ set_cap_distinct] + cte_wp_at_weakenE) + apply (elim conjE) + apply (drule (14) set_cap_not_quite_corres) + apply simp + apply assumption+ + apply (rule refl) + apply (elim exE conjE) + apply (rule bind_execI, assumption) + apply (clarsimp simp: exec_gets) + apply (clarsimp simp: set_cdt_def bind_assoc) + + apply (clarsimp simp: set_original_def bind_assoc exec_get exec_put exec_gets modify_def cap_swap_ext_def + update_cdt_list_def set_cdt_list_def simp del: fun_upd_apply + | rule refl | clarsimp simp: put_def simp del: fun_upd_apply )+ + apply (simp cong: option.case_cong) + apply (drule updateCap_stuff, elim conjE, erule(1) impE) + apply (drule (2) updateMDB_the_lot') + apply (erule (1) impE, assumption) + apply (fastforce simp only: no_0_modify_map) + apply assumption + apply (elim conjE TrueE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply (drule in_getCTE, elim conjE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply (elim conjE TrueE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply 
(elim conjE TrueE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply (elim conjE TrueE, simp only:) + apply (drule (2) updateMDB_the_lot', fastforce, simp only: no_0_modify_map, assumption) + apply (simp only: pspace_relations_def refl) + apply (rule conjI, rule TrueI)+ + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply (thin_tac "machine_state t = p" for t p)+ + apply (thin_tac "cur_thread t = p" for t p)+ + apply (thin_tac "ksDomScheduleIdx t = p" for t p)+ + apply (thin_tac "ksDomSchedule t = p" for t p)+ + apply (thin_tac "ksCurDomain t = p" for t p)+ + apply (thin_tac "ksDomainTime t = p" for t p)+ + apply (simp only: simp_thms no_0_modify_map) + apply (clarsimp simp: cte_wp_at_ctes_of cong: if_cong) + apply (thin_tac "ctes_of x = y" for x y)+ + apply (case_tac cte1) + apply (rename_tac src_cap src_node) + apply (case_tac cte) + apply (rename_tac dest_cap dest_node) + apply clarsimp + apply (subgoal_tac "mdb_swap (ctes_of b) (cte_map src) src_cap src_node + (cte_map dest) dest_cap dest_node scap' dcap' cte2") + prefer 2 + apply (rule mdb_swap.intro) + apply (rule mdb_ptr.intro) + apply (erule vmdb.intro) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_ptr.intro) + apply (erule vmdb.intro) + apply (erule mdb_ptr_axioms.intro) + apply (erule mdb_swap_axioms.intro) + apply (erule weak_derived_sym') + apply (erule weak_derived_sym') + apply assumption + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply (thin_tac "ready_queues t = p" for t p)+ + apply (thin_tac "cur_domain t = p" for t p)+ + apply (thin_tac "ksDomScheduleIdx t = p" for t p)+ + apply (thin_tac "ksDomSchedule t = p" for t p)+ + apply (thin_tac "ksCurDomain t = p" for t p)+ + apply (thin_tac "ksDomainTime t = p" for t p)+ + apply (thin_tac "idle_thread t = p" for t p)+ + apply (thin_tac "work_units_completed t = p" for t p)+ + apply (thin_tac "domain_index t = p" for t p)+ + apply (thin_tac "domain_list t = p" for t p)+ + apply (thin_tac "domain_time t = p" for t p)+ + apply (thin_tac "ekheap t = p" for t p)+ + apply (thin_tac "scheduler_action t = p" for t p)+ + apply (thin_tac "ksArchState t = p" for t p)+ + apply (thin_tac "gsCNodes t = p" for t p)+ + apply (thin_tac "ksWorkUnitsCompleted t = p" for t p)+ + apply (thin_tac "ksInterruptState t = p" for t p)+ + apply (thin_tac "ksIdleThread t = p" for t p)+ + apply (thin_tac "gsUserPages t = p" for t p)+ + apply (thin_tac "pspace_relation s s'" for s s')+ + apply (thin_tac "ekheap_relation e p" for e p)+ + apply (thin_tac "interrupt_state_relation n s s'" for n s s')+ + apply (thin_tac "(s,s') \ arch_state_relation" for s s')+ + apply (rule conjI) + subgoal by (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv AARCH64.data_at_def) + apply(subst conj_assoc[symmetric]) + apply (rule conjI) + prefer 2 + apply (clarsimp simp add: revokable_relation_def in_set_cap_cte_at + simp del: split_paired_All) + apply (drule set_cap_caps_of_state_monad)+ + apply (simp del: split_paired_All split: if_split) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_caps_of_state simp del: split_paired_All) + apply (drule(1) mdb_swap.revokable) + apply (erule_tac x="dest" in allE) + apply (erule impE) + subgoal 
by (clarsimp simp: null_filter_def weak_derived_Null split: if_splits) + apply simp + apply (clarsimp simp del: split_paired_All) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_caps_of_state simp del: split_paired_All) + apply (drule (1) mdb_swap.revokable) + apply (subgoal_tac "cte_map (aa,bb) \ cte_map src") + apply (simp del: split_paired_All) + apply (erule_tac x="src" in allE) + apply (erule impE) + subgoal by (clarsimp simp: null_filter_def weak_derived_Null split: if_splits) + subgoal by simp + apply (drule caps_of_state_cte_at)+ + apply (erule (5) cte_map_inj) + apply (clarsimp simp: cte_wp_at_caps_of_state simp del: split_paired_All) + apply (drule (1) mdb_swap.revokable) + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + subgoal by (clarsimp simp: null_filter_def split: if_splits) + apply clarsimp + apply (subgoal_tac "cte_map (aa,bb) \ cte_map src") + apply (subgoal_tac "cte_map (aa,bb) \ cte_map dest") + subgoal by (clarsimp simp del: split_paired_All) + apply (drule caps_of_state_cte_at)+ + apply (drule null_filter_caps_of_stateD)+ + apply (erule cte_map_inj, erule cte_wp_cte_at, assumption+) + apply (drule caps_of_state_cte_at)+ + apply (drule null_filter_caps_of_stateD)+ + apply (erule cte_map_inj, erule cte_wp_cte_at, assumption+) + apply (subgoal_tac "no_loops (ctes_of b)") + prefer 2 + subgoal by (simp add: valid_mdb_ctes_def mdb_chain_0_no_loops) + apply (subgoal_tac "mdb_swap_abs (cdt a) src dest a") + prefer 2 + apply (erule mdb_swap_abs.intro) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (rule refl) + apply assumption + apply (frule mdb_swap_abs''.intro) + apply (drule_tac t="cdt_list (a)" in mdb_swap_abs'.intro) + subgoal by (simp add: mdb_swap_abs'_axioms_def) + apply (thin_tac "modify_map m f p p' = t" for m f p p' t) + apply(rule conjI) + apply (simp add: cdt_relation_def del: split_paired_All) + apply (intro allI impI) + apply (subst mdb_swap.descendants, assumption) + apply (subst mdb_swap_abs.descendants, assumption) + apply (simp add: mdb_swap_abs.s_d_swp_def mdb_swap.s_d_swp_def + del: split_paired_All) + apply (subst image_Un)+ + apply (subgoal_tac "cte_at (s_d_swap c src dest) a") + prefer 2 + apply (simp add: s_d_swap_def split: if_split) + apply (rule conjI, clarsimp simp: cte_wp_at_caps_of_state) + apply (rule impI, rule conjI, clarsimp simp: cte_wp_at_caps_of_state) + apply (fastforce dest: in_set_cap_cte_at) + apply (subgoal_tac "s_d_swap (cte_map c) (cte_map src) (cte_map dest) = + cte_map (s_d_swap c src dest)") + prefer 2 + apply (simp add: s_d_swap_def split: if_splits) + apply (drule cte_map_inj, + erule cte_wp_at_weakenE, rule TrueI, + erule cte_wp_at_weakenE, rule TrueI, + assumption+)+ + apply simp + apply (subgoal_tac "descendants_of' (cte_map (s_d_swap c src dest)) (ctes_of b) = + cte_map ` descendants_of (s_d_swap c src dest) (cdt a)") + prefer 2 + apply (simp del: split_paired_All) + apply simp + apply (simp split: if_split) + apply (frule_tac p="s_d_swap c src dest" in inj_on_descendants_cte_map, assumption+) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (subst inj_on_image_set_diff15, assumption) + apply (rule subset_refl) + apply simp + apply simp + apply clarsimp + apply (rule conjI, clarsimp) + apply (drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule (1) descendants_of_cte_at) + apply assumption+ + apply simp + apply (subst insert_minus_eq, assumption) + apply clarsimp + apply (subst 
insert_minus_eq [where x="cte_map dest"], assumption) + apply (subst inj_on_image_set_diff15) + apply (erule (3) inj_on_descendants_cte_map) + apply (rule subset_refl) + apply clarsimp + subgoal by auto + apply clarsimp + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule (1) descendants_of_cte_at) + apply assumption+ + apply simp + apply clarsimp + apply (subst inj_on_image_set_diff15) + apply (erule (3) inj_on_descendants_cte_map) + apply (rule subset_refl) + apply clarsimp + apply simp + apply clarsimp + apply (rule conjI, clarsimp) + apply (drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule (1) descendants_of_cte_at) + apply assumption+ + apply simp + apply clarsimp + apply (drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule (1) descendants_of_cte_at) + apply assumption+ + apply simp + apply(clarsimp simp: cdt_list_relation_def) + apply(subst next_slot_eq[OF mdb_swap_abs'.next_slot]) + apply(assumption) + apply(fastforce split: option.split) + apply(simp) + apply(frule finite_depth) + apply(frule mdb_swap.n_next) + apply(simp) + apply(case_tac "(aa, bb)=src") + apply(case_tac "next_slot dest (cdt_list (a)) (cdt a) = Some src") + apply(simp) + apply(erule_tac x="fst dest" in allE, erule_tac x="snd dest" in allE) + apply(simp) + apply(simp) + apply(case_tac "next_slot dest (cdt_list (a)) (cdt a)") + apply(simp) + apply(simp) + apply(erule_tac x="fst dest" in allE, erule_tac x="snd dest" in allE) + apply(simp) + apply(subgoal_tac "mdbNext dest_node \ cte_map src") + apply(simp) + apply(simp) + apply(rule_tac s=a in cte_map_inj) + apply(simp) + apply(rule cte_at_next_slot') + apply(simp) + apply(simp) + apply(simp) + apply(simp) + apply(erule cte_wp_at_weakenE, rule TrueI) + apply(simp_all)[3] + apply(case_tac "(aa, bb)=dest") + apply(case_tac "next_slot src (cdt_list (a)) (cdt a) = Some dest") + apply(simp) + apply(erule_tac x="fst src" in allE, erule_tac x="snd src" in allE) + apply(simp) + apply(simp) + apply(case_tac "next_slot src (cdt_list (a)) (cdt a)") + apply(simp) + apply(simp) + apply(erule_tac x="fst src" in allE, erule_tac x="snd src" in allE) + apply(simp) + apply(subgoal_tac "mdbNext src_node \ cte_map dest") + apply(simp) + apply(simp) + apply(rule_tac s=a in cte_map_inj) + apply(simp) + apply(rule cte_at_next_slot') + apply(simp) + apply(simp) + apply(simp) + apply(simp) + apply(erule cte_wp_at_weakenE, rule TrueI) + apply(simp_all)[3] + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a) = Some src") + apply(simp) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(simp) + apply(subgoal_tac "cte_at (aa, bb) a") + apply(subgoal_tac "cte_map (aa, bb) \ cte_map dest \ + cte_map (aa, bb) \ cte_map src \ + cte_map (aa, bb) = mdbPrev src_node") + apply(clarsimp) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(frule mdb_swap.m_exists) + apply(simp) + apply(clarsimp) + apply(frule_tac cte="CTE cap' node'" in valid_mdbD1') + apply(clarsimp) + apply(simp add: valid_mdb'_def) + apply(clarsimp) + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a) = Some dest") + apply(simp) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(simp) + apply(subgoal_tac "cte_at (aa, 
bb) a") + apply(subgoal_tac "cte_map (aa, bb) \ cte_map dest \ + cte_map (aa, bb) \ cte_map src \ + cte_map (aa, bb) = mdbPrev dest_node") + apply(subgoal_tac "cte_map (aa, bb) \ mdbPrev src_node") + apply(clarsimp) + apply(clarsimp simp: mdb_swap.prev_dest_src) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(frule mdb_swap.m_exists) + apply(simp) + apply(clarsimp) + apply(frule_tac cte="CTE cap' node'" in valid_mdbD1') + apply(clarsimp) + apply(simp add: valid_mdb'_def) + apply(clarsimp) + apply(rule cte_at_next_slot) + apply(simp_all)[4] + apply(simp) + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a)") + apply(simp) + apply(clarsimp) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(simp) + apply(subgoal_tac "cte_at (aa, bb) a") + apply(subgoal_tac "cte_map (aa, bb) \ cte_map dest \ + cte_map (aa, bb) \ cte_map src \ + cte_map (aa, bb) \ mdbPrev src_node \ + cte_map (aa, bb) \ mdbPrev dest_node") + apply(clarsimp) + apply(rule conjI) + apply(rule cte_map_inj) + apply(simp_all)[6] + apply(erule cte_wp_at_weakenE, simp) + apply(rule conjI) + apply(rule cte_map_inj) + apply simp_all[6] + apply(erule cte_wp_at_weakenE, simp) + apply(rule conjI) + apply(frule mdb_swap.m_exists) + apply(simp) + apply(clarsimp) + apply(frule_tac cte="CTE src_cap src_node" in valid_mdbD2') + subgoal by (clarsimp) + apply(simp add: valid_mdb'_def) + apply(clarsimp) + apply(drule cte_map_inj_eq) + apply(rule cte_at_next_slot') + apply(simp_all)[9] + apply(erule cte_wp_at_weakenE, simp) + apply(frule mdb_swap.m_exists) + apply(simp) + apply(clarsimp) + apply(frule_tac cte="CTE dest_cap dest_node" in valid_mdbD2') + apply(clarsimp) + apply(simp add: valid_mdb'_def) + apply(clarsimp) + apply(drule cte_map_inj_eq) + apply(rule cte_at_next_slot') + apply(simp_all)[9] + apply(erule cte_wp_at_weakenE, simp) + by (rule cte_at_next_slot; simp) + + +lemma capSwapForDelete_corres: + assumes "src' = cte_map src" "dest' = cte_map dest" + shows "corres dc + (valid_objs and pspace_aligned and pspace_distinct and + valid_mdb and valid_list and cte_at src and cte_at dest + and (\s. \cap. tcb_cap_valid cap src s) + and (\s. \cap. tcb_cap_valid cap dest s)) + (valid_mdb' and pspace_distinct' and pspace_aligned') + (cap_swap_for_delete src dest) (capSwapForDelete src' dest')" + using assms + apply (simp add: cap_swap_for_delete_def capSwapForDelete_def) + apply (cases "src = dest") + apply (clarsimp simp: when_def) + apply (rule corres_assume_pre) + apply clarsimp + apply (frule_tac s=s in cte_map_inj) + apply (simp add: caps_of_state_cte_at)+ + apply (simp add: when_def liftM_def) + apply (rule corres_guard_imp) + apply (rule_tac P1=wellformed_cap in corres_split[OF get_cap_corres_P]) + apply (rule_tac P1=wellformed_cap in corres_split[OF get_cap_corres_P]) + apply (rule cteSwap_corres, rule refl, rule refl, clarsimp+) + apply (wp get_cap_wp getCTE_wp')+ + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (drule (1) caps_of_state_valid_cap)+ + apply (simp add: valid_cap_def2) + apply (clarsimp simp: cte_wp_at_ctes_of) +done + +declare if_split [split] +declare revokable_relation_simps [simp] + +definition + "no_child' s cte \ + let next = mdbNext (cteMDBNode cte) in + (next \ 0 \ cte_at' next s \ cte_wp_at' (\cte'. 
\isMDBParentOf cte cte') next s)" + +definition + "child_save' s cte' cte \ + let cap = cteCap cte; cap' = cteCap cte' in + sameRegionAs cap cap' \ + (isEndpointCap cap \ (capEPBadge cap = capEPBadge cap' \ no_child' s cte)) \ + (isNotificationCap cap \ (capNtfnBadge cap = capNtfnBadge cap' \ no_child' s cte))" + +lemma subtree_no_parent: + assumes "m \ p \ x" + assumes "mdbNext (cteMDBNode cte) \ 0" + assumes "\ isMDBParentOf cte next" + assumes "m p = Some cte" + assumes "m (mdbNext (cteMDBNode cte)) = Some next" + shows "False" using assms + by induct (auto simp: parentOf_def mdb_next_unfold) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma ensureNoChildren_corres: + "p' = cte_map p \ + corres (ser \ dc) (cte_at p) (pspace_aligned' and pspace_distinct' and cte_at' p' and valid_mdb') + (ensure_no_children p) (ensureNoChildren p')" + apply (simp add: ensureNoChildren_def ensure_no_children_descendants + liftE_bindE nullPointer_def) + apply (rule corres_symb_exec_r) + defer + apply (rule getCTE_sp) + apply wp + apply (rule no_fail_pre, wp) + apply simp + apply (case_tac "mdbNext (cteMDBNode cte) = 0") + apply (simp add: whenE_def) + apply (clarsimp simp: returnOk_def corres_underlying_def return_def) + apply (erule notE) + apply (clarsimp simp: state_relation_def cdt_relation_def + simp del: split_paired_All) + apply (erule allE, erule (1) impE) + apply (subgoal_tac "descendants_of' (cte_map p) (ctes_of b) = {}") + apply simp + apply (clarsimp simp: descendants_of'_def) + apply (subst (asm) cte_wp_at_ctes_of) + apply clarsimp + apply (erule (2) subtree_next_0) + apply (simp add: whenE_def) + apply (rule corres_symb_exec_r) + defer + apply (rule getCTE_sp) + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (erule (2) valid_dlistEn) + apply simp + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: corres_underlying_def + throwError_def returnOk_def return_def) + apply (subgoal_tac "pspace_aligned' b \ pspace_distinct' b") + prefer 2 + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: state_relation_def cdt_relation_def + simp del: split_paired_All) + apply (erule allE, erule (1) impE) + apply (clarsimp simp: descendants_of'_def) + apply (subgoal_tac "ctes_of b \ cte_map p \ mdbNext (cteMDBNode cte)") + apply simp + apply (rule direct_parent) + apply (simp add: mdb_next_unfold) + apply assumption + apply (simp add: parentOf_def) + apply clarsimp + apply (clarsimp simp: corres_underlying_def + throwError_def returnOk_def return_def) + apply (subgoal_tac "pspace_aligned' b \ pspace_distinct' b") + prefer 2 + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: state_relation_def cdt_relation_def + simp del: split_paired_All) + apply (erule allE, erule (1) impE) + apply (subgoal_tac "descendants_of' (cte_map p) (ctes_of b) = {}") + apply simp + apply (clarsimp simp: descendants_of'_def) + apply (erule (4) subtree_no_parent) + done + +end +end diff --git a/proof/refine/AARCH64/CSpace_I.thy b/proof/refine/AARCH64/CSpace_I.thy new file mode 100644 index 0000000000..6050582002 --- /dev/null +++ b/proof/refine/AARCH64/CSpace_I.thy @@ -0,0 +1,2030 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + CSpace invariants +*) + +theory CSpace_I +imports ArchAcc_R +begin + +context begin interpretation Arch . 
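+(* Overview note: this theory gathers generic lemmas about the capability derivation tree (MDB), capMasterCap, capBadge and sameRegionAs that the CSpace refinement proofs build on. *)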
(*FIXME: arch_split*) + +lemma capUntypedPtr_simps [simp]: + "capUntypedPtr (ThreadCap r) = r" + "capUntypedPtr (NotificationCap r badge a b) = r" + "capUntypedPtr (EndpointCap r badge a b c d) = r" + "capUntypedPtr (Zombie r bits n) = r" + "capUntypedPtr (ArchObjectCap x) = Arch.capUntypedPtr x" + "capUntypedPtr (UntypedCap d r n f) = r" + "capUntypedPtr (CNodeCap r n g n2) = r" + "capUntypedPtr (ReplyCap r m a) = r" + "Arch.capUntypedPtr (ASIDPoolCap r asid) = r" + "Arch.capUntypedPtr (FrameCap r rghts sz d mapdata) = r" + "Arch.capUntypedPtr (PageTableCap r pt_t mapdata2) = r" + "Arch.capUntypedPtr (VCPUCap r) = r" + by (auto simp: capUntypedPtr_def AARCH64_H.capUntypedPtr_def) + +lemma rights_mask_map_UNIV [simp]: + "rights_mask_map UNIV = allRights" + by (simp add: rights_mask_map_def allRights_def) + +declare insert_UNIV[simp] + +lemma maskCapRights_allRights [simp]: + "maskCapRights allRights c = c" + unfolding maskCapRights_def isCap_defs allRights_def + AARCH64_H.maskCapRights_def maskVMRights_def + by (cases c) (simp_all add: Let_def split: arch_capability.split vmrights.split) + +lemma getCTE_inv [wp]: "\P\ getCTE addr \\rv. P\" + by (simp add: getCTE_def) wp + +lemma getEndpoint_inv [wp]: + "\P\ getEndpoint ptr \\rv. P\" + by (simp add: getEndpoint_def getObject_inv loadObject_default_inv) + +lemma getNotification_inv [wp]: + "\P\ getNotification ptr \\rv. P\" + by (simp add: getNotification_def getObject_inv loadObject_default_inv) + +lemma getSlotCap_inv [wp]: "\P\ getSlotCap addr \\rv. P\" + by (simp add: getSlotCap_def, wp) + +declare resolveAddressBits.simps[simp del] + +lemma cap_case_CNodeCap: + "(case cap of CNodeCap a b c d \ P + | _ \ Q) + = (if isCNodeCap cap then P else Q)" + by (cases cap, simp_all add: isCap_simps) + +lemma resolveAddressBits_inv_induct: + shows + "s \ \P\ + resolveAddressBits cap cref depth + \\rv. P\,\\rv. P\" +proof (induct arbitrary: s rule: resolveAddressBits.induct) + case (1 cap fn cref depth) + show ?case + apply (subst resolveAddressBits.simps) + apply (simp add: Let_def split_def cap_case_CNodeCap[unfolded isCap_simps] + split del: if_split cong: if_cong) + apply (rule hoare_pre_spec_validE) + apply ((elim exE | wp (once) spec_strengthen_postE[OF "1.hyps"])+, + (rule refl conjI | simp add: in_monad split del: if_split)+) + apply (wp | simp add: locateSlot_conv split del: if_split + | wp (once) hoare_drop_imps)+ + done +qed + +lemma rab_inv' [wp]: + "\P\ resolveAddressBits cap addr depth \\rv. P\" + by (rule validE_valid, rule use_specE', rule resolveAddressBits_inv_induct) + +lemmas rab_inv'' [wp] = rab_inv' [folded resolveAddressBits_decl_def] + +crunch inv [wp]: lookupCap P + +lemma updateObject_cte_inv: + "\P\ updateObject (cte :: cte) ko x y n \\rv. P\" + apply (simp add: updateObject_cte) + apply (cases ko, simp_all add: typeError_def unless_def + split del: if_split + cong: if_cong) + apply (wp | simp)+ + done + +definition + "no_mdb cte \ mdbPrev (cteMDBNode cte) = 0 \ mdbNext (cteMDBNode cte) = 0" + +lemma mdb_next_update: + "m (x \ y) \ a \ b = + (if a = x then mdbNext (cteMDBNode y) = b else m \ a \ b)" + by (simp add: mdb_next_rel_def mdb_next_def) + +lemma neg_no_loopsI: + "m \ c \\<^sup>+ c \ \ no_loops m" + unfolding no_loops_def by auto + +lemma valid_dlistEp [elim?]: + "\ valid_dlist m; m p = Some cte; mdbPrev (cteMDBNode cte) \ 0; + \cte'. 
\ m (mdbPrev (cteMDBNode cte)) = Some cte'; + mdbNext (cteMDBNode cte') = p \ \ P \ \ + P" + unfolding valid_dlist_def Let_def by blast + +lemma valid_dlistEn [elim?]: + "\ valid_dlist m; m p = Some cte; mdbNext (cteMDBNode cte) \ 0; + \cte'. \ m (mdbNext (cteMDBNode cte)) = Some cte'; + mdbPrev (cteMDBNode cte') = p \ \ P \ \ + P" + unfolding valid_dlist_def Let_def by blast + +lemmas valid_dlistE = valid_dlistEn valid_dlistEp + +lemma mdb_next_update_other: + "\ m (x \ y) \ a \ b; x \ a \ \ m \ a \ b" + by (simp add: mdb_next_rel_def mdb_next_def) + +lemma mdb_trancl_update_other: + assumes upd: "m(p \ cte) \ x \\<^sup>+ y" + and nopath: "\ m \ x \\<^sup>* p" + shows "m \ x \\<^sup>+ y" + using upd nopath +proof induct + case (base y) + + have "m \ x \ y" + proof (rule mdb_next_update_other) + from base show "p \ x" by clarsimp + qed fact+ + + thus ?case .. +next + case (step y z) + hence ih: "m \ x \\<^sup>+ y" by auto + + from ih show ?case + proof + show "m \ y \ z" + proof (rule mdb_next_update_other) + show "p \ y" + proof (cases "x = p") + case True thus ?thesis using step.prems by simp + next + case False thus ?thesis using step.prems ih + by - (erule contrapos_nn, rule trancl_into_rtrancl, simp) + qed + qed fact+ + qed +qed + +lemma next_unfold': + "m \ c \ y = (\cte. m c = Some cte \ mdbNext (cteMDBNode cte) = y)" + unfolding mdb_next_rel_def + by (simp add: next_unfold split: option.splits) + +lemma no_self_loop_next_noloop: + assumes no_loop: "no_loops m" + and lup: "m ptr = Some cte" + shows "mdbNext (cteMDBNode cte) \ ptr" +proof - + from no_loop have "\ m \ ptr \ ptr" + unfolding no_loops_def + by - (drule spec, erule contrapos_nn, erule r_into_trancl) + + thus ?thesis using lup + by (simp add: next_unfold') +qed + + +lemma valid_dlistI [intro?]: + defines "nxt \ \cte. mdbNext (cteMDBNode cte)" + and "prv \ \cte. mdbPrev (cteMDBNode cte)" + assumes r1: "\p cte. \ m p = Some cte; prv cte \ 0 \ \ \cte'. m (prv cte) = Some cte' \ nxt cte' = p" + and r2: "\p cte. \ m p = Some cte; nxt cte \ 0 \ \ \cte'. 
m (nxt cte) = Some cte' \ prv cte' = p" + shows "valid_dlist m" + unfolding valid_dlist_def + by (auto dest: r1 r2 simp: Let_def prv_def nxt_def) + +lemma no_loops_tranclE: + assumes nl: "no_loops m" + and nxt: "m \ x \\<^sup>+ y" + shows "\ m \ y \\<^sup>* x" +proof + assume "m \ y \\<^sup>* x" + hence "m \ x \\<^sup>+ x" using nxt + by simp + + thus False using nl + unfolding no_loops_def by auto +qed + +lemma neg_next_rtrancl_trancl: + "\ \ m \ y \\<^sup>* r; m \ x \ y \ \ \ m \ x \\<^sup>+ r" + apply (erule contrapos_nn) + apply (drule tranclD) + apply (clarsimp simp: next_unfold') + done + +lemma next_trancl_split_tt: + assumes p1: "m \ x \\<^sup>+ y" + and p2: "m \ x \\<^sup>+ p" + and nm: "\ m \ p \\<^sup>* y" + shows "m \ y \\<^sup>* p" + using p2 p1 nm +proof induct + case base thus ?case + by (clarsimp dest!: tranclD) (drule (1) next_single_value, simp) +next + case (step q r) + + show ?case + proof (cases "q = y") + case True thus ?thesis using step + by fastforce + next + case False + have "m \ y \\<^sup>* q" + proof (rule step.hyps) + have "\ m \ q \\<^sup>+ y" + by (rule neg_next_rtrancl_trancl) fact+ + thus "\ m \ q \\<^sup>* y" using False + by (clarsimp dest!: rtranclD) + qed fact+ + thus ?thesis by (rule rtrancl_into_rtrancl) fact+ + qed +qed + +lemma no_loops_upd_last: + assumes noloop: "no_loops m" + and nxt: "m \ x \\<^sup>+ p" + shows "m (p \ cte) \ x \\<^sup>+ p" +proof - + from noloop nxt have xp: "x \ p" + by (clarsimp dest!: neg_no_loopsI) + + from nxt show ?thesis using xp + proof (induct rule: converse_trancl_induct') + case (base y) + hence "m (p \ cte) \ y \ p" using noloop + by (auto simp add: mdb_next_update) + thus ?case .. + next + case (step y z) + + from noloop step have xp: "z \ p" + by (clarsimp dest!: neg_no_loopsI) + + hence "m (p \ cte) \ y \ z" using step + by (simp add: mdb_next_update) + + moreover from xp have "m (p \ cte) \ z \\<^sup>+ p" using step.hyps assms + by (auto simp del: fun_upd_apply) + ultimately show ?case by (rule trancl_into_trancl2) + qed +qed + + +lemma no_0_neq [intro?]: + "\m c = Some cte; no_0 m\ \ c \ 0" + by (auto simp add: no_0_def) + +lemma no_0_update: + assumes no0: "no_0 m" + and pnz: "p \ 0" + shows "no_0 (m(p \ cte))" + using no0 pnz unfolding no_0_def by simp + +lemma has_loop_update: + assumes lp: "m(p \ cte) \ c \\<^sup>+ c'" + and cn0: "c' \ 0" + and mnext: "mdbNext (cteMDBNode cte) = 0" + and mn0: "no_0 m" + and pn0: "p \ 0" + shows "m \ c \\<^sup>+ c'" + using lp cn0 +proof induct + case (base y) + have "m \ c \ y" + proof (rule mdb_next_update_other) + show "p \ c" using base + by (clarsimp intro: contrapos_nn simp: mdb_next_update mnext) + qed fact+ + + thus ?case .. 
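+  (* Step case: the induction hypothesis covers the chain from c up to y; since mdbNext (cteMDBNode cte) = 0 but the target z is non-zero, the final link from y to z cannot start at p, so the map update can be dropped via mdb_next_update_other. *)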
+next + case (step y z) + + show ?case + proof + have "y \ 0" by (rule no_0_lhs [OF _ no_0_update]) fact+ + thus "m \ c \\<^sup>+ y" using step by simp + next + have "z \ 0" by fact+ + hence "p \ y" using step.hyps mnext + by (clarsimp simp: mdb_next_update) + thus "m \ y \ z" + by (rule mdb_next_update_other [OF step.hyps(2)]) + qed +qed + +lemma mdb_rtrancl_update_other: + assumes upd: "m(p \ cte) \ x \\<^sup>* y" + and nopath: "\ m \ x \\<^sup>* p" + shows "m \ x \\<^sup>* y" + using upd +proof (cases rule: next_rtrancl_tranclE) + case eq thus ?thesis by simp +next + case trancl thus ?thesis + by (auto intro: trancl_into_rtrancl elim: mdb_trancl_update_other [OF _ nopath]) +qed + +lemma mdb_trancl_other_update: + assumes upd: "m \ x \\<^sup>+ y" + and np: "\ m \ x \\<^sup>* p" + shows "m(p \ cte) \ x \\<^sup>+ y" + using upd +proof induct + case (base q) + from np have "x \ p" by clarsimp + hence"m (p \ cte) \ x \ q" + using base by (simp add: mdb_next_update del: fun_upd_apply) + thus ?case .. +next + case (step q r) + + show ?case + proof + from step.hyps(1) np have "q \ p" + by (auto elim!: contrapos_nn) + + thus x: "m(p \ cte) \ q \ r" + using step by (simp add: mdb_next_update del: fun_upd_apply) + qed fact+ +qed + +lemma mdb_rtrancl_other_update: + assumes upd: "m \ x \\<^sup>* y" + and nopath: "\ m \ x \\<^sup>* p" + shows "m(p \ cte) \ x \\<^sup>* y" + using upd +proof (cases rule: next_rtrancl_tranclE) + case eq thus ?thesis by simp +next + case trancl thus ?thesis + by (auto intro: trancl_into_rtrancl elim: mdb_trancl_other_update [OF _ nopath]) +qed + +lemma mdb_chain_0_update: + assumes x: "m \ mdbNext (cteMDBNode cte) \\<^sup>* 0" + and np: "\ m \ mdbNext (cteMDBNode cte) \\<^sup>* p" + assumes p: "p \ 0" + assumes 0: "no_0 m" + assumes n: "mdb_chain_0 m" + shows "mdb_chain_0 (m(p \ cte))" + unfolding mdb_chain_0_def +proof rule + fix x + assume "x \ dom (m(p \ cte))" + hence x: "x = p \ x \ dom m" by simp + + have cnxt: "m(p \ cte) \ mdbNext (cteMDBNode cte) \\<^sup>* 0" + by (rule mdb_rtrancl_other_update) fact+ + + from x show "m(p \ cte) \ x \\<^sup>+ 0" + proof + assume xp: "x = p" + show ?thesis + proof (rule rtrancl_into_trancl2 [OF _ cnxt]) + show "m(p \ cte) \ x \ mdbNext (cteMDBNode cte)" using xp + by (simp add: mdb_next_update) + qed + next + assume x: "x \ dom m" + + show ?thesis + proof (cases "m \ x \\<^sup>* p") + case False + from n have "m \ x \\<^sup>+ 0" + unfolding mdb_chain_0_def + using x by auto + + thus ?thesis + by (rule mdb_trancl_other_update) fact+ + next + case True + hence "m(p \ cte) \ x \\<^sup>* p" + proof (cases rule: next_rtrancl_tranclE) + case eq thus ?thesis by simp + next + case trancl + have "no_loops m" by (rule mdb_chain_0_no_loops) fact+ + thus ?thesis + by (rule trancl_into_rtrancl [OF no_loops_upd_last]) fact+ + qed + moreover + have "m(p \ cte) \ p \ mdbNext (cteMDBNode cte)" by (simp add: mdb_next_update) + ultimately show ?thesis using cnxt by simp + qed + qed +qed + +lemma mdb_chain_0_update_0: + assumes x: "mdbNext (cteMDBNode cte) = 0" + assumes p: "p \ 0" + assumes 0: "no_0 m" + assumes n: "mdb_chain_0 m" + shows "mdb_chain_0 (m(p \ cte))" + using x 0 p + apply - + apply (rule mdb_chain_0_update [OF _ _ p 0 n]) + apply (auto elim: next_rtrancl_tranclE dest: no_0_lhs_trancl) + done + +lemma valid_badges_0_update: + assumes nx: "mdbNext (cteMDBNode cte) = 0" + assumes pv: "mdbPrev (cteMDBNode cte) = 0" + assumes p: "m p = Some cte'" + assumes m: "no_mdb cte'" + assumes 0: "no_0 m" + assumes d: "valid_dlist m" + assumes v: 
"valid_badges m" + shows "valid_badges (m(p \ cte))" +proof (unfold valid_badges_def, clarify) + fix c c' cap cap' n n' + assume c: "(m(p \ cte)) c = Some (CTE cap n)" + assume c': "(m(p \ cte)) c' = Some (CTE cap' n')" + assume nxt: "m(p \ cte) \ c \ c'" + assume r: "sameRegionAs cap cap'" + + from p 0 have p0: "p \ 0" by (clarsimp simp: no_0_def) + + from c' p0 0 + have "c' \ 0" by (clarsimp simp: no_0_def) + with nx nxt + have cp: "c \ p" by (clarsimp simp add: mdb_next_unfold) + moreover + from pv nx nxt p p0 c d m 0 + have "c' \ p" + apply clarsimp + apply (simp add: mdb_next_unfold split: if_split_asm) + apply (erule (1) valid_dlistEn, simp) + apply (clarsimp simp: no_mdb_def no_0_def) + done + moreover + with nxt c c' cp + have "m \ c \ c'" by (simp add: mdb_next_unfold) + ultimately + show "(isEndpointCap cap \ + capEPBadge cap \ capEPBadge cap' \ + capEPBadge cap' \ 0 \ + mdbFirstBadged n') \ + (isNotificationCap cap \ + capNtfnBadge cap \ capNtfnBadge cap' \ + capNtfnBadge cap' \ 0 \ + mdbFirstBadged n')" + using r c c' v by (fastforce simp: valid_badges_def) +qed + +definition + "caps_no_overlap' m S \ + \p c n. m p = Some (CTE c n) \ capRange c \ S = {}" + +definition + fresh_virt_cap_class :: "capclass \ cte_heap \ bool" +where + "fresh_virt_cap_class C m \ + C \ PhysicalClass \ C \ (capClass \ cteCap) ` ran m" + +lemma fresh_virt_cap_class_Physical[simp]: + "fresh_virt_cap_class PhysicalClass = \" + by (rule ext, simp add: fresh_virt_cap_class_def)+ + +lemma fresh_virt_cap_classD: + "\ m x = Some cte; fresh_virt_cap_class C m \ + \ C \ PhysicalClass \ capClass (cteCap cte) \ C" + by (auto simp: fresh_virt_cap_class_def) + +lemma capRange_untyped: + "capRange cap' \ untypedRange cap \ {} \ isUntypedCap cap" + by (cases cap, auto simp: isCap_simps) + +lemma capRange_of_untyped [simp]: + "capRange (UntypedCap d r n f) = untypedRange (UntypedCap d r n f)" + by (simp add: capRange_def isCap_simps capUntypedSize_def) + +lemma caps_contained_no_overlap: + "\ caps_no_overlap' m (capRange cap); caps_contained' m\ + \ caps_contained' (m(p \ CTE cap n))" + apply (clarsimp simp add: caps_contained'_def) + apply (rule conjI) + apply clarsimp + apply (rule conjI, clarsimp dest!: capRange_untyped) + apply clarsimp + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=p' in allE, erule allE, erule impE, erule exI) + apply (frule capRange_untyped) + apply (clarsimp simp add: isCap_simps) + apply clarsimp + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=pa in allE, erule allE, erule impE, erule exI) + apply (frule capRange_untyped) + apply (clarsimp simp: isCap_simps) + apply blast + done + +lemma no_mdb_next: + "\ m p = Some cte; no_mdb cte; valid_dlist m; no_0 m \ \ \ m \ x \ p" + apply clarsimp + apply (frule vdlist_nextD0) + apply clarsimp + apply assumption + apply (clarsimp simp: mdb_prev_def no_mdb_def mdb_next_unfold) + done + +lemma no_mdb_rtrancl: + "\ m p = Some cte; no_mdb cte; p \ x; valid_dlist m; no_0 m \ \ \ m \ x \\<^sup>* p" + apply (clarsimp dest!: rtranclD) + apply (drule tranclD2) + apply clarsimp + apply (drule (3) no_mdb_next) + apply fastforce + done + +lemma isNullCap [simp]: + "isNullCap cap = (cap = NullCap)" + by (simp add: isCap_simps) + +lemma isDomainCap [simp]: + "isDomainCap cap = (cap = DomainCap)" + by (simp add: isCap_simps) + +lemma isPhysicalCap[simp]: + "isPhysicalCap cap = (capClass cap = PhysicalClass)" + by (simp add: isPhysicalCap_def AARCH64_H.isPhysicalCap_def + split: capability.split arch_capability.split) + +definition 
capMasterCap :: "capability \ capability" where + "capMasterCap cap \ case cap of + EndpointCap ref bdg s r g gr \ EndpointCap ref 0 True True True True + | NotificationCap ref bdg s r \ NotificationCap ref 0 True True + | CNodeCap ref bits gd gs \ CNodeCap ref bits 0 0 + | ThreadCap ref \ ThreadCap ref + | ReplyCap ref master g \ ReplyCap ref True True + | UntypedCap d ref n f \ UntypedCap d ref n 0 + | ArchObjectCap acap \ ArchObjectCap (case acap of + FrameCap ref rghts sz d mapdata \ + FrameCap ref VMReadWrite sz d None + | ASIDPoolCap pool asid \ + ASIDPoolCap pool 0 + | PageTableCap ptr pt_t data \ + PageTableCap ptr pt_t None + | VCPUCap ptr \ + VCPUCap ptr + | _ \ acap) + | _ \ cap" + +lemmas capMasterCap_simps[simp] = capMasterCap_def[split_simps capability.split arch_capability.split] + +lemma capMasterCap_eqDs1: + "capMasterCap cap = EndpointCap ref bdg s r g gr + \ bdg = 0 \ s \ r \ g \ gr + \ (\bdg s r g gr. cap = EndpointCap ref bdg s r g gr)" + "capMasterCap cap = NotificationCap ref bdg s r + \ bdg = 0 \ s \ r + \ (\bdg s r. cap = NotificationCap ref bdg s r)" + "capMasterCap cap = CNodeCap ref bits gd gs + \ gd = 0 \ gs = 0 \ (\gd gs. cap = CNodeCap ref bits gd gs)" + "capMasterCap cap = ThreadCap ref + \ cap = ThreadCap ref" + "capMasterCap cap = NullCap + \ cap = NullCap" + "capMasterCap cap = DomainCap + \ cap = DomainCap" + "capMasterCap cap = IRQControlCap + \ cap = IRQControlCap" + "capMasterCap cap = IRQHandlerCap irq + \ cap = IRQHandlerCap irq" + "capMasterCap cap = Zombie ref tp n + \ cap = Zombie ref tp n" + "capMasterCap cap = UntypedCap d ref bits 0 + \ \f. cap = UntypedCap d ref bits f" + "capMasterCap cap = ReplyCap ref master g + \ master \ g \ (\master g. cap = ReplyCap ref master g)" + "capMasterCap cap = ArchObjectCap (FrameCap ref rghts sz d mapdata) + \ rghts = VMReadWrite \ mapdata = None + \ (\rghts mapdata. cap = ArchObjectCap (FrameCap ref rghts sz d mapdata))" + "capMasterCap cap = ArchObjectCap ASIDControlCap + \ cap = ArchObjectCap ASIDControlCap" + "capMasterCap cap = ArchObjectCap (ASIDPoolCap pool asid) + \ asid = 0 \ (\asid. cap = ArchObjectCap (ASIDPoolCap pool asid))" + "capMasterCap cap = ArchObjectCap (PageTableCap ptr pt_t data) + \ data = None \ (\data. cap = ArchObjectCap (PageTableCap ptr pt_t data))" + "capMasterCap cap = ArchObjectCap (VCPUCap v) + \ cap = ArchObjectCap (VCPUCap v)" + by (clarsimp simp: capMasterCap_def + split: capability.split_asm arch_capability.split_asm)+ + +lemmas capMasterCap_eqDs[dest!] 
= capMasterCap_eqDs1 capMasterCap_eqDs1 [OF sym] + +definition + capBadge :: "capability \ machine_word option" +where + "capBadge cap \ if isEndpointCap cap then Some (capEPBadge cap) + else if isNotificationCap cap then Some (capNtfnBadge cap) + else None" + +lemma capBadge_simps[simp]: + "capBadge (UntypedCap d p n f) = None" + "capBadge (NullCap) = None" + "capBadge (DomainCap) = None" + "capBadge (EndpointCap ref badge s r g gr) = Some badge" + "capBadge (NotificationCap ref badge s r) = Some badge" + "capBadge (CNodeCap ref bits gd gs) = None" + "capBadge (ThreadCap ref) = None" + "capBadge (Zombie ref b n) = None" + "capBadge (ArchObjectCap cap) = None" + "capBadge (IRQControlCap) = None" + "capBadge (IRQHandlerCap irq) = None" + "capBadge (ReplyCap tcb master g) = None" + by (simp add: capBadge_def isCap_defs)+ + +lemma capClass_Master: + "capClass (capMasterCap cap) = capClass cap" + by (simp add: capMasterCap_def split: capability.split arch_capability.split) + +lemma capRange_Master: + "capRange (capMasterCap cap) = capRange cap" + by (simp add: capMasterCap_def split: capability.split arch_capability.split, + simp add: capRange_def) + +lemma master_eqI: + "\ \cap. F (capMasterCap cap) = F cap; F (capMasterCap cap) = F (capMasterCap cap') \ + \ F cap = F cap'" + by simp + +lemmas isArchFrameCap_simps[simp] = + isArchFrameCap_def[split_simps capability.split arch_capability.split] + +lemma isCap_Master: + "isZombie (capMasterCap cap) = isZombie cap" + "isArchObjectCap (capMasterCap cap) = isArchObjectCap cap" + "isThreadCap (capMasterCap cap) = isThreadCap cap" + "isCNodeCap (capMasterCap cap) = isCNodeCap cap" + "isNotificationCap (capMasterCap cap) = isNotificationCap cap" + "isEndpointCap (capMasterCap cap) = isEndpointCap cap" + "isUntypedCap (capMasterCap cap) = isUntypedCap cap" + "isReplyCap (capMasterCap cap) = isReplyCap cap" + "isIRQControlCap (capMasterCap cap) = isIRQControlCap cap" + "isIRQHandlerCap (capMasterCap cap) = isIRQHandlerCap cap" + "isNullCap (capMasterCap cap) = isNullCap cap" + "isDomainCap (capMasterCap cap) = isDomainCap cap" + "isArchFrameCap (capMasterCap cap) = isArchFrameCap cap" + by (simp add: isCap_simps capMasterCap_def + split: capability.split arch_capability.split)+ + +lemma capUntypedSize_capBits: + "capClass cap = PhysicalClass \ capUntypedSize cap = 2 ^ (capBits cap)" + apply (simp add: capUntypedSize_def objBits_simps' + AARCH64_H.capUntypedSize_def bit_simps' + split: capability.splits arch_capability.splits + zombie_type.splits) + apply fastforce + done + +lemma sameRegionAs_def2: + "sameRegionAs cap cap' = (\cap cap'. 
+ (cap = cap' + \ (\ isNullCap cap \ \ isZombie cap + \ \ isUntypedCap cap \ \ isArchFrameCap cap) + \ (\ isNullCap cap' \ \ isZombie cap' + \ \ isUntypedCap cap' \ \ isArchFrameCap cap')) + \ (capRange cap' \ {} \ capRange cap' \ capRange cap + \ (isUntypedCap cap \ (isArchFrameCap cap \ isArchFrameCap cap'))) + \ (isIRQControlCap cap \ isIRQHandlerCap cap')) + (capMasterCap cap) (capMasterCap cap')" + apply (cases "isUntypedCap cap") + apply (clarsimp simp: sameRegionAs_def Let_def + isCap_Master capRange_Master capClass_Master) + apply (clarsimp simp: isCap_simps + capMasterCap_def[where cap="UntypedCap d p n f" for d p n f]) + apply (simp add: capRange_def capUntypedSize_capBits) + apply (intro impI iffI) + apply (clarsimp del: subsetI intro!: range_subsetI) + apply clarsimp + apply (simp cong: conj_cong) + apply (simp add: capMasterCap_def sameRegionAs_def isArchFrameCap_def + split: capability.split + split del: if_split cong: if_cong) + apply (simp add: AARCH64_H.sameRegionAs_def isCap_simps + split: arch_capability.split + split del: if_split cong: if_cong) + apply (clarsimp simp: capRange_def Let_def isCap_simps) + apply (simp add: range_subset_eq2 cong: conj_cong) + apply (simp add: conj_comms mask_def add_diff_eq) + done + +lemma sameObjectAs_def2: + "sameObjectAs cap cap' = (\cap cap'. + (cap = cap' + \ (\ isNullCap cap \ \ isZombie cap \ \ isUntypedCap cap) + \ (\ isNullCap cap' \ \ isZombie cap' \ \ isUntypedCap cap') + \ (isArchFrameCap cap \ capRange cap \ {}) + \ (isArchFrameCap cap' \ capRange cap' \ {}))) + (capMasterCap cap) (capMasterCap cap')" + apply (simp add: sameObjectAs_def sameRegionAs_def2 + isCap_simps capMasterCap_def + split: capability.split) + apply (clarsimp simp: AARCH64_H.sameObjectAs_def isCap_simps + split: arch_capability.split cong: if_cong) + apply (clarsimp simp: AARCH64_H.sameRegionAs_def isCap_simps + split del: if_split cong: if_cong) + apply (simp add: capRange_def isCap_simps mask_def add_diff_eq + split del: if_split) + apply fastforce + done + +lemmas sameRegionAs_def3 = + sameRegionAs_def2 [simplified capClass_Master capRange_Master isCap_Master] + +lemmas sameObjectAs_def3 = + sameObjectAs_def2 [simplified capClass_Master capRange_Master isCap_Master] + +lemma sameRegionAsE: + "\ sameRegionAs cap cap'; + \ capMasterCap cap = capMasterCap cap'; \ isNullCap cap; \ isZombie cap; + \ isUntypedCap cap; \ isArchFrameCap cap\ \ R; + \ capRange cap' \ {}; capRange cap' \ capRange cap; isUntypedCap cap \ \ R; + \ capRange cap' \ {}; capRange cap' \ capRange cap; isArchFrameCap cap; + isArchFrameCap cap' \ \ R; + \ isIRQControlCap cap; isIRQHandlerCap cap' \ \ R + \ \ R" + by (simp add: sameRegionAs_def3, fastforce) + +lemma sameObjectAsE: + "\ sameObjectAs cap cap'; + \ capMasterCap cap = capMasterCap cap'; \ isNullCap cap; \ isZombie cap; + \ isUntypedCap cap; + isArchFrameCap cap \ capRange cap \ {} \ \ R \ \ R" + by (clarsimp simp add: sameObjectAs_def3) + +lemma sameObjectAs_sameRegionAs: + "sameObjectAs cap cap' \ sameRegionAs cap cap'" + by (clarsimp simp add: sameObjectAs_def2 sameRegionAs_def2 isCap_simps) + +lemma sameObjectAs_sym: + "sameObjectAs c d = sameObjectAs d c" + by (simp add: sameObjectAs_def2 eq_commute conj_comms) + +lemma untypedRange_Master: + "untypedRange (capMasterCap cap) = untypedRange cap" + by (simp add: capMasterCap_def split: capability.split) + +lemma sameObject_capRange: + "sameObjectAs cap cap' \ capRange cap' = capRange cap" + apply (rule master_eqI, rule capRange_Master) + apply (clarsimp simp: 
sameObjectAs_def2) + done + +lemma sameRegionAs_Null [simp]: + "sameRegionAs c NullCap = False" + "sameRegionAs NullCap c = False" + by (simp add: sameRegionAs_def3 capRange_def isCap_simps)+ + +lemma isMDBParent_Null [simp]: + "isMDBParentOf c (CTE NullCap m) = False" + "isMDBParentOf (CTE NullCap m) c = False" + unfolding isMDBParentOf_def by (auto split: cte.splits) + +lemma capUntypedSize_simps [simp]: + "capUntypedSize (ThreadCap r) = 1 << objBits (undefined::tcb)" + "capUntypedSize (NotificationCap r badge a b) = 1 << objBits (undefined::Structures_H.notification)" + "capUntypedSize (EndpointCap r badge a b c d) = 1 << objBits (undefined::endpoint)" + "capUntypedSize (Zombie r zs n) = 1 << (zBits zs)" + "capUntypedSize NullCap = 0" + "capUntypedSize DomainCap = 1" + "capUntypedSize (ArchObjectCap x) = Arch.capUntypedSize x" + "capUntypedSize (UntypedCap d r n f) = 1 << n" + "capUntypedSize (CNodeCap r n g n2) = 1 << (objBits (undefined::cte) + n)" + "capUntypedSize (ReplyCap r m a) = 1 << objBits (undefined :: tcb)" + "capUntypedSize IRQControlCap = 1" + "capUntypedSize (IRQHandlerCap irq) = 1" + by (auto simp add: capUntypedSize_def isCap_simps objBits_simps' + split: zombie_type.splits) + +lemma sameRegionAs_classes: + "sameRegionAs cap cap' \ capClass cap = capClass cap'" + apply (erule sameRegionAsE) + apply (rule master_eqI, rule capClass_Master) + apply simp + apply (simp add: capRange_def split: if_split_asm) + apply (clarsimp simp: isCap_simps)+ + done + +lemma capAligned_capUntypedPtr: + "\ capAligned cap; capClass cap = PhysicalClass \ \ + capUntypedPtr cap \ capRange cap" + by (simp add: capRange_def capAligned_def is_aligned_no_overflow) + +lemma sameRegionAs_capRange_Int: + "\ sameRegionAs cap cap'; capClass cap = PhysicalClass \ capClass cap' = PhysicalClass; + capAligned cap; capAligned cap' \ + \ capRange cap' \ capRange cap \ {}" + apply (frule sameRegionAs_classes, simp) + apply (drule(1) capAligned_capUntypedPtr)+ + apply (erule sameRegionAsE) + apply (subgoal_tac "capRange (capMasterCap cap') \ capRange (capMasterCap cap) \ {}") + apply (simp(no_asm_use) add: capRange_Master) + apply (clarsimp simp: capRange_Master) + apply blast + apply blast + apply (clarsimp simp: isCap_simps) + done + +lemma sameRegionAs_trans: + "\ sameRegionAs a b; sameRegionAs b c \ \ sameRegionAs a c" + apply (simp add: sameRegionAs_def2, elim conjE disjE, simp_all) + by (auto simp: isCap_simps capRange_def) (* long *) + +lemma capMasterCap_maskCapRights[simp]: + "capMasterCap (maskCapRights msk cap) + = capMasterCap cap" + apply (cases cap; + simp add: maskCapRights_def Let_def isCap_simps capMasterCap_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; + simp add: AARCH64_H.maskCapRights_def Let_def isCap_simps) + done + +lemma capBadge_maskCapRights[simp]: + "capBadge (maskCapRights msk cap) = capBadge cap" + apply (cases cap; + simp add: maskCapRights_def Let_def isCap_simps capBadge_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; + simp add: AARCH64_H.maskCapRights_def Let_def isCap_simps) + done + +lemma getObject_cte_det: + "(r::cte,s') \ fst (getObject p s) \ fst (getObject p s) = {(r,s)} \ s' = s" + apply (clarsimp simp add: getObject_def bind_def get_def gets_def + return_def loadObject_cte split_def) + apply (clarsimp split: kernel_object.split_asm if_split_asm option.split_asm + simp: in_monad typeError_def alignError_def magnitudeCheck_def) + apply (simp_all add: bind_def return_def assert_opt_def split_def + alignCheck_def 
is_aligned_mask[symmetric] + unless_def when_def magnitudeCheck_def) + done + +lemma cte_wp_at_obj_cases': + "cte_wp_at' P p s = + (obj_at' P p s \ (\n \ dom tcb_cte_cases. obj_at' (P \ (fst (the (tcb_cte_cases n)))) (p - n) s))" + apply (simp add: cte_wp_at_cases' obj_at'_def) + apply (rule iffI) + apply (erule disjEI + | clarsimp simp: objBits_simps' cte_level_bits_def + | rule rev_bexI, erule domI)+ + apply fastforce + done + +lemma cte_wp_at_valid_objs_valid_cap': + "\ cte_wp_at' P p s; valid_objs' s \ \ \cte. P cte \ s \' (cteCap cte)" + apply (simp add: cte_wp_at_obj_cases') + apply (elim disjE bexE conjE) + apply (drule(1) obj_at_valid_objs') + apply (clarsimp simp: valid_obj'_def valid_cte'_def) + apply (drule(1) obj_at_valid_objs') + apply (clarsimp simp: valid_obj'_def valid_cte'_def valid_tcb'_def) + apply (fastforce dest: bspec [OF _ ranI]) + done + +lemma getCTE_valid_cap: + "\valid_objs'\ getCTE t \\cte s. s \' (cteCap cte) \ cte_at' t s\" + apply (clarsimp simp add: getCTE_def valid_def) + apply (frule in_inv_by_hoareD [OF getObject_cte_inv], clarsimp) + apply (subst conj_commute) + apply (subgoal_tac "cte_wp_at' ((=) a) t s") + apply (rule conjI) + apply (clarsimp elim!: cte_wp_at_weakenE') + apply (drule(1) cte_wp_at_valid_objs_valid_cap') + apply clarsimp + apply (drule getObject_cte_det) + apply (simp add: cte_wp_at'_def) + done + +lemmas getCTE_valid_cap' [wp] = + getCTE_valid_cap [THEN hoare_conjD1 [unfolded pred_conj_def]] + +lemma ctes_of_valid_cap': + "\ ctes_of s p = Some (CTE c n); valid_objs' s\ \ s \' c" + apply (rule cte_wp_at_valid_objs_valid_cap'[where P="(=) (CTE c n)", simplified]) + apply (simp add: cte_wp_at_ctes_of) + apply assumption + done + +lemma valid_capAligned: + "valid_cap' c s \ capAligned c" + by (simp add: valid_cap'_def) + +lemma caps_no_overlap'_no_region: + "\ caps_no_overlap' m (capRange cap); valid_objs' s; + m = ctes_of s; s \' cap; fresh_virt_cap_class (capClass cap) m \ \ + \c n p. 
m p = Some (CTE c n) \ + \ sameRegionAs c cap \ \ sameRegionAs cap c" + apply (clarsimp simp add: caps_no_overlap'_def) + apply (erule allE)+ + apply (erule impE, erule exI) + apply (frule (1) ctes_of_valid_cap') + apply (drule valid_capAligned)+ + apply (case_tac "capClass cap = PhysicalClass") + apply (auto dest: sameRegionAs_capRange_Int)[1] + apply (drule(1) fresh_virt_cap_classD) + apply (auto dest: sameRegionAs_classes) + done + +definition + "initMDBNode \ MDB nullPointer nullPointer True True" + +lemma init_next [simp]: + "mdbNext initMDBNode = 0" + by (simp add: initMDBNode_def nullPointer_def) + +lemma init_prev [simp]: + "mdbPrev initMDBNode = 0" + by (simp add: initMDBNode_def nullPointer_def) + +lemma mdb_chunked_init: + assumes x: "m x = Some cte" + assumes no_m: "no_mdb cte" + assumes no_c: "caps_no_overlap' m (capRange cap)" + assumes no_v: "fresh_virt_cap_class (capClass cap) m" + assumes no_0: "no_0 m" + assumes dlist: "valid_dlist m" + assumes chain: "mdb_chain_0 m" + assumes chunked: "mdb_chunked m" + assumes valid: "valid_objs' s" "m = ctes_of s" "s \' cap" + shows "mdb_chunked (m(x \ CTE cap initMDBNode))" + unfolding mdb_chunked_def +proof clarify + fix p p' c c' n n' + define m' where "m' \ m (x \ CTE cap initMDBNode)" + assume p: "m' p = Some (CTE c n)" + assume p': "m' p' = Some (CTE c' n')" + assume r: "sameRegionAs c c'" + assume neq: "p \ p'" + + note no_region = caps_no_overlap'_no_region [OF no_c valid no_v] + + from chain x no_0 + have chain': "mdb_chain_0 m'" + unfolding m'_def + apply - + apply (rule mdb_chain_0_update, clarsimp) + apply clarsimp + apply (drule rtranclD) + apply (erule disjE, clarsimp) + apply clarsimp + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + apply clarsimp + apply assumption + apply assumption + done + moreover + from x no_0 + have x0 [simp]: "x \ 0" by clarsimp + with no_0 + have "no_0 m'" + unfolding m'_def + by (rule no_0_update) + ultimately + have nl: "no_loops m'" by (rule mdb_chain_0_no_loops) + + from p p' r neq no_region + have px: "p \ x" + by (clarsimp simp: m'_def) blast + moreover + from p p' r neq no_region + have p'x: "p' \ x" + by (clarsimp simp: m'_def) blast + ultimately + have m: + "(m \ p \\<^sup>+ p' \ m \ p' \\<^sup>+ p) \ + (m \ p \\<^sup>+ p' \ is_chunk m c p p') \ + (m \ p' \\<^sup>+ p \ is_chunk m c' p' p)" + using chunked p p' neq r + unfolding mdb_chunked_def m'_def + by simp + + from x no_m px [symmetric] dlist no_0 + have npx: "\ m \ p \\<^sup>* x" by (rule no_mdb_rtrancl) + + from x no_m p'x [symmetric] dlist no_0 + have np'x: "\ m \ p' \\<^sup>* x" by (rule no_mdb_rtrancl) + + show "(m' \ p \\<^sup>+ p' \ m' \ p' \\<^sup>+ p) \ + (m' \ p \\<^sup>+ p' \ is_chunk m' c p p') \ + (m' \ p' \\<^sup>+ p \ is_chunk m' c' p' p)" + proof (cases "m \ p \\<^sup>+ p'") + case True + with m + have ch: "is_chunk m c p p'" by simp + + from True npx + have "m' \ p \\<^sup>+ p'" + unfolding m'_def + by (rule mdb_trancl_other_update) + moreover + with nl + have "\ m' \ p' \\<^sup>+ p" + apply clarsimp + apply (drule (1) trancl_trans) + apply (simp add: no_loops_def) + done + moreover + have "is_chunk m' c p p'" + unfolding is_chunk_def + proof clarify + fix p'' + assume "m' \ p \\<^sup>+ p''" + with npx + have "m \ p \\<^sup>+ p''" + unfolding m'_def + by - (rule mdb_trancl_update_other) + moreover + then + have p''x: "p'' \ x" + using dlist x no_m no_0 + apply clarsimp + apply (drule tranclD2) + apply clarsimp + apply (frule vdlist_nextD0, simp, assumption) + apply (clarsimp simp: mdb_prev_def 
mdb_next_unfold no_mdb_def) + done + moreover + assume "m' \ p'' \\<^sup>* p'" + { + moreover + from x no_m p''x [symmetric] dlist no_0 + have "\m \ p'' \\<^sup>* x" by (rule no_mdb_rtrancl) + ultimately + have "m \ p'' \\<^sup>* p'" + unfolding m'_def + by (rule mdb_rtrancl_update_other) + } + ultimately + have "\cap'' n''. + m p'' = Some (CTE cap'' n'') \ sameRegionAs c cap''" + using ch + by (simp add: is_chunk_def) + with p''x + show "\cap'' n''. + m' p'' = Some (CTE cap'' n'') \ sameRegionAs c cap''" + by (simp add: m'_def) + qed + ultimately + show ?thesis by simp + next + case False + with m + have p'p: "m \ p' \\<^sup>+ p" by simp + with m + have ch: "is_chunk m c' p' p" by simp + from p'p np'x + have "m' \ p' \\<^sup>+ p" + unfolding m'_def + by (rule mdb_trancl_other_update) + moreover + with nl + have "\ m' \ p \\<^sup>+ p'" + apply clarsimp + apply (drule (1) trancl_trans) + apply (simp add: no_loops_def) + done + moreover + have "is_chunk m' c' p' p" + unfolding is_chunk_def + proof clarify + fix p'' + assume "m' \ p' \\<^sup>+ p''" + with np'x + have "m \ p' \\<^sup>+ p''" + unfolding m'_def + by - (rule mdb_trancl_update_other) + moreover + then + have p''x: "p'' \ x" + using dlist x no_m no_0 + apply clarsimp + apply (drule tranclD2) + apply clarsimp + apply (frule vdlist_nextD0, simp, assumption) + apply (clarsimp simp: mdb_prev_def mdb_next_unfold no_mdb_def) + done + moreover + assume "m' \ p'' \\<^sup>* p" + { + moreover + from x no_m p''x [symmetric] dlist no_0 + have "\m \ p'' \\<^sup>* x" by (rule no_mdb_rtrancl) + ultimately + have "m \ p'' \\<^sup>* p" + unfolding m'_def + by (rule mdb_rtrancl_update_other) + } + ultimately + have "\cap'' n''. + m p'' = Some (CTE cap'' n'') \ sameRegionAs c' cap''" + using ch + by (simp add: is_chunk_def) + with p''x + show "\cap'' n''. + m' p'' = Some (CTE cap'' n'') \ sameRegionAs c' cap''" + by (simp add: m'_def) + qed + ultimately + show ?thesis by simp + qed +qed + +lemma cte_refs_capRange: + "\ s \' c; \irq. 
c \ IRQHandlerCap irq \ \ cte_refs' c x \ capRange c" + apply (cases c; simp add: capRange_def isCap_simps) + apply (clarsimp dest!: valid_capAligned + simp: capAligned_def objBits_simps field_simps) + apply (frule tcb_cte_cases_small) + apply (intro conjI) + apply (erule(1) is_aligned_no_wrap') + apply (rule word_plus_mono_right[where z="2^tcbBlockSizeBits - 1", simplified field_simps]) + apply (drule word_le_minus_one_leq, simp) + apply (erule is_aligned_no_wrap'[where off="2^tcbBlockSizeBits - 1", simplified field_simps]) + apply (drule word_le_minus_one_leq) + apply simp + defer + \ \CNodeCap\ + apply (clarsimp simp: objBits_simps capAligned_def dest!: valid_capAligned) + apply (rename_tac word1 nat1 word2 nat2 x) + apply (subgoal_tac "x * 2^cteSizeBits < 2 ^ (cteSizeBits + nat1)") + apply (intro conjI) + apply (simp add: shiftl_t2n mult_ac) + apply (erule(1) is_aligned_no_wrap') + apply (simp add: add_diff_eq[symmetric]) + apply (rule word_plus_mono_right) + apply simp + apply (simp add: shiftl_t2n mult_ac) + apply (erule is_aligned_no_wrap') + apply simp + apply (simp add: power_add field_simps mask_def) + apply (erule word_mult_less_mono1) + apply (simp add: objBits_defs) + apply (frule power_strict_increasing [where a="2 :: nat" and n="y + z" for y z]) + apply simp + apply (simp only: power_add) + apply (simp add: word_bits_def) + \ \Zombie\ + apply (rename_tac word zombie_type nat) + apply (clarsimp simp: capAligned_def valid_cap'_def objBits_simps) + apply (subgoal_tac "xa * 2^cteSizeBits < 2 ^ zBits zombie_type") + apply (intro conjI) + apply (simp add: shiftl_t2n mult_ac) + apply (erule(1) is_aligned_no_wrap') + apply (simp add: add_diff_eq[symmetric]) + apply (rule word_plus_mono_right) + apply (simp add: shiftl_t2n mult_ac) + apply (erule is_aligned_no_wrap') + apply simp + apply (case_tac zombie_type) + apply simp + apply (rule div_lt_mult) + apply (simp add: objBits_defs) + apply (erule order_less_le_trans) + apply (simp add: word_le_nat_alt) + apply (subst le_unat_uoi[where z=5]) + apply simp + apply simp + apply (simp add: objBits_defs) + apply (simp add: objBits_simps' power_add mult.commute) + apply (rule word_mult_less_mono1) + apply (erule order_less_le_trans) + apply (simp add: word_le_nat_alt) + apply (subst le_unat_uoi) + apply (subst unat_power_lower) + prefer 2 + apply assumption + apply (simp add: word_bits_def) + apply (simp add: word_bits_def) + apply simp + apply (frule power_strict_increasing [where a="2 :: nat" and n="y + z" for y z]) + apply simp + apply (simp only: power_add) + apply (simp add: word_bits_def) + done + +lemma untypedCapRange: + "isUntypedCap cap \ capRange cap = untypedRange cap" + by (clarsimp simp: isCap_simps) + +lemma no_direct_loop [simp]: + "no_loops m \ m (mdbNext node) \ Some (CTE cap node)" + by (fastforce simp: mdb_next_rel_def mdb_next_def no_loops_def) + +lemma no_loops_direct_simp: + "no_loops m \ m \ x \ x = False" + by (auto simp add: no_loops_def) + +lemma no_loops_trancl_simp: + "no_loops m \ m \ x \\<^sup>+ x = False" + by (auto simp add: no_loops_def) + +lemma subtree_mdb_next: + "m \ a \ b \ m \ a \\<^sup>+ b" + by (erule subtree.induct) (auto simp: mdb_next_rel_def intro: trancl_into_trancl) +end + +context mdb_order +begin + +lemma no_loops: "no_loops m" + using chain no_0 by (rule mdb_chain_0_no_loops) + +lemma irrefl_direct_simp [iff]: + "m \ x \ x = False" + using no_loops by (rule no_loops_direct_simp) + +lemma irrefl_trancl_simp [iff]: + "m \ x \\<^sup>+ x = False" + using no_loops by (rule no_loops_trancl_simp) 
+ +lemma irrefl_subtree [iff]: + "m \ x \ x = False" + by (clarsimp dest!: subtree_mdb_next) + +end (* of context mdb_order *) + +lemma no_loops_prev_next_0: + fixes m :: cte_heap + assumes src: "m src = Some (CTE src_cap src_node)" + assumes no_loops: "no_loops m" + assumes dlist: "valid_dlist m" + shows "(mdbPrev src_node = mdbNext src_node) = + (mdbPrev src_node = 0 \ mdbNext src_node = 0)" +proof - + { assume "mdbPrev src_node = mdbNext src_node" + moreover + assume "mdbNext src_node \ 0" + ultimately + obtain cte where + "m (mdbNext src_node) = Some cte" + "mdbNext (cteMDBNode cte) = src" + using src dlist + by (fastforce simp add: valid_dlist_def Let_def) + hence "m \ src \\<^sup>+ src" using src + apply - + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: next_unfold') + apply (rule r_into_trancl) + apply (simp add: next_unfold') + done + with no_loops + have False by (simp add: no_loops_def) + } + thus ?thesis by auto blast +qed + +lemma no_loops_next_prev_0: + fixes m :: cte_heap + assumes "m src = Some (CTE src_cap src_node)" + assumes "no_loops m" + assumes "valid_dlist m" + shows "(mdbNext src_node = mdbPrev src_node) = + (mdbPrev src_node = 0 \ mdbNext src_node = 0)" + apply (rule iffI) + apply (drule sym) + apply (simp add: no_loops_prev_next_0 [OF assms]) + apply clarsimp + done + +locale vmdb = mdb_next + + assumes valid: "valid_mdb_ctes m" + +sublocale vmdb < mdb_order + using valid + by (auto simp: greater_def greater_eq_def mdb_order_def valid_mdb_ctes_def) + +context vmdb +begin + +declare no_0 [intro!] +declare no_loops [intro!] + +lemma dlist [intro!]: "valid_dlist m" + using valid by (simp add: valid_mdb_ctes_def) + +lemmas m_0_simps [iff] = no_0_simps [OF no_0] + +lemma prev_next_0_p: + assumes "m p = Some (CTE cap node)" + shows "(mdbPrev node = mdbNext node) = + (mdbPrev node = 0 \ mdbNext node = 0)" + using assms by (rule no_loops_prev_next_0) auto + +lemma next_prev_0_p: + assumes "m p = Some (CTE cap node)" + shows "(mdbNext node = mdbPrev node) = + (mdbPrev node = 0 \ mdbNext node = 0)" + using assms by (rule no_loops_next_prev_0) auto + +lemmas dlistEn = valid_dlistEn [OF dlist] +lemmas dlistEp = valid_dlistEp [OF dlist] + +lemmas dlist_prevD = vdlist_prevD [OF _ _ dlist no_0] +lemmas dlist_nextD = vdlist_nextD [OF _ _ dlist no_0] +lemmas dlist_prevD0 = vdlist_prevD0 [OF _ _ dlist] +lemmas dlist_nextD0 = vdlist_nextD0 [OF _ _ dlist] +lemmas dlist_prev_src_unique = vdlist_prev_src_unique [OF _ _ _ dlist] +lemmas dlist_next_src_unique = vdlist_next_src_unique [OF _ _ _ dlist] + +lemma subtree_not_0 [simp]: + "\m \ p \ 0" + apply clarsimp + apply (erule subtree.cases) + apply auto + done + +lemma not_0_subtree [simp]: + "\m \ 0 \ p" + apply clarsimp + apply (erule subtree.induct) + apply (auto simp: mdb_next_unfold) + done + +lemma not_0_next [simp]: + "\ m \ 0 \ p" + by (clarsimp simp: mdb_next_unfold) + +lemma not_0_trancl [simp]: + "\ m \ 0 \\<^sup>+ p" + by (clarsimp dest!: tranclD) + +lemma rtrancl0 [simp]: + "m \ 0 \\<^sup>* p = (p = 0)" + by (auto dest!: rtranclD) + +lemma valid_badges: "valid_badges m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma nullcaps: "valid_nullcaps m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma + caps_contained: "caps_contained' m" and + chunked: "mdb_chunked m" and + untyped_mdb: "untyped_mdb' m" and + untyped_inc: "untyped_inc' m" and + class_links: "class_links m" and + irq_control: "irq_control m" + using valid by (simp add: valid_mdb_ctes_def)+ + +end (* of context vmdb *) + 
+lemma no_self_loop_next: + assumes vmdb: "valid_mdb_ctes m" + and lup: "m ptr = Some cte" + shows "mdbNext (cteMDBNode cte) \ ptr" +proof - + from vmdb have "no_loops m" .. + thus ?thesis + by (rule no_self_loop_next_noloop) fact+ +qed + +lemma no_self_loop_prev: + assumes vmdb: "valid_mdb_ctes m" + and lup: "m ptr = Some cte" + shows "mdbPrev (cteMDBNode cte) \ ptr" +proof + assume prev: "mdbPrev (cteMDBNode cte) = ptr" + + from vmdb have "no_0 m" .. + with lup have "ptr \ 0" + by (rule no_0_neq) + + moreover have "mdbNext (cteMDBNode cte) \ ptr" + by (rule no_self_loop_next) fact+ + + moreover from vmdb have "valid_dlist m" .. + + ultimately show False using lup prev + by - (erule (1) valid_dlistEp, simp_all) +qed + + +locale mdb_ptr = vmdb + + fixes p cap node + assumes m_p [intro!]: "m p = Some (CTE cap node)" +begin + +lemma p_not_next [simp]: + "(p = mdbNext node) = False" + using valid m_p by (fastforce dest: no_self_loop_next) + +lemma p_not_prev [simp]: + "(p = mdbPrev node) = False" + using valid m_p by (fastforce dest: no_self_loop_prev) + +lemmas next_not_p [simp] = p_not_next [THEN x_sym] +lemmas prev_not_p [simp] = p_not_prev [THEN x_sym] + +lemmas prev_next_0 [simp] = prev_next_0_p [OF m_p] next_prev_0_p [OF m_p] + +lemma p_0 [simp]: "p \ 0" using m_p by clarsimp + +lemma p_nextD: + assumes p': "m p' = Some (CTE cap' node')" + assumes eq: "mdbNext node = mdbNext node'" + shows "p = p' \ mdbNext node = 0 \ mdbNext node' = 0" +proof (cases "mdbNext node = 0") + case True thus ?thesis using eq by simp +next + case False + with eq have n': "mdbNext node' \ 0" by simp + + have "p = p'" + apply (rule dlistEn [OF m_p, simplified, OF False]) + apply (simp add: eq) + apply (rule dlistEn [OF p', simplified, OF n']) + apply clarsimp + done + + thus ?thesis by blast +qed + +lemma p_next_eq: + assumes "m p' = Some (CTE cap' node')" + shows "(mdbNext node = mdbNext node') = + (p = p' \ mdbNext node = 0 \ mdbNext node' = 0)" + using assms m_p + apply - + apply (rule iffI) + apply (erule (1) p_nextD) + apply auto + done + +lemma p_prevD: + assumes p': "m p' = Some (CTE cap' node')" + assumes eq: "mdbPrev node = mdbPrev node'" + shows "p = p' \ mdbPrev node = 0 \ mdbPrev node' = 0" +proof (cases "mdbPrev node = 0") + case True thus ?thesis using eq by simp +next + case False + with eq have n': "mdbPrev node' \ 0" by simp + + have "p = p'" + apply (rule dlistEp [OF m_p, simplified, OF False]) + apply (simp add: eq) + apply (rule dlistEp [OF p', simplified, OF n']) + apply clarsimp + done + + thus ?thesis by blast +qed + +lemma p_prev_eq: + assumes "m p' = Some (CTE cap' node')" + shows "(mdbPrev node = mdbPrev node') = + (p = p' \ mdbPrev node = 0 \ mdbPrev node' = 0)" + using assms m_p + apply - + apply (rule iffI) + apply (erule (1) p_prevD) + apply auto + done + +lemmas p_prev_qe = p_prev_eq [THEN x_sym] +lemmas p_next_qe = p_next_eq [THEN x_sym] + +lemma m_p_prev [intro!]: + "m \ mdbPrev node \ p" + using m_p by (clarsimp simp: mdb_prev_def) + +lemma m_p_next [intro!]: + "m \ p \ mdbNext node" + using m_p by (clarsimp simp: mdb_next_unfold) + +lemma next_p_prev: + "mdbNext node \ 0 \ m \ p \ mdbNext node" + by (rule dlist_nextD0 [OF m_p_next]) + +lemma prev_p_next: + "mdbPrev node \ 0 \ m \ mdbPrev node \ p" + by (rule dlist_prevD0 [OF m_p_prev]) + +lemma p_next: + "(m \ p \ p') = (p' = mdbNext node)" + using m_p by (auto simp: mdb_next_unfold) + +end (* of locale mdb_ptr *) + +lemma no_mdb_not_target: + "\ m \ c \ c'; m p = Some cte; no_mdb cte; valid_dlist m; no_0 m \ + \ c' \ p" + 
apply clarsimp + apply (subgoal_tac "c \ 0") + prefer 2 + apply (clarsimp simp: mdb_next_unfold) + apply (drule (3) vdlist_nextD) + apply (clarsimp simp: mdb_prev_def) + apply (simp add: no_mdb_def) + done + +context begin interpretation Arch . (*FIXME: arch_split*) +lemma valid_dlist_init: + "\ valid_dlist m; m p = Some cte; no_mdb cte \ \ + valid_dlist (m (p \ CTE cap initMDBNode))" + apply (simp add: initMDBNode_def Let_def nullPointer_def) + apply (clarsimp simp: no_mdb_def valid_dlist_def Let_def) + apply fastforce + done +end + +lemma (in mdb_ptr) descendants_of_init': + assumes n: "no_mdb (CTE cap node)" + shows + "descendants_of' p' (m(p \ CTE c initMDBNode)) = + descendants_of' p' m" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (rule iffI) + apply (erule subtree.induct) + apply (frule no_mdb_not_target [where p=p]) + apply simp + apply (simp add: no_mdb_def) + apply (rule valid_dlist_init[OF dlist, OF m_p n]) + apply (insert no_0)[1] + apply (clarsimp simp: no_0_def) + apply (clarsimp simp: mdb_next_unfold split: if_split_asm) + apply (rule direct_parent) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (clarsimp simp: parentOf_def split: if_split_asm) + apply (frule no_mdb_not_target [where p=p]) + apply simp + apply (simp add: no_mdb_def) + apply (rule valid_dlist_init[OF dlist, OF m_p n]) + apply (insert no_0)[1] + apply (clarsimp simp: no_0_def) + apply (subgoal_tac "p' \ p") + apply (erule trans_parent) + apply (clarsimp simp: mdb_next_unfold split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def m_p split: if_split_asm) + apply clarsimp + apply (drule subtree_mdb_next)+ + apply (drule tranclD)+ + apply clarsimp + apply (insert n)[1] + apply (clarsimp simp: mdb_next_unfold m_p no_mdb_def) + apply (erule subtree.induct) + apply (frule no_mdb_not_target [where p=p], rule m_p, rule n) + apply (rule dlist) + apply (rule no_0) + apply (subgoal_tac "p'\p") + prefer 2 + apply (insert n)[1] + apply (clarsimp simp: mdb_next_unfold m_p no_mdb_def) + apply (rule direct_parent) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (clarsimp simp: parentOf_def) + apply (frule no_mdb_not_target [where p=p], rule m_p, rule n) + apply (rule dlist) + apply (rule no_0) + apply (subgoal_tac "c'\p") + prefer 2 + apply (insert n)[1] + apply (clarsimp simp: mdb_next_unfold m_p no_mdb_def) + apply (subgoal_tac "p'\p") + apply (erule trans_parent) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (clarsimp simp: parentOf_def) + apply clarsimp + apply (drule subtree_mdb_next) + apply (drule tranclD) + apply clarsimp + apply (insert n) + apply (clarsimp simp: mdb_next_unfold no_mdb_def m_p) + done + +lemma untyped_mdb_init: + "\ valid_mdb_ctes m; m p = Some cte; no_mdb cte; + caps_no_overlap' m (capRange cap); untyped_mdb' m; + valid_objs' s; s \' cap; + m = ctes_of s\ + \ untyped_mdb' (m(p \ CTE cap initMDBNode))" + apply (clarsimp simp add: untyped_mdb'_def) + apply (rule conjI) + apply clarsimp + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=p' in allE, erule allE, erule impE, erule exI) + apply (drule (1) ctes_of_valid_cap')+ + apply (drule valid_capAligned)+ + apply (drule untypedCapRange)+ + apply simp + apply (cases cte) + apply (rename_tac capability mdbnode) + apply clarsimp + apply (subgoal_tac "mdb_ptr (ctes_of s) p capability mdbnode") + prefer 2 + apply (simp add: vmdb_def mdb_ptr_def mdb_ptr_axioms_def) + apply (clarsimp simp: mdb_ptr.descendants_of_init') + apply (simp add: 
caps_no_overlap'_def) + apply (erule_tac x=pa in allE, erule allE, erule impE, erule exI) + apply (drule (1) ctes_of_valid_cap')+ + apply (drule valid_capAligned untypedCapRange)+ + apply simp + apply blast + done + +lemma aligned_untypedRange_non_empty: + "\capAligned c; isUntypedCap c\ \ untypedRange c \ {}" + apply (frule untypedCapRange) + apply (drule capAligned_capUntypedPtr) + apply (clarsimp simp: isCap_simps) + apply blast + done + +lemma untypedRange_not_emptyD: "untypedRange c' \ {} \ isUntypedCap c'" + by (case_tac c'; simp add: isCap_simps) + +lemma usableRange_subseteq: + "\capAligned c';isUntypedCap c'\ \ usableUntypedRange c' \ untypedRange c'" + apply (clarsimp simp:isCap_simps capAligned_def mask_def add_diff_eq split:if_splits) + apply (erule order_trans[OF is_aligned_no_wrap']) + apply (erule of_nat_power) + apply (simp add:word_bits_def)+ + done + +lemma untypedRange_in_capRange: "untypedRange x \ capRange x" + by (case_tac x; simp add: capRange_def) + +lemma untyped_inc_init: + "\ valid_mdb_ctes m; m p = Some cte; no_mdb cte; + caps_no_overlap' m (capRange cap); + valid_objs' s; s \' cap; + m = ctes_of s\ + \ untyped_inc' (m(p \ CTE cap initMDBNode))" + apply (clarsimp simp add: valid_mdb_ctes_def untyped_inc'_def) + apply (intro conjI impI) + apply clarsimp + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=p' in allE, erule allE, erule impE, erule exI) + apply (drule (1) ctes_of_valid_cap')+ + apply (drule valid_capAligned)+ + apply (frule usableRange_subseteq[OF _ untypedRange_not_emptyD]) + apply (drule (1) aligned_untypedRange_non_empty) + apply assumption + apply (frule_tac c' = c' in usableRange_subseteq) + apply (drule (1) aligned_untypedRange_non_empty) + apply assumption + apply (drule(1) aligned_untypedRange_non_empty)+ + apply (thin_tac "All P" for P) + apply (subgoal_tac "untypedRange cap \ untypedRange c' = {}") + apply (intro conjI) + apply simp + apply (drule(2) set_inter_not_emptyD2) + apply fastforce + apply (drule(2) set_inter_not_emptyD1) + apply fastforce + apply (drule(2) set_inter_not_emptyD3) + apply simp+ + apply (rule disjoint_subset2[OF _ disjoint_subset]) + apply (rule untypedRange_in_capRange)+ + apply (simp add:Int_ac) + apply clarsimp + apply (cases cte) + apply (rename_tac capability mdbnode) + apply clarsimp + apply (subgoal_tac "mdb_ptr (ctes_of s) p capability mdbnode") + prefer 2 + apply (simp add: vmdb_def mdb_ptr_def mdb_ptr_axioms_def valid_mdb_ctes_def untyped_inc'_def) + apply (clarsimp simp: mdb_ptr.descendants_of_init') + apply (simp add: caps_no_overlap'_def) + apply (erule_tac x=pa in allE, erule allE, erule impE, erule exI) + apply (drule (1) ctes_of_valid_cap')+ + apply (drule valid_capAligned)+ + apply (frule usableRange_subseteq[OF _ untypedRange_not_emptyD]) + apply (drule (1) aligned_untypedRange_non_empty) + apply assumption + apply (frule_tac c' = c in usableRange_subseteq) + apply (drule (1) aligned_untypedRange_non_empty) + apply assumption + apply (drule (1) aligned_untypedRange_non_empty)+ + apply (drule untypedCapRange)+ + apply (thin_tac "All P" for P) + apply (subgoal_tac "untypedRange cap \ untypedRange c = {}") + apply (intro conjI) + apply simp + apply (drule(2) set_inter_not_emptyD1) + apply fastforce + apply (drule(2) set_inter_not_emptyD2) + apply fastforce + apply (drule(2) set_inter_not_emptyD3) + apply simp+ + apply (rule disjoint_subset2[OF _ disjoint_subset]) + apply (rule untypedRange_in_capRange)+ + apply (simp add:Int_ac) + done +context begin interpretation Arch . 
(*FIXME: arch_split*) +lemma valid_nullcaps_init: + "\ valid_nullcaps m; cap \ NullCap \ \ valid_nullcaps (m(p \ CTE cap initMDBNode))" + by (simp add: valid_nullcaps_def initMDBNode_def nullPointer_def) +end + +lemma class_links_init: + "\ class_links m; no_0 m; m p = Some cte; + no_mdb cte; valid_dlist m \ + \ class_links (m(p \ CTE cap initMDBNode))" + apply (simp add: class_links_def split del: if_split) + apply (erule allEI, erule allEI) + apply simp + apply (intro conjI impI) + apply clarsimp + apply (drule no_mdb_not_target[where p=p], simp) + apply (simp add: no_mdb_def) + apply (erule(2) valid_dlist_init) + apply (clarsimp simp add: no_0_def) + apply simp + apply (clarsimp simp: mdb_next_unfold) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma distinct_zombies_copyE: + "\ distinct_zombies m; m x = Some cte; + capClass (cteCap cte') = PhysicalClass + \ isZombie (cteCap cte) = isZombie (cteCap cte'); + \ capClass (cteCap cte') = PhysicalClass; isUntypedCap (cteCap cte) \ + \ isUntypedCap (cteCap cte'); + \ capClass (cteCap cte') = PhysicalClass; isArchFrameCap (cteCap cte) \ + \ isArchFrameCap (cteCap cte'); + isZombie (cteCap cte') \ x = y; + capClass (cteCap cte') = PhysicalClass \ + capBits (cteCap cte') = capBits (cteCap cte); + capClass (cteCap cte') = PhysicalClass \ capClass (cteCap cte) = PhysicalClass; + capClass (cteCap cte') = PhysicalClass \ + capUntypedPtr (cteCap cte') = capUntypedPtr (cteCap cte) \ + \ distinct_zombies (m (y \ cte'))" + apply (simp add: distinct_zombies_def distinct_zombie_caps_def) + apply clarsimp + apply (intro allI conjI impI) + apply clarsimp + apply (drule_tac x=y in spec) + apply (drule_tac x=ptr' in spec) + apply (clarsimp simp: isCap_simps) + apply clarsimp + apply (drule_tac x=ptr in spec) + apply (drule_tac x=x in spec) + apply clarsimp + apply auto[1] + apply clarsimp + apply (drule_tac x=ptr in spec) + apply (drule_tac x=ptr' in spec) + apply auto[1] + done + +lemmas distinct_zombies_sameE + = distinct_zombies_copyE [where y=x and x=x for x, simplified, + OF _ _ _ _ _] +context begin interpretation Arch . 
(*FIXME: arch_split*) +lemma capBits_Master: + "capBits (capMasterCap cap) = capBits cap" + by (clarsimp simp: capMasterCap_def split: capability.split arch_capability.split) + +lemma capUntyped_Master: + "capUntypedPtr (capMasterCap cap) = capUntypedPtr cap" + by (clarsimp simp: capMasterCap_def AARCH64_H.capUntypedPtr_def split: capability.split arch_capability.split) + +lemma distinct_zombies_copyMasterE: + "\ distinct_zombies m; m x = Some cte; + capClass (cteCap cte') = PhysicalClass + \ capMasterCap (cteCap cte) = capMasterCap (cteCap cte'); + isZombie (cteCap cte') \ x = y \ + \ distinct_zombies (m (y \ cte'))" + apply (erule(1) distinct_zombies_copyE, simp_all) + apply (rule master_eqI, rule isCap_Master, simp) + apply (drule_tac f=isUntypedCap in arg_cong) + apply (simp add: isCap_Master) + apply (drule_tac f=isArchFrameCap in arg_cong) + apply (simp add: isCap_Master) + apply (rule master_eqI, rule capBits_Master, simp) + apply clarsimp + apply (drule_tac f=capClass in arg_cong, simp add: capClass_Master) + apply (drule_tac f=capUntypedPtr in arg_cong, simp add: capUntyped_Master) + done + +lemmas distinct_zombies_sameMasterE + = distinct_zombies_copyMasterE[where x=x and y=x for x, simplified, + OF _ _ _] + +lemma isZombie_capClass: "isZombie cap \ capClass cap = PhysicalClass" + by (clarsimp simp: isCap_simps) + +lemma distinct_zombies_unzombieE: + "\ distinct_zombies m; m x = Some cte; + isZombie (cteCap cte') \ isZombie (cteCap cte); + isUntypedCap (cteCap cte) \ isUntypedCap (cteCap cte'); + isArchFrameCap (cteCap cte) \ isArchFrameCap (cteCap cte'); + capClass (cteCap cte') = capClass (cteCap cte); + capBits (cteCap cte') = capBits (cteCap cte); + capUntypedPtr (cteCap cte') = capUntypedPtr (cteCap cte) \ + \ distinct_zombies (m(x \ cte'))" + apply (simp add: distinct_zombies_def distinct_zombie_caps_def + split del: if_split) + apply (erule allEI, erule allEI) + apply clarsimp + done + +lemma distinct_zombies_seperateE: + "\ distinct_zombies m; + \y cte. m y = Some cte \ x \ y + \ \ isUntypedCap (cteCap cte) + \ \ isArchFrameCap (cteCap cte) + \ capClass (cteCap cte) = PhysicalClass + \ capClass (cteCap cte') = PhysicalClass + \ capUntypedPtr (cteCap cte) = capUntypedPtr (cteCap cte') + \ capBits (cteCap cte) = capBits (cteCap cte') \ False \ + \ distinct_zombies (m (x \ cte'))" + apply (simp add: distinct_zombies_def distinct_zombie_caps_def) + apply (intro impI allI conjI) + apply (clarsimp simp: isZombie_capClass) + apply fastforce + apply clarsimp + apply (frule isZombie_capClass) + apply (subgoal_tac "\ isUntypedCap (cteCap z) \ \ isArchFrameCap (cteCap z)") + apply fastforce + apply (clarsimp simp: isCap_simps) + apply clarsimp + apply (erule notE[rotated], elim allE, erule mp) + apply auto[1] + done + +lemma distinct_zombies_init: + "\ distinct_zombies m; caps_no_overlap' m (capRange (cteCap cte)); + capAligned (cteCap cte); \x cte. m x = Some cte \ capAligned (cteCap cte) \ + \ distinct_zombies (m (p \ cte))" + apply (erule distinct_zombies_seperateE) + apply (rename_tac y cte') + apply (clarsimp simp: caps_no_overlap'_def) + apply (drule_tac x=y in spec)+ + apply (case_tac cte') + apply (rename_tac capability mdbnode) + apply clarsimp + apply (subgoal_tac "capRange capability \ capRange (cteCap cte)") + apply (clarsimp simp: capRange_def) + apply (drule(1) capAligned_capUntypedPtr)+ + apply clarsimp + done + +definition + "no_irq' m \ \p cte. 
m p = Some cte \ cteCap cte \ IRQControlCap" + +lemma no_irqD': + "\ m p = Some (CTE IRQControlCap n); no_irq' m \ \ False" + unfolding no_irq'_def + apply (erule allE, erule allE, erule (1) impE) + apply auto + done + +lemma irq_control_init: + assumes no_irq: "cap = IRQControlCap \ no_irq' m" + assumes ctrl: "irq_control m" + shows "irq_control (m(p \ CTE cap initMDBNode))" + using no_irq + apply (clarsimp simp: irq_control_def) + apply (rule conjI) + apply (clarsimp simp: initMDBNode_def) + apply (erule (1) no_irqD') + apply clarsimp + apply (frule irq_revocable, rule ctrl) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule (1) no_irqD') + apply clarsimp + apply (erule (1) irq_controlD, rule ctrl) + done + +lemma valid_mdb_ctes_init: + "\ valid_mdb_ctes m; m p = Some cte; no_mdb cte; + caps_no_overlap' m (capRange cap); s \' cap; + valid_objs' s; m = ctes_of s; cap \ NullCap; + fresh_virt_cap_class (capClass cap) (ctes_of s); + cap = capability.IRQControlCap \ no_irq' (ctes_of s) \ \ + valid_mdb_ctes (m (p \ CTE cap initMDBNode))" + apply (simp add: valid_mdb_ctes_def) + apply (rule conjI, rule valid_dlist_init, simp+) + apply (subgoal_tac "p \ 0") + prefer 2 + apply (erule no_0_neq, clarsimp) + apply (clarsimp simp: no_0_update) + apply (rule conjI, rule mdb_chain_0_update_0, simp+) + apply (rule conjI, rule valid_badges_0_update, simp+) + apply (rule conjI, erule (1) caps_contained_no_overlap) + apply (rule conjI, rule mdb_chunked_init, simp+) + apply (rule conjI) + apply (rule untyped_mdb_init, (simp add: valid_mdb_ctes_def)+) + apply (rule conjI) + apply (rule untyped_inc_init, (simp add: valid_mdb_ctes_def)+) + apply (rule conjI) + apply (erule(1) valid_nullcaps_init) + apply (rule conjI, simp add: ut_revocable'_def initMDBNode_def) + apply (rule conjI, erule(4) class_links_init) + apply (rule conjI) + apply (erule distinct_zombies_init, simp+) + apply (erule valid_capAligned) + apply clarsimp + apply (case_tac ctea, clarsimp) + apply (rule valid_capAligned, erule(1) ctes_of_valid_cap') + apply (rule conjI) + apply (erule (1) irq_control_init) + apply (simp add: ran_def reply_masters_rvk_fb_def) + apply (auto simp: initMDBNode_def)[1] + done + +lemma setCTE_state_refs_of'[wp]: + "\\s. P (state_refs_of' s)\ setCTE p cte \\rv s. P (state_refs_of' s)\" + unfolding setCTE_def + apply (rule setObject_state_refs_of_eq) + apply (clarsimp simp: updateObject_cte in_monad typeError_def + in_magnitude_check objBits_simps + split: kernel_object.split_asm if_split_asm) + done + +lemma setCTE_valid_mdb: + fixes cap + defines "cte \ CTE cap initMDBNode" + shows + "\\s. valid_mdb' s \ cte_wp_at' no_mdb ptr s \ + s \' cap \ valid_objs' s \ cap \ NullCap \ + caps_no_overlap' (ctes_of s) (capRange cap) \ + fresh_virt_cap_class (capClass cap) (ctes_of s) \ + (cap = capability.IRQControlCap \ no_irq' (ctes_of s))\ + setCTE ptr cte + \\r. valid_mdb'\" + apply (simp add: valid_mdb'_def setCTE_def cte_def cte_wp_at_ctes_of) + apply (wp ctes_of_setObject_cte) + apply (clarsimp simp del: fun_upd_apply) + apply (erule (8) valid_mdb_ctes_init [OF _ _ _ _ _ _ refl]) + done + +lemma setCTE_valid_objs'[wp]: + "\valid_objs' and (valid_cap' (cteCap cte)) \ + setCTE p cte \\rv. 
valid_objs'\" + unfolding setCTE_def + apply (rule setObject_valid_objs') + apply (clarsimp simp: prod_eq_iff lookupAround2_char1 updateObject_cte objBits_simps) + apply (clarsimp simp: prod_eq_iff lookupAround2_char1 + updateObject_cte in_monad typeError_def + valid_obj'_def valid_tcb'_def valid_cte'_def + tcb_cte_cases_def cteSizeBits_def + split: kernel_object.split_asm if_split_asm) + done + +lemma getCTE_cte_wp_at: + "\\\ getCTE p \\rv. cte_wp_at' (\c. c = rv) p\" + apply (clarsimp simp: valid_def cte_wp_at'_def getCTE_def) + apply (frule state_unchanged [OF getObject_cte_inv]) + apply simp + apply (drule getObject_cte_det, simp) + done + +lemma getCTE_sp: + "\P\ getCTE p \\rv. cte_wp_at' (\c. c = rv) p and P\" + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule getCTE_cte_wp_at) + apply (rule getCTE_inv) + apply (rule conjI, rule TrueI, assumption) + apply simp + done + +lemmas setCTE_ad[wp] = + setObject_aligned[where 'a=cte, folded setCTE_def] + setObject_distinct[where 'a=cte, folded setCTE_def] +lemmas setCTE_map_to_ctes = + ctes_of_setObject_cte[folded setCTE_def] + +lemma getCTE_ctes_wp: + "\\s. \cte. ctes_of s ptr = Some cte \ P cte s\ getCTE ptr \P\" + apply (rule hoare_strengthen_post, rule getCTE_sp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma updateMDB_valid_objs'[wp]: + "\valid_objs'\ updateMDB m p \\rv. valid_objs'\" + apply (clarsimp simp add: updateMDB_def) + apply (wp | simp)+ + done + +lemma cte_overwrite: + "cteMDBNode_update (\x. m) (cteCap_update (\x. c) v) = CTE c m" + by (cases v, simp) + +lemma setCTE_no_0_obj' [wp]: + "\no_0_obj'\ setCTE p c \\_. no_0_obj'\" + by (simp add: setCTE_def) wp + +declare mresults_fail[simp] + +end + +end (* of theory *) diff --git a/proof/refine/AARCH64/CSpace_R.thy b/proof/refine/AARCH64/CSpace_R.thy new file mode 100644 index 0000000000..0e53eb1ff7 --- /dev/null +++ b/proof/refine/AARCH64/CSpace_R.thy @@ -0,0 +1,6274 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + CSpace refinement +*) + +theory CSpace_R +imports CSpace1_R +begin + +lemma setCTE_pred_tcb_at': + "\pred_tcb_at' proj P t\ + setCTE c cte + \\rv. pred_tcb_at' proj P t\" + unfolding pred_tcb_at'_def setCTE_def + apply (rule setObject_cte_obj_at_tcb') + apply (simp add: tcb_to_itcb'_def)+ + done + +locale mdb_move = + mdb_ptr m _ _ src src_cap src_node + for m src src_cap src_node + + + fixes dest cap' + + fixes old_dest_node + assumes dest: "m dest = Some (CTE NullCap old_dest_node)" + assumes prev: "mdbPrev old_dest_node = 0" + assumes nxt: "mdbNext old_dest_node = 0" + + assumes parency: "weak_derived' src_cap cap'" + assumes not_null: "src_cap \ NullCap" + assumes neq: "src \ dest" + + fixes n + defines "n \ + modify_map + (modify_map + (modify_map + (modify_map + (modify_map m dest (cteCap_update (\_. cap'))) + src (cteCap_update (\_. NullCap))) + dest (cteMDBNode_update (\m. src_node))) + src (cteMDBNode_update (\m. nullMDBNode))) + (mdbPrev src_node) (cteMDBNode_update (mdbNext_update (\_. dest)))" + + fixes m' + defines "m' \ + modify_map n (mdbNext src_node) + (cteMDBNode_update (mdbPrev_update (\_. dest)))" +begin +interpretation Arch . 
(*FIXME: arch_split*) + + +lemmas src = m_p + +lemma [intro?]: + shows src_0: "src \ 0" + and dest_0: "dest \ 0" + using no_0 src dest + by (auto simp: no_0_def) + +lemma src_neq_next: + "src \ mdbNext src_node" + by simp + +lemma src_neq_prev: + "src \ mdbPrev src_node" + by simp + +lemmas src_neq_next2 = src_neq_next [symmetric] +lemmas src_neq_prev2 = src_neq_prev [symmetric] + +lemma n: + "n = modify_map (m(dest \ CTE cap' src_node, + src \ CTE capability.NullCap nullMDBNode)) + (mdbPrev src_node) + (cteMDBNode_update (mdbNext_update (\_. dest)))" + using neq src dest no_0 + by (simp add: n_def modify_map_apply) + +lemma dest_no_parent [iff]: + "m \ dest \ x = False" using dest nxt + by (auto dest: subtree_next_0) + +lemma dest_no_child [iff]: + "m \ x \ dest = False" using dest prev + by (auto dest: subtree_prev_0) + +lemma src_no_parent [iff]: + "n \ src \ x = False" + apply clarsimp + apply (erule subtree_next_0) + apply (auto simp add: n modify_map_def nullPointer_def) + done + +lemma no_0_n: "no_0 n" by (simp add: n_def no_0) +lemma no_0': "no_0 m'" by (simp add: m'_def no_0_n) + +lemma next_neq_dest [iff]: + "mdbNext src_node \ dest" + using dlist src dest prev dest_0 no_0 + by (fastforce simp add: valid_dlist_def no_0_def Let_def) + +lemma prev_neq_dest [simp]: + "mdbPrev src_node \ dest" + using dlist src dest nxt dest_0 no_0 + by (fastforce simp add: valid_dlist_def no_0_def Let_def) + +lemmas next_neq_dest2 [simp] = next_neq_dest [symmetric] +lemmas prev_neq_dest2 [simp] = prev_neq_dest [symmetric] + +lemma dlist': + "valid_dlist m'" + using src dest prev neq nxt dlist no_0 + apply (simp add: m'_def n no_0_def) + apply (simp add: valid_dlist_def Let_def) + apply clarsimp + apply (case_tac cte) + apply (rename_tac cap node) + apply (rule conjI) + apply (clarsimp simp: modify_map_def nullPointer_def split: if_split_asm) + apply (case_tac z) + apply fastforce + apply (case_tac z) + apply (rename_tac capability mdbnode) + apply clarsimp + apply (rule conjI) + apply fastforce + apply clarsimp + apply (rule conjI, fastforce) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "mdbNext mdbnode = mdbPrev src_node") + prefer 2 + apply fastforce + apply (subgoal_tac "mdbNext mdbnode = src") + prefer 2 + apply fastforce + apply fastforce + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + apply fastforce + subgoal by fastforce + subgoal by fastforce + apply (rule conjI, clarsimp) + apply fastforce + apply (clarsimp, rule conjI, fastforce) + apply (clarsimp, rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + subgoal by fastforce + apply (clarsimp simp: modify_map_def nullPointer_def split: if_split_asm) + apply (case_tac z) + apply (clarsimp, rule conjI, fastforce) + apply (clarsimp, rule conjI, fastforce) + apply (clarsimp, rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + apply (clarsimp, rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + subgoal by fastforce + apply (case_tac z) + subgoal by fastforce + subgoal by fastforce + apply (rule conjI) + apply clarsimp + apply fastforce + apply clarsimp + apply (rule conjI, fastforce) + apply (clarsimp, rule conjI) + apply clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + apply (clarsimp, rule conjI) + apply 
clarsimp + apply (frule_tac x=src in spec, erule allE, erule (1) impE) + subgoal by fastforce + subgoal by fastforce + done + +lemma src_no_child [iff]: + "n \ x \ src = False" +proof - + from src_neq_next + have "m' src = Some (CTE capability.NullCap nullMDBNode)" + by (simp add: m'_def n modify_map_def) + hence "m' \ x \ src = False" using dlist' no_0' + by (auto elim!: subtree_prev_0 simp: nullPointer_def) + thus ?thesis by (simp add: m'_def) +qed + +lemma dest_not_parentOf_c[iff]: + "m \ dest parentOf c = False" + using dest by (simp add: parentOf_def) + +lemma dest_source [iff]: + "(m \ dest \ x) = (x = 0)" + using dest nxt by (simp add: next_unfold') + +lemma dest_no_target [iff]: + "m \ p \ dest = False" + using dlist no_0 prev dest + by (fastforce simp: valid_dlist_def Let_def no_0_def next_unfold') + +lemma parent_preserved: + "isMDBParentOf cte' (CTE cap' src_node) = + isMDBParentOf cte' (CTE src_cap src_node)" + using parency unfolding weak_derived'_def + apply (cases cte') + apply (simp add: isMDBParentOf_CTE sameRegionAs_def2) + done + +lemma children_preserved: + "isMDBParentOf (CTE cap' src_node) cte' = + isMDBParentOf (CTE src_cap src_node) cte'" + using parency unfolding weak_derived'_def + apply (cases cte') + apply (simp add: isMDBParentOf_CTE sameRegionAs_def2) + done + +lemma no_src_subtree_n_m: + assumes no_src: "\ m \ p \ src" "p \ src" "p \ dest" + assumes px: "n \ p \ x" + shows "m \ p \ x" using px +proof induct + case (direct_parent c) + thus ?case using neq no_src no_loops + apply - + apply (case_tac "c=dest") + apply (cases "m (mdbPrev src_node)") + apply (unfold n)[1] + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp: mdb_next_update) + apply (rename_tac cte') + apply clarsimp + apply (subgoal_tac "p = mdbPrev src_node") + prefer 2 + apply (simp add: n) + apply (subst (asm) modify_map_apply, simp) + apply (clarsimp simp:_mdb_next_update split: if_split_asm) + apply clarsimp + apply (simp add: n) + apply (subst (asm) modify_map_apply, simp)+ + apply (insert dest)[1] + apply (clarsimp simp add: parentOf_def mdb_next_unfold) + apply (subgoal_tac "m \ mdbPrev src_node \ src") + apply simp + apply (rule subtree.direct_parent) + apply (rule prev_leadstoI) + apply (rule src) + apply (insert no_0, clarsimp simp: no_0_def)[1] + apply (rule dlist) + apply (rule src_0) + apply (simp add: parentOf_def src parent_preserved) + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (simp add: mdb_next_update) + apply (subst (asm) modify_map_apply, simp)+ + apply (simp add: mdb_next_update split: if_split_asm) + apply assumption + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp add: parentOf_def split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp add: parentOf_def split: if_split_asm) + done +next + case (trans_parent c c') + thus ?case using neq no_src + apply - + apply (case_tac "c' = dest") + apply clarsimp + apply (subgoal_tac "c = mdbPrev src_node") + prefer 2 + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp: mdb_next_update split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp: mdb_next_update split: if_split_asm) + apply clarsimp + apply (cases "m (mdbPrev src_node)") + apply (unfold n)[1] + apply (subst (asm) modify_map_None, 
simp)+ + apply (clarsimp simp: mdb_next_update) + apply (subgoal_tac "m \ p \ src") + apply simp + apply (rule subtree.trans_parent, assumption) + apply (rule prev_leadstoI) + apply (rule src) + apply (insert no_0, clarsimp simp: no_0_def)[1] + apply (rule dlist) + apply (rule src_0) + apply (clarsimp simp: n) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp: parentOf_def src parent_preserved + split: if_split_asm) + apply (rule subtree.trans_parent, assumption) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (simp add: mdb_next_update split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (simp add: mdb_next_update split: if_split_asm) + apply assumption + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp: parentOf_def split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp: parentOf_def split: if_split_asm) + done +qed + +lemma subtree_m_n: + assumes p_neq: "p \ dest" "p \ src" + assumes px: "m \ p \ x" + shows "if x = src then n \ p \ dest else n \ p \ x" using px +proof induct + case (direct_parent c) + thus ?case using p_neq + apply - + apply simp + apply (rule conjI) + apply clarsimp + apply (drule leadsto_is_prev) + apply (rule src) + apply (rule dlist) + apply (rule no_0) + apply (clarsimp simp: parentOf_def) + apply (rule subtree.direct_parent) + apply (simp add: n modify_map_apply mdb_next_update) + apply (rule dest_0) + apply (clarsimp simp: n modify_map_apply parentOf_def + neq [symmetric] src parent_preserved) + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (simp add: mdb_next_update) + apply (subst modify_map_apply, simp) + apply (clarsimp simp: mdb_next_update) + apply (drule prev_leadstoD) + apply (rule src) + apply (rule dlist) + apply (rule no_0) + apply simp + apply assumption + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (clarsimp simp add: parentOf_def) + apply (subst modify_map_apply, simp) + apply (clarsimp simp add: parentOf_def) + apply fastforce + done +next + case (trans_parent c c') + thus ?case using p_neq + apply - + apply (clarsimp split: if_split_asm) + apply (erule subtree.trans_parent) + apply (clarsimp simp: next_unfold' src n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (clarsimp simp add: neq [symmetric] src split: option.splits) + apply (subst modify_map_apply, simp) + apply (clarsimp simp add: neq [symmetric] src split: option.splits) + apply assumption + apply (clarsimp simp: mdb_next_unfold src n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (simp add: parentOf_def) + apply (subst modify_map_apply, simp) + apply (fastforce simp add: parentOf_def) + apply (rule conjI) + apply clarsimp + apply (cases "m c", simp add: mdb_next_unfold) + apply (drule leadsto_is_prev) + apply (rule src) + apply (rule dlist) + apply (rule no_0) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n modify_map_apply mdb_next_update) + apply (rule dest_0) + apply (clarsimp simp: n modify_map_apply parentOf_def neq [symmetric] src) + apply (rule conjI, clarsimp) + apply (clarsimp simp: parent_preserved) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n) + apply (cases 
"m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (clarsimp simp add: mdb_next_update) + apply (subst modify_map_apply, simp) + apply (clarsimp simp add: mdb_next_update) + apply (rule conjI, clarsimp) + apply clarsimp + apply (drule prev_leadstoD, rule src, rule dlist, rule no_0) + apply simp + apply assumption + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (clarsimp simp add: parentOf_def) + apply (subst modify_map_apply, simp) + apply (fastforce simp add: parentOf_def) + done +qed + +lemmas neq_sym [simp] = neq [symmetric] + +lemmas src_prev_loop [simp] = + subtree_prev_loop [OF src no_loops dlist no_0] + +lemma subtree_src_dest: + "m \ src \ x \ n \ dest \ x" + apply (erule subtree.induct) + apply (clarsimp simp: mdb_next_unfold src) + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (simp add: mdb_next_update) + apply (subst modify_map_apply, simp) + apply (simp add: mdb_next_update) + apply assumption + apply (simp add: n) + apply (clarsimp simp add: modify_map_def parentOf_def src children_preserved) + apply (subgoal_tac "c'' \ src") + prefer 2 + apply (drule (3) subtree.trans_parent) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst modify_map_None, simp) + apply (simp add: mdb_next_update) + apply fastforce + apply (subst modify_map_apply, simp) + apply (simp add: mdb_next_update) + apply fastforce + apply assumption + apply (fastforce simp: n modify_map_def parentOf_def src children_preserved) + done + +lemma src_next [simp]: + "m \ src \ mdbNext src_node" + by (simp add: next_unfold' src) + +lemma dest_no_trancl_target [simp]: + "m \ x \\<^sup>+ dest = False" + by (clarsimp dest!: tranclD2) + +lemma m'_next: + "\m' p = Some (CTE cte' node'); m p = Some (CTE cte node)\ + \ mdbNext node' + = (if p = src then 0 + else if p = dest then mdbNext src_node + else if mdbNext node = src then dest + else mdbNext node)" + apply(simp, intro conjI impI) + apply(clarsimp simp: n m'_def modify_map_def split: if_split_asm) + apply(clarsimp simp: n m'_def modify_map_def nullPointer_def) + apply(subgoal_tac "mdbPrev src_node = p") + prefer 2 + apply(erule dlistEn) + apply(simp) + apply(case_tac "cte'a") + apply(clarsimp simp: src) + apply(clarsimp simp: n m'_def modify_map_def split: if_split_asm) + apply(clarsimp simp: dest n m'_def modify_map_def) + apply(clarsimp simp: n m'_def modify_map_def nullPointer_def) + apply(clarsimp simp: n m'_def modify_map_def split: if_split_asm) + apply(insert m_p no_0) + apply(erule_tac p=src in dlistEp) + apply(clarsimp simp: no_0_def) + apply(clarsimp) + done + + +lemma mdb_next_from_dest: + "n \ dest \\<^sup>+ x \ m \ src \\<^sup>+ x" + apply (erule trancl_induct) + apply (rule r_into_trancl) + apply (simp add: n modify_map_def next_unfold' src) + apply (cases "m (mdbPrev src_node)") + apply (simp add: n) + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp: mdb_next_update split: if_split_asm) + apply (fastforce intro: trancl_into_trancl) + apply (simp add: n) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp: mdb_next_update split: if_split_asm) + apply (subgoal_tac "m \ src \\<^sup>+ src") + apply simp + apply (erule trancl_into_trancl) + apply (rule prev_leadstoI, rule src) + apply (insert no_0)[1] + apply (clarsimp simp add: no_0_def) + apply (rule dlist) + apply (fastforce 
intro: trancl_into_trancl) + done + +lemma dest_loop: + "n \ dest \ dest = False" + apply clarsimp + apply (drule subtree_mdb_next) + apply (drule mdb_next_from_dest) + apply simp + done + + +lemma subtree_dest_src: + "n \ dest \ x \ m \ src \ x" + apply (erule subtree.induct) + apply (clarsimp simp: mdb_next_unfold src) + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (clarsimp simp add: mdb_next_update next_unfold src) + apply (subst (asm) modify_map_apply, simp)+ + apply (clarsimp simp add: mdb_next_update next_unfold src) + apply assumption + apply (simp add: n) + apply (simp add: modify_map_def parentOf_def) + apply (clarsimp simp: src children_preserved) + apply (subgoal_tac "c' \ dest") + prefer 2 + apply clarsimp + apply (subgoal_tac "c'' \ dest") + prefer 2 + apply clarsimp + apply (drule (3) trans_parent) + apply (simp add: dest_loop) + apply (subgoal_tac "c' \ mdbPrev src_node") + prefer 2 + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (simp add: mdb_next_update nullPointer_def split: if_split_asm) + apply (subst (asm) modify_map_apply, simp)+ + apply (simp add: mdb_next_update nullPointer_def split: if_split_asm) + apply assumption + apply (clarsimp simp add: n modify_map_def parentOf_def src children_preserved + split: if_split_asm) + done + +lemma subtree_n_m: + assumes p_neq: "p \ dest" "p \ src" + assumes px: "n \ p \ x" + shows "if x = dest then m \ p \ src else m \ p \ x" using px +proof induct + case (direct_parent c) + thus ?case using p_neq + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "p = mdbPrev src_node") + prefer 2 + apply (drule mdb_next_modify_prev [where x="mdbNext src_node" and f="\_. dest", THEN iffD2]) + apply (fold m'_def) + apply (drule leadsto_is_prev) + apply (fastforce simp: n m'_def modify_map_def) + apply (rule dlist') + apply (rule no_0') + apply simp + apply clarsimp + apply (rule subtree.direct_parent) + apply (rule prev_leadstoI) + apply (rule src) + apply (insert no_0)[1] + apply (clarsimp simp add: next_unfold' n modify_map_def no_0_def split: if_split_asm) + apply (rule dlist) + apply (rule src_0) + apply (clarsimp simp: parentOf_def n modify_map_def src parent_preserved) + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n) + apply (cases "m (mdbPrev src_node)") + apply (subst (asm) modify_map_None, simp)+ + apply (simp add: next_unfold' mdb_next_unfold) + apply (subst (asm) modify_map_apply, simp)+ + apply (simp add: mdb_next_update split: if_split_asm) + apply assumption + apply (simp add: n) + apply (clarsimp simp add: parentOf_def modify_map_def split: if_split_asm) + done +next + case (trans_parent c c') + thus ?case using p_neq + apply - + apply (simp split: if_split_asm) + apply clarsimp + apply (subgoal_tac "c' = mdbNext src_node") + prefer 2 + apply (clarsimp simp add: mdb_next_unfold n modify_map_def) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: mdb_next_unfold src) + apply assumption + apply (clarsimp simp add: parentOf_def modify_map_def n split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "c = mdbPrev src_node") + prefer 2 + apply (drule mdb_next_modify_prev [where x="mdbNext src_node" and f="\_. 
dest", THEN iffD2]) + apply (fold m'_def) + apply (drule leadsto_is_prev) + apply (fastforce simp: n m'_def modify_map_def) + apply (rule dlist') + apply (rule no_0') + apply simp + apply clarsimp + apply (erule subtree.trans_parent) + apply (rule prev_leadstoI) + apply (rule src) + apply (insert no_0)[1] + apply (clarsimp simp: next_unfold' no_0_def n modify_map_def) + apply (rule dlist) + apply (rule src_0) + apply (clarsimp simp: parentOf_def n modify_map_def src + parent_preserved split: if_split_asm) + apply clarsimp + apply (erule subtree.trans_parent) + apply (clarsimp simp add: n modify_map_def mdb_next_unfold nullPointer_def split: if_split_asm) + apply assumption + apply (clarsimp simp add: n modify_map_def parentOf_def split: if_split_asm) + done +qed + +lemma descendants: + "descendants_of' p m' = + (if p = src + then {} + else if p = dest + then descendants_of' src m + else descendants_of' p m - {src} \ + (if src \ descendants_of' p m then {dest} else {}))" + apply (rule set_eqI) + apply (simp add: descendants_of'_def m'_def) + apply (auto simp: subtree_m_n intro: subtree_src_dest subtree_dest_src no_src_subtree_n_m) + apply (auto simp: subtree_n_m) + done +end + +context mdb_move_abs +begin + +end + +context mdb_move +begin + +end + +lemma updateCap_dynamic_duo: + "\ (rv, s') \ fst (updateCap x cap s); pspace_aligned' s; pspace_distinct' s \ + \ pspace_aligned' s' \ pspace_distinct' s'" + unfolding updateCap_def + apply (rule conjI) + apply (erule use_valid | wp | assumption)+ + done + +declare const_apply[simp] + +lemma next_slot_eq2: + "\case n q of None \ next_slot p t' m' = x | Some q' \ next_slot p (t'' q') (m'' q') = x; + case n q of None \ (t' = t \ m' = m) | Some q' \ t'' q' = t \ m'' q' = m\ + \ next_slot p t m = x" + apply(simp split: option.splits) + done + +lemma set_cap_not_quite_corres': + assumes cr: + "pspace_relations (ekheap (a)) (kheap s) (ksPSpace s')" + "ekheap (s) = ekheap (a)" + "cur_thread s = ksCurThread s'" + "idle_thread s = ksIdleThread s'" + "machine_state s = ksMachineState s'" + "work_units_completed s = ksWorkUnitsCompleted s'" + "domain_index s = ksDomScheduleIdx s'" + "domain_list s = ksDomSchedule s'" + "cur_domain s = ksCurDomain s'" + "domain_time s = ksDomainTime s'" + "(x,t') \ fst (updateCap p' c' s')" + "valid_objs s" "pspace_aligned s" "pspace_distinct s" "cte_at p s" + "pspace_aligned' s'" "pspace_distinct' s'" + "interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s')" + "(arch_state s, ksArchState s') \ arch_state_relation" + assumes c: "cap_relation c c'" + assumes p: "p' = cte_map p" + shows "\t. ((),t) \ fst (set_cap c p s) \ + pspace_relations (ekheap t) (kheap t) (ksPSpace t') \ + cdt t = cdt s \ + cdt_list t = cdt_list (s) \ + ekheap t = ekheap (s) \ + scheduler_action t = scheduler_action (s) \ + ready_queues t = ready_queues (s) \ + is_original_cap t = is_original_cap s \ + interrupt_state_relation (interrupt_irq_node t) (interrupt_states t) + (ksInterruptState t') \ + (arch_state t, ksArchState t') \ arch_state_relation \ + cur_thread t = ksCurThread t' \ + idle_thread t = ksIdleThread t' \ + machine_state t = ksMachineState t' \ + work_units_completed t = ksWorkUnitsCompleted t' \ + domain_index t = ksDomScheduleIdx t' \ + domain_list t = ksDomSchedule t' \ + cur_domain t = ksCurDomain t' \ + domain_time t = ksDomainTime t'" + apply (rule set_cap_not_quite_corres) + using cr + apply (fastforce simp: c p pspace_relations_def)+ + done + +context begin interpretation Arch . 
(*FIXME: arch_split*) +lemma cteMove_corres: + assumes cr: "cap_relation cap cap'" + notes trans_state_update'[symmetric,simp] + shows + "corres dc (einvs and + cte_at ptr and + cte_wp_at (\c. c = cap.NullCap) ptr' and + valid_cap cap and tcb_cap_valid cap ptr' and K (ptr \ ptr')) + (invs' and + cte_wp_at' (\c. weak_derived' cap' (cteCap c) \ cteCap c \ NullCap) (cte_map ptr) and + cte_wp_at' (\c. cteCap c = NullCap) (cte_map ptr')) + (cap_move cap ptr ptr') (cteMove cap' (cte_map ptr) (cte_map ptr'))" + (is "corres _ ?P ?P' _ _") + supply subst_all [simp del] + apply (simp add: cap_move_def cteMove_def const_def) + apply (rule corres_symb_exec_r) + defer + apply (rule getCTE_sp) + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp add: cte_wp_at_ctes_of) + apply (rule corres_assert_assume) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule corres_assert_assume) + prefer 2 + apply clarsimp + apply (drule invs_mdb') + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (case_tac oldCTE) + apply (clarsimp simp: valid_nullcaps_def initMDBNode_def) + apply (erule allE)+ + apply (erule (1) impE) + apply (clarsimp simp: nullPointer_def) + apply (rule corres_symb_exec_r) + defer + apply (rule getCTE_sp) + apply wp + apply (rule no_fail_pre, wp) + apply (clarsimp simp add: cte_wp_at_ctes_of) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) + apply (clarsimp simp add: cte_wp_at_ctes_of) + apply (drule invs_mdb') + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (rule conjI) + apply clarsimp + apply (erule (2) valid_dlistEp, simp) + apply clarsimp + apply (erule (2) valid_dlistEn, simp) + apply (clarsimp simp: in_monad state_relation_def) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac oldCTE) + apply (rename_tac x old_dest_node) + apply (case_tac cte) + apply (rename_tac src_cap src_node) + apply clarsimp + apply (subgoal_tac "\c. 
caps_of_state a ptr = Some c") + prefer 2 + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply clarsimp + apply (subgoal_tac "cap_relation c src_cap") + prefer 2 + apply (drule caps_of_state_cteD) + apply (drule (1) pspace_relation_ctes_ofI) + apply fastforce + apply fastforce + apply fastforce + apply (drule (1) pspace_relationsD) + apply (drule_tac p=ptr' in set_cap_not_quite_corres, assumption+) + apply fastforce + apply fastforce + apply fastforce + apply (erule cte_wp_at_weakenE, rule TrueI) + apply fastforce + apply fastforce + apply assumption + apply fastforce + apply (rule cr) + apply (rule refl) + apply (clarsimp simp: split_def) + apply (rule bind_execI, assumption) + apply (drule_tac p=ptr and c="cap.NullCap" in set_cap_not_quite_corres') + apply assumption+ + apply (frule use_valid [OF _ set_cap_valid_objs]) + apply fastforce + apply assumption + apply (frule use_valid [OF _ set_cap_aligned]) + apply fastforce + apply assumption + apply (frule use_valid [OF _ set_cap_distinct]) + apply fastforce + apply assumption + apply (frule use_valid [OF _ set_cap_cte_at]) + prefer 2 + apply assumption + apply assumption + apply (drule updateCap_stuff) + apply (elim conjE mp, fastforce) + apply (drule updateCap_stuff) + apply (elim conjE mp, fastforce) + apply assumption + apply simp + apply simp + apply (rule refl) + apply clarsimp + apply (rule bind_execI, assumption) + apply(subgoal_tac "mdb_move_abs ptr ptr' (cdt a) a") + apply (frule mdb_move_abs'.intro) + prefer 2 + apply(rule mdb_move_abs.intro) + apply(clarsimp) + apply(fastforce elim!: cte_wp_at_weakenE) + apply(simp) + apply(simp) + apply (clarsimp simp: exec_gets exec_get exec_put set_cdt_def + set_original_def bind_assoc modify_def + |(rule bind_execI[where f="cap_move_ext x y z x'" for x y z x'], clarsimp simp: mdb_move_abs'.cap_move_ext_det_def2 update_cdt_list_def set_cdt_list_def put_def) | rule refl )+ + apply (clarsimp simp: put_def) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (frule updateCap_dynamic_duo, fastforce, fastforce) + apply (frule(2) updateCap_dynamic_duo [OF _ conjunct1 conjunct2]) + apply (subgoal_tac "no_0 (ctes_of b)") + prefer 2 + apply fastforce + apply (frule(1) use_valid [OF _ updateCap_no_0]) + apply (frule(2) use_valid [OF _ updateCap_no_0, OF _ use_valid [OF _ updateCap_no_0]]) + apply (elim conjE) + apply (drule (5) updateMDB_the_lot', elim conjE) + apply (drule (4) updateMDB_the_lot, elim conjE) + apply (drule (4) updateMDB_the_lot, elim conjE) + apply (drule (4) updateMDB_the_lot, elim conjE) + apply (drule updateCap_stuff, elim conjE, erule (1) impE) + apply (drule updateCap_stuff, clarsimp) + apply (subgoal_tac "pspace_distinct' b \ pspace_aligned' b") + prefer 2 + subgoal by fastforce + apply (thin_tac "ctes_of t = s" for t s)+ + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksIdleThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply (subgoal_tac "\p. 
cte_at p ta = cte_at p a") + prefer 2 + apply (simp add: set_cap_cte_eq) + apply (clarsimp simp add: swp_def cte_wp_at_ctes_of simp del: split_paired_All) + apply (subgoal_tac "cte_at ptr' a") + prefer 2 + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (subgoal_tac "cte_map ptr \ cte_map ptr'") + prefer 2 + apply (erule (2) cte_map_inj) + apply fastforce + apply fastforce + apply fastforce + apply (clarsimp simp: pspace_relations_def) + apply (rule conjI) + subgoal by (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (thin_tac "gsCNodes t = p" for t p)+ + apply (thin_tac "ksWorkUnitsCompleted t = p" for t p)+ + apply (thin_tac "cur_thread t = p" for t p)+ + apply (thin_tac "domain_index t = p" for t p)+ + apply (thin_tac "domain_time t = p" for t p)+ + apply (thin_tac "cur_domain t = p" for t p)+ + apply (thin_tac "scheduler_action t = p" for t p)+ + apply (thin_tac "ready_queues t = p" for t p)+ + apply (thin_tac "idle_thread t = p" for t p)+ + apply (thin_tac "machine_state t = p" for t p)+ + apply (thin_tac "work_units_completed t = p" for t p)+ + apply (thin_tac "ksArchState t = p" for t p)+ + apply (thin_tac "gsUserPages t = p" for t p)+ + apply (thin_tac "ksCurDomain t = p" for t p)+ + apply (thin_tac "ksInterruptState t = p" for t p)+ + apply (thin_tac "ksDomScheduleIdx t = p" for t p)+ + apply (thin_tac "ksDomainTime t = p" for t p)+ + apply (thin_tac "ksDomSchedule t = p" for t p)+ + apply (thin_tac "ekheap_relation t p" for t p)+ + apply (thin_tac "pspace_relation t p" for t p)+ + apply (thin_tac "interrupt_state_relation s t p" for s t p)+ + apply (thin_tac "ghost_relation s t p q" for s t p q)+ + apply (thin_tac "sched_act_relation t p" for t p)+ + apply (thin_tac "ready_queues_relation t p" for t p)+ + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + defer + apply (drule set_cap_caps_of_state_monad)+ + apply (simp add: modify_map_mdb_cap) + apply (simp add: modify_map_apply) + apply (clarsimp simp add: revokable_relation_def simp del: fun_upd_apply) + apply simp + apply (rule conjI) + apply clarsimp + apply (erule_tac x="fst ptr" in allE) + apply (erule_tac x="snd ptr" in allE) + apply simp + apply (erule impE) + subgoal by (clarsimp simp: cte_wp_at_caps_of_state null_filter_def split: if_split_asm) + apply simp + apply clarsimp + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + subgoal by (clarsimp simp only: null_filter_def cap.simps option.simps + fun_upd_def simp_thms + split: if_splits) + apply clarsimp + apply (subgoal_tac "cte_at (aa,bb) a") + prefer 2 + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_cte_at) + apply (frule_tac p="(aa,bb)" and p'="ptr'" in cte_map_inj, assumption+) + apply fastforce + apply fastforce + apply fastforce + apply (clarsimp split: if_split_asm) + apply (subgoal_tac "(aa,bb) \ ptr") + apply (frule_tac p="(aa,bb)" and p'="ptr" in cte_map_inj, assumption+) + apply fastforce + apply fastforce + apply fastforce + apply clarsimp + subgoal by (simp add: null_filter_def split: if_splits) (*long *) + apply (subgoal_tac "mdb_move (ctes_of b) (cte_map ptr) src_cap src_node (cte_map ptr') cap' old_dest_node") + prefer 2 + apply (rule mdb_move.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro) + apply (simp add: valid_pspace'_def valid_mdb'_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_move_axioms.intro) + apply assumption + apply (simp add: nullPointer_def) + apply (simp add: nullPointer_def) + apply (erule weak_derived_sym') + subgoal by 
clarsimp + apply assumption + apply (rule conjI) + apply (simp (no_asm) add: cdt_relation_def) + apply clarsimp + apply (subst mdb_move.descendants, assumption) + apply (subst mdb_move_abs.descendants[simplified fun_upd_apply]) + apply (rule mdb_move_abs.intro) + apply fastforce + apply (fastforce elim!: cte_wp_at_weakenE) + subgoal by simp + subgoal by simp + apply (case_tac "(aa,bb) = ptr", simp) + apply (subgoal_tac "cte_map (aa,bb) \ cte_map ptr") + prefer 2 + apply (erule (2) cte_map_inj, fastforce, fastforce, fastforce) + apply (case_tac "(aa,bb) = ptr'") + subgoal by (simp add: cdt_relation_def del: split_paired_All) + apply (subgoal_tac "cte_map (aa,bb) \ cte_map ptr'") + prefer 2 + apply (erule (2) cte_map_inj, fastforce, fastforce, fastforce) + apply (simp only: if_False) + apply simp + apply (subgoal_tac "descendants_of' (cte_map (aa, bb)) (ctes_of b) = + cte_map ` descendants_of (aa, bb) (cdt a)") + prefer 2 + apply (simp add: cdt_relation_def del: split_paired_All) + apply simp + apply (rule conjI) + apply clarsimp + apply (subst inj_on_image_set_diff15) + apply (rule inj_on_descendants_cte_map) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + apply (rule subset_refl) + subgoal by simp + apply simp + apply clarsimp + apply (drule (1) cte_map_inj_eq) + apply (erule descendants_of_cte_at) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + subgoal by simp + apply(clarsimp simp: cdt_list_relation_def) + apply(subst next_slot_eq2) + apply(simp split: option.splits) + apply(intro conjI impI) + apply(rule mdb_move_abs'.next_slot_no_parent) + apply(simp, fastforce, simp) + apply(intro allI impI) + apply(rule mdb_move_abs'.next_slot) + apply(simp, fastforce, simp) + subgoal by (fastforce split: option.splits) + apply(case_tac "ctes_of b (cte_map (aa, bb))") + subgoal by (clarsimp simp: modify_map_def split: if_split_asm) + apply(case_tac ab) + apply(frule mdb_move.m'_next) + apply(simp, fastforce) + apply(case_tac "(aa, bb) = ptr") + apply(simp) + apply(case_tac "(aa, bb) = ptr'") + apply(case_tac "next_slot ptr (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(simp) + apply(erule_tac x="fst ptr" in allE) + apply(erule_tac x="snd ptr" in allE) + subgoal by(clarsimp split: if_split_asm) + apply(frule invs_mdb, frule invs_valid_pspace) + apply(frule finite_depth) + apply simp + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a) = Some ptr") + apply(frule(3) cte_at_next_slot) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + subgoal by (clarsimp simp: cte_map_inj_eq valid_pspace_def split: if_split_asm) + apply(simp) + apply(case_tac "next_slot (aa, bb) (cdt_list (a)) (cdt a)") + subgoal by(simp) + apply(frule(3) cte_at_next_slot) + apply(frule(3) cte_at_next_slot') + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + by(clarsimp simp: cte_map_inj_eq valid_pspace_def split: if_split_asm) + +lemmas cur_tcb_lift = + hoare_lift_Pf [where f = ksCurThread and P = tcb_at', folded cur_tcb'_def] + +lemma valid_bitmapQ_lift: + assumes prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" + and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" + and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" + shows "\Invariants_H.valid_bitmapQ\ f \\_. 
Invariants_H.valid_bitmapQ\" + unfolding valid_bitmapQ_def bitmapQ_def + apply (wp hoare_vcg_all_lift) + apply (wps prq prqL1 prqL2) + apply (rule hoare_vcg_prop, assumption) + done + +lemma bitmapQ_no_L1_orphans_lift: + assumes prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" + and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" + and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" + shows "\ bitmapQ_no_L1_orphans \ f \\_. bitmapQ_no_L1_orphans \" + unfolding valid_bitmapQ_def bitmapQ_def bitmapQ_no_L1_orphans_def + apply (wp hoare_vcg_all_lift) + apply (wps prq prqL1 prqL2) + apply (rule hoare_vcg_prop, assumption) + done + +lemma bitmapQ_no_L2_orphans_lift: + assumes prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" + and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" + and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" + shows "\ bitmapQ_no_L2_orphans \ f \\_. bitmapQ_no_L2_orphans \" + unfolding valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def + apply (wp hoare_vcg_all_lift) + apply (wps prq prqL1 prqL2) + apply (rule hoare_vcg_prop, assumption) + done + +lemma valid_queues_lift_asm: + assumes tat1: "\d p tcb. \obj_at' (inQ d p) tcb and Q \ f \\_. obj_at' (inQ d p) tcb\" + and tat2: "\tcb. \st_tcb_at' runnable' tcb and Q \ f \\_. st_tcb_at' runnable' tcb\" + and prq: "\P. \\s. P (ksReadyQueues s) \ f \\_ s. P (ksReadyQueues s)\" + and prqL1: "\P. \\s. P (ksReadyQueuesL1Bitmap s)\ f \\_ s. P (ksReadyQueuesL1Bitmap s)\" + and prqL2: "\P. \\s. P (ksReadyQueuesL2Bitmap s)\ f \\_ s. P (ksReadyQueuesL2Bitmap s)\" + shows "\Invariants_H.valid_queues and Q\ f \\_. Invariants_H.valid_queues\" + proof - + have tat: "\d p tcb. \obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb and Q\ f + \\_. obj_at' (inQ d p) tcb and st_tcb_at' runnable' tcb\" + apply (rule hoare_chain [OF hoare_vcg_conj_lift [OF tat1 tat2]]) + apply (fastforce)+ + done + have tat_combined: "\d p tcb. \obj_at' (inQ d p and runnable' \ tcbState) tcb and Q\ f + \\_. obj_at' (inQ d p and runnable' \ tcbState) tcb\" + apply (rule hoare_chain [OF tat]) + apply (fastforce simp add: obj_at'_and pred_tcb_at'_def o_def)+ + done + show ?thesis unfolding valid_queues_def valid_queues_no_bitmap_def + by (wp tat_combined prq prqL1 prqL2 valid_bitmapQ_lift bitmapQ_no_L2_orphans_lift + bitmapQ_no_L1_orphans_lift hoare_vcg_all_lift hoare_vcg_conj_lift hoare_Ball_helper) + simp_all + qed + +lemmas valid_queues_lift = valid_queues_lift_asm[where Q="\_. True", simplified] + +lemma valid_queues_lift': + assumes tat: "\d p tcb. \\s. \ obj_at' (inQ d p) tcb s\ f \\_ s. \ obj_at' (inQ d p) tcb s\" + and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" + shows "\valid_queues'\ f \\_. valid_queues'\" + unfolding valid_queues'_def imp_conv_disj + by (wp hoare_vcg_all_lift hoare_vcg_disj_lift tat prq) + +lemma setCTE_norq [wp]: + "\\s. P (ksReadyQueues s)\ setCTE ptr cte \\r s. P (ksReadyQueues s) \" + by (clarsimp simp: valid_def dest!: setCTE_pspace_only) + +lemma setCTE_norqL1 [wp]: + "\\s. P (ksReadyQueuesL1Bitmap s)\ setCTE ptr cte \\r s. P (ksReadyQueuesL1Bitmap s) \" + by (clarsimp simp: valid_def dest!: setCTE_pspace_only) + +lemma setCTE_norqL2 [wp]: + "\\s. P (ksReadyQueuesL2Bitmap s)\ setCTE ptr cte \\r s. 
P (ksReadyQueuesL2Bitmap s) \" + by (clarsimp simp: valid_def dest!: setCTE_pspace_only) + +crunches cteInsert + for nosch[wp]: "\s. P (ksSchedulerAction s)" + and norq[wp]: "\s. P (ksReadyQueues s)" + and norqL1[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and norqL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + and typ_at'[wp]: "\s. P (typ_at' T p s)" + (wp: updateObject_cte_inv crunch_wps ignore_del: setObject) + +lemmas updateMDB_typ_ats [wp] = typ_at_lifts [OF updateMDB_typ_at'] +lemmas updateCap_typ_ats [wp] = typ_at_lifts [OF updateCap_typ_at'] +lemmas cteInsert_typ_ats [wp] = typ_at_lifts [OF cteInsert_typ_at'] + +lemma setObject_cte_ct: + "\\s. P (ksCurThread s)\ setObject t (v::cte) \\rv s. P (ksCurThread s)\" + by (clarsimp simp: valid_def setCTE_def[symmetric] dest!: setCTE_pspace_only) + +crunch ct[wp]: cteInsert "\s. P (ksCurThread s)" + (wp: setObject_cte_ct hoare_drop_imps) +end +context mdb_insert +begin +interpretation Arch . (*FIXME: arch_split*) +lemma n_src_dest: + "n \ src \ dest" + by (simp add: n_direct_eq) + +lemma dest_chain_0 [simp, intro!]: + "n \ dest \\<^sup>+ 0" + using chain_n n_dest + by (simp add: mdb_chain_0_def) blast + +lemma m_tranclD: + "m \ p \\<^sup>+ p' \ p' \ dest \ (p = dest \ p' = 0) \ n \ p \\<^sup>+ p'" + apply (erule trancl_induct) + apply (rule context_conjI, clarsimp) + apply (rule context_conjI, clarsimp) + apply (cases "p = src") + apply simp + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (rule n_src_dest) + apply (rule r_into_trancl) + apply (simp add: n_direct_eq) + apply (cases "p = dest", simp) + apply (rule r_into_trancl) + apply (simp add: n_direct_eq) + apply clarsimp + apply (rule context_conjI, clarsimp) + apply (rule context_conjI, clarsimp simp: mdb_next_unfold) + apply (case_tac "y = src") + apply clarsimp + apply (erule trancl_trans) + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (rule n_src_dest) + apply (rule r_into_trancl) + apply (simp add: n_direct_eq) + apply (case_tac "y = dest", simp) + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n_direct_eq) + done + +lemma n_trancl_eq': + "n \ p \\<^sup>+ p' = + (if p' = dest then m \ p \\<^sup>* src + else if p = dest then m \ src \\<^sup>+ p' + else m \ p \\<^sup>+ p')" + apply (rule iffI) + apply (erule trancl_induct) + apply (clarsimp simp: n_direct_eq) + apply (fastforce split: if_split_asm) + apply (clarsimp simp: n_direct_eq split: if_split_asm) + apply fastforce + apply fastforce + apply (fastforce intro: trancl_trans) + apply (fastforce intro: trancl_trans) + apply (simp split: if_split_asm) + apply (drule rtranclD) + apply (erule disjE) + apply (fastforce intro: n_src_dest) + apply (clarsimp dest!: m_tranclD) + apply (erule trancl_trans) + apply (fastforce intro: n_src_dest) + apply (drule m_tranclD, clarsimp) + apply (drule tranclD) + apply clarsimp + apply (insert n_src_dest)[1] + apply (drule (1) next_single_value) + subgoal by (clarsimp dest!: rtrancl_eq_or_trancl[THEN iffD1]) + apply (drule m_tranclD) + apply clarsimp + done + +lemma n_trancl_eq: + "n \ p \\<^sup>+ p' = + (if p' = dest then p = src \ m \ p \\<^sup>+ src + else if p = dest then m \ src \\<^sup>+ p' + else m \ p \\<^sup>+ p')" + by (safe; clarsimp simp: n_trancl_eq' + dest!: rtrancl_eq_or_trancl[THEN iffD1] + intro!: rtrancl_eq_or_trancl[THEN iffD2]) + +lemma n_rtrancl_eq: + "n \ p \\<^sup>* p' = + (if p' = dest then p = dest \ p \ dest \ m \ p \\<^sup>* src + else if p = dest then p' \ src \ m \ src \\<^sup>* p' + else m \ p \\<^sup>* p')" + 
apply clarsimp + by (safe; clarsimp simp: n_trancl_eq' + dest!: rtrancl_eq_or_trancl[THEN iffD1] + intro!: rtrancl_eq_or_trancl[THEN iffD2]) + +lemma n_cap: + "n p = Some (CTE cap node) \ + \node'. if p = dest then cap = c' \ m p = Some (CTE dest_cap node') + else m p = Some (CTE cap node')" + by (simp add: n src dest new_src_def new_dest_def split: if_split_asm) + +lemma m_cap: + "m p = Some (CTE cap node) \ + \node'. if p = dest then cap = dest_cap \ n p = Some (CTE c' node') + else n p = Some (CTE cap node')" + apply (simp add: n new_src_def new_dest_def) + apply (cases "p=dest") + apply (auto simp: src dest) + done + +lemma chunked_m: + "mdb_chunked m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma derived_region1 [simp]: + "badge_derived' c' src_cap \ + sameRegionAs c' cap = sameRegionAs src_cap cap" + by (clarsimp simp add: badge_derived'_def sameRegionAs_def2) + +lemma derived_region2 [simp]: + "badge_derived' c' src_cap \ + sameRegionAs cap c' = sameRegionAs cap src_cap" + by (clarsimp simp add: badge_derived'_def sameRegionAs_def2) + +lemma chunked_n: + assumes b: "badge_derived' c' src_cap" + shows "mdb_chunked n" + using chunked_m src b + apply (clarsimp simp: mdb_chunked_def) + apply (drule n_cap)+ + apply clarsimp + apply (simp split: if_split_asm) + apply clarsimp + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply simp + apply (case_tac "src=p'") + apply (clarsimp simp: n_trancl_eq) + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (drule (1) trancl_rtrancl_trancl) + apply simp + apply (clarsimp simp: n_trancl_eq) + apply (rule conjI) + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply (clarsimp split: if_split_asm) + apply clarsimp + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (rule conjI) + apply clarsimp + apply (erule_tac x=src in allE) + apply simp + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: n_trancl_eq) + apply (case_tac "p=src") + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (drule (1) trancl_rtrancl_trancl) + apply simp + apply simp + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply clarsimp + apply (clarsimp simp: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (clarsimp simp: n_trancl_eq) + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (simp add: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule sameRegionAsE, simp_all add: sameRegionAs_def3)[1] + apply blast + apply blast + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply fastforce + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply clarsimp + apply (simp 
add: is_chunk_def n_trancl_eq n_rtrancl_eq n_dest new_dest_def) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule_tac x=p in allE, simp, erule(1) sameRegionAs_trans) + apply fastforce + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + done + +end + +context mdb_insert_der +begin + +lemma untyped_c': + "untypedRange c' = untypedRange src_cap" + "isUntypedCap c' = isUntypedCap src_cap" + using partial_is_derived' + apply - + apply (case_tac "isUntypedCap src_cap") + by (clarsimp simp:isCap_simps freeIndex_update_def is_derived'_def + badge_derived'_def capMasterCap_def split:if_splits capability.splits)+ + +lemma capRange_c': + "capRange c' = capRange src_cap" + using partial_is_derived' untyped_c' + apply - + apply (case_tac "isUntypedCap src_cap") + apply (clarsimp simp:untypedCapRange) + apply (rule master_eqI, rule capRange_Master) + apply simp + apply (rule arg_cong) + apply (auto simp:isCap_simps freeIndex_update_def is_derived'_def + badge_derived'_def capMasterCap_def split:if_splits capability.splits) + done + +lemma untyped_no_parent: + "isUntypedCap src_cap \ \ m \ src \ p" + using partial_is_derived' untyped_c' + by (clarsimp simp: is_derived'_def isCap_simps freeIndex_update_def descendants_of'_def) + +end + +lemma (in mdb_insert) n_revocable: + "n p = Some (CTE cap node) \ + \node'. if p = dest then mdbRevocable node = isCapRevocable c' src_cap + else mdbRevocable node = mdbRevocable node' \ m p = Some (CTE cap node')" + using src dest + by (clarsimp simp: n new_src_def new_dest_def split: if_split_asm) + +lemma (in mdb_insert_der) irq_control_n: + "irq_control n" + using src dest partial_is_derived' + apply (clarsimp simp: irq_control_def) + apply (frule n_cap) + apply (drule n_revocable) + apply (clarsimp split: if_split_asm) + apply (simp add: is_derived'_def isCap_simps) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (erule disjE) + apply (clarsimp simp: is_derived'_def isCap_simps) + apply (erule (1) irq_controlD, rule irq_control) + done + +context mdb_insert_child +begin + +lemma untyped_mdb_n: + shows "untyped_mdb' n" + using untyped_mdb + apply (clarsimp simp add: untyped_mdb'_def descendants split del: if_split) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (erule disjE, clarsimp) + apply (simp add: descendants_of'_def) + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (simp add: src untyped_c' capRange_c') + apply (erule disjE) + apply clarsimp + apply (simp add: descendants_of'_def untyped_c') + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply (fastforce simp: src dest: untyped_no_parent) + apply (case_tac "p=src", simp) + apply simp + done + +lemma parent_untyped_must_not_usable: + "\ptr \ src; m ptr = Some (CTE ccap node'); + untypedRange ccap = untypedRange src_cap; capAligned src_cap; + isUntypedCap src_cap \ + \ usableUntypedRange ccap = {}" + using untyped_inc src + apply (clarsimp simp:untyped_inc'_def) + apply (erule_tac x = ptr in allE) + apply (erule_tac x = src in allE) + apply clarsimp + apply (subgoal_tac "isUntypedCap ccap") + apply clarsimp + apply (drule_tac p = ptr in untyped_no_parent) + apply (simp add:descendants_of'_def) + apply (drule (1) aligned_untypedRange_non_empty) + apply (case_tac ccap,simp_all add:isCap_simps) + done + +lemma untyped_inc_n: + 
"\capAligned src_cap;isUntypedCap src_cap \ usableUntypedRange src_cap = {}\ + \ untyped_inc' n" + using untyped_inc + apply (clarsimp simp add: untyped_inc'_def descendants split del: if_split) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (case_tac "p=dest", simp) + apply (simp add: descendants_of'_def untyped_c') + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (simp add: src) + apply (frule_tac p=p in untyped_no_parent) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (case_tac "p = src") + using src + apply clarsimp + apply (drule(4) parent_untyped_must_not_usable) + apply simp + apply (intro conjI) + apply clarsimp + apply clarsimp + using src + apply clarsimp + apply clarsimp + apply (case_tac "p=dest") + apply (simp add: descendants_of'_def untyped_c') + apply (erule_tac x=p' in allE) + apply (erule_tac x=src in allE) + apply (clarsimp simp:src) + apply (frule_tac p=p' in untyped_no_parent) + apply (case_tac "p' = src") + apply (clarsimp simp:src) + apply (elim disjE) + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply simp+ + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply simp+ + apply (clarsimp simp:Int_ac) + apply (erule_tac x=p' in allE) + apply (erule_tac x=p in allE) + apply (case_tac "p' = src") + apply (clarsimp simp:src descendants_of'_def untyped_c') + apply (elim disjE) + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply (simp,intro conjI,clarsimp+) + apply (intro conjI) + apply clarsimp+ + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply (simp,intro conjI,clarsimp+) + apply (intro conjI) + apply clarsimp+ + apply (clarsimp simp:Int_ac,intro conjI,clarsimp+) + apply (clarsimp simp:descendants_of'_def) + apply (case_tac "p = src") + apply simp + apply (elim disjE) + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply (simp,intro conjI,clarsimp+) + apply (intro conjI) + apply clarsimp+ + apply (erule disjE[OF iffD1[OF subset_iff_psubset_eq]]) + apply clarsimp+ + apply fastforce + apply (clarsimp simp:Int_ac,intro conjI,clarsimp+) + apply (intro conjI) + apply (elim disjE) + apply (simp add:Int_ac)+ + apply clarsimp + done + +end + +context mdb_insert_sib +begin + +lemma untyped_mdb_n: + shows "untyped_mdb' n" + using untyped_mdb + apply (clarsimp simp add: untyped_mdb'_def descendants split del: if_split) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm simp: descendants_of'_def capRange_c' untyped_c') + apply (erule_tac x=src in allE) + apply (erule_tac x=p' in allE) + apply (fastforce simp: src dest: untyped_no_parent) + apply (erule_tac x=p in allE) + apply (erule_tac x=src in allE) + apply (simp add: src) + done + +lemma not_untyped: "capAligned c' \ \isUntypedCap src_cap" + using no_child partial_is_derived' ut_rev src + apply (clarsimp simp: ut_revocable'_def isMDBParentOf_CTE) + apply (erule_tac x=src in allE) + apply simp + apply (clarsimp simp: is_derived'_def freeIndex_update_def isCap_simps capAligned_def + badge_derived'_def) + apply (clarsimp simp: sameRegionAs_def3 capMasterCap_def isCap_simps + is_aligned_no_overflow split:capability.splits) + done + +lemma untyped_inc_n: + assumes c': "capAligned c'" + shows "untyped_inc' n" + using untyped_inc not_untyped [OF c'] + apply (clarsimp simp add: untyped_inc'_def descendants split del: if_split) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (simp add: descendants_of'_def untyped_c') + apply (case_tac "p = dest") + apply (clarsimp simp: 
untyped_c') + apply simp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply simp + done + +end + +lemma trancl_prev_update: + "modify_map m ptr (cteMDBNode_update (mdbPrev_update z)) \ x \\<^sup>+ y = m \ x \\<^sup>+ y" + apply (rule iffI) + apply (erule update_prev_next_trancl2) + apply (erule update_prev_next_trancl) + done + +lemma rtrancl_prev_update: + "modify_map m ptr (cteMDBNode_update (mdbPrev_update z)) \ x \\<^sup>* y = m \ x \\<^sup>* y" + by (simp add: trancl_prev_update rtrancl_eq_or_trancl) + +lemma mdb_chunked_prev_update: + "mdb_chunked (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = mdb_chunked m" + apply (simp add: mdb_chunked_def trancl_prev_update rtrancl_prev_update is_chunk_def) + apply (rule iffI) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=cap in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply (erule impE, blast) + apply (erule allE, erule impE, blast) + apply clarsimp + apply blast + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=cap in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply (erule impE, blast) + apply clarsimp + apply blast + apply (erule allE, erule impE, blast) + apply clarsimp + apply blast + apply clarsimp + apply blast + done + +lemma descendants_of_prev_update: + "descendants_of' p (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = + descendants_of' p m" + by (simp add: descendants_of'_def) + +lemma untyped_mdb_prev_update: + "untyped_mdb' (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = untyped_mdb' m" + apply (simp add: untyped_mdb'_def descendants_of_prev_update) + apply (rule iffI) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=c in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply (erule impE, blast) + apply (erule allE, erule impE, blast) + apply clarsimp + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=c in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply clarsimp + done + +lemma untyped_inc_prev_update: + "untyped_inc' (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = untyped_inc' m" + apply (simp add: untyped_inc'_def descendants_of_prev_update) + apply (rule iffI) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=c in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply (erule impE, blast) + apply (erule allE, erule impE, blast) + apply clarsimp + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (erule_tac x=c in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply clarsimp + done + +lemma is_derived_badge_derived': + "is_derived' m src cap cap' \ badge_derived' cap cap'" + by (simp add: is_derived'_def) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cteInsert_mdb_chain_0: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. 
mdb_chain_0 (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map + setUntypedCapAsFull_mdb_chain_0 mdb_inv_preserve_fun_upd | simp del:fun_upd_apply)+ + apply (wp getCTE_wp)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac x) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert (ctes_of s) src s_cap s_node dest NullCap node" for node) + apply (drule mdb_insert.chain_n) + apply (rule mdb_chain_0_modify_map_prev) + apply (simp add:modify_map_apply) + apply (clarsimp simp: valid_badges_def) + apply unfold_locales + apply (assumption|rule refl)+ + apply (simp add: valid_mdb_ctes_def) + apply (simp add: valid_mdb_ctes_def) + apply assumption + done + +lemma cteInsert_mdb_chunked: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. mdb_chunked (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map + setUntypedCapAsFull_mdb_chunked mdb_inv_preserve_fun_upd,simp) + apply (wp getCTE_wp)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac cteb) + apply (rename_tac d_cap d_node) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert (ctes_of s) src s_cap s_node dest NullCap d_node") + apply (drule mdb_insert.chunked_n, erule is_derived_badge_derived') + apply (clarsimp simp: modify_map_apply mdb_chunked_prev_update fun_upd_def) + apply unfold_locales + apply (assumption|rule refl)+ + apply (simp add: valid_mdb_ctes_def) + apply (simp add: valid_mdb_ctes_def) + apply assumption + done + +lemma cteInsert_untyped_mdb: + "\valid_mdb' and pspace_distinct' and pspace_aligned' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. 
untyped_mdb' (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map + setUntypedCapAsFull_untyped_mdb' mdb_inv_preserve_fun_upd,simp) + apply (wp getCTE_wp)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac cteb) + apply (rename_tac d_cap d_node) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert_der (ctes_of s) src s_cap s_node dest NullCap d_node cap") + prefer 2 + apply unfold_locales[1] + apply (assumption|rule refl)+ + apply (simp add: valid_mdb_ctes_def) + apply (simp add: valid_mdb_ctes_def) + apply assumption + apply assumption + apply (case_tac "isMDBParentOf (CTE s_cap s_node) (CTE cap + (mdbFirstBadged_update (\a. isCapRevocable cap s_cap) + (mdbRevocable_update (\a. isCapRevocable cap s_cap) (mdbPrev_update (\a. src) s_node))))") + apply (subgoal_tac "mdb_insert_child (ctes_of s) src s_cap s_node dest NullCap d_node cap") + prefer 2 + apply (simp add: mdb_insert_child_def mdb_insert_child_axioms_def) + apply (drule mdb_insert_child.untyped_mdb_n) + apply (clarsimp simp: modify_map_apply untyped_mdb_prev_update + descendants_of_prev_update fun_upd_def) + apply (subgoal_tac "mdb_insert_sib (ctes_of s) src s_cap s_node dest NullCap d_node cap") + prefer 2 + apply (simp add: mdb_insert_sib_def mdb_insert_sib_axioms_def) + apply (drule mdb_insert_sib.untyped_mdb_n) + apply (clarsimp simp: modify_map_apply untyped_mdb_prev_update + descendants_of_prev_update fun_upd_def) + done + +lemma valid_mdb_ctes_maskedAsFull: + "\valid_mdb_ctes m;m src = Some (CTE s_cap s_node)\ + \ valid_mdb_ctes (m(src \ CTE (maskedAsFull s_cap cap) s_node))" + apply (clarsimp simp: maskedAsFull_def) + apply (intro conjI impI) + apply (frule mdb_inv_preserve_updateCap + [where m = m and slot = src and index = "max_free_index (capBlockSize cap)"]) + apply simp + apply (drule mdb_inv_preserve_sym) + apply (clarsimp simp:valid_mdb_ctes_def modify_map_def) + apply (frule mdb_inv_preserve.preserve_stuff,simp) + apply (frule mdb_inv_preserve.by_products,simp) + apply (rule mdb_inv_preserve.untyped_inc') + apply (erule mdb_inv_preserve_sym) + apply (clarsimp split:if_split_asm simp: isCap_simps max_free_index_def) + apply simp + apply (subgoal_tac "m = m(src \ CTE s_cap s_node)") + apply simp + apply (rule ext) + apply clarsimp + done + +lemma capAligned_maskedAsFull: + "capAligned s_cap \ capAligned (maskedAsFull s_cap cap)" + apply (case_tac s_cap) + apply (clarsimp simp:isCap_simps capAligned_def maskedAsFull_def max_free_index_def)+ + done + +lemma maskedAsFull_derived': + "\m src = Some (CTE s_cap s_node); is_derived' m ptr b c\ + \ is_derived' (m(src \ CTE (maskedAsFull s_cap cap) s_node)) ptr b c" + apply (subgoal_tac "m(src \ CTE (maskedAsFull s_cap cap) s_node) + = (modify_map m src (cteCap_update (\_. 
maskedAsFull s_cap cap)))") + apply simp + apply (clarsimp simp:maskedAsFull_def is_derived'_def) + apply (intro conjI impI) + apply (simp add:modify_map_def del:cteCap_update.simps) + apply (subst same_master_descendants) + apply simp + apply (clarsimp simp:isCap_simps capASID_def )+ + apply (clarsimp simp:modify_map_def) + done + +lemma maskedAsFull_usable_empty: + "\capMasterCap cap = capMasterCap s_cap; + isUntypedCap (maskedAsFull s_cap cap)\ + \ usableUntypedRange (maskedAsFull s_cap cap) = {}" + apply (simp add:isCap_simps maskedAsFull_def max_free_index_def split:if_split_asm) + apply fastforce+ + done + +lemma capAligned_master: + "\capAligned cap; capMasterCap cap = capMasterCap ncap\ \ capAligned ncap" + apply (case_tac cap) + apply (clarsimp simp:capAligned_def)+ + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + apply (clarsimp simp:capAligned_def)+ + done + +lemma cteInsert_untyped_inc': + "\valid_mdb' and pspace_distinct' and pspace_aligned' and valid_objs' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. untyped_inc' (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map + setUntypedCapAsFull_untyped_mdb' mdb_inv_preserve_fun_upd) + apply (wp getCTE_wp setUntypedCapAsFull_ctes)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac cteb) + apply (rename_tac d_cap d_node) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert_der + (modify_map (ctes_of s) src (cteCap_update (\_. maskedAsFull s_cap cap))) + src (maskedAsFull s_cap cap) s_node dest NullCap d_node cap") + prefer 2 + apply unfold_locales[1] + apply (clarsimp simp:modify_map_def valid_mdb_ctes_maskedAsFull)+ + apply (erule(2) valid_mdb_ctesE[OF valid_mdb_ctes_maskedAsFull]) + apply (clarsimp simp:modify_map_def) + apply (erule(2) valid_mdb_ctesE[OF valid_mdb_ctes_maskedAsFull]) + apply simp + apply (clarsimp simp:modify_map_def maskedAsFull_derived') + apply (case_tac "isMDBParentOf (CTE (maskedAsFull s_cap cap) s_node) (CTE cap + (mdbFirstBadged_update (\a. isCapRevocable cap (maskedAsFull s_cap cap)) + (mdbRevocable_update (\a. isCapRevocable cap (maskedAsFull s_cap cap)) + (mdbPrev_update (\a. src) s_node))))") + apply (subgoal_tac "mdb_insert_child + (modify_map (ctes_of s) src (cteCap_update (\_. 
maskedAsFull s_cap cap))) + src (maskedAsFull s_cap cap) s_node dest NullCap d_node cap") + prefer 2 + apply (simp add: mdb_insert_child_def mdb_insert_child_axioms_def) + apply (drule mdb_insert_child.untyped_inc_n) + apply (rule capAligned_maskedAsFull[OF valid_capAligned]) + apply (erule(1) ctes_of_valid_cap') + apply (intro impI maskedAsFull_usable_empty) + apply (clarsimp simp:is_derived'_def badge_derived'_def) + apply simp + apply (clarsimp simp: modify_map_apply untyped_inc_prev_update maskedAsFull_revokable + descendants_of_prev_update) + apply (subgoal_tac "mdb_insert_sib + (modify_map (ctes_of s) src (cteCap_update (\_. maskedAsFull s_cap cap))) + src (maskedAsFull s_cap cap) s_node dest NullCap d_node cap") + prefer 2 + apply (simp add: mdb_insert_sib_def mdb_insert_sib_axioms_def) + apply (drule mdb_insert_sib.untyped_inc_n) + apply (rule capAligned_master[OF valid_capAligned]) + apply (erule(1) ctes_of_valid_cap') + apply (clarsimp simp:is_derived'_def badge_derived'_def) + apply (clarsimp simp: modify_map_apply untyped_inc_prev_update maskedAsFull_revokable + descendants_of_prev_update) + done + +lemma irq_control_prev_update: + "irq_control (modify_map m x (cteMDBNode_update (mdbPrev_update f))) = irq_control m" + apply (simp add: irq_control_def) + apply (rule iffI) + apply clarsimp + apply (simp only: modify_map_if) + apply (erule_tac x=p in allE) + apply (simp (no_asm_use) split: if_split_asm) + apply (case_tac "x=p") + apply fastforce + apply clarsimp + apply (erule_tac x=p' in allE) + apply simp + apply (case_tac "x=p'") + apply simp + apply fastforce + apply clarsimp + apply (erule_tac x=p in allE) + apply (simp add: modify_map_if split: if_split_asm) + apply clarsimp + apply (case_tac "x=p'") + apply clarsimp + apply clarsimp + apply clarsimp + apply (case_tac "x=p'") + apply clarsimp + apply clarsimp + done + +lemma cteInsert_irq_control: + "\valid_mdb' and pspace_distinct' and pspace_aligned' and (\s. src \ dest) and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. 
irq_control (ctes_of s)\" + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 setUntypedCapAsFull_irq_control mdb_inv_preserve_fun_upd + mdb_inv_preserve_modify_map,simp) + apply (wp getCTE_wp)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (subgoal_tac "src \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "dest \ 0") + prefer 2 + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (rule conjI) + apply (fastforce simp: valid_mdb_ctes_def no_0_def) + apply (case_tac cte) + apply (rename_tac s_cap s_node) + apply (case_tac cteb) + apply (rename_tac d_cap d_node) + apply (simp add: nullPointer_def) + apply (subgoal_tac "mdb_insert_der (ctes_of s) src s_cap s_node dest NullCap d_node cap") + prefer 2 + apply unfold_locales[1] + apply (assumption|rule refl)+ + apply (simp add: valid_mdb_ctes_def) + apply (simp add: valid_mdb_ctes_def) + apply assumption+ + apply (drule mdb_insert_der.irq_control_n) + apply (clarsimp simp: modify_map_apply irq_control_prev_update fun_upd_def) + done + +lemma capMaster_isUntyped: + "capMasterCap c = capMasterCap c' \ isUntypedCap c = isUntypedCap c'" + by (simp add: capMasterCap_def isCap_simps split: capability.splits) + +lemma capMaster_capRange: + "capMasterCap c = capMasterCap c' \ capRange c = capRange c'" + by (simp add: capMasterCap_def capRange_def split: capability.splits arch_capability.splits) + +lemma capMaster_untypedRange: + "capMasterCap c = capMasterCap c' \ untypedRange c = untypedRange c'" + by (simp add: capMasterCap_def capRange_def split: capability.splits arch_capability.splits) + +lemma capMaster_capClass: + "capMasterCap c = capMasterCap c' \ capClass c = capClass c'" + by (simp add: capMasterCap_def split: capability.splits arch_capability.splits) + +lemma distinct_zombies_nonCTE_modify_map: + "\m x f. \ \cte. cteCap (f cte) = cteCap cte \ + \ distinct_zombies (modify_map m x f) = distinct_zombies m" + apply (simp add: distinct_zombies_def modify_map_def o_def) + apply (rule_tac f=distinct_zombie_caps in arg_cong) + apply (rule ext) + apply simp + apply (simp add: map_option.compositionality o_def) + done + +lemma updateCapFreeIndex_dlist: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (valid_dlist (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (valid_dlist (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.preserve_stuff) + apply simp + apply (rule preserve) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +lemma setUntypedCapAsFull_valid_dlist: + assumes preserve: + "\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (valid_dlist (Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. 
P (valid_dlist (Q (ctes_of s)))\" + apply (clarsimp simp:setUntypedCapAsFull_def split:if_splits,intro conjI impI) + apply (wp updateCapFreeIndex_dlist) + apply (clarsimp simp:preserve cte_wp_at_ctes_of)+ + apply wp + apply clarsimp + done + +lemma valid_dlist_prevD: + "\m p = Some cte;valid_dlist m;mdbPrev (cteMDBNode cte) \ 0\ + \ (\cte'. m (mdbPrev (cteMDBNode cte)) = Some cte' \ + mdbNext (cteMDBNode cte') = p)" + by (clarsimp simp:valid_dlist_def Let_def) + +lemma valid_dlist_nextD: + "\m p = Some cte;valid_dlist m;mdbNext (cteMDBNode cte) \ 0\ + \ (\cte'. m (mdbNext (cteMDBNode cte)) = Some cte' \ + mdbPrev (cteMDBNode cte') = p)" + by (clarsimp simp:valid_dlist_def Let_def) + +lemma no_loops_no_l2_loop: + "\valid_dlist m; no_loops m; m p = Some cte;mdbPrev (cteMDBNode cte) = mdbNext (cteMDBNode cte)\ + \ mdbNext (cteMDBNode cte) = 0" + apply (rule ccontr) + apply (subgoal_tac "m \ p \ (mdbNext (cteMDBNode cte))") + prefer 2 + apply (clarsimp simp:mdb_next_rel_def mdb_next_def) + apply (subgoal_tac "m \ (mdbNext (cteMDBNode cte)) \ p") + prefer 2 + apply (clarsimp simp:mdb_next_rel_def mdb_next_def) + apply (frule(2) valid_dlist_nextD) + apply clarsimp + apply (frule(1) valid_dlist_prevD) + apply simp+ + apply (drule(1) transitive_closure_trans) + apply (simp add:no_loops_def) + done + +lemma cteInsert_no_0: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and + (\s. src \ dest) and K (capAligned cap) and valid_objs' and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. no_0 (ctes_of s) \" + apply (rule hoare_name_pre_state) + apply clarsimp + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map getCTE_wp + setUntypedCapAsFull_valid_dlist mdb_inv_preserve_fun_upd | simp)+ + apply (intro conjI impI) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (clarsimp simp:valid_mdb_ctes_def no_0_def) + done + +lemma cteInsert_valid_dlist: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and + (\s. src \ dest) and K (capAligned cap) and valid_objs' and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_ s. 
valid_dlist (ctes_of s) \" + apply (rule hoare_name_pre_state) + apply clarsimp + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 mdb_inv_preserve_modify_map getCTE_wp + setUntypedCapAsFull_valid_dlist mdb_inv_preserve_fun_upd | simp)+ + apply (intro conjI impI) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (intro conjI) + apply (clarsimp simp:valid_mdb_ctes_def no_0_def)+ + apply (frule mdb_chain_0_no_loops) + apply (simp add:no_0_def) + apply (rule valid_dlistI) + apply (case_tac "p = dest") + apply (clarsimp simp:modify_map_def nullPointer_def split:if_split_asm)+ + apply (frule(2) valid_dlist_prevD) + apply simp + apply (subgoal_tac "mdbPrev (cteMDBNode ctea) \ mdbNext (cteMDBNode ctea)") + prefer 2 + apply (clarsimp) + apply (drule(3) no_loops_no_l2_loop[rotated -1],simp) + apply (subgoal_tac "mdbPrev (cteMDBNode ctea) \ dest") + apply clarsimp+ + apply (frule_tac p = p and m = "ctes_of sa" in valid_dlist_prevD) + apply simp+ + apply fastforce + apply (case_tac "p = dest") + apply (clarsimp simp:modify_map_def nullPointer_def split:if_split_asm)+ + apply (frule(2) valid_dlist_nextD,clarsimp) + apply (clarsimp simp:modify_map_def nullPointer_def split:if_split_asm) + apply (frule(2) valid_dlist_nextD) + apply simp + apply (subgoal_tac "mdbPrev (cteMDBNode ctea) \ mdbNext (cteMDBNode ctea)") + prefer 2 + apply (clarsimp) + apply (drule(3) no_loops_no_l2_loop[rotated -1],simp) + apply clarsimp + apply (intro conjI impI) + apply clarsimp+ + apply (drule_tac cte = cte' in no_loops_no_l2_loop,simp) + apply simp+ + apply (frule(2) valid_dlist_nextD) + apply clarsimp + apply (frule_tac p = p and m = "ctes_of sa" in valid_dlist_nextD) + apply clarsimp+ + apply (rule conjI) + apply fastforce + apply (intro conjI impI,clarsimp+) + apply (frule_tac valid_dlist_nextD) + apply clarsimp+ + apply (frule_tac valid_dlist_nextD) + apply clarsimp+ + done + +lemma cteInsert_mdb' [wp]: + "\valid_mdb' and pspace_aligned' and pspace_distinct' and (\s. src \ dest) and K (capAligned cap) and valid_objs' and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s) \ + cteInsert cap src dest + \\_. valid_mdb'\" + apply (simp add:valid_mdb'_def valid_mdb_ctes_def) + apply (rule_tac Q = "\r s. 
valid_dlist (ctes_of s) \ irq_control (ctes_of s) \ + no_0 (ctes_of s) \ mdb_chain_0 (ctes_of s) \ + mdb_chunked (ctes_of s) \ untyped_mdb' (ctes_of s) \ untyped_inc' (ctes_of s) \ + Q s" for Q + in hoare_strengthen_post) + prefer 2 + apply clarsimp + apply assumption + apply (rule hoare_name_pre_state) + apply (wp cteInsert_no_0 cteInsert_valid_dlist cteInsert_mdb_chain_0 cteInsert_untyped_inc' + cteInsert_mdb_chunked cteInsert_untyped_mdb cteInsert_irq_control) + apply (unfold cteInsert_def) + apply (unfold cteInsert_def updateCap_def) + apply (simp add: valid_mdb'_def split del: if_split) + apply (wp updateMDB_ctes_of_no_0 getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setUntypedCapAsFull_ctes_of + setUntypedCapAsFull_ctes_of_no_0 + setUntypedCapAsFull_valid_dlist setUntypedCapAsFull_distinct_zombies + setUntypedCapAsFull_valid_badges setUntypedCapAsFull_caps_contained + setUntypedCapAsFull_valid_nullcaps setUntypedCapAsFull_ut_revocable + setUntypedCapAsFull_class_links setUntypedCapAsFull_reply_masters_rvk_fb + mdb_inv_preserve_fun_upd + mdb_inv_preserve_modify_map getCTE_wp| simp del:fun_upd_apply)+ + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + defer + apply (clarsimp simp:valid_mdb_ctes_def valid_mdb'_def simp del:fun_upd_apply)+ + apply (case_tac cte) + apply (rename_tac cap1 node1) + apply (case_tac x) + apply (rename_tac cap2 node2) + apply (case_tac node1) + apply (case_tac node2) + apply (clarsimp simp:valid_mdb_ctes_def no_0_def nullPointer_def) + apply (intro conjI impI) + apply clarsimp + apply (rename_tac s src_cap word1 word2 bool1a bool2a bool1 bool2) +proof - + fix s :: kernel_state + fix bool1 bool2 src_cap word1 word2 bool1a bool2a + let ?c1 = "(CTE src_cap (MDB word1 word2 bool1a bool2a))" + let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" + let ?C = "(modify_map + (modify_map + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (cteMDBNode_update (\a. MDB word1 src (isCapRevocable cap src_cap) (isCapRevocable cap src_cap)))) + src (cteMDBNode_update (mdbNext_update (\_. dest)))) + word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" + let ?m = "ctes_of s" + let ?prv = "\cte. mdbPrev (cteMDBNode cte)" + let ?nxt = "\cte. mdbNext (cteMDBNode cte)" + + assume "pspace_distinct' s" and "pspace_aligned' s" and srcdest: "src \ dest" + and dest0: "dest \ 0" + and cofs: "ctes_of s src = Some ?c1" and cofd: "ctes_of s dest = Some ?c2" + and is_der: "is_derived' (ctes_of s) src cap src_cap" + and aligned: "capAligned cap" + and vd: "valid_dlist ?m" + and no0: "?m 0 = None" + and chain: "mdb_chain_0 ?m" + and badges: "valid_badges ?m" + and chunk: "mdb_chunked ?m" + and contained: "caps_contained' ?m" + and untyped_mdb: "untyped_mdb' ?m" + and untyped_inc: "untyped_inc' ?m" + and class_links: "class_links ?m" + and distinct_zombies: "distinct_zombies ?m" + and irq: "irq_control ?m" + and reply_masters_rvk_fb: "reply_masters_rvk_fb ?m" + and vn: "valid_nullcaps ?m" + and ut_rev:"ut_revocable' ?m" + + have no_loop: "no_loops ?m" + apply (rule mdb_chain_0_no_loops[OF chain]) + apply (simp add:no_0_def no0) + done + + have badge: "badge_derived' cap src_cap" + using is_der + by (clarsimp simp:is_derived'_def) + + have vmdb: "valid_mdb_ctes ?m" + by (auto simp: vmdb_def valid_mdb_ctes_def no_0_def, fact+) + + have src0: "src \ 0" + using cofs no0 by clarsimp + + have destnull: + "cte_mdb_prop ?m dest (\m. 
mdbPrev m = 0 \ mdbNext m = 0)" + using cofd unfolding cte_mdb_prop_def + by auto + + have srcwd: "?m \ src \ word1" + using cofs by (simp add: next_unfold') + + have w1ned[simp]: "word1 \ dest" + proof (cases "word1 = 0") + case True thus ?thesis using dest0 by auto + next + case False + thus ?thesis using cofs cofd src0 dest0 False vd + by - (erule (1) valid_dlistEn, (clarsimp simp: nullPointer_def)+) + qed + + have w2ned[simp]: "word2 \ dest" + proof (cases "word2 = 0") + case True thus ?thesis using dest0 by auto + next + case False + thus ?thesis using cofs cofd src0 dest0 False vd + by - (erule (1) valid_dlistEp, (clarsimp simp: nullPointer_def)+) + qed + + have w1nes[simp]: "word1 \ src" using vmdb cofs + by - (drule (1) no_self_loop_next, simp) + + have w2nes[simp]: "word2 \ src" using vmdb cofs + by - (drule (1) no_self_loop_prev, simp) + + from is_der have notZomb1: "\ isZombie cap" + by (clarsimp simp: isCap_simps is_derived'_def badge_derived'_def) + + from is_der have notZomb2: "\ isZombie src_cap" + by (clarsimp simp: isCap_simps is_derived'_def) + + from badge have masters: "capMasterCap cap = capMasterCap src_cap" + by (clarsimp simp: badge_derived'_def) + + note blah[simp] = w2nes[symmetric] w1nes[symmetric] w1ned[symmetric] + w2ned[symmetric] srcdest srcdest[symmetric] + + have mdb_next_disj: + "\p p'. (?C \ p \ p' \ + ?m \ p \ p' \ p \ src \ p'\ dest \ (p' = word1 \ p' = 0) + \ p = src \ p' = dest \ p = dest \ p' = word1)" + apply (case_tac "p = src") + apply (clarsimp simp:mdb_next_unfold modify_map_cases) + apply (case_tac "p = dest") + apply (clarsimp simp:mdb_next_unfold modify_map_cases)+ + using cofs cofd vd no0 + apply - + apply (case_tac "p = word1") + apply clarsimp + apply (intro conjI) + apply clarsimp + apply (frule_tac p = "word1" and m = "?m" in valid_dlist_nextD) + apply clarsimp+ + apply (frule_tac p = "mdbNext node" and m = "?m" in valid_dlist_nextD) + apply clarsimp+ + apply (frule_tac p = "mdbNext node" in no_loops_no_l2_loop[OF _ no_loop]) + apply simp+ + apply (intro conjI) + apply clarsimp + apply (frule_tac p = p and m = "?m" in valid_dlist_nextD) + apply (clarsimp+)[3] + apply (intro impI) + apply (rule ccontr) + apply clarsimp + apply (frule_tac p = src and m = "?m" in valid_dlist_nextD) + apply clarsimp+ + apply (frule_tac p = p and m = "?m" in valid_dlist_nextD) + apply clarsimp+ + done + + have ctes_ofD: + "\p cte. \?C p = Some cte; p\ dest; p\ src\ \ \cteb. 
(?m p = Some cteb \ cteCap cte = cteCap cteb)" + by (clarsimp simp:modify_map_def split:if_splits) + + + show "valid_badges ?C" + using srcdest badge cofs badges cofd + unfolding valid_badges_def + apply (intro impI allI) + apply (drule mdb_next_disj) + apply (elim disjE) + defer + apply (clarsimp simp:modify_map_cases dest0 src0) + apply (clarsimp simp: Retype_H.isCapRevocable_def isCapRevocable_def badge_derived'_def) + subgoal by (case_tac src_cap,auto simp:isCap_simps sameRegionAs_def) + apply (clarsimp simp:modify_map_cases valid_badges_def) + apply (frule_tac x=src in spec, erule_tac x=word1 in allE, erule allE, erule impE) + apply fastforce + apply simp + apply (clarsimp simp:mdb_next_unfold badge_derived'_def split: if_split_asm) + apply (thin_tac "All P" for P) + subgoal by (cases src_cap, + auto simp:mdb_next_unfold isCap_simps sameRegionAs_def Let_def split: if_splits) + apply (case_tac "word1 = p'") + apply (clarsimp simp:modify_map_cases valid_badges_def mdb_next_unfold src0 dest0 no0)+ + apply (case_tac "p = dest") + apply (clarsimp simp:dest0 src0 no0)+ + apply (case_tac z) + apply (rename_tac capability mdbnode) + apply clarsimp + apply (drule_tac x = p in spec,drule_tac x = "mdbNext mdbnode" in spec) + by (auto simp:isCap_simps sameRegionAs_def) + + from badge + have isUntyped_eq: "isUntypedCap cap = isUntypedCap src_cap" + apply (clarsimp simp:badge_derived'_def) + apply (case_tac cap,auto simp:isCap_simps) + done + + from badge + have [simp]: "capRange cap = capRange src_cap" + apply (clarsimp simp:badge_derived'_def) + apply (case_tac cap) + apply (clarsimp simp:isCap_simps capRange_def)+ + (* 5 subgoals *) + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + (* 9 subgoals *) + apply (clarsimp simp:isCap_simps capRange_def)+ + done + + have [simp]: "untypedRange cap = untypedRange src_cap" + using badge + apply (clarsimp simp:badge_derived'_def dest!:capMaster_untypedRange) + done + + from contained badge srcdest cofs cofd is_der no0 + show "caps_contained' ?C" + apply (clarsimp simp add: caps_contained'_def) + apply (case_tac "p = dest") + apply (case_tac "p' = dest") + apply (clarsimp simp:modify_map_def split:if_splits) + apply (case_tac src_cap,auto)[1] + apply (case_tac "p' = src") + apply (clarsimp simp:modify_map_def split:if_splits) + apply (clarsimp simp:badge_derived'_def) + apply (case_tac src_cap,auto)[1] + apply (drule(2) ctes_ofD) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (frule capRange_untyped) + apply (erule_tac x=src in allE, erule_tac x=p' in allE, simp) + apply (case_tac cteb) + apply (clarsimp) + apply blast + apply (case_tac "p' = dest") + apply (case_tac "p = src") + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule capRange_untyped) + subgoal by (case_tac cap,auto simp:isCap_simps badge_derived'_def) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule_tac x = word1 in spec) + apply (drule_tac x = src in spec) + apply (case_tac z) + apply (clarsimp simp:isUntyped_eq) + apply blast + apply (drule_tac x = p in spec) + apply (drule_tac x = src in spec) + apply (frule capRange_untyped) + apply (clarsimp simp:isUntyped_eq) + apply blast + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (clarsimp simp:modify_map_def split:if_splits) + apply ((case_tac z,fastforce)+)[5] + by fastforce+ + + show "valid_nullcaps ?C" + using is_der vn cofs vd no0 + apply (simp add: valid_nullcaps_def) + apply (clarsimp simp:modify_map_def is_derived'_def) + apply (rule 
conjI) + apply (clarsimp simp: is_derived'_def badge_derived'_def)+ + apply (drule_tac x = word1 in spec) + apply (case_tac z) + apply (clarsimp simp:nullMDBNode_def) + apply (drule(1) valid_dlist_nextD) + apply simp + apply clarsimp + apply (simp add:nullPointer_def src0) + done + + from vmdb srcdest cofs ut_rev + show "ut_revocable' ?C" + apply (clarsimp simp: valid_mdb_ctes_def ut_revocable'_def modify_map_def) + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: Retype_H.isCapRevocable_def isCapRevocable_def isCap_simps)+ + apply auto + apply (drule_tac x= src in spec) + apply clarsimp + apply (case_tac z) + apply clarsimp + done + + from class_links srcdest badge cofs cofd no0 vd + show "class_links ?C" + unfolding class_links_def + apply (intro allI impI) + apply (drule mdb_next_disj) + apply (elim disjE) + apply (clarsimp simp:modify_map_def mdb_next_unfold split:if_split_asm) + apply (clarsimp simp: badge_derived'_def modify_map_def + split: if_split_asm) + apply (erule capMaster_capClass) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule_tac x = src in spec) + apply (drule_tac x = word1 in spec) + apply (clarsimp simp:mdb_next_unfold) + apply (case_tac z) + apply (clarsimp simp:badge_derived'_def) + apply (drule capMaster_capClass) + apply simp + done + + from distinct_zombies badge + show "distinct_zombies ?C" + apply (simp add:distinct_zombies_nonCTE_modify_map) + apply (erule_tac distinct_zombies_copyMasterE[where x=src]) + apply (rule cofs) + apply (simp add: masters) + apply (simp add: notZomb1 notZomb2) + done + + from reply_masters_rvk_fb is_der + show "reply_masters_rvk_fb ?C" + apply (clarsimp simp:reply_masters_rvk_fb_def) + apply (erule ranE) + apply (clarsimp simp:modify_map_def split:if_split_asm) + apply fastforce+ + apply (clarsimp simp:is_derived'_def isCap_simps) + apply fastforce + done +qed + +crunch state_refs_of'[wp]: cteInsert "\s. P (state_refs_of' s)" + (wp: crunch_wps) + +lemma setCTE_state_hyp_refs_of'[wp]: + "\\s. P (state_hyp_refs_of' s)\ setCTE p cte \\rv s. P (state_hyp_refs_of' s)\" + unfolding setCTE_def + apply (rule setObject_state_hyp_refs_of_eq) + apply (clarsimp simp: updateObject_cte in_monad typeError_def + in_magnitude_check objBits_simps + split: kernel_object.split_asm if_split_asm) + done + +crunch state_hyp_refs_of'[wp]: cteInsert "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps) + +crunch aligned'[wp]: cteInsert pspace_aligned' + (wp: crunch_wps) + +crunch distinct'[wp]: cteInsert pspace_distinct' + (wp: crunch_wps) + +crunch no_0_obj' [wp]: cteInsert no_0_obj' + (wp: crunch_wps) + +lemma cteInsert_valid_pspace: + "\valid_pspace' and valid_cap' cap and (\s. src \ dest) and valid_objs' and + (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\_. valid_pspace'\" + unfolding valid_pspace'_def + apply (rule hoare_pre) + apply (wp cteInsert_valid_objs) + apply (fastforce elim: valid_capAligned) + done + +lemma setCTE_ko_wp_at_live[wp]: + "\\s. P (ko_wp_at' live' p' s)\ + setCTE p v + \\rv s. 
P (ko_wp_at' live' p' s)\" + apply (clarsimp simp: setCTE_def setObject_def split_def + valid_def in_monad ko_wp_at'_def + split del: if_split + elim!: rsubst[where P=P]) + apply (drule(1) updateObject_cte_is_tcb_or_cte [OF _ refl, rotated]) + apply (elim exE conjE disjE) + apply (clarsimp simp: ps_clear_upd objBits_simps live'_def hyp_live'_def + lookupAround2_char1) + apply (simp add: tcb_cte_cases_def split: if_split_asm) + apply (clarsimp simp: ps_clear_upd objBits_simps live'_def) + done + +lemma setCTE_iflive': + "\\s. cte_wp_at' (\cte'. \p'\zobj_refs' (cteCap cte') + - zobj_refs' (cteCap cte). + ko_wp_at' (Not \ live') p' s) p s + \ if_live_then_nonz_cap' s\ + setCTE p cte + \\rv s. if_live_then_nonz_cap' s\" + unfolding if_live_then_nonz_cap'_def ex_nonz_cap_to'_def + apply (rule hoare_pre) + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_vcg_ex_lift setCTE_weak_cte_wp_at) + apply clarsimp + apply (drule spec, drule(1) mp) + apply clarsimp + apply (rule_tac x=cref in exI) + apply (clarsimp simp: cte_wp_at'_def) + apply (rule ccontr) + apply (drule bspec, fastforce) + apply (clarsimp simp: ko_wp_at'_def) + done + +lemma updateMDB_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s\ + updateMDB p m + \\rv s. if_live_then_nonz_cap' s\" + apply (clarsimp simp: updateMDB_def) + apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (wp setCTE_iflive') + apply (clarsimp elim!: cte_wp_at_weakenE') + done + +lemma updateCap_iflive': + "\\s. cte_wp_at' (\cte'. \p'\zobj_refs' (cteCap cte') + - zobj_refs' cap. + ko_wp_at' (Not \ live') p' s) p s + \ if_live_then_nonz_cap' s\ + updateCap p cap + \\rv s. if_live_then_nonz_cap' s\" + apply (simp add: updateCap_def) + apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (wp setCTE_iflive') + apply (clarsimp elim!: cte_wp_at_weakenE') + done + +lemma setCTE_ko_wp_at_not_live[wp]: + "\\s. P (ko_wp_at' (Not \ live') p' s)\ + setCTE p v + \\rv s. P (ko_wp_at' (Not \ live') p' s)\" + apply (clarsimp simp: setCTE_def setObject_def split_def + valid_def in_monad ko_wp_at'_def + split del: if_split + elim!: rsubst[where P=P]) + apply (drule(1) updateObject_cte_is_tcb_or_cte [OF _ refl, rotated]) + apply (elim exE conjE disjE) + apply (clarsimp simp: ps_clear_upd objBits_simps live'_def hyp_live'_def + lookupAround2_char1) + apply (simp add: tcb_cte_cases_def split: if_split_asm) + apply (clarsimp simp: ps_clear_upd objBits_simps live'_def) + done + +lemma setUntypedCapAsFull_ko_wp_not_at'[wp]: + "\\s. P (ko_wp_at' (Not \ live') p' s)\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P ( ko_wp_at' (Not \ live') p' s)\" + apply (clarsimp simp:setUntypedCapAsFull_def updateCap_def) + apply (wp setCTE_ko_wp_at_live setCTE_ko_wp_at_not_live) +done + +lemma setUntypedCapAsFull_ko_wp_at'[wp]: + "\\s. P (ko_wp_at' live' p' s)\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\r s. P ( ko_wp_at' live' p' s)\" + apply (clarsimp simp:setUntypedCapAsFull_def updateCap_def) + apply (wp setCTE_ko_wp_at_live setCTE_ko_wp_at_live) + done + +(*FIXME:MOVE*) +lemma zobj_refs'_capFreeIndex_update[simp]: + "isUntypedCap ctecap \ + zobj_refs' (capFreeIndex_update f (ctecap)) = zobj_refs' ctecap" + by (case_tac ctecap,auto simp:isCap_simps) + +lemma setUntypedCapAsFull_if_live_then_nonz_cap': + "\\s. if_live_then_nonz_cap' s \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\rv s. 
if_live_then_nonz_cap' s\" + apply (clarsimp simp:if_live_then_nonz_cap'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift) + apply (clarsimp simp:setUntypedCapAsFull_def split del: if_split) + apply (wp hoare_vcg_if_split) + apply (clarsimp simp:ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (wp updateCap_ctes_of_wp)+ + apply clarsimp + apply (elim allE impE) + apply (assumption) + apply (clarsimp simp:ex_nonz_cap_to'_def cte_wp_at_ctes_of modify_map_def split:if_splits) + apply (rule_tac x = cref in exI) + apply (intro conjI impI; clarsimp) + done + + +lemma maskedAsFull_simps[simp]: + "maskedAsFull capability.NullCap cap = capability.NullCap" + by (auto simp:maskedAsFull_def) + +lemma cteInsert_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ cte_wp_at' (\c. cteCap c = NullCap) dest s\ + cteInsert cap src dest + \\rv. if_live_then_nonz_cap'\" + apply (simp add: cteInsert_def split del: if_split) + apply (wp updateCap_iflive' hoare_drop_imps) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (wp hoare_vcg_conj_lift hoare_vcg_ex_lift hoare_vcg_ball_lift getCTE_wp + setUntypedCapAsFull_ctes_of setUntypedCapAsFull_if_live_then_nonz_cap')+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (intro conjI) + apply (rule_tac x = "case (ctes_of s dest) of Some a \a" in exI) + apply (clarsimp) + apply (case_tac cte,simp) + apply clarsimp+ + done + +lemma ifunsafe'_def2: + "if_unsafe_then_cap' = + (\s. \cref cte. ctes_of s cref = Some cte \ cteCap cte \ NullCap + \ (\cref' cte'. ctes_of s cref' = Some cte' + \ cref \ cte_refs' (cteCap cte') (irq_node' s)))" + by (fastforce simp: if_unsafe_then_cap'_def cte_wp_at_ctes_of ex_cte_cap_to'_def) + +lemma ifunsafe'_def3: + "if_unsafe_then_cap' = + (\s. \cref cap. cteCaps_of s cref = Some cap \ cap \ NullCap + \ (\cref' cap'. cteCaps_of s cref' = Some cap' + \ cref \ cte_refs' cap' (irq_node' s)))" + by (fastforce simp: cteCaps_of_def o_def ifunsafe'_def2) + +lemma tree_cte_cteCap_eq: + "cte_wp_at' (P \ cteCap) p s = (case_option False P (cteCaps_of s p))" + apply (simp add: cte_wp_at_ctes_of cteCaps_of_def) + apply (cases "ctes_of s p", simp_all) + done + +lemma updateMDB_cteCaps_of: + "\\s. P (cteCaps_of s)\ updateMDB ptr f \\rv s. P (cteCaps_of s)\" + apply (simp add: cteCaps_of_def) + apply (wp updateMDB_ctes_of_wp) + apply (safe elim!: rsubst [where P=P] intro!: ext) + apply (case_tac "ctes_of s x") + apply (clarsimp simp: modify_map_def)+ + done + +lemma setCTE_ksInterruptState[wp]: + "\\s. P (ksInterruptState s)\ setCTE param_a param_b \\_ s. P (ksInterruptState s)\" + by (wp setObject_ksInterrupt updateObject_cte_inv | simp add: setCTE_def)+ + +crunch ksInterruptState[wp]: cteInsert "\s. P (ksInterruptState s)" + (wp: crunch_wps) + +lemmas updateMDB_cteCaps_of_ksInt[wp] + = hoare_use_eq [where f=ksInterruptState, OF updateMDB_ksInterruptState updateMDB_cteCaps_of] + +lemma updateCap_cteCaps_of: + "\\s. P (modify_map (cteCaps_of s) ptr (K cap))\ updateCap ptr cap \\rv s. P (cteCaps_of s)\" + apply (simp add: cteCaps_of_def) + apply (wp updateCap_ctes_of_wp) + apply (erule rsubst [where P=P]) + apply (case_tac "ctes_of s ptr"; fastforce simp: modify_map_def) + done + +lemmas updateCap_cteCaps_of_int[wp] + = hoare_use_eq[where f=ksInterruptState, OF updateCap_ksInterruptState updateCap_cteCaps_of] + +lemma getCTE_cteCap_wp: + "\\s. case (cteCaps_of s ptr) of None \ True | Some cap \ Q cap s\ getCTE ptr \\rv. 
Q (cteCap rv)\" + apply (wp getCTE_wp) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of) + done + +lemma capFreeIndex_update_cte_refs'[simp]: + "isUntypedCap a \ cte_refs' (capFreeIndex_update f a) = cte_refs' a " + apply (rule ext) + apply (clarsimp simp:isCap_simps) + done + +lemma cteInsert_ifunsafe'[wp]: + "\if_unsafe_then_cap' and cte_wp_at' (\c. cteCap c = NullCap) dest + and ex_cte_cap_to' dest\ + cteInsert cap src dest + \\rv s. if_unsafe_then_cap' s\" + apply (simp add: ifunsafe'_def3 cteInsert_def setUntypedCapAsFull_def + split del: if_split) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of ex_cte_cap_to'_def + cteCaps_of_def + dest!: modify_map_K_D + split: if_split_asm) + apply (intro conjI) + apply clarsimp + apply (erule_tac x = crefa in allE) + apply (clarsimp simp:modify_map_def split:if_split_asm) + apply (rule_tac x = cref in exI) + apply fastforce + apply (clarsimp simp:isCap_simps) + apply (rule_tac x = cref' in exI) + apply fastforce + apply (intro conjI impI) + apply clarsimp + apply (rule_tac x = cref' in exI) + apply fastforce + apply (clarsimp simp:modify_map_def) + apply (erule_tac x = crefa in allE) + apply (intro conjI impI) + apply clarsimp + apply (rule_tac x = cref in exI) + apply fastforce + apply (clarsimp simp:isCap_simps) + apply (rule_tac x = cref' in exI) + apply fastforce +done + +lemma setCTE_inQ[wp]: + "\\s. P (obj_at' (inQ d p) t s)\ setCTE ptr v \\rv s. P (obj_at' (inQ d p) t s)\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb') + apply (simp_all add: inQ_def) + done + +lemma setCTE_valid_queues'[wp]: + "\valid_queues'\ setCTE p cte \\rv. valid_queues'\" + apply (simp only: valid_queues'_def imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +crunch inQ[wp]: cteInsert "\s. P (obj_at' (inQ d p) t s)" + (wp: crunch_wps) + +lemma setCTE_it'[wp]: + "\\s. P (ksIdleThread s)\ setCTE c p \\_ s. P (ksIdleThread s)\" + apply (simp add: setCTE_def setObject_def split_def updateObject_cte) + by (wpsimp+; auto) + +lemma setCTE_idle [wp]: + "\valid_idle'\ setCTE p cte \\rv. valid_idle'\" + apply (simp add: valid_idle'_def) + apply (rule hoare_lift_Pf [where f="ksIdleThread"]) + apply (intro hoare_vcg_conj_lift; (solves \wpsimp\)?) + apply (clarsimp simp: setCTE_def) + apply (rule setObject_cte_obj_at_tcb'[where P="idle_tcb'", simplified]) + apply wpsimp + done + +lemma getCTE_no_idle_cap: + "\valid_global_refs'\ + getCTE p + \\rv s. ksIdleThread s \ capRange (cteCap rv)\" + apply (wp getCTE_wp) + apply (clarsimp simp: valid_global_refs'_def valid_refs'_def cte_wp_at_ctes_of) + apply blast + done + +lemma updateMDB_idle'[wp]: + "\valid_idle'\ updateMDB p m \\rv. valid_idle'\" + apply (clarsimp simp add: updateMDB_def) + apply (rule hoare_pre) + apply (wp | simp add: valid_idle'_def)+ + by fastforce + +lemma updateCap_idle': + "\valid_idle'\ updateCap p c \\rv. valid_idle'\" + apply (simp add: updateCap_def) + apply (wp | simp)+ + done + +crunch idle [wp]: setUntypedCapAsFull "valid_idle'" + (wp: crunch_wps simp: cte_wp_at_ctes_of) + +lemma cteInsert_idle'[wp]: + "\valid_idle'\ cteInsert cap src dest \\rv. valid_idle'\" + apply (simp add: cteInsert_def) + apply (wp updateMDB_idle' updateCap_idle' | rule hoare_drop_imp | simp)+ + done + +lemma setCTE_arch [wp]: + "\\s. P (ksArchState s)\ setCTE p c \\_ s. 
P (ksArchState s)\" + apply (simp add: setCTE_def setObject_def split_def updateObject_cte) + apply (wpsimp+; auto) + done + +lemma setCTE_valid_arch[wp]: + "\valid_arch_state'\ setCTE p c \\_. valid_arch_state'\" + apply (wp valid_arch_state_lift' setCTE_typ_at') + apply (simp add: setCTE_def) + apply (clarsimp simp: setObject_def split_def valid_def in_monad) + apply (rule_tac P=P in rsubst, assumption) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (erule disjE) + apply (clarsimp simp: ko_wp_at'_def lookupAround2_char1 is_vcpu'_def ps_clear_upd) + apply (clarsimp simp: ko_wp_at'_def lookupAround2_char1 is_vcpu'_def ps_clear_upd) + apply assumption + done + +lemma setCTE_global_refs[wp]: + "\\s. P (global_refs' s)\ setCTE p c \\_ s. P (global_refs' s)\" + apply (simp add: setCTE_def setObject_def split_def updateObject_cte global_refs'_def) + apply (wpsimp+; auto) + done + +lemma setCTE_gsMaxObjectSize[wp]: + "\\s. P (gsMaxObjectSize s)\ setCTE p c \\_ s. P (gsMaxObjectSize s)\" + apply (simp add: setCTE_def setObject_def split_def updateObject_cte) + apply (wpsimp+; auto) + done + +lemma setCTE_valid_globals[wp]: + "\valid_global_refs' and (\s. kernel_data_refs \ capRange (cteCap c) = {}) + and (\s. 2 ^ capBits (cteCap c) \ gsMaxObjectSize s)\ + setCTE p c + \\_. valid_global_refs'\" + apply (simp add: valid_global_refs'_def valid_refs'_def pred_conj_def) + apply (rule hoare_lift_Pf2 [where f=global_refs']) + apply (rule hoare_lift_Pf2 [where f=gsMaxObjectSize]) + apply wp + apply (clarsimp simp: ran_def valid_cap_sizes'_def) + apply metis + apply wp+ + done + +lemma updateMDB_global_refs [wp]: + "\valid_global_refs'\ updateMDB p m \\rv. valid_global_refs'\" + apply (clarsimp simp add: updateMDB_def) + apply (rule hoare_pre) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_global_refs'_def valid_refs'_def valid_cap_sizes'_def) + apply blast + done + +lemma updateCap_global_refs [wp]: + "\valid_global_refs' and (\s. kernel_data_refs \ capRange cap = {}) + and (\s. 2 ^ capBits cap \ gsMaxObjectSize s)\ + updateCap p cap + \\rv. valid_global_refs'\" + apply (clarsimp simp add: updateCap_def) + apply (rule hoare_pre) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +crunch arch [wp]: cteInsert "\s. P (ksArchState s)" + (wp: crunch_wps simp: cte_wp_at_ctes_of) + +crunches cteInsert + for valid_arch[wp]: valid_arch_state' + (wp: crunch_wps) + +lemma cteInsert_valid_irq_handlers'[wp]: + "\\s. valid_irq_handlers' s \ (\irq. cap = IRQHandlerCap irq \ irq_issued' irq s)\ + cteInsert cap src dest + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def cteInsert_def irq_issued'_def setUntypedCapAsFull_def) + apply (wp getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (intro conjI impI) + apply (clarsimp simp:ran_dom modify_map_dom) + apply (drule bspec) + apply fastforce + apply (clarsimp simp:isCap_simps modify_map_def split:if_splits) + apply (clarsimp simp:ran_dom modify_map_dom) + apply (drule bspec) + apply fastforce + apply (clarsimp simp:modify_map_def split:if_splits) + done + +lemma setCTE_arch_ctes_of_wp [wp]: + "\\s. P (ksArchState s) ((ctes_of s)(p \ cte))\ + setCTE p cte + \\rv s. 
P (ksArchState s) (ctes_of s)\" + apply (simp add: setCTE_def ctes_of_setObject_cte) + apply (clarsimp simp: setObject_def split_def valid_def in_monad) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (elim exE conjE disjE rsubst[where P="P (ksArchState s)" for s]) + apply (clarsimp simp: lookupAround2_char1) + apply (subst map_to_ctes_upd_tcb; assumption?) + apply (clarsimp simp: mask_def objBits_defs field_simps ps_clear_def3) + apply (clarsimp simp: tcb_cte_cases_change) + apply (erule rsubst[where P="P (ksArchState s)" for s]) + apply (rule ext, clarsimp) + apply (intro conjI impI) + apply (clarsimp simp: tcb_cte_cases_def split: if_split_asm) + apply (drule(1) cte_wp_at_tcbI'[where P="(=) cte"]) + apply (simp add: ps_clear_def3 field_simps) + apply assumption+ + apply (simp add: cte_wp_at_ctes_of) + by (clarsimp simp: map_to_ctes_upd_cte ps_clear_def3 field_simps mask_def) + +lemma setCTE_irq_states' [wp]: + "\valid_irq_states'\ setCTE x y \\_. valid_irq_states'\" + apply (rule valid_irq_states_lift') + apply wp + apply (simp add: setCTE_def) + apply (wp setObject_ksMachine) + apply (simp add: updateObject_cte) + apply (rule hoare_pre) + apply (wp unless_wp|wpc|simp)+ + apply fastforce + apply assumption + done + +crunch irq_states' [wp]: cteInsert valid_irq_states' + (wp: crunch_wps) + +crunch pred_tcb_at'[wp]: cteInsert "pred_tcb_at' proj P t" + (wp: crunch_wps) + +crunch state_hyp_refs_of'[wp]: setupReplyMaster "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps) + +lemma setCTE_cteCaps_of[wp]: + "\\s. P ((cteCaps_of s)(p \ cteCap cte))\ + setCTE p cte + \\rv s. P (cteCaps_of s)\" + apply (simp add: cteCaps_of_def) + apply wp + apply (fastforce elim!: rsubst[where P=P]) + done + +crunches setupReplyMaster + for inQ[wp]: "\s. P (obj_at' (inQ d p) t s)" + and norq[wp]: "\s. P (ksReadyQueues s)" + and ct[wp]: "\s. P (ksCurThread s)" + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and it[wp]: "\s. P (ksIdleThread s)" + and nosch[wp]: "\s. P (ksSchedulerAction s)" + and irq_node'[wp]: "\s. P (irq_node' s)" + (wp: crunch_wps) + +lemmas setCTE_cteCap_wp_irq[wp] = + hoare_use_eq_irq_node' [OF setCTE_ksInterruptState setCTE_cteCaps_of] + +crunch global_refs'[wp]: setUntypedCapAsFull "\s. P (global_refs' s) " + (simp: crunch_simps) + + +lemma setUntypedCapAsFull_valid_refs'[wp]: + "\\s. valid_refs' R (ctes_of s) \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\yb s. valid_refs' R (ctes_of s)\" + apply (clarsimp simp:valid_refs'_def setUntypedCapAsFull_def split del:if_split) + apply (wp updateCap_ctes_of_wp) + apply (clarsimp simp:ran_dom) + apply (drule_tac x = y in bspec) + apply (drule_tac a = y in domI) + apply (simp add:modify_map_dom) + apply (clarsimp simp:modify_map_def cte_wp_at_ctes_of isCap_simps split:if_splits) + done + +crunch gsMaxObjectSize[wp]: setUntypedCapAsFull "\s. P (gsMaxObjectSize s)" + +lemma setUntypedCapAsFull_sizes[wp]: + "\\s. valid_cap_sizes' sz (ctes_of s) \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\rv s. 
valid_cap_sizes' sz (ctes_of s)\" + apply (clarsimp simp:valid_cap_sizes'_def setUntypedCapAsFull_def split del:if_split) + apply (rule hoare_pre) + apply (wp updateCap_ctes_of_wp | wps)+ + apply (clarsimp simp:ran_dom) + apply (drule_tac x = y in bspec) + apply (drule_tac a = y in domI) + apply (simp add:modify_map_dom) + apply (clarsimp simp:modify_map_def cte_wp_at_ctes_of isCap_simps split:if_splits) + done + +lemma setUntypedCapAsFull_valid_global_refs'[wp]: + "\\s. valid_global_refs' s \ cte_wp_at' ((=) srcCTE) src s\ + setUntypedCapAsFull (cteCap srcCTE) cap src + \\yb s. valid_global_refs' s\" + apply (clarsimp simp: valid_global_refs'_def) + apply (rule hoare_pre,wps) + apply wp + apply simp +done + +lemma capMaster_eq_capBits_eq: + "capMasterCap cap = capMasterCap cap' \ capBits cap = capBits cap'" + by (metis capBits_Master) + +lemma valid_global_refsD_with_objSize: + "\ ctes_of s p = Some cte; valid_global_refs' s \ \ + kernel_data_refs \ capRange (cteCap cte) = {} \ global_refs' s \ kernel_data_refs + \ 2 ^ capBits (cteCap cte) \ gsMaxObjectSize s" + by (clarsimp simp: valid_global_refs'_def valid_refs'_def valid_cap_sizes'_def ran_def) blast + +lemma cteInsert_valid_globals [wp]: + "\valid_global_refs' and (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s)\ + cteInsert cap src dest + \\rv. valid_global_refs'\" + apply (simp add: cteInsert_def) + apply (rule hoare_pre) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def) + apply (frule capMaster_eq_capBits_eq) + apply (drule capMaster_capRange) + apply (drule (1) valid_global_refsD_with_objSize) + apply simp + done + +lemma setCTE_ksMachine[wp]: + "\\s. P (ksMachineState s)\ setCTE x y \\_ s. P (ksMachineState s)\" + apply (clarsimp simp: setCTE_def) + apply (wp setObject_ksMachine) + apply (clarsimp simp: updateObject_cte + split: Structures_H.kernel_object.splits) + apply (safe, (wp unless_wp | simp)+) + done + +crunch ksMachine[wp]: cteInsert "\s. P (ksMachineState s)" + (wp: crunch_wps) + +lemma cteInsert_vms'[wp]: + "\valid_machine_state'\ cteInsert cap src dest \\rv. valid_machine_state'\" + apply (simp add: cteInsert_def valid_machine_state'_def pointerInDeviceData_def + pointerInUserData_def) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (wp setObject_typ_at_inv setObject_ksMachine updateObject_default_inv | + intro hoare_drop_imp|assumption)+ + done + +crunch pspace_domain_valid[wp]: cteInsert "pspace_domain_valid" + (wp: crunch_wps) + +lemma setCTE_ct_not_inQ[wp]: + "\ct_not_inQ\ setCTE ptr cte \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF setCTE_nosch]) + apply (simp add: setCTE_def ct_not_inQ_def) + apply (rule hoare_weaken_pre) + apply (wps setObject_cte_ct) + apply (rule setObject_cte_obj_at_tcb') + apply (clarsimp simp add: obj_at'_def)+ + done + +crunch ct_not_inQ[wp]: cteInsert "ct_not_inQ" + (simp: crunch_simps wp: hoare_drop_imp) + +lemma setCTE_ksCurDomain[wp]: + "\\s. P (ksCurDomain s)\ + setCTE p cte + \\rv s. P (ksCurDomain s)\" + apply (simp add: setCTE_def) + apply wp + done + +lemma setObject_cte_ksDomSchedule[wp]: "\ \s. P (ksDomSchedule s) \ setObject ptr (v::cte) \ \_ s. P (ksDomSchedule s) \" + apply (simp add: setObject_def split_def) + apply (wp updateObject_cte_inv | simp)+ + done + +lemma setCTE_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ + setCTE p cte + \\rv s. P (ksDomSchedule s)\" + apply (simp add: setCTE_def) + apply wp + done + +crunch ksCurDomain[wp]: cteInsert "\s. 
P (ksCurDomain s)" + (wp: crunch_wps ) + +crunch ksIdleThread[wp]: cteInsert "\s. P (ksIdleThread s)" + (wp: crunch_wps) + +crunch ksDomSchedule[wp]: cteInsert "\s. P (ksDomSchedule s)" + (wp: crunch_wps) + +lemma setCTE_tcbDomain_inv[wp]: + "\obj_at' (\tcb. P (tcbDomain tcb)) t\ setCTE ptr v \\_. obj_at' (\tcb. P (tcbDomain tcb)) t\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +crunch tcbDomain_inv[wp]: cteInsert "obj_at' (\tcb. P (tcbDomain tcb)) t" + (wp: crunch_simps hoare_drop_imps) + +lemma setCTE_tcbPriority_inv[wp]: + "\obj_at' (\tcb. P (tcbPriority tcb)) t\ setCTE ptr v \\_. obj_at' (\tcb. P (tcbPriority tcb)) t\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +crunch tcbPriority_inv[wp]: cteInsert "obj_at' (\tcb. P (tcbPriority tcb)) t" + (wp: crunch_simps hoare_drop_imps) + + +lemma cteInsert_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ cteInsert a b c \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift)+ + done + +lemma setObject_cte_domIdx: + "\\s. P (ksDomScheduleIdx s)\ setObject t (v::cte) \\rv s. P (ksDomScheduleIdx s)\" + by (clarsimp simp: valid_def setCTE_def[symmetric] dest!: setCTE_pspace_only) + +crunch ksDomScheduleIdx[wp]: cteInsert "\s. P (ksDomScheduleIdx s)" + (wp: setObject_cte_domIdx hoare_drop_imps) + +crunch gsUntypedZeroRanges[wp]: cteInsert "\s. P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_cte_inv crunch_wps) + +definition + "untyped_derived_eq cap cap' + = (isUntypedCap cap \ cap = cap')" + +lemma ran_split: + "inj_on m (dom m) + \ ran (\x. if P x then m' x else m x) + = ((ran m - (ran (restrict_map m (Collect P)))) + \ (ran (restrict_map m' (Collect P))))" + apply (clarsimp simp: ran_def restrict_map_def set_eq_iff) + apply (safe, simp_all) + apply (auto dest: inj_onD[OF _ trans[OF _ sym]]) + done + +lemma ran_split_eq: + "inj_on m (dom m) + \ \x. \ P x \ m' x = m x + \ ran m' + = ((ran m - (ran (restrict_map m (Collect P)))) + \ (ran (restrict_map m' (Collect P))))" + apply (rule trans[rotated], erule ran_split) + apply (rule arg_cong[where f=ran]) + apply auto + done + +lemma usableUntypedRange_uniq: + "cteCaps_of s x = Some cp + \ cteCaps_of s y = Some cp' + \ isUntypedCap cp + \ isUntypedCap cp' + \ capAligned cp + \ capAligned cp' + \ untyped_inc' (ctes_of s) + \ usableUntypedRange cp = usableUntypedRange cp' + \ usableUntypedRange cp \ {} + \ x = y" + apply (cases "the (ctes_of s x)") + apply (cases "the (ctes_of s y)") + apply (clarsimp simp: cteCaps_of_def) + apply (frule untyped_incD'[where p=x and p'=y], simp+) + apply (drule(1) usableRange_subseteq)+ + apply blast + done + +lemma usableUntypedRange_empty: + "valid_cap' cp s \ isUntypedCap cp + \ (usableUntypedRange cp = {}) = (capFreeIndex cp = maxFreeIndex (capBlockSize cp))" + apply (clarsimp simp: isCap_simps max_free_index_def valid_cap_simps' capAligned_def) + apply (rule order_trans, rule word_plus_mono_right) + apply (rule_tac x="2 ^ capBlockSize cp - 1" in word_of_nat_le) + apply (simp add: unat_2p_sub_1 untypedBits_defs) + apply (simp add: field_simps is_aligned_no_overflow) + apply (simp add: field_simps mask_def) + done + +lemma restrict_map_is_map_comp: + "restrict_map m S = m \\<^sub>m (\x. 
if x \ S then Some x else None)" + by (simp add: restrict_map_def map_comp_def fun_eq_iff) + +lemma untypedZeroRange_to_usableCapRange: + "untypedZeroRange c = Some (x, y) \ valid_cap' c s + \ isUntypedCap c \ usableUntypedRange c = {x .. y} + \ x \ y" + apply (clarsimp simp: untypedZeroRange_def split: if_split_asm) + apply (frule(1) usableUntypedRange_empty) + apply (clarsimp simp: isCap_simps valid_cap_simps' max_free_index_def) + apply (simp add: getFreeRef_def mask_def add_diff_eq) + done + +lemma untyped_ranges_zero_delta: + assumes urz: "untyped_ranges_zero' s" + and other: "\p. p \ set xs \ cps' p = cteCaps_of s p" + and vmdb: "valid_mdb' s" + and vobj: "valid_objs' s" + and eq: "ran (restrict_map (untypedZeroRange \\<^sub>m cteCaps_of s) (set xs)) + \ gsUntypedZeroRanges s + \ utr' = ((gsUntypedZeroRanges s - ran (restrict_map (untypedZeroRange \\<^sub>m cteCaps_of s) (set xs))) + \ ran (restrict_map (untypedZeroRange \\<^sub>m cps') (set xs)))" + notes Collect_const[simp del] + shows "untyped_ranges_zero_inv cps' utr'" + apply (subst eq) + apply (clarsimp simp: urz[unfolded untyped_ranges_zero_inv_def]) + apply (fastforce simp: map_comp_Some_iff restrict_map_Some_iff elim!: ranE)[1] + apply (simp add: untyped_ranges_zero_inv_def urz[unfolded untyped_ranges_zero_inv_def]) + apply (rule sym, rule trans, rule_tac P="\x. x \ set xs" + and m="untypedZeroRange \\<^sub>m cteCaps_of s" in ran_split_eq) + apply (rule_tac B="dom (untypedZeroRange \\<^sub>m (\cp. if valid_cap' cp s + then Some cp else None) \\<^sub>m cteCaps_of s)" in subset_inj_on[rotated]) + apply (clarsimp simp: map_comp_Some_iff cteCaps_of_def) + apply (case_tac "the (ctes_of s x)", clarsimp) + apply (frule ctes_of_valid_cap'[OF _ vobj]) + apply blast + apply (cut_tac vmdb) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (clarsimp intro!: inj_onI simp: map_comp_Some_iff + split: if_split_asm) + apply (drule(1) untypedZeroRange_to_usableCapRange)+ + apply (clarsimp) + apply (drule(2) usableUntypedRange_uniq, (simp add: valid_capAligned)+) + apply (simp add: map_comp_def other) + apply (simp add: restrict_map_is_map_comp) + done + +lemma ran_restrict_map_insert: + "ran (restrict_map m (insert x S)) = (set_option (m x) \ ran (restrict_map m S))" + by (auto simp add: ran_def restrict_map_Some_iff) + +lemmas untyped_ranges_zero_fun_upd + = untyped_ranges_zero_delta[where xs="[x]" and cps'="cps(x \ cp)", + simplified ran_restrict_map_insert list.simps, simplified] for x cps cp + +lemma cteInsert_untyped_ranges_zero[wp]: + "\untyped_ranges_zero' and (\s. src \ dest) and valid_mdb' + and valid_objs' + and cte_wp_at' (untyped_derived_eq cap o cteCap) src\ + cteInsert cap src dest + \\rv. 
untyped_ranges_zero'\" + apply (rule hoare_pre) + apply (rule untyped_ranges_zero_lift, wp) + apply (simp add: cteInsert_def setUntypedCapAsFull_def) + apply (wp getCTE_wp' | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def cteCaps_of_def + fun_upd_def[symmetric]) + apply (intro impI conjI allI; erule + untyped_ranges_zero_delta[where xs="[src, dest]", unfolded cteCaps_of_def], + simp_all add: ran_restrict_map_insert) + apply (clarsimp simp: isCap_simps untypedZeroRange_def + untyped_derived_eq_def badge_derived'_def + split: if_split_asm) + apply blast + apply (case_tac "isUntypedCap cap", simp_all add: untyped_derived_eq_def) + apply (clarsimp simp: isCap_simps untypedZeroRange_def + untyped_derived_eq_def badge_derived'_def + split: if_split_asm) + apply blast + done + +lemma cteInsert_invs: + "\invs' and cte_wp_at' (\c. cteCap c=NullCap) dest and valid_cap' cap and + (\s. src \ dest) and (\s. cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s) + and cte_wp_at' (untyped_derived_eq cap o cteCap) src + and ex_cte_cap_to' dest and (\s. \irq. cap = IRQHandlerCap irq \ irq_issued' irq s)\ + cteInsert cap src dest + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wpsimp wp: cur_tcb_lift tcb_in_cur_domain'_lift sch_act_wf_lift CSpace_R.valid_queues_lift + valid_irq_node_lift valid_queues_lift' irqs_masked_lift cteInsert_norq + simp: st_tcb_at'_def) + apply (auto simp: invs'_def valid_state'_def valid_pspace'_def elim: valid_capAligned) + done + +lemma deriveCap_corres: + "\cap_relation c c'; cte = cte_map slot \ \ + corres (ser \ cap_relation) + (cte_at slot) + (pspace_aligned' and pspace_distinct' and cte_at' cte and valid_mdb') + (derive_cap slot c) (deriveCap cte c')" + apply (unfold derive_cap_def deriveCap_def) + apply (case_tac c) + apply (simp_all add: returnOk_def Let_def is_zombie_def isCap_simps + split: sum.splits) + apply (rule_tac Q="\_ _. True" and Q'="\_ _. True" in + corres_initial_splitE [OF ensureNoChildren_corres]) + apply simp + apply clarsimp + apply wp+ + apply clarsimp + apply (rule corres_rel_imp) + apply (rule corres_guard_imp) + apply (rule arch_deriveCap_corres) + apply (clarsimp simp: o_def)+ + done + +crunch inv[wp]: deriveCap "P" + (simp: crunch_simps wp: crunch_wps arch_deriveCap_inv) + +lemma valid_NullCap: + "valid_cap' NullCap = \" + by (rule ext, simp add: valid_cap_simps' capAligned_def word_bits_def) + +lemma deriveCap_valid [wp]: + "\\s. s \' c\ + deriveCap slot c + \\rv s. s \' rv\,-" + apply (simp add: deriveCap_def split del: if_split) + apply (rule hoare_pre) + apply (wp arch_deriveCap_valid | simp add: o_def)+ + apply (simp add: valid_NullCap) + apply (clarsimp simp: isCap_simps) + done + +lemma lookup_cap_valid': + "\valid_objs'\ lookupCap t c \valid_cap'\, -" + apply (simp add: lookupCap_def lookupCapAndSlot_def + lookupSlotForThread_def split_def) + apply (wp | simp)+ + done + +lemma capAligned_Null [simp]: + "capAligned NullCap" + by (simp add: capAligned_def is_aligned_def word_bits_def) + +lemma cte_wp_at'_conjI: + "\ cte_wp_at' P p s; cte_wp_at' Q p s \ \ cte_wp_at' (\c. P c \ Q c) p s" + by (auto simp add: cte_wp_at'_def) + +crunch inv'[wp]: rangeCheck "P" + (simp: crunch_simps) + +lemma lookupSlotForCNodeOp_inv'[wp]: + "\P\ lookupSlotForCNodeOp src croot ptr depth \\rv. 
P\" + apply (simp add: lookupSlotForCNodeOp_def split_def unlessE_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp hoare_drop_imps) + apply simp + done + +(* FIXME: move *) +lemma loadWordUser_inv [wp]: + "\P\ loadWordUser p \\rv. P\" + unfolding loadWordUser_def + by (wp dmo_inv' loadWord_inv) + +lemma capTransferFromWords_inv: + "\P\ capTransferFromWords buffer \\_. P\" + apply (simp add: capTransferFromWords_def) + apply wp + done + +lemma lct_inv' [wp]: + "\P\ loadCapTransfer b \\rv. P\" + unfolding loadCapTransfer_def + apply (wp capTransferFromWords_inv) + done + +lemma maskCapRightsNull [simp]: + "maskCapRights R NullCap = NullCap" + by (simp add: maskCapRights_def isCap_defs) + +lemma maskCapRightsUntyped [simp]: + "maskCapRights R (UntypedCap d r n f) = UntypedCap d r n f" + by (simp add: maskCapRights_def isCap_defs Let_def) + +declare if_option_Some[simp] + +lemma lookup_cap_corres: + "\ epcptr = to_bl epcptr' \ \ + corres (lfr \ cap_relation) + (valid_objs and pspace_aligned and tcb_at thread) + (valid_objs' and pspace_distinct' and pspace_aligned' and tcb_at' thread) + (lookup_cap thread epcptr) + (lookupCap thread epcptr')" + apply (simp add: lookup_cap_def lookupCap_def lookupCapAndSlot_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF lookupSlotForThread_corres]) + apply (simp add: split_def) + apply (subst bindE_returnOk[symmetric]) + apply (rule corres_splitEE) + apply simp + apply (rule getSlotCap_corres, rule refl) + apply (rule corres_returnOk [of _ \ \]) + apply simp + apply wp+ + apply auto + done + +lemma ensureEmptySlot_corres: + "q = cte_map p \ + corres (ser \ dc) (invs and cte_at p) invs' + (ensure_empty p) (ensureEmptySlot q)" + apply (clarsimp simp add: ensure_empty_def ensureEmptySlot_def unlessE_whenE liftE_bindE) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_trivial) + apply (case_tac cap, auto simp add: whenE_def returnOk_def)[1] + apply wp+ + apply (clarsimp simp: invs_valid_objs invs_psp_aligned) + apply fastforce + done + +lemma ensureEmpty_inv[wp]: + "\P\ ensureEmptySlot p \\rv. P\" + by (simp add: ensureEmptySlot_def unlessE_whenE whenE_def | wp)+ + +lemma lookupSlotForCNodeOp_corres: + "\cap_relation c c'; ptr = to_bl ptr'\ + \ corres (ser \ (\cref cref'. cref' = cte_map cref)) + (valid_objs and pspace_aligned and valid_cap c) + (valid_objs' and pspace_aligned' and pspace_distinct' and valid_cap' c') + (lookup_slot_for_cnode_op s c ptr depth) + (lookupSlotForCNodeOp s c' ptr' depth)" + apply (simp add: lookup_slot_for_cnode_op_def lookupSlotForCNodeOp_def) + apply (clarsimp simp: lookup_failure_map_def split_def word_size) + apply (clarsimp simp: rangeCheck_def[unfolded fun_app_def unlessE_def] whenE_def + word_bits_def toInteger_nat fromIntegral_def fromInteger_nat) + apply (rule corres_lookup_error) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule rab_corres'; simp) + apply (rule corres_trivial) + apply (clarsimp simp: returnOk_def lookup_failure_map_def + split: list.split) + apply wp+ + apply clarsimp + apply clarsimp + done + +lemma ensureNoChildren_wp: + "\\s. (descendants_of' p (ctes_of s) \ {} \ Q s) + \ (descendants_of' p (ctes_of s) = {} \ P () s)\ + ensureNoChildren p + \P\,\\_. 
Q\" + apply (simp add: ensureNoChildren_def whenE_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def descendants_of'_def) + apply (intro conjI impI allI) + apply clarsimp + apply (drule spec, erule notE, rule subtree.direct_parent) + apply (simp add:mdb_next_rel_def mdb_next_def) + apply simp + apply (simp add: parentOf_def) + apply clarsimp + apply (erule (4) subtree_no_parent) + apply clarsimp + apply (erule (2) subtree_next_0) + done + +lemma deriveCap_derived: + "\\s. c'\ capability.NullCap \ cte_wp_at' (\cte. badge_derived' c' (cteCap cte) + \ capASID c' = capASID (cteCap cte) + \ cap_asid_base' c' = cap_asid_base' (cteCap cte) + \ cap_vptr' c' = cap_vptr' (cteCap cte)) slot s + \ valid_objs' s\ + deriveCap slot c' + \\rv s. rv \ NullCap \ + cte_wp_at' (is_derived' (ctes_of s) slot rv \ cteCap) slot s\, -" + unfolding deriveCap_def badge_derived'_def + apply (cases c'; (solves \(wp ensureNoChildren_wp | simp add: isCap_simps Let_def + | clarsimp simp: badge_derived'_def + | erule cte_wp_at_weakenE' disjE + | rule is_derived'_def[THEN meta_eq_to_obj_eq, THEN iffD2])+\)?) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; + simp add: AARCH64_H.deriveCap_def Let_def isCap_simps + split: if_split, + safe) + apply ((wp throwError_validE_R undefined_validE_R + | clarsimp simp: isCap_simps capAligned_def cte_wp_at_ctes_of + | drule valid_capAligned + | drule(1) bits_low_high_eq + | simp add: capBadge_def sameObjectAs_def + is_derived'_def isCap_simps up_ucast_inj_eq + is_aligned_no_overflow badge_derived'_def + capAligned_def capASID_def + | clarsimp split: option.split_asm)+) + done + +lemma untyped_derived_eq_ArchObjectCap: + "untyped_derived_eq (capability.ArchObjectCap cap) = \" + by (rule ext, simp add: untyped_derived_eq_def isCap_simps) + +lemma arch_deriveCap_untyped_derived[wp]: + "\\s. cte_wp_at' (\cte. untyped_derived_eq c' (cteCap cte)) slot s\ + AARCH64_H.deriveCap slot (capCap c') + \\rv s. cte_wp_at' (untyped_derived_eq rv o cteCap) slot s\, -" + apply (wpsimp simp: AARCH64_H.deriveCap_def Let_def untyped_derived_eq_ArchObjectCap split_del: if_split + wp: undefined_validE_R) + apply(clarsimp simp: cte_wp_at_ctes_of isCap_simps untyped_derived_eq_def) + by (case_tac "capCap c'"; fastforce) + +lemma deriveCap_untyped_derived: + "\\s. cte_wp_at' (\cte. untyped_derived_eq c' (cteCap cte)) slot s\ + deriveCap slot c' + \\rv s. cte_wp_at' (untyped_derived_eq rv o cteCap) slot s\, -" + apply (simp add: deriveCap_def split del: if_split) + apply (rule hoare_pre) + apply (wp arch_deriveCap_inv | simp add: o_def untyped_derived_eq_ArchObjectCap)+ + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps untyped_derived_eq_def) + done + +lemma setCTE_corres: + "cap_relation cap (cteCap cte) \ + corres_underlying {(s, s'). 
pspace_relations (ekheap (s)) (kheap s) (ksPSpace s')} False True dc + (pspace_distinct and pspace_aligned and valid_objs and cte_at p) + (pspace_aligned' and pspace_distinct' and cte_at' (cte_map p)) + (set_cap cap p) + (setCTE (cte_map p) cte)" + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply simp + apply clarsimp + apply (drule(8) set_cap_not_quite_corres_prequel) + apply simp + apply fastforce + done + +locale_abbrev + "pt_types_of s \ pts_of s ||> pt_type" + +(* oldish-style, but still needed for the heap-only form below *) +definition pt_types_of_heap :: "(obj_ref \ Structures_A.kernel_object) \ obj_ref \ pt_type" where + "pt_types_of_heap h \ h |> aobj_of |> pt_of ||> pt_type" + +lemma pt_types_of_heap_eq: + "pt_types_of_heap (kheap s) = pt_types_of s" + by (simp add: pt_types_of_heap_def) + +(* FIXME: move to StateRelation *) +lemma ghost_relation_of_heap: + "ghost_relation h ups cns pt_types \ + ups_of_heap h = ups \ cns_of_heap h = cns \ pt_types_of_heap h = pt_types" + apply (rule iffI) + apply (rule conjI) + apply (rule ext) + apply (clarsimp simp add: ghost_relation_def ups_of_heap_def) + apply (drule_tac x=x in spec) + apply (auto simp: ghost_relation_def ups_of_heap_def + split: option.splits Structures_A.kernel_object.splits + arch_kernel_obj.splits)[1] + subgoal for x dev sz + by (drule_tac x = sz in spec,simp) + apply (rule conjI) + apply (rule ext) + apply (clarsimp simp add: ghost_relation_def cns_of_heap_def) + apply (drule_tac x=x in spec)+ + apply (rule ccontr) + apply (simp split: option.splits Structures_A.kernel_object.splits + arch_kernel_obj.splits)[1] + apply (simp split: if_split_asm) + apply force + apply (drule not_sym) + apply clarsimp + apply (erule_tac x=y in allE) + apply simp + apply (rule ext) + apply (clarsimp simp: ghost_relation_def cns_of_heap_def) + apply (thin_tac P for P) \ \DataPages\ + apply (thin_tac P for P) \ \CNodes\ + apply (simp add: pt_types_of_heap_def) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (clarsimp?, rule conjI, clarsimp, rule sym, rule ccontr, force)+ + apply force + apply (auto simp: ghost_relation_def ups_of_heap_def cns_of_heap_def pt_types_of_heap_def in_omonad + split: option.splits Structures_A.kernel_object.splits + arch_kernel_obj.splits if_split_asm)[1] + done + +lemma corres_caps_decomposition: + assumes x: "corres_underlying {(s, s'). pspace_relations (ekheap (s)) (kheap s) (ksPSpace s')} False True r P P' f g" + assumes u: "\P. \\s. P (new_caps s)\ f \\rv s. P (caps_of_state s)\" + "\P. \\s. P (new_mdb s)\ f \\rv s. P (cdt s)\" + "\P. \\s. P (new_list s)\ f \\rv s. P (cdt_list (s))\" + "\P. \\s. P (new_rvk s)\ f \\rv s. P (is_original_cap s)\" + "\P. \\s. P (new_ctes s)\ g \\rv s. P (ctes_of s)\" + "\P. \\s. P (new_ms s)\ f \\rv s. P (machine_state s)\" + "\P. \\s. P (new_ms' s)\ g \\rv s. P (ksMachineState s)\" + "\P. \\s. P (new_wuc s)\ f \\rv s. P (work_units_completed s)\" + "\P. \\s. P (new_wuc' s)\ g \\rv s. P (ksWorkUnitsCompleted s)\" + "\P. \\s. P (new_ct s)\ f \\rv s. P (cur_thread s)\" + "\P. \\s. P (new_ct' s)\ g \\rv s. P (ksCurThread s)\" + "\P. \\s. P (new_as s)\ f \\rv s. P (arch_state s)\" + "\P. \\s. P (new_as' s)\ g \\rv s. P (ksArchState s)\" + "\P. \\s. P (new_id s)\ f \\rv s. P (idle_thread s)\" + "\P. \\s. P (new_id' s)\ g \\rv s. P (ksIdleThread s)\" + "\P. \\s. P (new_irqn s)\ f \\rv s. P (interrupt_irq_node s)\" + "\P. \\s. P (new_irqs s)\ f \\rv s. P (interrupt_states s)\" + "\P. \\s. P (new_irqs' s)\ g \\rv s. P (ksInterruptState s)\" + "\P. 
\\s. P (new_ups s)\ f \\rv s. P (ups_of_heap (kheap s))\" + "\P. \\s. P (new_ups' s)\ g \\rv s. P (gsUserPages s)\" + "\P. \\s. P (new_cns s)\ f \\rv s. P (cns_of_heap (kheap s))\" + "\P. \\s. P (new_cns' s)\ g \\rv s. P (gsCNodes s)\" + "\P. \\s. P (new_pt_types s)\ f \\rv s. P (pt_types_of s)\" + "\P. \\s. P (new_queues s)\ f \\rv s. P (ready_queues s)\" + "\P. \\s. P (new_action s)\ f \\rv s. P (scheduler_action s)\" + "\P. \\s. P (new_sa' s)\ g \\rv s. P (ksSchedulerAction s)\" + "\P. \\s. P (new_rqs' s)\ g \\rv s. P (ksReadyQueues s)\" + "\P. \\s. P (new_di s)\ f \\rv s. P (domain_index s)\" + "\P. \\s. P (new_dl s)\ f \\rv s. P (domain_list s)\" + "\P. \\s. P (new_cd s)\ f \\rv s. P (cur_domain s)\" + "\P. \\s. P (new_dt s)\ f \\rv s. P (domain_time s)\" + "\P. \\s. P (new_dsi' s)\ g \\rv s. P (ksDomScheduleIdx s)\" + "\P. \\s. P (new_ds' s)\ g \\rv s. P (ksDomSchedule s)\" + "\P. \\s. P (new_cd' s)\ g \\rv s. P (ksCurDomain s)\" + "\P. \\s. P (new_dt' s)\ g \\rv s. P (ksDomainTime s)\" + assumes z: "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ cdt_relation ((\) None \ new_caps s) (new_mdb s) (new_ctes s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ cdt_list_relation (new_list s) (new_mdb s) (new_ctes s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ sched_act_relation (new_action s) (new_sa' s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ ready_queues_relation (new_queues s) (new_rqs' s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ revokable_relation (new_rvk s) (null_filter (new_caps s)) (new_ctes s')" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ (new_as s, new_as' s') \ arch_state_relation + \ interrupt_state_relation (new_irqn s) (new_irqs s) (new_irqs' s') + \ new_ct s = new_ct' s' \ new_id s = new_id' s' + \ new_ms s = new_ms' s' \ new_di s = new_dsi' s' + \ new_dl s = new_ds' s' \ new_cd s = new_cd' s' \ new_dt s = new_dt' s' \ new_wuc s = new_wuc' s'" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ new_ups s = new_ups' s'" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ new_cns s = new_cns' s'" + "\s s'. \ P s; P' s'; (s, s') \ state_relation \ + \ new_pt_types s = gsPTTypes (new_as' s')" + shows "corres r P P' f g" +proof - + have all_ext: "\f f'. (\p. f p = f' p) = (f = f')" + by fastforce + have mdb_wp': + "\ctes. \\s. cdt_relation ((\) None \ new_caps s) (new_mdb s) ctes\ + f + \\rv s. \m ca. (\p. ca p = ((\) None \ caps_of_state s) p) \ m = cdt s + \ cdt_relation ca m ctes\" + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift u) + apply (subst all_ext) + apply (simp add: o_def) + done + note mdb_wp = mdb_wp' [simplified all_ext simp_thms] + have list_wp': + "\ctes. \\s. cdt_list_relation (new_list s) (new_mdb s) ctes\ + f + \\rv s. \m t. t = cdt_list s \ m = cdt s + \ cdt_list_relation t m ctes\" + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift u) + apply (simp add: o_def) + done + note list_wp = list_wp' [simplified all_ext simp_thms] + have rvk_wp': + "\ctes. \\s. revokable_relation (new_rvk s) (null_filter (new_caps s)) ctes\ + f + \\rv s. revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) ctes\" + unfolding revokable_relation_def + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift hoare_vcg_disj_lift u) + done + have exs_wp': + "\ctes. \\s. revokable_relation (new_rvk s) (null_filter (new_caps s)) ctes\ + f + \\rv s. 
revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) ctes\" + unfolding revokable_relation_def + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift hoare_vcg_disj_lift u) + done + note rvk_wp = rvk_wp' [simplified all_ext simp_thms] + have swp_cte_at: + "\s. swp cte_at s = ((\) None \ caps_of_state s)" + by (rule ext, simp, subst neq_commute, simp add: cte_wp_at_caps_of_state) + have abs_irq_together': + "\P. \\s. P (new_irqn s) (new_irqs s)\ f + \\rv s. \irn. interrupt_irq_node s = irn \ P irn (interrupt_states s)\" + by (wp hoare_vcg_ex_lift u, simp) + note abs_irq_together = abs_irq_together'[simplified] + show ?thesis + unfolding state_relation_def swp_cte_at + apply (subst conj_assoc[symmetric]) + apply (subst pspace_relations_def[symmetric]) + apply (rule corres_underlying_decomposition [OF x]) + apply (simp add: ghost_relation_of_heap) + apply (wpsimp wp: hoare_vcg_conj_lift mdb_wp rvk_wp list_wp u abs_irq_together simp: pt_types_of_heap_eq)+ + apply (intro z[simplified o_def] conjI | simp add: state_relation_def pspace_relations_def swp_cte_at + | (clarsimp, drule (1) z(6), simp add: state_relation_def pspace_relations_def swp_cte_at))+ + done +qed + +lemma getCTE_symb_exec_r: + "corres_underlying sr False nf' dc \ (cte_at' p) (return ()) (getCTE p)" + apply (rule corres_no_failI, wp) + apply (clarsimp simp: return_def + elim!: use_valid [OF _ getCTE_inv]) + done + +lemma updateMDB_symb_exec_r: + "corres_underlying {(s, s'). pspace_relations (ekheap s) (kheap s) (ksPSpace s')} False nf' dc + \ (pspace_aligned' and pspace_distinct' and (no_0 \ ctes_of) and (\s. p \ 0 \ cte_at' p s)) + (return ()) (updateMDB p m)" + using no_fail_updateMDB [of p m] + apply (clarsimp simp: corres_underlying_def return_def no_fail_def) + apply (drule(1) updateMDB_the_lot, simp, assumption+) + apply clarsimp + done + +lemma updateMDB_ctes_of_cases: + "\\s. P (modify_map (ctes_of s) p (if p = 0 then id else cteMDBNode_update f))\ + updateMDB p f \\rv s. P (ctes_of s)\" + apply (simp add: updateMDB_def split del: if_split) + apply (rule hoare_pre, wp getCTE_ctes_of) + apply (clarsimp simp: modify_map_def map_option_case + split: option.split + | rule conjI ext | erule rsubst[where P=P])+ + apply (case_tac y, simp) + done + +lemma setCTE_state_bits[wp]: + "\\s. P (ksMachineState s)\ setCTE p v \\rv s. P (ksMachineState s)\" + "\\s. Q (ksIdleThread s)\ setCTE p v \\rv s. Q (ksIdleThread s)\" + "\\s. R (ksArchState s)\ setCTE p v \\rv s. R (ksArchState s)\" + "\\s. S (ksInterruptState s)\ setCTE p v \\rv s. S (ksInterruptState s)\" + apply (simp_all add: setCTE_def setObject_def split_def) + apply (wp updateObject_cte_inv | simp)+ + done + +lemma cte_map_eq_subst: + "\ cte_at p s; cte_at p' s; valid_objs s; pspace_aligned s; pspace_distinct s \ + \ (cte_map p = cte_map p') = (p = p')" + by (fastforce elim!: cte_map_inj_eq) + +lemma revokable_relation_simp: + "\ (s, s') \ state_relation; null_filter (caps_of_state s) p = Some c; ctes_of s' (cte_map p) = Some (CTE cap node) \ + \ mdbRevocable node = is_original_cap s p" + by (cases p, clarsimp simp: state_relation_def revokable_relation_def) + +crunches setCTE + for gsUserPages[wp]: "\s. P (gsUserPages s)" + and gsCNodes[wp]: "\s. P (gsCNodes s)" + and domain_time[wp]: "\s. P (ksDomainTime s)" + and work_units_completed[wp]: "\s. P (ksWorkUnitsCompleted s)" + (simp: setObject_def wp: updateObject_cte_inv) + +lemma set_original_symb_exec_l': + "corres_underlying {(s, s'). 
f (ekheap s) (kheap s) s'} False nf' dc P P' (set_original p b) (return x)" + by (simp add: corres_underlying_def return_def set_original_def in_monad Bex_def) + +lemma create_reply_master_corres: + "\ sl' = cte_map sl ; AllowGrant \ rights \ \ + corres dc + (cte_wp_at ((=) cap.NullCap) sl and valid_pspace and valid_mdb and valid_list) + (cte_wp_at' (\c. cteCap c = NullCap \ mdbPrev (cteMDBNode c) = 0) sl' + and valid_mdb' and valid_pspace') + (do + y \ set_original sl True; + set_cap (cap.ReplyCap thread True rights) sl + od) + (setCTE sl' (CTE (capability.ReplyCap thread True True) initMDBNode))" + apply clarsimp + apply (rule corres_caps_decomposition) + defer + apply (wp|simp)+ + apply (clarsimp simp: o_def cdt_relation_def cte_wp_at_ctes_of + split del: if_split cong: if_cong simp del: id_apply) + apply (case_tac cte, clarsimp) + apply (fold fun_upd_def) + apply (subst descendants_of_Null_update') + apply fastforce + apply fastforce + apply assumption + apply assumption + apply (simp add: nullPointer_def) + apply (subgoal_tac "cte_at (a, b) s") + prefer 2 + apply (drule not_sym, clarsimp simp: cte_wp_at_caps_of_state + split: if_split_asm) + apply (simp add: state_relation_def cdt_relation_def) + apply (clarsimp simp: o_def cdt_list_relation_def cte_wp_at_ctes_of + split del: if_split cong: if_cong simp del: id_apply) + apply (case_tac cte, clarsimp) + apply (clarsimp simp: state_relation_def cdt_list_relation_def) + apply (simp split: if_split_asm) + apply (erule_tac x=a in allE, erule_tac x=b in allE) + apply clarsimp + apply(case_tac "next_slot (a, b) (cdt_list s) (cdt s)") + apply(simp) + apply(simp) + apply(fastforce simp: valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply (clarsimp simp: state_relation_def) + apply (clarsimp simp: state_relation_def) + apply (clarsimp simp add: revokable_relation_def cte_wp_at_ctes_of + split del: if_split) + apply simp + apply (rule conjI) + apply (clarsimp simp: initMDBNode_def) + apply clarsimp + apply (subgoal_tac "null_filter (caps_of_state s) (a, b) \ None") + prefer 2 + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state + split: if_split_asm) + apply (subgoal_tac "cte_at (a,b) s") + prefer 2 + apply clarsimp + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_cte_at) + apply (clarsimp split: if_split_asm cong: conj_cong + simp: cte_map_eq_subst revokable_relation_simp + cte_wp_at_cte_at valid_pspace_def) + apply (clarsimp simp: state_relation_def) + apply (clarsimp elim!: state_relationE simp: ghost_relation_of_heap pt_types_of_heap_eq o_def)+ + apply (rule corres_guard_imp) + apply (rule corres_underlying_symb_exec_l [OF set_original_symb_exec_l']) + apply (rule setCTE_corres) + apply simp + apply wp + apply (clarsimp simp: cte_wp_at_cte_at valid_pspace_def) + apply (clarsimp simp: valid_pspace'_def cte_wp_at'_def) + done + +lemma cte_map_nat_to_cref: + "\ n < 2 ^ b; b < word_bits \ \ + cte_map (p, nat_to_cref b n) = p + (of_nat n * 2^cte_level_bits)" + apply (clarsimp simp: cte_map_def nat_to_cref_def shiftl_t2n + dest!: less_is_drop_replicate) + apply (subst mult_ac) + apply (rule arg_cong [where f="\x. x * 2^cte_level_bits"]) + apply (subst of_drop_to_bl) + apply (simp add: word_bits_def) + apply (subst mask_eq_iff_w2p) + apply (simp add: word_size) + apply (simp add: word_less_nat_alt word_size word_bits_def) + apply (rule order_le_less_trans; assumption?) 
+ apply (subst unat_of_nat) + apply (rule mod_less_eq_dividend) + done + +lemma valid_nullcapsE: + "\ valid_nullcaps m; m p = Some (CTE NullCap n); + \ mdbPrev n = 0; mdbNext n = 0 \ \ P \ + \ P" + by (fastforce simp: valid_nullcaps_def nullMDBNode_def nullPointer_def) + +lemma valid_nullcaps_prev: + "\ m (mdbPrev n) = Some (CTE NullCap n'); m p = Some (CTE c n); + no_0 m; valid_dlist m; valid_nullcaps m \ \ False" + apply (erule (1) valid_nullcapsE) + apply (erule_tac p=p in valid_dlistEp, assumption) + apply clarsimp + apply clarsimp + done + +lemma valid_nullcaps_next: + "\ m (mdbNext n) = Some (CTE NullCap n'); m p = Some (CTE c n); + no_0 m; valid_dlist m; valid_nullcaps m \ \ False" + apply (erule (1) valid_nullcapsE) + apply (erule_tac p=p in valid_dlistEn, assumption) + apply clarsimp + apply clarsimp + done + +defs noReplyCapsFor_def: + "noReplyCapsFor \ \t s. \sl m r. \ cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) sl s" + +lemma pspace_relation_no_reply_caps: + assumes pspace: "pspace_relation (kheap s) (ksPSpace s')" + and invs: "invs s" + and tcb: "tcb_at t s" + and m_cte': "cte_wp_at' ((=) cte) sl' s'" + and m_null: "cteCap cte = capability.NullCap" + and m_sl: "sl' = cte_map (t, tcb_cnode_index 2)" + shows "noReplyCapsFor t s'" +proof - + from tcb have m_cte: "cte_at (t, tcb_cnode_index 2) s" + by (clarsimp elim!: tcb_at_cte_at) + have m_cte_null: + "cte_wp_at (\c. c = cap.NullCap) (t, tcb_cnode_index 2) s" + using pspace invs + apply (frule_tac pspace_relation_cte_wp_atI') + apply (rule assms) + apply clarsimp + apply (clarsimp simp: m_sl) + apply (frule cte_map_inj_eq) + apply (rule m_cte) + apply (erule cte_wp_cte_at) + apply clarsimp+ + apply (clarsimp elim!: cte_wp_at_weakenE simp: m_null) + done + have no_reply_caps: + "\sl m r. \ cte_wp_at (\c. c = cap.ReplyCap t m r) sl s" + by (rule no_reply_caps_for_thread [OF invs tcb m_cte_null]) + hence noReplyCaps: + "\sl m r. \ cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) sl s'" + apply (intro allI) + apply (clarsimp simp: cte_wp_at_neg2 cte_wp_at_ctes_of simp del: split_paired_All) + apply (frule pspace_relation_cte_wp_atI [OF pspace _ invs_valid_objs [OF invs]]) + apply (clarsimp simp: cte_wp_at_neg2 simp del: split_paired_All) + apply (drule_tac x="(a, b)" in spec) + apply (clarsimp simp: cte_wp_cte_at cte_wp_at_caps_of_state) + apply (case_tac c, simp_all) + apply fastforce + done + thus ?thesis + by (simp add: noReplyCapsFor_def) +qed + +lemma setupReplyMaster_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + (setup_reply_master t) (setupReplyMaster t)" + apply (simp add: setupReplyMaster_def setup_reply_master_def) + apply (simp add: locateSlot_conv tcbReplySlot_def objBits_def objBitsKO_def) + apply (simp add: nullMDBNode_def, fold initMDBNode_def) + apply (rule_tac F="t + 2*2^cte_level_bits = cte_map (t, tcb_cnode_index 2)" in corres_req) + apply (clarsimp simp: tcb_cnode_index_def2 cte_map_nat_to_cref word_bits_def cte_level_bits_def) + apply (clarsimp simp: cte_level_bits_def) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_when) + apply fastforce + apply (rule_tac P'="einvs and tcb_at t" in corres_stateAssert_implied) + apply (rule create_reply_master_corres; simp) + apply (subgoal_tac "\cte. 
cte_wp_at' ((=) cte) (cte_map (t, tcb_cnode_index 2)) s' + \ cteCap cte = capability.NullCap") + apply (fastforce dest: pspace_relation_no_reply_caps + state_relation_pspace_relation) + apply (clarsimp simp: cte_map_def tcb_cnode_index_def cte_wp_at_ctes_of) + apply (rule_tac Q="\rv. einvs and tcb_at t and + cte_wp_at ((=) rv) (t, tcb_cnode_index 2)" + in hoare_strengthen_post) + apply (wp hoare_drop_imps get_cap_wp) + apply (clarsimp simp: invs_def valid_state_def elim!: cte_wp_at_weakenE) + apply (rule_tac Q="\rv. valid_pspace' and valid_mdb' and + cte_wp_at' ((=) rv) (cte_map (t, tcb_cnode_index 2))" + in hoare_strengthen_post) + apply (wp hoare_drop_imps getCTE_wp') + apply (rename_tac rv s) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (case_tac rv, fastforce elim: valid_nullcapsE) + apply (fastforce elim: tcb_at_cte_at) + apply (clarsimp simp: cte_at'_obj_at' tcb_cte_cases_def cte_map_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + done + +crunch tcb'[wp]: setupReplyMaster "tcb_at' t" + (wp: crunch_wps) + +crunch idle'[wp]: setupReplyMaster "valid_idle'" + +(* Levity: added (20090126 19:32:14) *) +declare stateAssert_wp [wp] + +lemma setupReplyMaster_valid_mdb: + "slot = t + 2 ^ objBits (undefined :: cte) * tcbReplySlot \ + \valid_mdb' and valid_pspace' and tcb_at' t\ + setupReplyMaster t + \\rv. valid_mdb'\" + apply (clarsimp simp: setupReplyMaster_def locateSlot_conv + nullMDBNode_def) + apply (fold initMDBNode_def) + apply (wp setCTE_valid_mdb getCTE_wp') + apply clarsimp + apply (intro conjI) + apply (case_tac cte) + apply (fastforce simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def + no_mdb_def + elim: valid_nullcapsE) + apply (frule obj_at_aligned') + apply (simp add: valid_cap'_def capAligned_def + objBits_simps' word_bits_def)+ + apply (clarsimp simp: valid_pspace'_def) + apply (clarsimp simp: caps_no_overlap'_def capRange_def) + apply (clarsimp simp: fresh_virt_cap_class_def + elim!: ranE) + apply (clarsimp simp add: noReplyCapsFor_def cte_wp_at_ctes_of) + apply (case_tac x) + apply (rename_tac capability mdbnode) + apply (case_tac capability; simp) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; simp) + apply fastforce + done + +lemma setupReplyMaster_valid_objs [wp]: + "\ valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' t\ + setupReplyMaster t + \\_. valid_objs'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (wp setCTE_valid_objs getCTE_wp') + apply (clarsimp) + apply (frule obj_at_aligned') + apply (simp add: valid_cap'_def capAligned_def + objBits_simps' word_bits_def)+ + done + +lemma setupReplyMaster_wps[wp]: + "\pspace_aligned'\ setupReplyMaster t \\rv. pspace_aligned'\" + "\pspace_distinct'\ setupReplyMaster t \\rv. pspace_distinct'\" + "slot = cte_map (t, tcb_cnode_index 2) \ + \\s. P ((cteCaps_of s)(slot \ (capability.ReplyCap t True True))) \ P (cteCaps_of s)\ + setupReplyMaster t + \\rv s. P (cteCaps_of s)\" + apply (simp_all add: setupReplyMaster_def locateSlot_conv) + apply (wp getCTE_wp | simp add: o_def cte_wp_at_ctes_of)+ + apply clarsimp + apply (rule_tac x=cte in exI) + apply (clarsimp simp: tcbReplySlot_def objBits_simps' fun_upd_def word_bits_def + tcb_cnode_index_def2 cte_map_nat_to_cref cte_level_bits_def) + done + +crunch no_0_obj'[wp]: setupReplyMaster no_0_obj' + (wp: crunch_wps simp: crunch_simps) + +lemma setupReplyMaster_valid_pspace': + "\valid_pspace' and tcb_at' t\ + setupReplyMaster t + \\rv. 
valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (wp setupReplyMaster_valid_mdb) + apply (simp_all add: valid_pspace'_def) + done + +lemma setupReplyMaster_ifunsafe'[wp]: + "slot = t + 2 ^ objBits (undefined :: cte) * tcbReplySlot \ + \if_unsafe_then_cap' and ex_cte_cap_to' slot\ + setupReplyMaster t + \\rv s. if_unsafe_then_cap' s\" + apply (simp add: ifunsafe'_def3 setupReplyMaster_def locateSlot_conv) + apply (wp getCTE_wp') + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of cteCaps_of_def + cte_level_bits_def objBits_simps') + apply (drule_tac x=crefa in spec) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=cref in exI, fastforce) + apply clarsimp + apply (rule_tac x=cref' in exI, fastforce) + done + + +lemma setupReplyMaster_iflive'[wp]: + "\if_live_then_nonz_cap'\ setupReplyMaster t \\rv. if_live_then_nonz_cap'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (wp setCTE_iflive' getCTE_wp') + apply (clarsimp elim!: cte_wp_at_weakenE') + done + +declare azobj_refs'_only_vcpu[simp] + +lemma setupReplyMaster_global_refs[wp]: + "\\s. valid_global_refs' s \ thread \ global_refs' s \ tcb_at' thread s + \ ex_nonz_cap_to' thread s \ valid_objs' s\ + setupReplyMaster thread + \\rv. valid_global_refs'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (wp getCTE_wp') + apply (clarsimp simp: capRange_def cte_wp_at_ctes_of objBits_simps) + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rename_tac "prev_cte") + apply (case_tac prev_cte, simp) + apply (frule(1) ctes_of_valid_cap') + apply (drule(1) valid_global_refsD_with_objSize)+ + apply (clarsimp simp: valid_cap'_def objBits_simps' obj_at'_def projectKOs + split: capability.split_asm) + done + +crunch valid_arch'[wp]: setupReplyMaster "valid_arch_state'" + (wp: crunch_wps simp: crunch_simps) + +lemma ex_nonz_tcb_cte_caps': + "\ex_nonz_cap_to' t s; tcb_at' t s; valid_objs' s; sl \ dom tcb_cte_cases\ \ + ex_cte_cap_to' (t + sl) s" + apply (clarsimp simp: ex_nonz_cap_to'_def ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (subgoal_tac "s \' cteCap cte") + apply (rule_tac x=cref in exI, rule_tac x=cte in exI) + apply (clarsimp simp: valid_cap'_def obj_at'_def dom_def typ_at_to_obj_at_arches + split: cte.split_asm capability.split_asm) + apply (case_tac cte) + apply (clarsimp simp: ctes_of_valid_cap') + done + +lemma ex_nonz_cap_not_global': + "\ex_nonz_cap_to' t s; valid_objs' s; valid_global_refs' s\ \ + t \ global_refs' s" + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (frule(1) valid_global_refsD') + apply clarsimp + apply (drule orthD1, erule (1) subsetD) + apply (subgoal_tac "s \' cteCap cte") + apply (fastforce simp: valid_cap'_def capRange_def capAligned_def + is_aligned_no_overflow + split: cte.split_asm capability.split_asm) + apply (case_tac cte) + apply (clarsimp simp: ctes_of_valid_cap') + done + +crunch typ_at'[wp]: setupReplyMaster "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: crunch_simps) + +lemma setCTE_irq_handlers': + "\\s. valid_irq_handlers' s \ (\irq. cteCap cte = IRQHandlerCap irq \ irq_issued' irq s)\ + setCTE ptr cte + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def cteCaps_of_def irq_issued'_def) + apply (wp hoare_use_eq [where f=ksInterruptState, OF setCTE_ksInterruptState setCTE_ctes_of_wp]) + apply (auto simp: ran_def) + done + +lemma setupReplyMaster_irq_handlers'[wp]: + "\valid_irq_handlers'\ setupReplyMaster t \\rv. 
valid_irq_handlers'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (wp setCTE_irq_handlers' getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +crunches setupReplyMaster + for irq_states'[wp]: valid_irq_states' + and irqs_masked' [wp]: irqs_masked' + and pred_tcb_at' [wp]: "pred_tcb_at' proj P t" + and ksMachine[wp]: "\s. P (ksMachineState s)" + and pspace_domain_valid[wp]: "pspace_domain_valid" + and ct_not_inQ[wp]: "ct_not_inQ" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksIdlethread[wp]: "\s. P (ksIdleThread s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and scheduler_action[wp]: "\s. P (ksSchedulerAction s)" + and obj_at'_inQ[wp]: "obj_at' (inQ d p) t" + and tcbDomain_inv[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t" + and tcbPriority_inv[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t" + and ready_queues[wp]: "\s. P (ksReadyQueues s)" + and ready_queuesL1[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and ready_queuesL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + (wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift) + +lemma setupReplyMaster_vms'[wp]: + "\valid_machine_state'\ setupReplyMaster t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def ) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift) + apply wp+ + done + +lemma setupReplyMaster_urz[wp]: + "\untyped_ranges_zero' and valid_mdb' and valid_objs'\ + setupReplyMaster t + \\rv. untyped_ranges_zero'\" + apply (simp add: setupReplyMaster_def locateSlot_conv) + apply (rule hoare_pre) + apply (wp untyped_ranges_zero_lift getCTE_wp' | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) + apply (subst untyped_ranges_zero_fun_upd, assumption, simp_all) + apply (clarsimp simp: cteCaps_of_def untypedZeroRange_def Let_def isCap_simps) + done + +lemma setupReplyMaster_invs'[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + setupReplyMaster t + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp setupReplyMaster_valid_pspace' sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift + valid_queues_lift cur_tcb_lift valid_queues_lift' hoare_vcg_disj_lift + valid_irq_node_lift | simp)+ + apply (clarsimp simp: ex_nonz_tcb_cte_caps' valid_pspace'_def + objBits_simps' tcbReplySlot_def + ex_nonz_cap_not_global' dom_def) + done + +lemma setupReplyMaster_cte_wp_at'': + "\cte_wp_at' (\cte. P (cteCap cte)) p and K (\ P NullCap)\ + setupReplyMaster t + \\rv s. cte_wp_at' (P \ cteCap) p s\" + apply (simp add: setupReplyMaster_def locateSlot_conv tree_cte_cteCap_eq) + apply (wp getCTE_wp') + apply (fastforce simp: cte_wp_at_ctes_of cteCaps_of_def) + done + +lemmas setupReplyMaster_cte_wp_at' = setupReplyMaster_cte_wp_at''[unfolded o_def] + +lemma setupReplyMaster_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setupReplyMaster t \\rv. 
ex_nonz_cap_to' p\" + apply (simp add: ex_nonz_cap_to'_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_ex_lift setupReplyMaster_cte_wp_at') + apply clarsimp + done + +definition + is_arch_update' :: "capability \ cte \ bool" +where + "is_arch_update' cap cte \ isArchObjectCap cap \ capMasterCap cap = capMasterCap (cteCap cte)" + +lemma mdb_next_pres: + "\ m p = Some v; + mdbNext (cteMDBNode x) = mdbNext (cteMDBNode v) \ \ + m(p \ x) \ a \ b = m \ a \ b" + by (simp add: mdb_next_unfold) + +lemma mdb_next_trans_next_pres: + "\ m p = Some v; mdbNext (cteMDBNode x) = mdbNext (cteMDBNode v) \ \ + m(p \ x) \ a \\<^sup>+ b = m \ a \\<^sup>+ b" + apply (rule iffI) + apply (erule trancl_induct) + apply (fastforce simp: mdb_next_pres) + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (fastforce simp: mdb_next_pres) + apply (erule trancl_induct) + apply (rule r_into_trancl) + apply (simp add: mdb_next_pres del: fun_upd_apply) + apply (erule trancl_trans) + apply (fastforce simp: mdb_next_pres simp del: fun_upd_apply) + done + +lemma mdb_next_rtrans_next_pres: + "\ m p = Some v; mdbNext (cteMDBNode x) = mdbNext (cteMDBNode v) \ \ + m(p \ x) \ a \\<^sup>* b = m \ a \\<^sup>* b" + by (safe; clarsimp simp: mdb_next_trans_next_pres + dest!: rtrancl_eq_or_trancl[THEN iffD1] + intro!: rtrancl_eq_or_trancl[THEN iffD2] mdb_next_trans_next_pres[THEN iffD1]) + + +lemma arch_update_descendants': + "\ is_arch_update' cap oldcte; m p = Some oldcte\ \ + descendants_of' x (m(p \ cteCap_update (\_. cap) oldcte)) = descendants_of' x m" + apply (erule same_master_descendants) + apply (auto simp: is_arch_update'_def isCap_simps) + done + +lemma arch_update_setCTE_mdb: + "\cte_wp_at' (is_arch_update' cap) p and cte_wp_at' ((=) oldcte) p and valid_mdb'\ + setCTE p (cteCap_update (\_. cap) oldcte) + \\rv. 
valid_mdb'\" + apply (simp add: valid_mdb'_def) + apply wp + apply (clarsimp simp: valid_mdb_ctes_def cte_wp_at_ctes_of simp del: fun_upd_apply) + apply (rule conjI) + apply (rule valid_dlistI) + apply (fastforce split: if_split_asm elim: valid_dlistE) + apply (fastforce split: if_split_asm elim: valid_dlistE) + apply (rule conjI) + apply (clarsimp simp: no_0_def) + apply (rule conjI) + apply (simp add: mdb_chain_0_def mdb_next_trans_next_pres) + apply blast + apply (rule conjI) + apply (cases oldcte) + apply (clarsimp simp: valid_badges_def mdb_next_pres simp del: fun_upd_apply) + apply (clarsimp simp: is_arch_update'_def) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: isCap_simps) + prefer 2 + subgoal by fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p in allE) + apply simp + apply (simp add: sameRegionAs_def3) + apply (rule conjI) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply (rule conjI) + apply (clarsimp simp: caps_contained'_def simp del: fun_upd_apply) + apply (cases oldcte) + apply (clarsimp simp: is_arch_update'_def) + apply (frule capMaster_untypedRange) + apply (frule capMaster_capRange) + apply (drule sym [where s="capMasterCap cap"]) + apply (frule masterCap.intro) + apply (clarsimp simp: masterCap.isUntypedCap split: if_split_asm) + subgoal by fastforce + subgoal by fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p in allE) + apply fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p' in allE) + subgoal by fastforce + apply (rule conjI) + apply (cases oldcte) + apply (clarsimp simp: is_arch_update'_def) + apply (clarsimp simp: mdb_chunked_def mdb_next_trans_next_pres simp del: fun_upd_apply) + apply (drule sym [where s="capMasterCap cap"]) + apply (frule masterCap.intro) + apply (clarsimp split: if_split_asm) + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (clarsimp simp: masterCap.sameRegionAs) + apply (simp add: masterCap.sameRegionAs is_chunk_def mdb_next_trans_next_pres + mdb_next_rtrans_next_pres) + subgoal by fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p in allE) + apply (clarsimp simp: masterCap.sameRegionAs) + apply (simp add: masterCap.sameRegionAs is_chunk_def mdb_next_trans_next_pres + mdb_next_rtrans_next_pres) + subgoal by fastforce + apply (erule_tac x=pa in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (simp add: masterCap.sameRegionAs is_chunk_def mdb_next_trans_next_pres + mdb_next_rtrans_next_pres) + subgoal by fastforce + apply (rule conjI) + apply (clarsimp simp: is_arch_update'_def untyped_mdb'_def arch_update_descendants' + simp del: fun_upd_apply) + apply (cases oldcte) + apply clarsimp + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: isCap_simps) + apply (frule capMaster_isUntyped) + apply (drule capMaster_capRange) + apply simp + apply (rule conjI) + apply (clarsimp simp: untyped_inc'_def arch_update_descendants' + simp del: fun_upd_apply) + apply (cases oldcte) + apply (clarsimp simp: is_arch_update'_def) + apply (drule capMaster_untypedRange) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + apply (erule_tac x=pa in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (rule conjI) + apply (cases oldcte) + apply (clarsimp simp: valid_nullcaps_def is_arch_update'_def isCap_simps) + apply (rule conjI) + apply (cases oldcte) + apply (clarsimp simp: ut_revocable'_def is_arch_update'_def isCap_simps) + apply 
(rule conjI) + apply (clarsimp simp: class_links_def simp del: fun_upd_apply) + apply (cases oldcte) + apply (clarsimp simp: is_arch_update'_def mdb_next_pres) + apply (drule capMaster_capClass) + apply (clarsimp split: if_split_asm) + apply fastforce + apply (rule conjI) + apply (erule(1) distinct_zombies_sameMasterE) + apply (clarsimp simp: is_arch_update'_def) + apply (clarsimp simp: irq_control_def) + apply (cases oldcte) + apply (subgoal_tac "cap \ IRQControlCap") + prefer 2 + apply (clarsimp simp: is_arch_update'_def isCap_simps) + apply (rule conjI) + apply clarsimp + apply (simp add: reply_masters_rvk_fb_def) + apply (erule ball_ran_fun_updI) + apply (clarsimp simp add: is_arch_update'_def isCap_simps) + done + +lemma capMaster_zobj_refs: + "capMasterCap c = capMasterCap c' \ zobj_refs' c = zobj_refs' c'" + by (simp add: capMasterCap_def split: capability.splits arch_capability.splits) + +lemma cte_refs_Master: + "cte_refs' (capMasterCap cap) = cte_refs' cap" + by (rule ext, simp add: capMasterCap_def split: capability.split) + +lemma zobj_refs_Master: + "zobj_refs' (capMasterCap cap) = zobj_refs' cap" + by (simp add: capMasterCap_def split: capability.split arch_capability.split) + +lemma capMaster_same_refs: + "capMasterCap a = capMasterCap b \ cte_refs' a = cte_refs' b \ zobj_refs' a = zobj_refs' b" + apply (rule conjI) + apply (rule master_eqI, rule cte_refs_Master, simp) + apply (rule master_eqI, rule zobj_refs_Master, simp) + done + +lemma arch_update_setCTE_iflive: + "\cte_wp_at' (is_arch_update' cap) p and cte_wp_at' ((=) oldcte) p and if_live_then_nonz_cap'\ + setCTE p (cteCap_update (\_. cap) oldcte) + \\rv. if_live_then_nonz_cap'\" + apply (wp setCTE_iflive') + apply (clarsimp simp: cte_wp_at_ctes_of is_arch_update'_def dest!: capMaster_zobj_refs) + done + +lemma arch_update_setCTE_ifunsafe: + "\cte_wp_at' (is_arch_update' cap) p and cte_wp_at' ((=) oldcte) p and if_unsafe_then_cap'\ + setCTE p (cteCap_update (\_. cap) oldcte) + \\rv s. if_unsafe_then_cap' s\" + apply (clarsimp simp: ifunsafe'_def2 cte_wp_at_ctes_of pred_conj_def) + apply (rule hoare_lift_Pf2 [where f=irq_node']) + prefer 2 + apply wp + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of is_arch_update'_def) + apply (frule capMaster_same_refs) + apply clarsimp + apply (rule conjI, clarsimp) + apply (erule_tac x=p in allE) + apply clarsimp + apply (erule impE) + apply clarsimp + apply clarsimp + apply (rule_tac x=cref' in exI) + apply clarsimp + apply clarsimp + apply (erule_tac x=cref in allE) + apply clarsimp + apply (rule_tac x=cref' in exI) + apply clarsimp + done + +lemma setCTE_cur_tcb[wp]: + "\cur_tcb'\ setCTE ptr val \\rv. cur_tcb'\" + by (wp cur_tcb_lift) + +lemma setCTE_vms'[wp]: + "\valid_machine_state'\ setCTE ptr val \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def ) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift) + apply wp+ + done + +lemma arch_update_setCTE_invs: + "\cte_wp_at' (is_arch_update' cap) p and cte_wp_at' ((=) oldcte) p and invs' and valid_cap' cap\ + setCTE p (cteCap_update (\_. cap) oldcte) + \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wp arch_update_setCTE_mdb valid_queues_lift sch_act_wf_lift tcb_in_cur_domain'_lift ct_idle_or_in_cur_domain'_lift + arch_update_setCTE_iflive arch_update_setCTE_ifunsafe + valid_irq_node_lift setCTE_typ_at' setCTE_irq_handlers' + valid_queues_lift' setCTE_pred_tcb_at' irqs_masked_lift + setCTE_norq hoare_vcg_disj_lift untyped_ranges_zero_lift + | simp add: pred_tcb_at'_def)+ + apply (clarsimp simp: valid_global_refs'_def is_arch_update'_def fun_upd_def[symmetric] + cte_wp_at_ctes_of isCap_simps untyped_ranges_zero_fun_upd) + apply (frule capMaster_eq_capBits_eq) + apply (frule capMaster_isUntyped) + apply (frule capMaster_capRange) + apply (clarsimp simp: valid_refs'_def valid_cap_sizes'_def) + apply (subst untyped_ranges_zero_delta[where xs="[p]"], assumption, simp_all) + apply (clarsimp simp: ran_restrict_map_insert cteCaps_of_def + untypedZeroRange_def Let_def + isCap_simps(1-11)[where v="ArchObjectCap ac" for ac]) + apply (fastforce simp: ran_def) + done + +definition + "safe_parent_for' m p cap \ + \parent node. m p = Some (CTE parent node) \ + sameRegionAs parent cap \ + ((\irq. cap = IRQHandlerCap irq) \ + parent = IRQControlCap \ + (\p n'. m p \ Some (CTE cap n')) + \ + isUntypedCap parent \ descendants_of' p m = {} \ capRange cap \ {} + \ capBits cap \ capBits parent)" + +definition + "is_simple_cap' cap \ + cap \ NullCap \ + cap \ IRQControlCap \ + \ isUntypedCap cap \ + \ isReplyCap cap \ + \ isEndpointCap cap \ + \ isNotificationCap cap \ + \ isThreadCap cap \ + \ isCNodeCap cap \ + \ isZombie cap \ + \ isArchFrameCap cap" + +end + +(* FIXME: duplicated *) +locale mdb_insert_simple = mdb_insert + + assumes safe_parent: "safe_parent_for' m src c'" + assumes simple: "is_simple_cap' c'" +begin + +interpretation Arch . 
(*FIXME: arch_split*) + +lemma dest_no_parent_n: + "n \ dest \ p = False" + using src simple safe_parent + apply clarsimp + apply (erule subtree.induct) + prefer 2 + apply simp + apply (clarsimp simp: parentOf_def mdb_next_unfold n_dest new_dest_def n) + apply (cases "mdbNext src_node = dest") + apply (subgoal_tac "m \ src \ dest") + apply simp + apply (subst mdb_next_unfold) + apply (simp add: src) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (clarsimp simp: is_simple_cap'_def Retype_H.isCapRevocable_def AARCH64_H.isCapRevocable_def + split: capability.splits arch_capability.splits) + apply (cases c', auto simp: isCap_simps)[1] + apply (clarsimp simp add: sameRegionAs_def2) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: safe_parent_for'_def isCap_simps) + apply (cases c', auto simp: isCap_simps)[1] + done + +lemma src_node_revokable [simp]: + "mdbRevocable src_node" + using safe_parent ut_rev src + apply (clarsimp simp add: safe_parent_for'_def) + apply (erule disjE) + apply clarsimp + apply (erule irq_revocable, rule irq_control) + apply (clarsimp simp: ut_revocable'_def) + done + +lemma new_child [simp]: + "isMDBParentOf new_src new_dest" + using safe_parent ut_rev src + apply (simp add: new_src_def new_dest_def isMDBParentOf_def) + apply (clarsimp simp: safe_parent_for'_def) + apply (auto simp: isCap_simps) + done + +lemma n_dest_child: + "n \ src \ dest" + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def src dest n) + done + +lemma parent_m_n: + assumes "m \ p \ p'" + shows "if p' = src then n \ p \ dest \ n \ p \ p' else n \ p \ p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply (cases "p = src") + apply simp + apply (rule conjI, clarsimp) + apply clarsimp + apply (rule subtree.trans_parent [where c'=dest]) + apply (rule n_dest_child) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (clarsimp simp: new_src_def src) + apply simp + apply (subgoal_tac "n \ p \ c") + prefer 2 + apply (rule subtree.direct_parent) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: new_src_def src) + apply clarsimp + apply (erule subtree_trans) + apply (rule n_dest_child) + done +next + case (trans_parent c d) + thus ?case + apply - + apply (cases "c = dest", simp) + apply (cases "d = dest", simp) + apply (cases "c = src") + apply clarsimp + apply (erule subtree.trans_parent [where c'=dest]) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (rule conjI, clarsimp) + apply (clarsimp simp: new_src_def src) + apply clarsimp + apply (subgoal_tac "n \ p \ d") + apply clarsimp + apply (erule subtree_trans, rule n_dest_child) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: src new_src_def) + done +qed + +lemma n_to_dest [simp]: + "n \ p \ dest = (p = src)" + by (simp add: n_direct_eq) + +lemma parent_n_m: + assumes "n \ p \ p'" + shows "if p' = dest then p \ src \ m \ p \ src else m \ p \ p'" +proof - + from assms have [simp]: "p \ dest" by (clarsimp simp: dest_no_parent_n) + from assms + show ?thesis + proof induct + case (direct_parent c) + thus ?case + apply simp + apply (rule conjI) + apply clarsimp + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply simp + apply (clarsimp simp: 
parentOf_def n src new_src_def split: if_split_asm) + done + next + case (trans_parent c d) + thus ?case + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp split: if_split_asm) + apply (simp add: n_direct_eq) + apply (cases "p=src") + apply simp + apply (rule subtree.direct_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply clarsimp + apply (erule subtree.trans_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def n src new_src_def split: if_split_asm) + done + qed +qed + + +lemma descendants: + "descendants_of' p n = + (if src \ descendants_of' p m \ p = src + then descendants_of' p m \ {dest} else descendants_of' p m)" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (fastforce dest!: parent_n_m dest: parent_m_n simp: n_dest_child split: if_split_asm) + done + +end + +declare if_split [split del] + +lemma setUntypedCapAsFull_safe_parent_for': + "\\s. safe_parent_for' (ctes_of s) slot a \ cte_wp_at' ((=) srcCTE) slot s\ + setUntypedCapAsFull (cteCap srcCTE) c' slot + \\rv s. safe_parent_for' (ctes_of s) slot a\" + apply (clarsimp simp:safe_parent_for'_def setUntypedCapAsFull_def split:if_splits) + apply (intro conjI impI) + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (ctes_of s) + (modify_map (ctes_of s) slot + (cteCap_update (\_. capFreeIndex_update (\_. max_free_index (capBlockSize c')) (cteCap srcCTE))))") + apply (frule mdb_inv_preserve.descendants_of[where p = slot]) + apply (clarsimp simp:isCap_simps modify_map_def cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (clarsimp cong:sameRegionAs_update_untyped) + apply (rule mdb_inv_preserve_updateCap) + apply (simp add:cte_wp_at_ctes_of) + apply simp + apply wp + apply simp + done + +lemma maskedAsFull_revokable_safe_parent: + "\is_simple_cap' c'; safe_parent_for' m p c'; m p = Some cte; + cteCap cte = (maskedAsFull src_cap' a)\ + \ isCapRevocable c' (maskedAsFull src_cap' a) = isCapRevocable c' src_cap'" + apply (clarsimp simp:isCapRevocable_def AARCH64_H.isCapRevocable_def maskedAsFull_def + split:if_splits capability.splits) + apply (intro allI impI conjI) + apply (clarsimp simp:isCap_simps is_simple_cap'_def)+ + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cteInsert_simple_corres: + assumes "cap_relation c c'" "src' = cte_map src" "dest' = cte_map dest" + notes trans_state_update'[symmetric,simp] + shows "corres dc + (valid_objs and pspace_distinct and pspace_aligned and + valid_mdb and valid_list and K (src\dest) and + cte_wp_at (\c. c=cap.NullCap) dest and + K (is_simple_cap c) and + (\s. cte_wp_at (safe_parent_for (cdt s) src c) src s)) + (pspace_distinct' and pspace_aligned' and valid_mdb' and valid_cap' c' and + K (is_simple_cap' c') and + cte_wp_at' (\c. cteCap c=NullCap) dest' and + (\s. safe_parent_for' (ctes_of s) src' c')) + (cap_insert c src dest) + (cteInsert c' src' dest')" + (is "corres _ (?P and (\s. 
cte_wp_at _ _ s)) (?P' and cte_wp_at' _ _ and _) _ _") + using assms + unfolding cap_insert_def cteInsert_def + supply subst_all [simp del] + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac F="cteCap rv' = NullCap" in corres_gen_asm2) + apply simp + apply (rule_tac P="?P and cte_at dest and + (\s. cte_wp_at (safe_parent_for (cdt s) src c) src s) and + cte_wp_at ((=) src_cap) src" and + Q="?P' and + cte_wp_at' ((=) rv') (cte_map dest) and + cte_wp_at' ((=) srcCTE) (cte_map src) and + (\s. safe_parent_for' (ctes_of s) src' c')" + in corres_assert_assume) + prefer 2 + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def valid_nullcaps_def) + apply (case_tac rv') + apply (simp add: initMDBNode_def) + apply (erule allE)+ + apply (erule (1) impE) + apply (simp add: nullPointer_def) + apply (rule corres_guard_imp) + apply (rule_tac R="\r. ?P and cte_at dest and + (\s. cte_wp_at (safe_parent_for (cdt s) src c) src s) and + cte_wp_at ((=) (masked_as_full src_cap c)) src" and + R'="\r. ?P' and cte_wp_at' ((=) rv') (cte_map dest) + and cte_wp_at' ((=) (CTE (maskedAsFull (cteCap srcCTE) c') (cteMDBNode srcCTE))) (cte_map src) + and (\s. safe_parent_for' (ctes_of s) src' c')" + in corres_split[where r'=dc]) + apply (rule setUntypedCapAsFull_corres; simp) + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def) + apply (erule_tac valid_dlistEn[where p = "cte_map src"]) + apply (simp+)[3] + apply (clarsimp simp: corres_underlying_def state_relation_def + in_monad valid_mdb'_def valid_mdb_ctes_def) + apply (drule (1) pspace_relationsD) + apply (drule (18) set_cap_not_quite_corres) + apply (rule refl) + apply (elim conjE exE) + apply (rule bind_execI, assumption) + apply (subgoal_tac "mdb_insert_abs (cdt a) src dest") + prefer 2 + apply (clarsimp simp: cte_wp_at_caps_of_state valid_mdb_def2) + apply (rule mdb_insert_abs.intro) + apply clarsimp + apply (erule (1) mdb_cte_at_Null_None) + apply (erule (1) mdb_cte_at_Null_descendants) + apply (subgoal_tac "no_mloop (cdt a)") + prefer 2 + apply (simp add: valid_mdb_def) + apply (clarsimp simp: exec_gets update_cdt_def bind_assoc set_cdt_def + exec_get exec_put set_original_def modify_def + simp del: fun_upd_apply + + | (rule bind_execI[where f="cap_insert_ext x y z x' y'" for x y z x' y'], clarsimp simp: mdb_insert_abs.cap_insert_ext_det_def2 update_cdt_list_def set_cdt_list_def put_def simp del: fun_upd_apply) | rule refl)+ + + apply (clarsimp simp: put_def state_relation_def simp del: fun_upd_apply) + apply (drule updateCap_stuff) + apply clarsimp + apply (drule (3) updateMDB_the_lot', simp only: no_0_modify_map, simp only:, elim conjE) + apply (drule (3) updateMDB_the_lot', simp only: no_0_modify_map, simp only:, elim conjE) + apply (drule (3) updateMDB_the_lot', simp only: no_0_modify_map, simp only:, elim conjE) + apply (clarsimp simp: pspace_relations_def) + apply (rule conjI) + subgoal by (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (thin_tac "gsCNodes t = p" for t p)+ + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksWorkUnitsCompleted t = p" for t p)+ + apply (thin_tac "ksIdleThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + apply 
(thin_tac "cur_thread t = p" for t p)+ + apply (thin_tac "domain_index t = p" for t p)+ + apply (thin_tac "domain_time t = p" for t p)+ + apply (thin_tac "cur_domain t = p" for t p)+ + apply (thin_tac "scheduler_action t = p" for t p)+ + apply (thin_tac "ready_queues t = p" for t p)+ + apply (thin_tac "idle_thread t = p" for t p)+ + apply (thin_tac "machine_state t = p" for t p)+ + apply (thin_tac "work_units_completed t = p" for t p)+ + apply (thin_tac "ksArchState t = p" for t p)+ + apply (thin_tac "gsUserPages t = p" for t p)+ + apply (thin_tac "ksCurDomain t = p" for t p)+ + apply (thin_tac "ksInterruptState t = p" for t p)+ + apply (thin_tac "ksDomScheduleIdx t = p" for t p)+ + apply (thin_tac "ksDomainTime t = p" for t p)+ + apply (thin_tac "ksDomSchedule t = p" for t p)+ + apply (thin_tac "ctes_of t = p" for t p)+ + apply (thin_tac "ekheap_relation t p" for t p)+ + apply (thin_tac "pspace_relation t p" for t p)+ + apply (thin_tac "interrupt_state_relation s t p" for s t p)+ + apply (thin_tac "sched_act_relation t p" for t p)+ + apply (thin_tac "ready_queues_relation t p" for t p)+ + apply (clarsimp simp: cte_wp_at_ctes_of nullPointer_def prev_update_modify_mdb_relation) + apply (subgoal_tac "cte_map dest \ 0") + prefer 2 + apply (clarsimp simp: valid_mdb'_def + valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "cte_map src \ 0") + prefer 2 + apply (clarsimp simp: valid_mdb'_def + valid_mdb_ctes_def no_0_def) + apply (subgoal_tac "should_be_parent_of src_cap (is_original_cap a src) c (is_cap_revocable c src_cap) = True") + prefer 2 + apply (subst should_be_parent_of_masked_as_full[symmetric]) + apply (subst safe_parent_is_parent) + apply ((simp add: cte_wp_at_caps_of_state)+)[4] + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + defer + apply (clarsimp simp: modify_map_apply) + apply (clarsimp simp: revokable_relation_def simp del: fun_upd_apply) + apply (simp split: if_split) + apply (rule conjI) + apply clarsimp + apply (subgoal_tac "mdbRevocable node = isCapRevocable c' (cteCap srcCTE)") + prefer 2 + apply (case_tac rv') + apply (clarsimp simp add: const_def modify_map_def split: if_split_asm) + apply clarsimp + apply (rule is_cap_revocable_eq, assumption, assumption) + apply (subst same_region_as_relation [symmetric]) + prefer 3 + apply (rule safe_parent_same_region) + apply (simp add: cte_wp_at_caps_of_state) + apply assumption + apply assumption + apply (clarsimp simp: cte_wp_at_def is_simple_cap_def) + apply clarsimp + apply (case_tac srcCTE) + apply (case_tac rv') + apply clarsimp + apply (subgoal_tac "\cap' node'. 
ctes_of b (cte_map (aa,bb)) = Some (CTE cap' node')") + prefer 2 + subgoal by (clarsimp simp: modify_map_def split: if_split_asm) + apply clarsimp + apply (drule set_cap_caps_of_state_monad)+ + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + subgoal by (clarsimp simp: cte_wp_at_caps_of_state null_filter_def split: if_splits) + apply clarsimp + apply (subgoal_tac "cte_at (aa,bb) a") + prefer 2 + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (subgoal_tac "mdbRevocable node = mdbRevocable node'") + apply clarsimp + apply (subgoal_tac "cte_map (aa,bb) \ cte_map dest") + subgoal by (clarsimp simp: modify_map_def split: if_split_asm) + apply (erule (5) cte_map_inj) + apply (wp set_untyped_cap_full_valid_objs set_untyped_cap_as_full_valid_mdb set_untyped_cap_as_full_valid_list + set_untyped_cap_as_full_cte_wp_at setUntypedCapAsFull_valid_cap + setUntypedCapAsFull_cte_wp_at setUntypedCapAsFull_safe_parent_for' | clarsimp | wps)+ + apply (clarsimp simp:cte_wp_at_caps_of_state ) + apply (case_tac rv',clarsimp simp:cte_wp_at_ctes_of maskedAsFull_def) + apply (wp getCTE_wp' get_cap_wp)+ + apply clarsimp + subgoal by (fastforce elim: cte_wp_at_weakenE) + subgoal by (clarsimp simp: cte_wp_at'_def) + apply (case_tac "srcCTE") + apply (rename_tac src_cap' src_node) + apply (case_tac "rv'") + apply (rename_tac dest_node) + apply (clarsimp simp: in_set_cap_cte_at_swp) + apply (subgoal_tac "cte_at src a \ safe_parent_for (cdt a) src c src_cap") + prefer 2 + subgoal by (fastforce simp: cte_wp_at_def) + apply (erule conjE) + apply (subgoal_tac "mdb_insert (ctes_of b) (cte_map src) (maskedAsFull src_cap' c') src_node + (cte_map dest) NullCap dest_node") + prefer 2 + apply (rule mdb_insert.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro, simp add: valid_mdb_ctes_def) + apply (erule mdb_ptr_axioms.intro; assumption) + apply (rule mdb_insert_axioms.intro; assumption?) 
+ apply (rule refl) + apply (erule (5) cte_map_inj) + apply (rule conjI) + apply (simp (no_asm_simp) add: cdt_relation_def split: if_split) + apply (intro impI allI) + apply (frule mdb_insert_simple_axioms.intro) + apply(clarsimp simp:cte_wp_at_ctes_of) + apply (drule (1) mdb_insert_simple.intro) + apply (drule_tac src_cap' = src_cap' in maskedAsFull_revokable_safe_parent[symmetric]) + apply simp+ + apply (subst mdb_insert_simple.descendants) + apply simp + apply (subst mdb_insert_abs.descendants_child, assumption) + apply (frule set_cap_caps_of_state_monad) + apply (subgoal_tac "cte_at (aa,bb) a") + prefer 2 + subgoal by (clarsimp simp: cte_wp_at_caps_of_state split: if_split_asm) + apply (simp add: descendants_of_eq' cdt_relation_def split: if_split del: split_paired_All) + apply clarsimp + apply (drule (5) cte_map_inj)+ + apply simp + (* exact reproduction of proof in cteInsert_corres, + as it does not use is_derived *) + apply(simp add: cdt_list_relation_def del: split_paired_All split_paired_Ex) + apply(subgoal_tac "no_mloop (cdt a) \ finite_depth (cdt a)") + prefer 2 + apply(simp add: finite_depth valid_mdb_def) + apply(intro impI allI) + apply(simp add: fun_upd_twist) + + apply(subst next_slot_eq[OF mdb_insert_abs.next_slot]) + apply(simp_all del: fun_upd_apply) + apply(simp split: option.splits del: fun_upd_apply add: fun_upd_twist) + apply(intro allI impI) + apply(subgoal_tac "src \ (aa, bb)") + prefer 2 + apply(rule notI) + apply(simp add: valid_mdb_def no_mloop_weaken) + apply(subst fun_upd_twist, simp, simp) + + apply(case_tac "ca=src") + apply(simp) + apply(clarsimp simp: modify_map_def) + subgoal by(fastforce split: if_split_asm) + apply(case_tac "ca = dest") + apply(simp) + apply(case_tac "next_slot src (cdt_list (a)) (cdt a)") + apply(simp) + apply(simp) + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(drule_tac p="cte_map src" in valid_mdbD1') + apply(simp) + apply(simp add: valid_mdb'_def valid_mdb_ctes_def) + apply(clarsimp) + apply(drule cte_map_inj_eq) + apply(simp_all)[6] + apply(erule_tac x="fst src" in allE) + apply(erule_tac x="snd src" in allE) + apply(fastforce) + apply(simp) + apply(case_tac "next_slot ca (cdt_list (a)) (cdt a)") + apply(simp) + apply(simp) + apply(subgoal_tac "cte_at ca a") + prefer 2 + subgoal by (rule cte_at_next_slot; simp) + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + subgoal by (drule cte_map_inj_eq; simp) + apply(drule_tac p="cte_map src" in valid_mdbD1') + apply(simp) + apply(simp add: valid_mdb'_def valid_mdb_ctes_def) + apply(clarsimp) + apply(clarsimp) + apply(case_tac z) + apply(erule_tac x=aa in allE) + apply(erule_tac x=bb in allE) + apply(fastforce) + subgoal by (drule cte_map_inj_eq; simp) + subgoal by (drule cte_map_inj_eq; simp) + subgoal by (drule cte_map_inj_eq; simp) + by(fastforce) + +declare if_split [split] + +lemma sameRegion_capRange_sub: + "sameRegionAs cap cap' \ capRange cap' \ capRange cap" + apply (clarsimp simp: sameRegionAs_def2 isCap_Master capRange_Master) + apply (erule disjE, fastforce dest!: capMaster_capRange) + apply (erule disjE, fastforce) + apply (clarsimp simp: isCap_simps capRange_def split: if_split_asm) + done + +lemma safe_parent_for_capRange_capBits: + "\ safe_parent_for' m p cap; m p = Some cte \ \ capRange cap \ capRange (cteCap cte) + \ capBits cap \ capBits (cteCap cte)" + apply (clarsimp simp: safe_parent_for'_def) + apply (erule disjE) + apply (clarsimp simp: capRange_def) + by (auto simp: sameRegionAs_def2 
isCap_simps capRange_def + capMasterCap_def capRange_Master objBits_simps + split:capability.splits arch_capability.splits) + +lemma safe_parent_Null: + "\ m src = Some (CTE NullCap n); safe_parent_for' m src c' \ \ False" + by (simp add: safe_parent_for'_def) + +lemma notUntypedRange: + "\isUntypedCap cap \ untypedRange cap = {}" + by (cases cap) (auto simp: isCap_simps) + +lemma safe_parent_for_untypedRange: + "\ safe_parent_for' m p cap; m p = Some cte \ \ untypedRange cap \ untypedRange (cteCap cte)" + apply (clarsimp simp: safe_parent_for'_def) + apply (erule disjE) + apply clarsimp + apply clarsimp + apply (simp add: sameRegionAs_def2) + apply (erule disjE) + apply clarsimp + apply (drule capMaster_untypedRange) + apply blast + apply (erule disjE) + apply (clarsimp simp: capRange_Master untypedCapRange) + apply (cases "isUntypedCap cap") + apply (clarsimp simp: capRange_Master untypedCapRange) + apply blast + apply (drule notUntypedRange) + apply simp + apply (clarsimp simp: isCap_Master isCap_simps) + done + +lemma safe_parent_for_capUntypedRange: + "\ safe_parent_for' m p cap; m p = Some cte \ \ capRange cap \ untypedRange (cteCap cte)" + apply (clarsimp simp: safe_parent_for'_def) + apply (erule disjE) + apply (clarsimp simp: capRange_def) + apply clarsimp + apply (simp add: sameRegionAs_def2) + apply (erule disjE) + apply clarsimp + apply (frule capMaster_capRange) + apply (clarsimp simp: capRange_Master untypedCapRange) + apply (erule disjE) + apply (clarsimp simp: capRange_Master untypedCapRange) + apply blast + apply (clarsimp simp: isCap_Master isCap_simps) + done + +lemma safe_parent_for_descendants': + "\ safe_parent_for' m p cap; m p = Some (CTE pcap n); isUntypedCap pcap \ \ descendants_of' p m = {}" + by (auto simp: safe_parent_for'_def isCap_simps) + +lemma safe_parent_not_ep': + "\ safe_parent_for' m p cap; m p = Some (CTE src_cap n) \ \ \isEndpointCap src_cap" + by (auto simp: safe_parent_for'_def isCap_simps) + +lemma safe_parent_not_ntfn': + "\ safe_parent_for' m p cap; m p = Some (CTE src_cap n) \ \ \isNotificationCap src_cap" + by (auto simp: safe_parent_for'_def isCap_simps) + +lemma safe_parent_capClass: + "\ safe_parent_for' m p cap; m p = Some (CTE src_cap n) \ \ capClass cap = capClass src_cap" + by (auto simp: safe_parent_for'_def isCap_simps sameRegionAs_def2 capRange_Master capRange_def + capMasterCap_def + split: capability.splits arch_capability.splits) +end +locale mdb_insert_simple' = mdb_insert_simple + + fixes n' + defines "n' \ modify_map n (mdbNext src_node) (cteMDBNode_update (mdbPrev_update (\_. dest)))" +begin +interpretation Arch . 
(*FIXME: arch_split*) +lemma no_0_n' [intro!]: "no_0 n'" by (auto simp: n'_def) +lemmas n_0_simps' [iff] = no_0_simps [OF no_0_n'] + +lemmas no_0_m_prev [iff] = no_0_prev [OF no_0] +lemmas no_0_n_prev [iff] = no_0_prev [OF no_0_n'] + +lemma chain_n': "mdb_chain_0 n'" + unfolding n'_def + by (rule mdb_chain_0_modify_map_prev) (rule chain_n) + +lemma no_loops_n': "no_loops n'" using chain_n' no_0_n' + by (rule mdb_chain_0_no_loops) + +lemma n_direct_eq': + "n' \ p \ p' = (if p = src then p' = dest else + if p = dest then m \ src \ p' + else m \ p \ p')" + by (simp add: n'_def n_direct_eq) + +lemma dest_no_next_p: + "m p = Some cte \ mdbNext (cteMDBNode cte) \ dest" + using dest dest_prev + apply (cases cte) + apply (rule notI) + apply (rule dlistEn, assumption) + apply clarsimp + apply clarsimp + done + +lemma dest_no_src_next [iff]: + "mdbNext src_node \ dest" + using src by (clarsimp dest!: dest_no_next_p) + +lemma n_dest': + "n' dest = Some new_dest" + by (simp add: n'_def n modify_map_if new_dest_def) + +lemma n'_trancl_eq: + "n' \ p \\<^sup>+ p' = + (if p' = dest then p = src \ m \ p \\<^sup>+ src + else if p = dest then m \ src \\<^sup>+ p' + else m \ p \\<^sup>+ p')" + unfolding n'_def trancl_prev_update + by (simp add: n_trancl_eq) + +lemma n_rtrancl_eq': + "n' \ p \\<^sup>* p' = + (if p' = dest then p = dest \ p \ dest \ m \ p \\<^sup>* src + else if p = dest then p' \ src \ m \ src \\<^sup>* p' + else m \ p \\<^sup>* p')" + unfolding n'_def rtrancl_prev_update + by (simp add: n_rtrancl_eq) + +lemma n'_cap: + "n' p = Some (CTE cap node) \ + \node'. if p = dest then cap = c' \ m p = Some (CTE dest_cap node') + else m p = Some (CTE cap node')" + by (auto simp add: n'_def n src dest new_src_def new_dest_def modify_map_if split: if_split_asm) + +lemma n'_rev: + "n' p = Some (CTE cap node) \ + \node'. if p = dest then mdbRevocable node = isCapRevocable c' src_cap \ m p = Some (CTE dest_cap node') + else m p = Some (CTE cap node') \ mdbRevocable node = mdbRevocable node'" + by (auto simp add: n'_def n src dest new_src_def new_dest_def modify_map_if split: if_split_asm) + +lemma m_cap': + "m p = Some (CTE cap node) \ + \node'. 
if p = dest then cap = dest_cap \ n' p = Some (CTE c' node') + else n' p = Some (CTE cap node')" + apply (simp add: n'_def n new_src_def new_dest_def modify_map_if) + apply (cases "p=dest") + apply (auto simp: src dest) + done + +lemma descendants': + "descendants_of' p n' = + (if src \ descendants_of' p m \ p = src + then descendants_of' p m \ {dest} else descendants_of' p m)" + by (simp add: n'_def descendants descendants_of_prev_update) + +lemma ut_revocable_n' [simp]: + "ut_revocable' n'" + using dest + apply (clarsimp simp: ut_revocable'_def) + apply (frule n'_cap) + apply (drule n'_rev) + apply clarsimp + apply (clarsimp simp: n_dest' new_dest_def split: if_split_asm) + apply (clarsimp simp: Retype_H.isCapRevocable_def isCap_simps) + apply (drule_tac p=p and m=m in ut_revocableD', assumption) + apply (rule ut_rev) + apply simp + done + +lemma valid_nc' [simp]: + "valid_nullcaps n'" + unfolding valid_nullcaps_def + using src dest dest_prev dest_next simple safe_parent + apply (clarsimp simp: n'_def n_def modify_map_if) + apply (rule conjI) + apply (clarsimp simp: is_simple_cap'_def) + apply clarsimp + apply (rule conjI) + apply (fastforce dest!: safe_parent_Null) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) valid_nullcaps_next, rule no_0, rule dlist, rule nullcaps) + apply simp + apply clarsimp + apply (erule nullcapsD', rule nullcaps) + done + +lemma n'_prev_eq: + "n' \ p \ p' = + (if p' = mdbNext src_node \ p' \ 0 then p = dest + else if p' = dest then p = src + else m \ p \ p')" + using src dest dest_prev dest_next + apply (cases "p' = 0", simp) + apply (simp split del: if_split) + apply (cases "p' = mdbNext src_node") + apply (clarsimp simp: modify_map_apply n'_def n_def mdb_prev_def) + apply (clarsimp simp: modify_map_if) + apply (rule iffI, clarsimp) + apply clarsimp + apply (rule dlistEn, assumption, simp) + apply clarsimp + apply (case_tac cte') + apply clarsimp + apply (cases "p' = dest") + apply (clarsimp simp: modify_map_if n'_def n_def mdb_prev_def) + apply clarsimp + apply (clarsimp simp: modify_map_if n'_def n_def mdb_prev_def) + apply (cases "p' = src", simp) + apply clarsimp + apply (rule iffI, clarsimp) + apply clarsimp + apply (case_tac z) + apply clarsimp + done + +lemma m_prev_of_next: + "m \ p \ mdbNext src_node = (p = src \ mdbNext src_node \ 0)" + using src + apply (clarsimp simp: mdb_prev_def) + apply (rule iffI) + apply clarsimp + apply (rule dlistEn, assumption, clarsimp) + apply clarsimp + apply clarsimp + apply (rule dlistEn, assumption, clarsimp) + apply clarsimp + done + +lemma src_next_eq: + "m \ p \ mdbNext src_node = (if mdbNext src_node \ 0 then p = src else m \ p \ 0)" + using src + apply - + apply (rule iffI) + prefer 2 + apply (clarsimp split: if_split_asm) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (frule (1) dlist_nextD0) + apply (clarsimp simp: m_prev_of_next) + apply clarsimp + done + +lemma src_next_eq': + "m (mdbNext src_node) = Some cte \ m \ p \ mdbNext src_node = (p = src)" + by (subst src_next_eq) auto + +lemma dest_no_prev [iff]: + "\ m \ dest \ p" + using dest dest_next + apply (clarsimp simp: mdb_prev_def) + apply (rule dlistEp [where p=p], assumption, clarsimp) + apply clarsimp + done + +lemma src_prev [iff]: + "m \ src \ p = (p = mdbNext src_node \ p \ 0)" + using src + apply - + apply (rule iffI) + prefer 2 + apply (clarsimp simp: mdb_ptr_src.next_p_prev) + apply (clarsimp simp: mdb_prev_def) + apply (rule conjI) + prefer 2 + apply clarsimp + apply (rule dlistEp [where p=p], 
assumption, clarsimp) + apply simp + done + +lemma dlist' [simp]: + "valid_dlist n'" + using src dest + apply (unfold valid_dlist_def3 n_direct_eq' n'_prev_eq) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply (split if_split) + apply simp + apply (intro conjI impI allI notI) + apply (fastforce simp: src_next_eq') + apply (clarsimp simp: src_next_eq split: if_split_asm) + apply (simp add: mdb_ptr_src.p_next) + apply (erule (1) dlist_nextD0) + apply clarsimp + apply clarsimp + apply clarsimp + apply (erule (1) dlist_prevD0) + done + +lemma utRange_c': + "untypedRange c' \ untypedRange src_cap" + using safe_parent src + by - (drule (1) safe_parent_for_untypedRange, simp) + +lemma capRange_c': + "capRange c' \ capRange src_cap" + using safe_parent src + by - (drule (1) safe_parent_for_capRange_capBits, simp) + +lemma not_ut_c' [simp]: + "\isUntypedCap c'" + using simple + by (simp add: is_simple_cap'_def) + +lemma utCapRange_c': + "capRange c' \ untypedRange src_cap" + using safe_parent src + by - (drule (1) safe_parent_for_capUntypedRange, simp) + +lemma ut_descendants: + "isUntypedCap src_cap \ descendants_of' src m = {}" + using safe_parent src + by (rule safe_parent_for_descendants') + +lemma ut_mdb' [simp]: + "untyped_mdb' n'" + using src dest utRange_c' capRange_c' utCapRange_c' + apply (clarsimp simp: untyped_mdb'_def) + apply (drule n'_cap)+ + apply (clarsimp simp: descendants') + apply (clarsimp split: if_split_asm) + apply (cases "isUntypedCap src_cap") + prefer 2 + apply (drule_tac p=p and p'=src and m=m in untyped_mdbD', assumption+) + apply blast + apply (rule untyped_mdb) + apply simp + apply (frule ut_descendants) + apply (drule (3) untyped_incD', rule untyped_inc) + apply clarsimp + apply blast + apply (fastforce elim: untyped_mdbD' intro!: untyped_mdb) + done + +lemma n'_badge: + "n' p = Some (CTE cap node) \ + \node'. 
if p = dest then mdbFirstBadged node = isCapRevocable c' src_cap \ m p = Some (CTE dest_cap node') + else m p = Some (CTE cap node') \ mdbFirstBadged node = mdbFirstBadged node'" + by (auto simp add: n'_def n src dest new_src_def new_dest_def modify_map_if split: if_split_asm) + +lemma src_not_ep [simp]: + "\isEndpointCap src_cap" + using safe_parent src by (rule safe_parent_not_ep') + +lemma src_not_ntfn [simp]: + "\isNotificationCap src_cap" + using safe_parent src by (rule safe_parent_not_ntfn') + +lemma c_not_ep [simp]: + "\isEndpointCap c'" + using simple by (simp add: is_simple_cap'_def) + +lemma c_not_ntfn [simp]: + "\isNotificationCap c'" + using simple by (simp add: is_simple_cap'_def) + +lemma valid_badges' [simp]: + "valid_badges n'" + using simple src dest + apply (clarsimp simp: valid_badges_def) + apply (simp add: n_direct_eq') + apply (frule_tac p=p in n'_badge) + apply (frule_tac p=p' in n'_badge) + apply (drule n'_cap)+ + apply (clarsimp split: if_split_asm) + apply (insert valid_badges) + apply (simp add: valid_badges_def) + apply blast + done + +lemma caps_contained' [simp]: + "caps_contained' n'" + using src dest capRange_c' utCapRange_c' + apply (clarsimp simp: caps_contained'_def) + apply (drule n'_cap)+ + apply clarsimp + apply (clarsimp split: if_split_asm) + apply (drule capRange_untyped) + apply simp + apply (drule capRange_untyped) + apply clarsimp + apply (cases "isUntypedCap src_cap") + prefer 2 + apply (drule_tac p=p and p'=src in caps_containedD', assumption+) + apply blast + apply (rule caps_contained) + apply blast + apply (frule capRange_untyped) + apply (drule (3) untyped_incD', rule untyped_inc) + apply (clarsimp simp: ut_descendants) + apply blast + apply (drule (3) caps_containedD', rule caps_contained) + apply blast + done + +lemma capClass_c' [simp]: + "capClass c' = capClass src_cap" + using safe_parent src by (rule safe_parent_capClass) + +lemma class_links' [simp]: + "class_links n'" + using src dest + apply (clarsimp simp: class_links_def) + apply (simp add: n_direct_eq') + apply (case_tac cte, case_tac cte') + apply clarsimp + apply (drule n'_cap)+ + apply clarsimp + apply (clarsimp split: if_split_asm) + apply (drule (2) class_linksD, rule class_links) + apply simp + apply (drule (2) class_linksD, rule class_links) + apply simp + done + +lemma untyped_inc' [simp]: + "untyped_inc' n'" + using src dest + apply (clarsimp simp: untyped_inc'_def) + apply (drule n'_cap)+ + apply (clarsimp simp: descendants') + apply (clarsimp split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (drule (3) untyped_incD', rule untyped_inc) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (frule_tac p=src and p'=p' in untyped_incD', assumption+, rule untyped_inc) + apply (clarsimp simp: ut_descendants) + apply (intro conjI, clarsimp+) + apply (drule (3) untyped_incD', rule untyped_inc) + apply clarsimp + done + +lemma sameRegion_src [simp]: + "sameRegionAs src_cap c'" + using safe_parent src + apply (simp add: safe_parent_for'_def) + done + +lemma sameRegion_src_c': + "sameRegionAs cap src_cap \ sameRegionAs cap c'" + using safe_parent simple src capRange_c' + apply (simp add: safe_parent_for'_def) + apply (erule disjE) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps capRange_def) + apply (clarsimp simp: sameRegionAs_def2 isCap_Master capRange_Master) + apply (erule disjE) + apply (elim conjE) + apply (erule disjE) + apply blast + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + done + +lemma irq_c'_new: + 
assumes irq_src: "isIRQControlCap src_cap" + shows "m p = Some (CTE cap node) \ \ sameRegionAs c' cap" + using safe_parent irq_src src + apply (clarsimp simp: safe_parent_for'_def isCap_simps) + apply (clarsimp simp: sameRegionAs_def2 isCap_simps) + done + +lemma ut_capRange_non_empty: + "isUntypedCap src_cap \ capRange c' \ {}" + using safe_parent src unfolding safe_parent_for'_def + by (clarsimp simp: isCap_simps) + + +lemma ut_sameRegion_non_empty: + "\ isUntypedCap src_cap; sameRegionAs c' cap \ \ capRange cap \ {}" + using simple safe_parent src + apply (clarsimp simp: is_simple_cap'_def sameRegionAs_def2 isCap_Master) + apply (erule disjE) + apply (clarsimp simp: ut_capRange_non_empty dest!: capMaster_capRange) + apply clarsimp + apply (clarsimp simp: safe_parent_for'_def) + apply (erule disjE, clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps capRange_def) + done + +lemma ut_c'_new: + assumes ut_src: "isUntypedCap src_cap" + shows "m p = Some (CTE cap node) \ \ sameRegionAs c' cap" + using src simple + apply clarsimp + apply (drule untyped_mdbD', rule ut_src, assumption) + apply (clarsimp simp: is_simple_cap'_def sameRegionAs_def2 isCap_Master capRange_Master) + apply (fastforce simp: isCap_simps) + apply (frule sameRegion_capRange_sub) + apply (drule ut_sameRegion_non_empty [OF ut_src]) + apply (insert utCapRange_c') + apply blast + apply (rule untyped_mdb) + apply (simp add: ut_descendants [OF ut_src]) + done + +lemma c'_new: + "m p = Some (CTE cap node) \ \ sameRegionAs c' cap" + using safe_parent src unfolding safe_parent_for'_def + apply (elim exE conjE) + apply (erule disjE) + apply (erule irq_c'_new [rotated]) + apply (clarsimp simp: isCap_simps) + apply clarsimp + apply (drule (1) ut_c'_new) + apply simp + done + +lemma irq_control_src: + "\ isIRQControlCap src_cap; + m p = Some (CTE cap node); + sameRegionAs cap c' \ \ p = src" + using safe_parent src unfolding safe_parent_for'_def + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: sameRegionAs_def2 isCap_Master) + apply (erule disjE, clarsimp simp: isCap_simps) + apply (erule disjE, clarsimp simp: isCap_simps capRange_def) + apply (clarsimp simp: isCap_simps) + apply (drule (1) irq_controlD, rule irq_control) + apply simp + done + +lemma not_irq_parentD: + "\ isIRQControlCap src_cap \ + isUntypedCap src_cap \ descendants_of' src m = {} \ capRange c' \ {}" + using src safe_parent unfolding safe_parent_for'_def + by (clarsimp simp: isCap_simps) + +lemma ut_src_only_ut_c_parents: + "\ isUntypedCap src_cap; sameRegionAs cap c'; m p = Some (CTE cap node) \ \ isUntypedCap cap" + using safe_parent src unfolding safe_parent_for'_def + apply clarsimp + apply (erule disjE, clarsimp simp: isCap_simps) + apply clarsimp + apply (rule ccontr) + apply (drule (3) untyped_mdbD') + apply (frule sameRegion_capRange_sub) + apply (insert utCapRange_c')[1] + apply blast + apply (rule untyped_mdb) + apply simp + done + +lemma ut_src: + "\ isUntypedCap src_cap; sameRegionAs cap c'; m p = Some (CTE cap node) \ \ + isUntypedCap cap \ untypedRange cap \ untypedRange src_cap \ {}" + apply (frule (2) ut_src_only_ut_c_parents) + apply simp + apply (frule sameRegion_capRange_sub) + apply (insert utCapRange_c')[1] + apply (simp add: untypedCapRange) + apply (drule ut_capRange_non_empty) + apply blast + done + + +lemma chunked' [simp]: + "mdb_chunked n'" + using src dest + apply (clarsimp simp: mdb_chunked_def) + apply (drule n'_cap)+ + apply (clarsimp simp: n'_trancl_eq) + apply (clarsimp split: if_split_asm) + prefer 3 + apply 
(frule (3) mdb_chunkedD, rule chunked) + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: is_chunk_def n'_trancl_eq n_rtrancl_eq' n_dest' new_dest_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply clarsimp + apply (erule_tac x=src in allE) + apply simp + apply (erule sameRegion_src_c') + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (frule_tac p=p'' in m_cap') + apply clarsimp + apply clarsimp + apply (clarsimp simp: is_chunk_def n'_trancl_eq n_rtrancl_eq' n_dest' new_dest_def) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply clarsimp + apply (erule_tac x=src in allE) + apply simp + apply (erule sameRegion_src_c') + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (frule_tac p=p'' in m_cap') + apply clarsimp + apply (case_tac "p' = src") + apply simp + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n_rtrancl_eq') + apply (erule disjE) + apply (simp add: n_dest' new_dest_def) + apply clarsimp + apply (drule (1) trancl_rtrancl_trancl) + apply simp + apply clarsimp + apply (drule c'_new) + apply (erule (1) notE) + apply (case_tac "p=src") + apply clarsimp + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n_rtrancl_eq') + apply (erule disjE) + apply (clarsimp simp: n_dest' new_dest_def) + apply clarsimp + apply (drule (1) trancl_rtrancl_trancl) + apply simp + apply (case_tac "isIRQControlCap src_cap") + apply (drule (2) irq_control_src) + apply simp + apply (drule not_irq_parentD) + apply clarsimp + apply (frule (2) ut_src) + apply clarsimp + apply (subgoal_tac "src \ descendants_of' p m") + prefer 2 + apply (drule (3) untyped_incD', rule untyped_inc) + apply clarsimp + apply fastforce + apply (frule_tac m=m and p=p and p'=src in mdb_chunkedD, assumption+) + apply (clarsimp simp: descendants_of'_def) + apply (drule subtree_parent) + apply (clarsimp simp: parentOf_def isMDBParentOf_def split: if_split_asm) + apply simp + apply (rule chunked) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (rule conjI) + prefer 2 + apply clarsimp + apply (drule (1) trancl_trans, simp) + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n_rtrancl_eq' split: if_split_asm) + apply (clarsimp simp: n_dest' new_dest_def) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap') + apply clarsimp + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, simp) + apply (clarsimp simp: descendants_of'_def) + apply (drule subtree_mdb_next) + apply (drule (1) trancl_trans) + apply simp + done + +lemma distinct_zombies_m: + "distinct_zombies m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma untyped_rangefree: + "\ isUntypedCap src_cap; m x = Some cte; x \ src; \ isUntypedCap (cteCap cte) \ + \ capRange (cteCap cte) \ capRange c'" + apply (frule ut_descendants) + apply (cases cte, clarsimp) + apply (frule(2) untyped_mdbD' [OF src _ _ _ _ untyped_mdb]) + apply (simp add: untypedCapRange[symmetric]) + apply (frule ut_capRange_non_empty) + apply (cut_tac capRange_c') + apply blast + apply simp + done + +lemma notZomb: + "\ isZombie src_cap" "\ isZombie c'" + using sameRegion_src simple + by (auto simp: isCap_simps sameRegionAs_def3 + simp del: sameRegion_src, + auto simp: is_simple_cap'_def isCap_simps) + +lemma notArchPage: + "\ isArchFrameCap c'" + using simple + by (clarsimp simp: isCap_simps is_simple_cap'_def) + +lemma distinct_zombies[simp]: + "distinct_zombies n'" 
+ using distinct_zombies_m + apply (simp add: n'_def distinct_zombies_nonCTE_modify_map) + apply (simp add: n_def modify_map_apply src dest) + apply (rule distinct_zombies_sameE[rotated]) + apply (simp add: src) + apply simp+ + apply (cases "isUntypedCap src_cap") + apply (erule distinct_zombies_seperateE) + apply (case_tac "y = src") + apply (clarsimp simp add: src) + apply (frule(3) untyped_rangefree) + apply (simp add: capRange_def) + apply (rule sameRegionAsE [OF sameRegion_src], simp_all) + apply (erule distinct_zombies_copyMasterE, rule src) + apply simp + apply (simp add: notZomb) + apply (simp add: notArchPage) + apply (clarsimp simp: isCap_simps) + apply (erule distinct_zombies_sameMasterE, rule dest) + apply (clarsimp simp: isCap_simps) + done + +lemma irq' [simp]: + "irq_control n'" using simple + apply (clarsimp simp: irq_control_def) + apply (frule n'_cap) + apply (drule n'_rev) + apply (clarsimp split: if_split_asm) + apply (simp add: is_simple_cap'_def) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n'_cap) + apply (clarsimp split: if_split_asm) + apply (erule disjE) + apply (clarsimp simp: is_simple_cap'_def) + apply (erule (1) irq_controlD, rule irq_control) + done + +lemma reply_masters_rvk_fb: + "reply_masters_rvk_fb m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma reply_masters_rvk_fb' [simp]: + "reply_masters_rvk_fb n'" + using reply_masters_rvk_fb simple + apply (simp add: reply_masters_rvk_fb_def n'_def + n_def ball_ran_modify_map_eq) + apply (subst ball_ran_modify_map_eq) + apply (clarsimp simp: modify_map_def m_p is_simple_cap'_def) + apply (simp add: ball_ran_modify_map_eq m_p is_simple_cap'_def + dest_cap isCap_simps) + done + +lemma mdb: + "valid_mdb_ctes n'" + by (simp add: valid_mdb_ctes_def no_0_n' chain_n') + +end + +lemma updateCapFreeIndex_no_0: + assumes preserve:"\m m'. mdb_inv_preserve m m' + \ mdb_inv_preserve (Q m) (Q m')" + shows + "\\s. P (no_0(Q (ctes_of s))) \ cte_wp_at' (\c. c = srcCTE \ isUntypedCap (cteCap c)) src s\ + updateCap src (capFreeIndex_update (\_. index) (cteCap srcCTE)) + \\r s. P (no_0 (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) (cteCap srcCTE)))))") + apply (drule mdb_inv_preserve.by_products) + apply simp + apply (rule preserve) + apply (simp add:cte_wp_at_ctes_of)+ + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ +done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cteInsert_simple_mdb': + "\valid_mdb' and pspace_aligned' and pspace_distinct' and (\s. src \ dest) and K (capAligned cap) and + (\s. safe_parent_for' (ctes_of s) src cap) and K (is_simple_cap' cap) \ + cteInsert cap src dest + \\_. 
valid_mdb'\" + unfolding cteInsert_def valid_mdb'_def + apply simp + apply (rule hoare_name_pre_state) + apply (rule hoare_pre) + apply (wp updateCap_ctes_of_wp getCTE_wp' setUntypedCapAsFull_ctes + mdb_inv_preserve_updateCap mdb_inv_preserve_modify_map | clarsimp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp simp: valid_mdb_ctes_def) + apply (case_tac cte) + apply (rename_tac src_cap src_node) + apply (case_tac ctea) + apply (rename_tac dest_cap dest_node) + apply clarsimp + apply (subst modify_map_eq) + apply simp+ + apply (clarsimp simp:maskedAsFull_def is_simple_cap'_def) + apply (subgoal_tac "mdb_insert_simple' + (ctes_of sa) src src_cap src_node dest NullCap dest_node cap") + prefer 2 + apply (intro mdb_insert_simple'.intro + mdb_insert_simple.intro mdb_insert_simple_axioms.intro + mdb_ptr.intro mdb_insert.intro vmdb.intro + mdb_ptr_axioms.intro mdb_insert_axioms.intro) + apply (simp add:modify_map_def valid_mdb_ctes_maskedAsFull)+ + apply (clarsimp simp:nullPointer_def)+ + apply ((clarsimp simp:valid_mdb_ctes_def)+) + apply (drule mdb_insert_simple'.mdb) + apply (clarsimp simp:valid_mdb_ctes_def) + done + +lemma cteInsert_valid_globals_simple: + "\valid_global_refs' and (\s. safe_parent_for' (ctes_of s) src cap)\ + cteInsert cap src dest + \\rv. valid_global_refs'\" + apply (simp add: cteInsert_def) + apply (rule hoare_pre) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) safe_parent_for_capRange_capBits) + apply (drule (1) valid_global_refsD_with_objSize) + apply (auto elim: order_trans[rotated]) + done + +lemma cteInsert_simple_invs: + "\invs' and cte_wp_at' (\c. cteCap c=NullCap) dest and valid_cap' cap and + (\s. src \ dest) and (\s. safe_parent_for' (ctes_of s) src cap) + and (\s. \irq. cap = IRQHandlerCap irq \ irq_issued' irq s) + and cte_at' src + and ex_cte_cap_to' dest and K (is_simple_cap' cap)\ + cteInsert cap src dest + \\rv. invs'\" + apply (rule hoare_pre) + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wp cur_tcb_lift sch_act_wf_lift valid_queues_lift tcb_in_cur_domain'_lift + valid_irq_node_lift valid_queues_lift' irqs_masked_lift + cteInsert_simple_mdb' cteInsert_valid_globals_simple + cteInsert_norq | simp add: pred_tcb_at'_def)+ + apply (auto simp: invs'_def valid_state'_def valid_pspace'_def + is_simple_cap'_def untyped_derived_eq_def o_def + elim: valid_capAligned) + done + +lemma ensureEmptySlot_stronger [wp]: + "\\s. cte_wp_at' (\c. cteCap c = NullCap) p s \ P s\ ensureEmptySlot p \\rv. P\, -" + apply (simp add: ensureEmptySlot_def whenE_def unlessE_whenE) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at'_def) + done + +lemma lookupSlotForCNodeOp_real_cte_at'[wp]: + "\valid_objs' and valid_cap' rootCap\ + lookupSlotForCNodeOp isSrc rootCap cref depth + \\rv. real_cte_at' rv\,-" + apply (simp add: lookupSlotForCNodeOp_def split_def unlessE_def + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp resolveAddressBits_real_cte_at' | simp | wp (once) hoare_drop_imps)+ + done + +lemma cte_refs_maskCapRights[simp]: + "cte_refs' (maskCapRights rghts cap) = cte_refs' cap" + by (rule ext, cases cap, + simp_all add: maskCapRights_def isCap_defs Let_def + AARCH64_H.maskCapRights_def + split del: if_split + split: arch_capability.split) + +lemma getSlotCap_cap_to'[wp]: + "\\\ getSlotCap cp \\rv s. \r\cte_refs' rv (irq_node' s). 
ex_cte_cap_to' r s\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (fastforce simp: cte_wp_at_ctes_of ex_cte_cap_to'_def) + done + +lemma getSlotCap_cap_to2: + "\\ and K (\cap. P cap \ Q cap)\ + getSlotCap slot + \\rv s. P rv \ (\x \ cte_refs' rv (irq_node' s). ex_cte_cap_wp_to' Q x s)\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of ex_cte_cap_wp_to'_def) + apply fastforce + done + +lemma locateSlot_cap_to'[wp]: + "\\s. isCNodeCap cap \ (\r \ cte_refs' cap (irq_node' s). ex_cte_cap_wp_to' P r s)\ + locateSlotCNode (capCNodePtr cap) n (v && mask (capCNodeBits cap)) + \ex_cte_cap_wp_to' P\" + apply (simp add: locateSlot_conv) + apply wp + apply (clarsimp dest!: isCapDs valid_capAligned + simp: objBits_simps' mult.commute capAligned_def cte_level_bits_def shiftl_t2n) + apply (erule bspec) + apply (clarsimp intro!: word_and_le1) + done + +lemma rab_cap_to'': + assumes P: "\cap. isCNodeCap cap \ P cap" + shows + "s \ \\s. isCNodeCap cap \ (\r\cte_refs' cap (irq_node' s). ex_cte_cap_wp_to' P r s)\ + resolveAddressBits cap cref depth + \\rv s. ex_cte_cap_wp_to' P (fst rv) s\,\\\\" +proof (induct arbitrary: s rule: resolveAddressBits.induct) + case (1 cap fn cref depth) + show ?case + apply (subst resolveAddressBits.simps) + apply (simp add: Let_def split_def cap_case_CNodeCap[unfolded isCap_simps] + split del: if_split cong: if_cong) + apply (rule hoare_pre_spec_validE) + apply ((elim exE | wp (once) spec_strengthen_postE[OF "1.hyps"])+, + (rule refl conjI | simp add: in_monad split del: if_split del: cte_refs'.simps)+) + apply (wp getSlotCap_cap_to2 + | simp add: assertE_def split_def whenE_def locateSlotCap_def + split del: if_split | simp add: imp_conjL[symmetric] + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: P) + done +qed + +lemma rab_cap_to'[wp]: + "\(\s. isCNodeCap cap \ (\r\cte_refs' cap (irq_node' s). ex_cte_cap_wp_to' P r s)) + and K (\cap. isCNodeCap cap \ P cap)\ + resolveAddressBits cap cref depth + \\rv s. ex_cte_cap_wp_to' P (fst rv) s\,-" + apply (rule hoare_gen_asmE) + apply (unfold validE_R_def) + apply (rule use_spec, rule rab_cap_to'') + apply simp + done + +lemma lookupCNode_cap_to'[wp]: + "\\s. \r\cte_refs' rootCap (irq_node' s). ex_cte_cap_to' r s\ + lookupSlotForCNodeOp isSrc rootCap cref depth + \\p. ex_cte_cap_to' p\,-" + apply (simp add: lookupSlotForCNodeOp_def Let_def split_def unlessE_def + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps | simp)+ + done + +lemma badge_derived'_refl[simp]: "badge_derived' c c" + by (simp add: badge_derived'_def) + +lemma derived'_not_Null: + "\ is_derived' m p c capability.NullCap" + "\ is_derived' m p capability.NullCap c" + by (clarsimp simp: is_derived'_def badge_derived'_def)+ + +lemma getSlotCap_wp: + "\\s. (\cap. cte_wp_at' (\c. cteCap c = cap) p s \ Q cap s)\ + getSlotCap p \Q\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at'_def) + done + +lemma storeWordUser_typ_at' : + "\\s. P (typ_at' T p s)\ storeWordUser v w \\_ s. P (typ_at' T p s)\" + unfolding storeWordUser_def by wpsimp + +lemma arch_update_updateCap_invs: + "\cte_wp_at' (is_arch_update' cap) p and invs' and valid_cap' cap\ + updateCap p cap + \\_. 
invs'\" + apply (simp add: updateCap_def) + apply (wp arch_update_setCTE_invs getCTE_wp') + apply clarsimp + done + +lemma updateCap_same_master: + "\ cap_relation cap cap' \ \ + corres dc (valid_objs and pspace_aligned and pspace_distinct and + cte_wp_at (\c. cap_master_cap c = cap_master_cap cap \ + \is_reply_cap c \ \is_master_reply_cap c \ + \is_ep_cap c \ \is_ntfn_cap c) slot) + (pspace_aligned' and pspace_distinct' and cte_at' (cte_map slot)) + (set_cap cap slot) + (updateCap (cte_map slot) cap')" (is "_ \ corres _ ?P ?P' _ _") + apply (unfold updateCap_def) + apply (rule corres_guard_imp) + apply (rule_tac Q="?P" and R'="\cte. ?P' and (\s. ctes_of s (cte_map slot) = Some cte)" + in corres_symb_exec_r_conj) + apply (rule corres_stronger_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply clarsimp + apply (clarsimp simp add: state_relation_def) + apply (drule (1) pspace_relationsD) + apply (frule (4) set_cap_not_quite_corres_prequel) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption + apply assumption + apply simp + apply (rule refl) + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply (clarsimp simp: pspace_relations_def) + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + apply (frule setCTE_pspace_only) + apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + apply (rule conjI) + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv data_at_def) + apply (intro allI conjI) + apply (frule use_valid[OF _ setCTE_gsUserPages]) + prefer 2 + apply simp+ + apply (frule use_valid[OF _ setCTE_gsCNodes]) + prefer 2 + apply simp+ + apply (rule use_valid[OF _ setCTE_arch]) + prefer 2 + apply simp+ + apply (subst conj_assoc[symmetric]) + apply (rule conjI) + prefer 2 + apply (rule conjI) + prefer 2 + apply (frule setCTE_pspace_only) + apply clarsimp + apply (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def + split: if_split_asm Structures_A.kernel_object.splits) + apply (frule set_cap_caps_of_state_monad) + apply (drule is_original_cap_set_cap) + apply clarsimp + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (clarsimp simp: revokable_relation_def simp del: fun_upd_apply) + apply (clarsimp split: if_split_asm) + apply (drule cte_map_inj_eq) + prefer 2 + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (simp add: null_filter_def split: if_split_asm) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule caps_of_state_cte_at) + apply fastforce + apply fastforce + apply fastforce + apply clarsimp + apply (simp add: null_filter_def split: if_split_asm) + apply (erule_tac x=aa in allE, erule_tac x=bb in allE) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule disjE) + apply (clarsimp simp: cap_master_cap_simps dest!: cap_master_cap_eqDs) + apply (case_tac rv) + apply clarsimp + apply (subgoal_tac "(aa,bb) \ slot") + prefer 2 + apply clarsimp + apply (simp add: null_filter_def cte_wp_at_caps_of_state split: if_split_asm) + apply (clarsimp simp: cdt_relation_def) + apply (frule set_cap_caps_of_state_monad) + apply (frule mdb_set_cap, frule exst_set_cap) + apply clarsimp + apply (erule use_valid [OF _ setCTE_ctes_of_wp]) + apply (frule cte_wp_at_norm) + apply (clarsimp simp del: fun_upd_apply) + apply (frule (1) pspace_relation_ctes_ofI) + apply fastforce + apply fastforce + apply (clarsimp simp del: fun_upd_apply) + apply (subst same_master_descendants) + apply 
assumption + apply (clarsimp simp: master_cap_relation) + apply (frule_tac d=c in master_cap_relation [symmetric], assumption) + apply (frule is_reply_cap_relation[symmetric], + drule is_reply_master_relation[symmetric])+ + apply simp + apply (drule masterCap.intro) + apply (drule masterCap.isReplyCap) + apply simp + apply (drule is_ep_cap_relation)+ + apply (drule master_cap_ep) + apply simp + apply (drule is_ntfn_cap_relation)+ + apply (drule master_cap_ntfn) + apply simp + apply (simp add: in_set_cap_cte_at) + apply(simp add: cdt_list_relation_def split del: if_split) + apply(intro allI impI) + apply(erule_tac x=aa in allE)+ + apply(erule_tac x=bb in allE)+ + apply(clarsimp split: if_split_asm) + apply(case_tac rv, clarsimp) + apply (wp getCTE_wp')+ + apply clarsimp + apply (rule no_fail_pre, wp) + apply clarsimp + apply assumption + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma updateCapFreeIndex_valid_mdb_ctes: + assumes preserve:"\m m'. mdb_inv_preserve m m' \ mdb_inv_preserve (Q m) (Q m')" + and coin :"\m cte. \m src = Some cte\ \ (\cte'. (Q m) src = Some cte' \ cteCap cte = cteCap cte')" + and assoc :"\m f. Q (modify_map m src (cteCap_update f)) = modify_map (Q m) src (cteCap_update f)" + shows + "\\s. usableUntypedRange (capFreeIndex_update (\_. index) cap) \ usableUntypedRange cap \ isUntypedCap cap + \ valid_mdb_ctes (Q (ctes_of s)) \ cte_wp_at' (\c. cteCap c = cap) src s\ + updateCap src (capFreeIndex_update (\_. index) cap) + \\r s. (valid_mdb_ctes (Q (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply (subgoal_tac "mdb_inv_preserve (Q (ctes_of s)) (Q (modify_map (ctes_of s) src + (cteCap_update (\_. capFreeIndex_update (\_. index) cap))))") + apply (clarsimp simp:valid_mdb_ctes_def) + apply (intro conjI) + apply ((simp add:mdb_inv_preserve.preserve_stuff mdb_inv_preserve.by_products)+)[7] + apply (rule mdb_inv_preserve.untyped_inc') + apply assumption + apply (clarsimp simp:assoc cte_wp_at_ctes_of) + apply (clarsimp simp:modify_map_def split:if_splits) + apply (drule coin) + apply clarsimp + apply (erule(1) subsetD) + apply simp + apply (simp_all add:mdb_inv_preserve.preserve_stuff mdb_inv_preserve.by_products) + apply (rule preserve) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (rule mdb_inv_preserve_updateCap) + apply (clarsimp simp:cte_wp_at_ctes_of)+ + done + +lemma usableUntypedRange_mono1: + "is_aligned ptr sz \ idx \ 2 ^ sz \ idx' \ 2 ^ sz + \ sz < word_bits + \ idx \ idx' + \ usableUntypedRange (UntypedCap dev ptr sz idx) + \ usableUntypedRange (UntypedCap dev' ptr sz idx')" + apply clarsimp + apply (rule word_plus_mono_right) + apply (rule of_nat_mono_maybe_le[THEN iffD1]) + apply (subst word_bits_def[symmetric]) + apply (erule less_le_trans[OF _ power_increasing]) + apply simp + apply simp + apply (subst word_bits_def[symmetric]) + apply (erule le_less_trans) + apply (erule less_le_trans[OF _ power_increasing]) + apply simp+ + apply (erule is_aligned_no_wrap') + apply (rule word_of_nat_less) + apply simp + done + +lemma usableUntypedRange_mono2: + "isUntypedCap cap + \ isUntypedCap cap' + \ capAligned cap \ capFreeIndex cap \ 2 ^ capBlockSize cap + \ capFreeIndex cap' \ 2 ^ capBlockSize cap' + \ capFreeIndex cap \ capFreeIndex cap' + \ capPtr cap' = capPtr cap + \ capBlockSize cap' = capBlockSize cap + \ usableUntypedRange cap \ usableUntypedRange cap'" + apply (clarsimp simp only: isCap_simps capability.sel del: subsetI) + apply (rule usableUntypedRange_mono1, auto simp: capAligned_def) + done + +lemma ctes_of_cte_wpD: + 
"ctes_of s p = Some cte \ cte_wp_at' ((=) cte) p s" + by (simp add: cte_wp_at_ctes_of) + +lemma updateFreeIndex_forward_valid_objs': + "\\s. valid_objs' s \ cte_wp_at' ((\cap. isUntypedCap cap + \ capFreeIndex cap \ idx \ idx \ 2 ^ capBlockSize cap + \ is_aligned (of_nat idx :: machine_word) minUntypedSizeBits) o cteCap) src s\ + updateFreeIndex src idx + \\r s. valid_objs' s\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def updateCap_def getSlotCap_def) + apply (wp getCTE_wp') + apply clarsimp + apply (frule(1) CSpace1_R.ctes_of_valid) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps capAligned_def + valid_cap_simps' is_aligned_weaken[OF is_aligned_triv]) + apply (clarsimp simp add: valid_untyped'_def + simp del: usableUntypedRange.simps) + apply (erule allE, erule notE, erule ko_wp_at'_weakenE) + apply (rule disjCI2, simp only: simp_thms) + apply (rule notI, erule notE, erule disjoint_subset2[rotated]) + apply (rule usableUntypedRange_mono1, simp_all) + done + +crunch pspace_aligned'[wp]: updateFreeIndex "pspace_aligned'" +crunch pspace_distinct'[wp]: updateFreeIndex "pspace_distinct'" +crunch no_0_obj[wp]: updateFreeIndex "no_0_obj'" + +lemma updateFreeIndex_forward_valid_mdb': + "\\s. valid_mdb' s \ valid_objs' s \ cte_wp_at' ((\cap. isUntypedCap cap + \ capFreeIndex cap \ idx \ idx \ 2 ^ capBlockSize cap) o cteCap) src s\ + updateFreeIndex src idx + \\r s. valid_mdb' s\" + apply (simp add: valid_mdb'_def updateFreeIndex_def + updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateCapFreeIndex_valid_mdb_ctes getCTE_wp' | simp)+ + apply clarsimp + apply (frule(1) CSpace1_R.ctes_of_valid) + apply (clarsimp simp: cte_wp_at_ctes_of del: subsetI) + apply (rule usableUntypedRange_mono2, + auto simp add: isCap_simps valid_cap_simps' capAligned_def) + done + +lemma updateFreeIndex_forward_invs': + "\\s. invs' s \ cte_wp_at' ((\cap. isUntypedCap cap + \ capFreeIndex cap \ idx \ idx \ 2 ^ capBlockSize cap + \ is_aligned (of_nat idx :: machine_word) minUntypedSizeBits) o cteCap) src s\ + updateFreeIndex src idx + \\r s. 
invs' s\" + apply (clarsimp simp:invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule hoare_vcg_conj_lift) + apply (simp add: valid_pspace'_def, wp updateFreeIndex_forward_valid_objs' + updateFreeIndex_forward_valid_mdb') + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def) + apply (wp sch_act_wf_lift valid_queues_lift updateCap_iflive' tcb_in_cur_domain'_lift + | simp add: pred_tcb_at'_def)+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: ifunsafe'_def3 cteInsert_def setUntypedCapAsFull_def + split del: if_split) + apply wp+ + apply (wp valid_irq_node_lift) + apply (rule hoare_vcg_conj_lift) + apply (simp add:updateCap_def) + apply (wp setCTE_irq_handlers' getCTE_wp) + apply (simp add:updateCap_def) + apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp + | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] + | simp add: getSlotCap_def)+ + apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) + apply (clarsimp simp: isCap_simps valid_pspace'_def) + apply (frule(1) valid_global_refsD_with_objSize) + apply clarsimp + apply (intro conjI allI impI) + apply (clarsimp simp: modify_map_def cteCaps_of_def ifunsafe'_def3 split:if_splits) + apply (drule_tac x=src in spec) + apply (clarsimp simp:isCap_simps) + apply (rule_tac x = cref' in exI) + apply clarsimp + apply (drule_tac x = cref in spec) + apply clarsimp + apply (rule_tac x = cref' in exI) + apply clarsimp + apply (erule untyped_ranges_zero_fun_upd, simp_all) + apply (clarsimp simp: untypedZeroRange_def cteCaps_of_def isCap_simps) + done + +lemma no_fail_getSlotCap: + "no_fail (cte_at' p) (getSlotCap p)" + apply (rule no_fail_pre) + apply (simp add: getSlotCap_def | wp)+ + done + +end +end diff --git a/proof/refine/AARCH64/Corres.thy b/proof/refine/AARCH64/Corres.thy new file mode 100644 index 0000000000..01c1985822 --- /dev/null +++ b/proof/refine/AARCH64/Corres.thy @@ -0,0 +1,15 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Corres +imports StateRelation "CorresK.CorresK_Lemmas" +begin + +text \Instantiating the corres framework to this particular state relation.\ +abbreviation + "corres \ corres_underlying state_relation False True" + +end diff --git a/proof/refine/AARCH64/Detype_R.thy b/proof/refine/AARCH64/Detype_R.thy new file mode 100644 index 0000000000..0a4130fecd --- /dev/null +++ b/proof/refine/AARCH64/Detype_R.thy @@ -0,0 +1,5003 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Detype_R +imports Retype_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +text \Establishing that the invariants are maintained + when a region of memory is detyped, that is, + removed from the model.\ + +definition + "descendants_range_in' S p \ + \m. \p' \ descendants_of' p m. \c n. m p' = Some (CTE c n) \ capRange c \ S = {}" + +lemma null_filter_simp'[simp]: + "null_filter' (null_filter' x) = null_filter' x" + apply (rule ext) + apply (auto simp:null_filter'_def split:if_splits) + done + +lemma descendants_range_in'_def2: + "descendants_range_in' S p = (\m. \p'\descendants_of' p (null_filter' m). + \c n. 
(null_filter' m) p' = Some (CTE c n) \ capRange c \ S = {})" + apply (clarsimp simp:descendants_range_in'_def + split:if_splits) + apply (rule ext) + apply (rule subst[OF null_filter_descendants_of']) + apply simp + apply (rule iffI) + apply (clarsimp simp:null_filter'_def)+ + apply (drule(1) bspec) + apply (elim allE impE ballE) + apply (rule ccontr) + apply (clarsimp split:if_splits simp:descendants_of'_def) + apply (erule(1) subtree_not_Null) + apply fastforce + apply simp + done + +definition + "descendants_range' cap p \ + \m. \p' \ descendants_of' p m. \c n. m p' = Some (CTE c n) \ capRange c \ capRange cap = {}" + +lemma descendants_rangeD': + "\ descendants_range' cap p m; m \ p \ p'; m p' = Some (CTE c n) \ + \ capRange c \ capRange cap = {}" + by (simp add: descendants_range'_def descendants_of'_def) + +lemma descendants_range_in_lift': + assumes st: "\P. \\s. Q s \ P ((swp descendants_of') (null_filter' (ctes_of s)))\ + f \\r s. P ((swp descendants_of') (null_filter' (ctes_of s)))\" + assumes cap_range: + "\P p. \\s. Q' s \ cte_wp_at' (\c. P (capRange (cteCap c))) p s\ f \\r s. cte_wp_at' (\c. P (capRange (cteCap c))) p s\" + shows "\\s. Q s \ Q' s \ descendants_range_in' S slot (ctes_of s)\ f \\r s. descendants_range_in' S slot (ctes_of s)\" + apply (clarsimp simp:descendants_range_in'_def2) + apply (subst swp_def[where f = descendants_of', THEN meta_eq_to_obj_eq, + THEN fun_cong, THEN fun_cong, symmetric])+ + apply (simp only: Ball_def[unfolded imp_conv_disj]) + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift st cap_range) + apply (rule_tac Q = "\r s. cte_wp_at' (\c. capRange (cteCap c) \ S = {}) x s" + in hoare_strengthen_post) + apply (wp cap_range) + apply (clarsimp simp:cte_wp_at_ctes_of null_filter'_def) + apply clarsimp + apply (drule spec, drule(1) mp) + apply (subst (asm) null_filter_descendants_of') + apply simp + apply (case_tac "(ctes_of s) x") + apply (clarsimp simp:descendants_of'_def null_filter'_def subtree_target_Some) + apply (case_tac a) + apply (clarsimp simp:cte_wp_at_ctes_of null_filter'_def split:if_splits) + done + +lemma descendants_range_inD': + "\descendants_range_in' S p ms; p'\descendants_of' p ms; ms p' = Some cte\ + \ capRange (cteCap cte) \ S = {}" + apply (case_tac cte) + apply (auto simp:descendants_range_in'_def cte_wp_at_ctes_of dest!:bspec) + done +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma descendants_range'_def2: + "descendants_range' cap p = descendants_range_in' (capRange cap) p" + by (simp add: descendants_range_in'_def descendants_range'_def) + + +defs deletionIsSafe_def: + "deletionIsSafe \ \ptr bits s. \p t m r. + (cte_wp_at' (\cte. cteCap cte = capability.ReplyCap t m r) p s \ + t \ mask_range ptr bits) \ + (\ko. ksPSpace s p = Some (KOArch ko) \ p \ mask_range ptr bits \ 6 \ bits)" + +defs ksASIDMapSafe_def: + "ksASIDMapSafe \ \s. True" + +defs cNodePartialOverlap_def: + "cNodePartialOverlap \ \cns inRange. \p n. cns p = Some n + \ (\ is_aligned p (cte_level_bits + n) + \ cte_level_bits + n \ word_bits + \ (\ mask_range p (cte_level_bits + n) \ {p. inRange p} + \ \ mask_range p (cte_level_bits + n) \ {p. \ inRange p}))" + + +(* FIXME: move *) +lemma deleteObjects_def2: + "is_aligned ptr bits \ + deleteObjects ptr bits = do + stateAssert (deletionIsSafe ptr bits) []; + doMachineOp (freeMemory ptr bits); + stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ mask_range ptr bits)) []; + modify (\s. s \ ksPSpace := \x. 
if x \ mask_range ptr bits + then None else ksPSpace s x, + gsUserPages := \x. if x \ mask_range ptr bits + then None else gsUserPages s x, + gsCNodes := \x. if x \ mask_range ptr bits + then None else gsCNodes s x, + ksArchState := gsPTTypes_update (\_ x. if x \ mask_range ptr bits + then Nothing + else gsPTTypes (ksArchState s) x) + (ksArchState s)\); + stateAssert ksASIDMapSafe [] + od" + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def o_def) + apply (rule bind_eqI, rule ext) + apply (rule bind_eqI, rule ext) + apply (simp add: bind_assoc[symmetric]) + apply (rule bind_cong[rotated], rule refl) + apply (simp add: bind_assoc modify_modify deleteRange_def gets_modify_def) + apply (rule ext, simp add: exec_modify stateAssert_def assert_def bind_assoc exec_get + NOT_eq[symmetric] neg_mask_in_mask_range) + apply (clarsimp simp: simpler_modify_def) + apply (simp add: data_map_filterWithKey_def split: if_split_asm) + apply (rule arg_cong2[where f=ksArchState_update]) + apply (rule ext) + apply clarsimp + apply (rename_tac s, case_tac s, clarsimp) + apply (rename_tac ksArch ksMachine, case_tac ksArch, clarsimp) + apply (simp add: NOT_eq[symmetric] mask_in_range ext) + apply (rule arg_cong2[where f=gsCNodes_update]) + apply (simp add: NOT_eq[symmetric] mask_in_range ext) + apply (rule arg_cong2[where f=gsUserPages_update]) + apply (simp add: NOT_eq[symmetric] mask_in_range ext) + apply (rule arg_cong[where f="\f. ksPSpace_update f s" for s]) + apply (simp add: NOT_eq[symmetric] mask_in_range ext split: option.split) + done + +lemma deleteObjects_def3: + "deleteObjects ptr bits = + do + assert (is_aligned ptr bits); + stateAssert (deletionIsSafe ptr bits) []; + doMachineOp (freeMemory ptr bits); + stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) (\x. x \ mask_range ptr bits)) []; + modify (\s. s \ ksPSpace := \x. if x \ mask_range ptr bits + then None else ksPSpace s x, + gsUserPages := \x. if x \ mask_range ptr bits + then None else gsUserPages s x, + gsCNodes := \x. if x \ mask_range ptr bits + then None else gsCNodes s x, + ksArchState := gsPTTypes_update (\_ x. 
if x \ mask_range ptr bits + then Nothing + else gsPTTypes (ksArchState s) x) + (ksArchState s) \); + stateAssert ksASIDMapSafe [] + od" + apply (cases "is_aligned ptr bits") + apply (simp add: deleteObjects_def2) + apply (simp add: deleteObjects_def is_aligned_mask + unless_def alignError_def) + done + +lemma obj_relation_cuts_in_obj_range: + "\ (y, P) \ obj_relation_cuts ko x; x \ obj_range x ko; + kheap s x = Some ko; valid_objs s; pspace_aligned s \ \ y \ obj_range x ko" + apply (cases ko; simp) + apply (clarsimp split: if_split_asm) + apply (subgoal_tac "cte_at (x, ya) s") + apply (drule(2) cte_at_cte_map_in_obj_bits) + apply (simp add: obj_range_def) + apply (fastforce intro: cte_wp_at_cteI) + apply (frule(1) pspace_alignedD) + apply (frule valid_obj_sizes, erule ranI) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp) + apply (clarsimp simp only: obj_range_def atLeastAtMost_iff + obj_bits.simps arch_kobj_size.simps) + apply (rule context_conjI) + apply (erule is_aligned_no_wrap') + apply (simp add: table_size_def) + apply (rule shiftl_less_t2n) + apply (erule order_le_less_trans) + apply (simp add: bit_simps mask_def) + apply (simp add: bit_simps) + apply (subst add_diff_eq[symmetric]) + apply (rule word_plus_mono_right) + apply (subst word_less_sub_le, simp add: bit_simps) + apply (rule shiftl_less_t2n) + apply (erule order_le_less_trans) + apply (simp add: bit_simps mask_def) + apply (simp add: bit_simps) + apply (simp add: field_simps) + apply (clarsimp simp only: obj_range_def atLeastAtMost_iff) + apply (rule conjI) + apply (erule is_aligned_no_wrap') + apply (simp add: shiftl_t2n mult_ac) + apply (erule word_less_power_trans2) + apply (rule pbfs_atleast_pageBits) + using pbfs_less_wb' + apply (simp add: word_bits_def) + apply (subst add_diff_eq[symmetric]) + apply (rule word_plus_mono_right; simp add: add_diff_eq) + apply (simp add: shiftl_t2n mult_ac) + apply (rule word_less_power_trans2; (simp add: pbfs_atleast_pageBits)?) + using pbfs_less_wb' + apply (simp add: word_bits_def) + done + +lemma obj_relation_cuts_eqv_base_in_detype_range: + "\ (y, P) \ obj_relation_cuts ko x; kheap s x = Some ko; + valid_objs s; pspace_aligned s; + valid_untyped (cap.UntypedCap d base bits idx) s \ + \ (x \ mask_range base bits) = (y \ mask_range base bits)" + apply (simp add: valid_untyped_def mask_def add_diff_eq del: atLeastAtMost_iff) + apply (subgoal_tac "x \ obj_range x ko") + apply (subgoal_tac "y \ obj_range x ko") + apply blast + apply (erule(4) obj_relation_cuts_in_obj_range) + apply (simp add: obj_range_def) + apply (rule is_aligned_no_overflow) + apply (erule(1) pspace_alignedD) + done + +lemma detype_pspace_relation: + assumes psp: "pspace_relation (kheap s) (ksPSpace s')" + and bwb: "bits < word_bits" + and al: "is_aligned base bits" + and vs: "valid_pspace s" + and vu: "valid_untyped (cap.UntypedCap d base bits idx) s" + shows "pspace_relation (kheap (detype (mask_range base bits) s)) + (\x. if x \ mask_range base bits then None else ksPSpace s' x)" + (is "pspace_relation ?ps ?ps'") +proof - + let ?range = "mask_range base bits" + let ?ps'' = "(kheap s |` (-?range))" + + have pa: "pspace_aligned s" and vo: "valid_objs s" + using vs by (simp add: valid_pspace_def)+ + + have pspace: + "\x. \ x \ ?range; x \ dom (kheap s) \ \ ?ps x = kheap s x" + by (clarsimp simp add: detype_def field_simps) + + have pspace'': + "\x. 
\ x \ ?range; x \ dom (kheap s) \ \ ?ps'' x = kheap s x" + by (clarsimp simp add: detype_def) + + have psdom_pre: "dom ?ps = (dom (kheap s) - ?range)" + by (fastforce simp:field_simps) + + show ?thesis + unfolding pspace_relation_def + proof (intro conjI) + + have domeq': "dom (ksPSpace s') = pspace_dom (kheap s)" + using psp by (simp add: pspace_relation_def) + + note eqv_base_in = obj_relation_cuts_eqv_base_in_detype_range + [OF _ _ vo pa vu] + + note atLeastAtMost_iff[simp del] + show domeq: "pspace_dom ?ps = dom ?ps'" + apply (simp add: dom_if_None domeq') + apply (simp add: pspace_dom_def detype_def dom_if_None) + apply (intro set_eqI iffI, simp_all) + apply (clarsimp simp: eqv_base_in field_simps) + apply (rule rev_bexI, erule domI) + apply (simp add: image_def, erule rev_bexI, simp) + apply (elim exE bexE DiffE conjE domE) + apply (rule bexI, assumption) + apply (clarsimp simp add: eqv_base_in field_simps) + done + + show "\x\dom ?ps. + \(y, P)\obj_relation_cuts (the (?ps x)) x. + P (the (?ps x)) + (the (if y \ ?range then None else ksPSpace s' y))" + using psp + apply (simp add: pspace_relation_def psdom_pre split del: if_split) + apply (erule conjE, rule ballI, erule DiffE, drule(1) bspec) + apply (erule domE) + apply (simp add: field_simps detype_def cong: conj_cong) + apply (erule ballEI, clarsimp) + apply (simp add: eqv_base_in) + done + qed +qed + +declare plus_Collect_helper2[simp] + +lemma cte_map_obj_range_helper: + "\ cte_at cref s; pspace_aligned s; valid_objs s \ + \ \ko. kheap s (fst cref) = Some ko \ cte_map cref \ obj_range (fst cref) ko" + apply (drule(2) cte_at_cte_map_in_obj_bits) + apply (clarsimp simp: obj_range_def) + done + +lemma cte_map_untyped_range: + "\ s \ cap; cte_at cref s; pspace_aligned s; valid_objs s \ + \ (cte_map cref \ untyped_range cap) = (fst cref \ untyped_range cap)" + apply (cases cap, simp_all) + apply (drule(2) cte_map_obj_range_helper) + apply (clarsimp simp: valid_cap_def valid_untyped_def) + apply (elim allE, drule(1) mp) + apply (rule iffI) + apply (erule impE) + apply (rule notemptyI[where x="cte_map cref"]) + apply simp + apply clarsimp + apply (drule subsetD [OF _ p_in_obj_range]) + apply simp+ + apply (erule impE) + apply (rule notemptyI[where x="fst cref"]) + apply (simp add: p_in_obj_range) + apply clarsimp + apply (drule(1) subsetD) + apply simp + done + +lemma pspace_aligned'_cut: + "pspace_aligned' s \ + pspace_aligned' (s \ ksPSpace := \x. if P x then None else ksPSpace s x\)" + by (simp add: pspace_aligned'_def dom_if_None) + +lemma pspace_distinct'_cut: + "pspace_distinct' s \ + pspace_distinct' (s \ ksPSpace := \x. if P x then None else ksPSpace s x\)" + by (simp add: pspace_distinct'_def dom_if_None ps_clear_def Diff_Int_distrib) + +lemma ko_wp_at_delete': + "pspace_distinct' s \ + ko_wp_at' P p (s \ ksPSpace := \x. if base \ x \ x \ base + mask magnitude then None else ksPSpace s x \) + = (\ (base \ p \ p \ base + mask magnitude) \ ko_wp_at' P p s)" + apply (simp add: ko_wp_at'_def ps_clear_def dom_if_None) + apply (intro impI iffI) + apply clarsimp + apply (drule(1) pspace_distinctD') + apply (simp add: ps_clear_def) + apply (clarsimp simp: Diff_Int_distrib) + done + +lemma obj_at_delete': + "pspace_distinct' s \ + obj_at' P p (s \ ksPSpace := \x. 
if base \ x \ x \ base + mask magnitude then None else ksPSpace s x \) + = (\ (base \ p \ p \ base + mask magnitude) \ obj_at' P p s)" + unfolding obj_at'_real_def + by (rule ko_wp_at_delete') + +lemma cte_wp_at_delete': + "\ s \' UntypedCap d base magnitude idx; pspace_distinct' s \ \ + cte_wp_at' P p (s \ ksPSpace := \x. if base \ x \ x \ base + mask magnitude then None else ksPSpace s x \) + = (\ (base \ p \ p \ base + mask magnitude) \ cte_wp_at' P p s)" + apply (simp add: cte_wp_at_obj_cases' obj_at_delete') + apply (subgoal_tac "\Q n. obj_at' Q (p - n) s \ tcb_cte_cases n \ None \ + ((p - n) \ mask_range base magnitude) + = (p \ mask_range base magnitude)") + apply auto[1] + apply (clarsimp simp: obj_at'_real_def valid_cap'_def + valid_untyped'_def + simp del: atLeastAtMost_iff) + apply (drule_tac x="p - n" in spec) + apply (clarsimp simp: ko_wp_at'_def capAligned_def + simp del: atLeastAtMost_iff) + apply (thin_tac "is_aligned x minUntypedSizeBits" for x) + apply (drule(1) aligned_ranges_subset_or_disjoint) + apply (subgoal_tac "{p, p - n} \ obj_range' (p - n) (KOTCB obj)") + apply (clarsimp simp del: atLeastAtMost_iff + simp: field_simps objBits_simps obj_range'_def mask_def) + apply fastforce + apply (simp add: obj_range'_def neg_mask_in_mask_range[symmetric] + del: atLeastAtMost_iff) + apply (simp add: objBits_simps) + apply (frule(1) tcb_cte_cases_aligned_helpers) + apply simp + done + +lemma map_to_ctes_delete: + assumes vc: "s \' UntypedCap d base magnitude idx" + and vs: "pspace_distinct' s" + shows + "map_to_ctes (\x. if base \ x \ x \ base + mask magnitude then None else ksPSpace s x) + = (\x. if base \ x \ x \ base + mask magnitude then None else ctes_of s x)" + using cte_wp_at_delete' [where P="(=) cte" for cte, OF vc vs] + arg_cong [where f=Not, OF cte_wp_at_delete' [OF vc vs, where P="\"]] + apply (simp (no_asm_use) add: cte_wp_at_ctes_of) + apply (rule ext) + apply (case_tac "map_to_ctes (\x. if base \ x \ x \ base + mask magnitude then None else ksPSpace s x) x") + apply (fastforce split: if_split_asm) + apply simp + done + +lemma word_range_card: + "base \base + h \ card {base..base + (h::machine_word)} = (unat h) + 1" +proof (induct h rule: word_induct2) + case zero show ?case by simp +next + case (suc h) + have interval_plus_one_word32: + "\base ceil. \base \ ceil + 1;ceil \ ceil + 1\ \ + {base..ceil + 1} = {base .. ceil } \ {ceil + (1::machine_word)}" + by (auto intro:order_antisym simp:not_le inc_le) + show ?case + using suc plus_one_helper2[where n = h and x = h,simplified] + apply (subst add.commute[where a = 1]) + apply (subst add.assoc[symmetric]) + apply (subst interval_plus_one_word32) + apply (simp add: field_simps) + apply (subst add.assoc) + apply (rule word_plus_mono_right) + apply (simp add: field_simps) + apply (simp add: field_simps) + apply (subst card_Un_disjoint; simp) + apply (clarsimp simp: field_simps) + apply (subst suc) + apply (erule word_plus_mono_right2) + apply (simp add: field_simps) + apply simp + apply (simp add: unatSuc) + done +qed + +end +locale detype_locale' = detype_locale + constrains s::"det_state" + +lemma (in detype_locale') deletionIsSafe: + assumes sr: "(s, s') \ state_relation" + and cap: "cap = cap.UntypedCap d base magnitude idx" + and vs: "valid_pspace s" + and al: "is_aligned base magnitude" + and vu: "valid_untyped (cap.UntypedCap d base magnitude idx) s" + shows "deletionIsSafe base magnitude s'" +proof - + interpret Arch . 
(* FIXME: arch_split *) + note [simp del] = atLeastatMost_subset_iff atLeastLessThan_iff atLeastAtMost_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + have "\t m r. \ptr. cte_wp_at ((=) (cap.ReplyCap t m r)) ptr s + \ t \ mask_range base magnitude" + by (fastforce dest!: valid_cap2 simp: cap obj_reply_refs_def mask_def add_diff_eq) + hence "\ptr t m r. cte_wp_at ((=) (cap.ReplyCap t m r)) ptr s + \ t \ mask_range base magnitude" + by (fastforce simp del: split_paired_All) + hence "\t. t \ mask_range base magnitude \ + (\ptr m r. \ cte_wp_at ((=) (cap.ReplyCap t m r)) ptr s)" + by fastforce + hence cte: "\t. t \ mask_range base magnitude \ + (\ptr m r. \ cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) ptr s')" + unfolding deletionIsSafe_def + apply - + apply (erule allEI) + apply (rule impI, drule(1) mp) + apply (thin_tac "t \ S" for S) + apply (intro allI) + apply (clarsimp simp: cte_wp_at_neg2 cte_wp_at_ctes_of + simp del: split_paired_All) + apply (frule pspace_relation_cte_wp_atI [rotated]) + apply (rule invs_valid_objs [OF invs]) + apply (rule state_relation_pspace_relation [OF sr]) + apply (clarsimp simp: cte_wp_at_neg2 simp del: split_paired_All) + apply (drule_tac x="(a,b)" in spec) + apply (clarsimp simp: cte_wp_cte_at cte_wp_at_caps_of_state) + apply (case_tac c, simp_all) + apply fastforce + done + + have arch: + "\ ko p. \ ksPSpace s' p = Some (KOArch ko); p \ mask_range base magnitude \ \ 6 \ magnitude" + using sr vs vu + apply (clarsimp simp: state_relation_def) + apply (erule(1) pspace_dom_relatedE) + apply (frule obj_relation_cuts_eqv_base_in_detype_range[symmetric]) + apply simp + apply (clarsimp simp:valid_pspace_def)+ + apply simp + apply (clarsimp simp:valid_untyped_def add_mask_fold cong: if_cong) + apply (drule spec)+ + apply (erule(1) impE) + apply (erule impE) + apply (drule p_in_obj_range) + apply (clarsimp)+ + apply blast + apply clarsimp + apply (drule card_mono[rotated]) + apply fastforce + apply (clarsimp simp:valid_pspace_def obj_range_def p_assoc_help) + apply (subst (asm) word_range_card) + apply (rule is_aligned_no_overflow') + apply (erule(1) pspace_alignedD) + apply (subst (asm) word_range_card) + apply (rule is_aligned_no_overflow_mask[OF al]) + apply (rule ccontr) + apply (simp add:not_le) + apply (subgoal_tac "obj_bits koa < word_bits") + prefer 2 + apply (case_tac koa; simp add:objBits_simps word_bits_def) + apply (drule(1) valid_cs_size_objsI) + apply (clarsimp simp:valid_cs_size_def word_bits_def cte_level_bits_def) + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add:bit_simps word_bits_def) + apply (simp add:pageBitsForSize_def bit_simps split:vmpage_size.splits) + apply (subgoal_tac "6 \ obj_bits koa") + apply (simp add: unat_mask_word64 mask_2pm1[symmetric] le_diff_iff) + apply (case_tac koa, simp_all add: other_obj_relation_def + objBits_simps cte_relation_def + split: if_splits) + apply (rename_tac ako, + case_tac ako; + simp add: arch_kobj_size_def bit_simps pageBitsForSize_def + split: vmpage_size.splits) + apply (rename_tac ako, + case_tac ako; + simp add: arch_kobj_size_def bit_simps pageBitsForSize_def + split: vmpage_size.splits) + done + thus ?thesis using cte by (auto simp: deletionIsSafe_def) +qed + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +(* FIXME: generalizes lemma SubMonadLib.corres_submonad *) +(* FIXME: generalizes lemma SubMonad_R.corres_machine_op *) +(* FIXME: move *) +lemma corres_machine_op: + assumes P: "corres_underlying Id False True r P Q x x'" + shows "corres r (P \ machine_state) (Q \ ksMachineState) + (do_machine_op x) (doMachineOp x')" + apply (rule corres_submonad3 + [OF submonad_do_machine_op submonad_doMachineOp _ _ _ _ P]) + apply (simp_all add: state_relation_def swp_def) + done + +lemma ekheap_relation_detype: + "ekheap_relation ekh kh \ + ekheap_relation (\x. if P x then None else (ekh x)) (\x. if P x then None else (kh x))" + by (fastforce simp add: ekheap_relation_def split: if_split_asm) + +lemma cap_table_at_gsCNodes_eq: + "(s, s') \ state_relation + \ (gsCNodes s' ptr = Some bits) = cap_table_at bits ptr s" + apply (clarsimp simp: state_relation_def ghost_relation_def + obj_at_def is_cap_table) + apply (drule_tac x = ptr in spec)+ + apply (drule_tac x = bits in spec)+ + apply fastforce + done + +lemma cNodeNoPartialOverlap: + "corres dc (\s. \cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ cNodePartialOverlap (gsCNodes s) + (\x. base \ x \ x \ base + mask magnitude)) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: cNodePartialOverlap_def) + apply (drule(1) cte_wp_valid_cap) + apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq + obj_at_def is_cap_table) + apply (frule(1) pspace_alignedD) + apply (simp add: add_mask_fold) + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow_mask add_mask_fold) + apply (blast intro: order_trans) + apply (simp add: is_aligned_no_overflow_mask power_overflow word_bits_def) + apply wp+ + done + +declare wrap_ext_det_ext_ext_def[simp] + +lemma deleteObjects_corres: + "is_aligned base magnitude \ magnitude \ 3 \ + corres dc + (\s. einvs s + \ s \ (cap.UntypedCap d base magnitude idx) + \ (\cref. cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s + \ descendants_range (cap.UntypedCap d base magnitude idx) cref s) + \ untyped_children_in_mdb s \ if_unsafe_then_cap s + \ valid_mdb s \ valid_global_refs s \ ct_active s) + (\s. s \' (UntypedCap d base magnitude idx) + \ valid_pspace' s) + (delete_objects base magnitude) (deleteObjects base magnitude)" + apply (simp add: deleteObjects_def2) + apply (rule corres_stateAssert_implied[where P'=\, simplified]) + prefer 2 + apply clarsimp + apply (rule_tac cap="cap.UntypedCap d base magnitude idx" and ptr="(a,b)" and + s=s in detype_locale'.deletionIsSafe, + simp_all add: detype_locale'_def + detype_locale_def p_assoc_help invs_valid_pspace)[1] + apply (simp add:valid_cap_simps) + apply (simp add: bind_assoc[symmetric] ksASIDMapSafe_def) + apply (simp add: delete_objects_def) + apply (rule_tac Q="\_ s. valid_objs s \ valid_list s \ + (\cref. 
cte_wp_at ((=) (cap.UntypedCap d base magnitude idx)) cref s \ + descendants_range (cap.UntypedCap d base magnitude idx) cref s ) \ + s \ cap.UntypedCap d base magnitude idx \ pspace_aligned s \ + valid_mdb s \ pspace_distinct s \ if_live_then_nonz_cap s \ + zombies_final s \ sym_refs (state_refs_of s) \ sym_refs (state_hyp_refs_of s) \ + untyped_children_in_mdb s \ if_unsafe_then_cap s \ + valid_global_refs s" and + Q'="\_ s. s \' capability.UntypedCap d base magnitude idx \ + valid_pspace' s" in corres_underlying_split) + apply (rule corres_bind_return) + apply (rule corres_guard_imp[where r=dc]) + apply (rule corres_split[OF _ cNodeNoPartialOverlap]) + apply (rule corres_machine_op[OF corres_Id], simp+) + apply (rule no_fail_freeMemory, simp+) + apply (wp hoare_vcg_ex_lift)+ + apply auto[1] + apply (auto elim: is_aligned_weaken)[1] + apply (rule corres_modify) + apply (simp add: valid_pspace'_def) + apply (rule state_relation_null_filterE, assumption, + simp_all add: pspace_aligned'_cut pspace_distinct'_cut)[1] + apply (simp add: detype_def, rule state.equality; simp add: detype_ext_def) + apply (intro exI, fastforce) + apply (rule ext, clarsimp simp add: null_filter_def) + apply (rule sym, rule ccontr, clarsimp) + apply (drule(4) cte_map_not_null_outside') + apply (fastforce simp add: cte_wp_at_caps_of_state) + apply simp + apply (rule ext, clarsimp simp add: null_filter'_def + map_to_ctes_delete[simplified field_simps]) + apply (rule sym, rule ccontr, clarsimp) + apply (frule(2) pspace_relation_cte_wp_atI + [OF state_relation_pspace_relation]) + apply (elim exE) + apply (frule(4) cte_map_not_null_outside') + apply (rule cte_wp_at_weakenE, erule conjunct1) + apply (case_tac y, clarsimp) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def + valid_nullcaps_def) + apply clarsimp + apply (frule_tac cref="(aa, ba)" in cte_map_untyped_range, + erule cte_wp_at_weakenE[OF _ TrueI], assumption+) + apply (simp add: add_mask_fold) + apply (simp add: add_mask_fold) + apply (rule detype_pspace_relation[simplified], + simp_all add: state_relation_pspace_relation valid_pspace_def)[1] + apply (simp add: valid_cap'_def capAligned_def) + apply (clarsimp simp: valid_cap_def, assumption) + apply (fastforce simp add: detype_def detype_ext_def add_mask_fold intro!: ekheap_relation_detype) + apply (clarsimp simp: state_relation_def ghost_relation_of_heap + detype_def) + apply (drule_tac t="gsUserPages s'" in sym) + apply (drule_tac t="gsCNodes s'" in sym) + apply (drule_tac t="gsPTTypes (ksArchState s')" in sym) + apply (auto simp add: ups_of_heap_def cns_of_heap_def ext pt_types_of_heap_def add_mask_fold opt_map_def + split: option.splits kernel_object.splits)[1] + apply (simp add: valid_mdb_def) + apply (wp hoare_vcg_ex_lift hoare_vcg_ball_lift | wps | + simp add: invs_def valid_state_def valid_pspace_def + descendants_range_def | wp (once) hoare_drop_imps)+ + done + + +text \Invariant preservation across concrete deletion\ + +lemma caps_containedD': + "\ ctes_of s p = Some cte; ctes_of s p' = Some cte'; + \ isUntypedCap (cteCap cte); capRange (cteCap cte) \ untypedRange (cteCap cte') \ {}; + caps_contained' (ctes_of s) \ \ + capRange (cteCap cte) \ untypedRange (cteCap cte')" + apply (cases cte, cases cte') + apply (simp add: caps_contained'_def) + apply blast + done + +lemma untyped_mdbD': + "\ ctes p = Some cte; ctes p' = Some cte'; + isUntypedCap (cteCap cte); capRange (cteCap cte') \ untypedRange (cteCap cte) \ {}; + \ isUntypedCap (cteCap cte'); + untyped_mdb' ctes \ \ p' \ descendants_of' p 
ctes" + by (cases cte, cases cte', simp add: untyped_mdb'_def) + +lemma ko_wp_at_state_refs_ofD: + "\ ko_wp_at' P p s \ \ (\ko. P ko \ state_refs_of' s p = refs_of' ko)" + by (fastforce simp: ko_wp_at'_def state_refs_of'_def) + +lemma sym_refs_ko_wp_atD: + "\ ko_wp_at' P p s; sym_refs (state_refs_of' s) \ + \ (\ko. P ko \ state_refs_of' s p = refs_of' ko + \ (\(x, tp) \ refs_of' ko. (p, symreftype tp) \ state_refs_of' s x))" + apply (clarsimp dest!: ko_wp_at_state_refs_ofD) + apply (rule exI, erule conjI) + apply (drule sym) + apply clarsimp + apply (erule(1) sym_refsD) + done + +lemma ko_wp_at_state_hyp_refs_ofD: + "\ ko_wp_at' P p s \ \ (\ko. P ko \ state_hyp_refs_of' s p = hyp_refs_of' ko)" + by (fastforce simp: ko_wp_at'_def state_hyp_refs_of'_def) + +lemma sym_hyp_refs_ko_wp_atD: + "\ ko_wp_at' P p s; sym_refs (state_hyp_refs_of' s) \ + \ (\ko. P ko \ state_hyp_refs_of' s p = hyp_refs_of' ko + \ (\(x, tp) \ hyp_refs_of' ko. (p, symreftype tp) \ state_hyp_refs_of' s x))" + apply (clarsimp dest!: ko_wp_at_state_hyp_refs_ofD) + apply (rule exI, erule conjI) + apply (drule sym) + apply clarsimp + apply (erule(1) sym_refsD) + done + +lemma zobj_refs_capRange: + "capAligned c \ zobj_refs' c \ capRange c" + apply (cases c; simp add: capAligned_def capRange_def is_aligned_no_overflow) + apply (rename_tac ac) + apply (case_tac ac; simp) + apply clarsimp + apply (drule is_aligned_no_overflow) + apply simp + done + +end + +locale delete_locale = + fixes s and base and bits and ptr and idx and d + assumes cap: "cte_wp_at' (\cte. cteCap cte = UntypedCap d base bits idx) ptr s" + and nodesc: "descendants_range' (UntypedCap d base bits idx) ptr (ctes_of s)" + and invs: "invs' s" + and ct_act: "ct_active' s" + and sa_simp: "sch_act_simple s" + and bwb: "bits < word_bits" + and al: "is_aligned base bits" + and safe: "deletionIsSafe base bits s" + +context delete_locale begin interpretation Arch . (*FIXME: arch_split*) + +lemma valid_objs: "valid_objs' s" + and pa: "pspace_aligned' s" + and pd: "pspace_distinct' s" + and vq: "valid_queues s" + and vq': "valid_queues' s" + and sym_refs: "sym_refs (state_refs_of' s)" + and sym_hyp_refs: "sym_refs (state_hyp_refs_of' s)" + and iflive: "if_live_then_nonz_cap' s" + and ifunsafe: "if_unsafe_then_cap' s" + and dlist: "valid_dlist (ctes_of s)" + and no_0: "no_0 (ctes_of s)" + and chain_0: "mdb_chain_0 (ctes_of s)" + and badges: "valid_badges (ctes_of s)" + and contained: "caps_contained' (ctes_of s)" + and chunked: "mdb_chunked (ctes_of s)" + and umdb: "untyped_mdb' (ctes_of s)" + and uinc: "untyped_inc' (ctes_of s)" + and nullcaps: "valid_nullcaps (ctes_of s)" + and ut_rev: "ut_revocable' (ctes_of s)" + and dist_z: "distinct_zombies (ctes_of s)" + and irq_ctrl: "irq_control (ctes_of s)" + and clinks: "class_links (ctes_of s)" + and rep_r_fb: "reply_masters_rvk_fb (ctes_of s)" + and idle: "valid_idle' s" + and refs: "valid_global_refs' s" + and arch: "valid_arch_state' s" + and virq: "valid_irq_node' (irq_node' s) s" + and virqh: "valid_irq_handlers' s" + and virqs: "valid_irq_states' s" + and no_0_objs: "no_0_obj' s" + and ctnotinQ: "ct_not_inQ s" + and irqs_masked: "irqs_masked' s" + and ctcd: "ct_idle_or_in_cur_domain' s" + and cdm: "ksCurDomain s \ maxDomain" + and vds: "valid_dom_schedule' s" + using invs + by (auto simp add: invs'_def valid_state'_def valid_pspace'_def + valid_mdb'_def valid_mdb_ctes_def) + +abbreviation + "base_bits \ mask_range base bits" + +abbreviation + "state' \ (s \ ksPSpace := \x. 
if base \ x \ x \ base + mask bits then None else ksPSpace s x \)" + +lemma ko_wp_at'[simp]: + "\P p. (ko_wp_at' P p state') = (ko_wp_at' P p s \ p \ base_bits)" + by (fastforce simp add: ko_wp_at_delete'[OF pd]) + +lemma obj_at'[simp]: + "\P p. (obj_at' P p state') = (obj_at' P p s \ p \ base_bits)" + by (fastforce simp add: obj_at'_real_def) + +lemma typ_at'[simp]: + "typ_at' P p state' = (typ_at' P p s \ p \ base_bits)" + by (simp add: typ_at'_def) + +lemma valid_untyped[simp]: + "s \' UntypedCap d base bits idx" + using cte_wp_at_valid_objs_valid_cap' [OF cap valid_objs] + by clarsimp + +lemma cte_wp_at'[simp]: + "\P p. (cte_wp_at' P p state') = (cte_wp_at' P p s \ p \ base_bits)" + by (fastforce simp:cte_wp_at_delete'[where idx = idx,OF valid_untyped pd ]) + +(* the bits of caps they need for validity argument are within their capRanges *) +lemma valid_cap_ctes_pre: + "\c. s \' c \ case c of CNodeCap ref bits g gs \ + \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c + | Zombie ref (ZombieCNode bits) n \ + \x. ref + (x && mask bits) * 2^cteSizeBits \ capRange c + | ArchObjectCap (PageTableCap ref pt_t data) \ + \x. x \ mask (ptTranslationBits pt_t) \ ref + (x << pte_bits) \ capRange c + | ArchObjectCap (FrameCap ref r sz d m) \ + \p<2 ^ (pageBitsForSize sz - pageBits). ref + (p << pageBits) \ capRange c + | _ \ True" + apply (drule valid_capAligned) + apply (simp split: capability.split zombie_type.split arch_capability.split, safe) + using pre_helper[where a=cteSizeBits] + apply (clarsimp simp add: capRange_def capAligned_def objBits_simps field_simps) + apply (clarsimp simp add: capRange_def capAligned_def shiftl_t2n) + apply (frule pre_helper2[where bits=pageBits]; simp add: pbfs_atleast_pageBits mult_ac) + using pbfs_less_wb' apply (simp add: word_bits_conv) + apply (clarsimp simp add: capRange_def capAligned_def shiftl_t2n + simp del: atLeastAtMost_iff capBits.simps) + apply (simp del: atLeastAtMost_iff) + apply (drule_tac bits="pte_bits" and x="ucast x" in pre_helper2; simp add: mult_ac) + apply (simp add: bit_simps) + apply (simp add: table_size_def) + apply (erule order_le_less_trans) + apply (simp add: mask_def bit_simps) + apply (clarsimp simp add: capRange_def capAligned_def + simp del: atLeastAtMost_iff capBits.simps) + using pre_helper[where a=cteSizeBits] + apply (clarsimp simp add: capRange_def capAligned_def objBits_simps field_simps) + done + +lemma replycap_argument: + "\p t m r. cte_wp_at' (\cte. cteCap cte = ReplyCap t m r) p s + \ t \ mask_range base bits" + using safe + by (force simp: deletionIsSafe_def cte_wp_at_ctes_of) + +lemma valid_cap': + "\p c. \ s \' c; cte_wp_at' (\cte. 
cteCap cte = c) p s; + capRange c \ mask_range base bits = {} \ \ state' \' c" + apply (subgoal_tac "capClass c = PhysicalClass \ capUntypedPtr c \ capRange c") + apply (subgoal_tac "capClass c = PhysicalClass \ + capUntypedPtr c \ mask_range base bits") + apply (frule valid_cap_ctes_pre) + apply (case_tac c, simp_all add: valid_cap'_def replycap_argument + del: atLeastAtMost_iff + split: zombie_type.split_asm) + apply (simp add: field_simps del: atLeastAtMost_iff) + apply blast + defer + apply (simp add: valid_untyped'_def) + apply (simp add: field_simps bit_simps word_size_def del: atLeastAtMost_iff) + apply blast + apply blast + apply (clarsimp simp: capAligned_capUntypedPtr) + apply (rename_tac arch_cap) + apply (case_tac arch_cap; simp del: atLeastAtMost_iff add: frame_at'_def page_table_at'_def) + apply blast + apply blast + done + +lemma objRefs_notrange: + assumes asms: "ctes_of s p = Some c" "\ isUntypedCap (cteCap c)" + shows "capRange (cteCap c) \ base_bits = {}" +proof - + from cap obtain node + where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, simp) + done + + show ?thesis using asms cap + apply - + apply (rule ccontr) + apply (drule untyped_mdbD' [OF ctes_of _ _ _ _ umdb]) + apply (simp add: isUntypedCap_def) + apply (simp add: add_mask_fold) + apply assumption + using nodesc + apply (simp add:descendants_range'_def2) + apply (drule(1) descendants_range_inD') + apply (simp add:asms) + apply (simp add: add_mask_fold) + done +qed + +lemma ctes_of_valid [elim!]: + "ctes_of s p = Some cte \ s \' cteCap cte" + by (case_tac cte, simp add: ctes_of_valid_cap' [OF _ valid_objs]) + +lemma valid_cap2: + "\ cte_wp_at' (\cte. cteCap cte = c) p s \ \ state' \' c" + apply (case_tac "isUntypedCap c") + apply (drule cte_wp_at_valid_objs_valid_cap' [OF _ valid_objs]) + apply (clarsimp simp: valid_cap'_def isCap_simps valid_untyped'_def) + apply (rule valid_cap'[rotated], assumption) + apply (clarsimp simp: cte_wp_at_ctes_of dest!: objRefs_notrange) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma ex_nonz_cap_notRange: + "ex_nonz_cap_to' p s \ p \ base_bits" + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (case_tac "isUntypedCap (cteCap cte)") + apply (clarsimp simp: isCap_simps) + apply (drule subsetD[OF zobj_refs_capRange, rotated]) + apply (rule valid_capAligned, erule ctes_of_valid) + apply (drule(1) objRefs_notrange) + apply (drule_tac a=p in equals0D) + apply simp + done + +lemma live_notRange: + "\ ko_wp_at' P p s; \ko. P ko \ live' ko \ \ p \ base_bits" + apply (drule if_live_then_nonz_capE' [OF iflive ko_wp_at'_weakenE]) + apply simp + apply (erule ex_nonz_cap_notRange) + done + +lemma refs_notRange: + "(x, tp) \ state_refs_of' s y \ y \ base_bits" + apply (drule state_refs_of'_elemD) + apply (erule live_notRange) + apply (rule refs_of_live') + apply clarsimp + done + +lemma hyp_refs_notRange: + "(x, tp) \ state_hyp_refs_of' s y \ y \ base_bits" + apply (drule state_hyp_refs_of'_elemD) + apply (erule live_notRange) + apply (rule hyp_refs_of_live') + apply clarsimp + done + +lemma sym_refs_VCPU_hyp_live': + "\ko_wp_at' ((=) (KOArch (KOVCPU v))) p s; sym_refs (state_hyp_refs_of' s); vcpuTCBPtr v = Some t\ + \ ko_wp_at' (\ko. 
koTypeOf ko = TCBT \ hyp_live' ko) t s" + apply (drule (1) sym_hyp_refs_ko_wp_atD) + apply (clarsimp) + apply (drule state_hyp_refs_of'_elemD) + apply (simp add: ko_wp_at'_def) + apply (clarsimp simp: hyp_refs_of_rev' hyp_live'_def) + done + +lemma sym_refs_TCB_hyp_live': + "\ko_wp_at' ((=) (KOTCB t)) p s; sym_refs (state_hyp_refs_of' s); atcbVCPUPtr (tcbArch t) = Some v\ + \ ko_wp_at' (\ko. koTypeOf ko = ArchT VCPUT \ hyp_live' ko) v s" + apply (drule (1) sym_hyp_refs_ko_wp_atD) + apply (clarsimp) + apply (drule state_hyp_refs_of'_elemD) + apply (simp add: ko_wp_at'_def) + apply (clarsimp simp: hyp_refs_of_rev' hyp_live'_def arch_live'_def) + done + +lemma valid_obj': + "\ valid_obj' obj s; ko_wp_at' ((=) obj) p s \ \ valid_obj' obj state'" + apply (case_tac obj, simp_all add: valid_obj'_def) + apply (rename_tac endpoint) + apply (case_tac endpoint, simp_all add: valid_ep'_def)[1] + apply (clarsimp dest!: sym_refs_ko_wp_atD [OF _ sym_refs]) + apply (drule(1) bspec)+ + apply (clarsimp dest!: refs_notRange) + apply (clarsimp dest!: sym_refs_ko_wp_atD [OF _ sym_refs]) + apply (drule(1) bspec)+ + apply (clarsimp dest!: refs_notRange) + apply (rename_tac notification) + apply (case_tac notification, simp_all add: valid_ntfn'_def valid_bound_tcb'_def)[1] + apply (rename_tac ntfn bound) + apply (case_tac ntfn, simp_all split:option.splits)[1] + apply ((clarsimp dest!: sym_refs_ko_wp_atD [OF _ sym_refs] refs_notRange)+)[4] + apply (drule(1) bspec)+ + apply (clarsimp dest!: refs_notRange) + apply (clarsimp dest!: sym_refs_ko_wp_atD [OF _ sym_refs] refs_notRange) + apply (frule sym_refs_ko_wp_atD [OF _ sym_refs]) + apply (clarsimp simp: valid_tcb'_def ko_wp_at'_def + objBits_simps) + apply (rule conjI) + apply (erule ballEI, clarsimp elim!: ranE) + apply (rule_tac p="p + x" in valid_cap2) + apply (erule(2) cte_wp_at_tcbI') + apply fastforce + apply simp + apply (rename_tac tcb) + apply (simp only: conj_assoc[symmetric], rule conjI) + apply (case_tac "tcbState tcb"; + clarsimp simp: valid_tcb_state'_def valid_bound_ntfn'_def + dest!: refs_notRange split: option.splits) + using sym_hyp_refs + apply (clarsimp simp add: valid_arch_tcb'_def split: option.split_asm) + apply (drule (1) sym_refs_TCB_hyp_live'[rotated]) + apply (clarsimp simp: ko_wp_at'_def objBits_simps; (rule conjI|assumption)+) + apply (drule live_notRange, clarsimp simp: live'_def) + apply (case_tac ko; simp) + apply clarsimp + apply (clarsimp simp: valid_cte'_def) + apply (rule_tac p=p in valid_cap2) + apply (clarsimp simp: ko_wp_at'_def objBits_simps' cte_level_bits_def[symmetric]) + apply (erule(2) cte_wp_at_cteI') + apply simp + done + +lemma st_tcb: + "\P p. \ st_tcb_at' P p s; \ P Inactive; \ P IdleThreadState \ \ st_tcb_at' P p state'" + by (fastforce simp: pred_tcb_at'_def obj_at'_real_def live'_def hyp_live'_def + dest: live_notRange) + +lemma irq_nodes_global: + "\irq :: irq. irq_node' s + (ucast irq << cteSizeBits) \ global_refs' s" + by (simp add: global_refs'_def) + +lemma global_refs: + "global_refs' s \ base_bits = {}" + using cap + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule valid_global_refsD' [OF _ refs]) + apply (fastforce simp add: field_simps mask_def) + done + +lemma global_refs2: + "global_refs' s \ (- base_bits)" + using global_refs by blast + +lemma irq_nodes_range: + "\irq :: irq. 
irq_node' s + (ucast irq << cteSizeBits) \ base_bits" + using irq_nodes_global global_refs + by blast + +lemma cte_refs_notRange: + assumes asms: "ctes_of s p = Some c" + shows "cte_refs' (cteCap c) (irq_node' s) \ base_bits = {}" +proof - + from cap obtain node + where ctes_of: "ctes_of s ptr = Some (CTE (UntypedCap d base bits idx) node)" + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, simp) + done + + show ?thesis using asms + apply - + apply (rule ccontr) + apply (clarsimp elim!: nonemptyE) + apply (frule ctes_of_valid) + apply (frule valid_capAligned) + apply (case_tac "\irq. cteCap c = IRQHandlerCap irq") + apply (insert irq_nodes_range)[1] + apply clarsimp + apply (frule subsetD [OF cte_refs_capRange]) + apply simp + apply assumption + apply (frule caps_containedD' [OF _ ctes_of _ _ contained]) + apply (clarsimp dest!: isCapDs) + apply (rule_tac x=x in notemptyI) + apply (simp add: field_simps mask_def) + apply (simp add: add_mask_fold) + apply (drule objRefs_notrange) + apply (clarsimp simp: isCap_simps) + apply blast + done +qed + +lemma non_null_present: + "cte_wp_at' (\c. cteCap c \ NullCap) p s \ p \ base_bits" + apply (drule (1) if_unsafe_then_capD' [OF _ ifunsafe]) + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of + dest!: cte_refs_notRange simp del: atLeastAtMost_iff) + apply blast + done + +lemma cte_cap: + "ex_cte_cap_to' p s \ ex_cte_cap_to' p state'" + apply (clarsimp simp: ex_cte_cap_to'_def) + apply (frule non_null_present [OF cte_wp_at_weakenE']) + apply clarsimp + apply fastforce + done + +lemma idle_notRange: + "\cref. \ cte_wp_at' (\c. ksIdleThread s \ capRange (cteCap c)) cref s + \ ksIdleThread s \ base_bits" + apply (insert cap) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule_tac x=ptr in allE, clarsimp simp: field_simps mask_def) + done + +abbreviation + "ctes' \ map_to_ctes (\x. 
if base \ x \ x \ base + mask bits then None else ksPSpace s x)" + +lemmas tree_to_ctes = map_to_ctes_delete [OF valid_untyped pd] + +lemma map_to_ctesE[elim!]: + "\ ctes' x = Some cte; \ ctes_of s x = Some cte; x \ base_bits \ \ P \ \ P" + by (clarsimp simp: tree_to_ctes split: if_split_asm) + +lemma not_nullMDBNode: + "\ ctes_of s x = Some cte; cteCap cte = NullCap; cteMDBNode cte = nullMDBNode \ P \ \ P" + using nullcaps + apply (cases cte) + apply (simp add: valid_nullcaps_def) + done + +lemma mdb_src: "\ ctes_of s \ x \ y; y \ 0 \ \ x \ base_bits" + apply (rule non_null_present) + apply (clarsimp simp: next_unfold' cte_wp_at_ctes_of) + apply (erule(1) not_nullMDBNode) + apply (simp add: nullMDBNode_def nullPointer_def) + done + +lemma mdb_dest: "\ ctes_of s \ x \ y; y \ 0 \ \ y \ base_bits" + apply (case_tac "x = 0") + apply (insert no_0, simp add: next_unfold')[1] + apply (drule(1) vdlist_nextD0 [OF _ _ dlist]) + apply (rule non_null_present) + apply (clarsimp simp: next_unfold' cte_wp_at_ctes_of mdb_prev_def) + apply (erule(1) not_nullMDBNode) + apply (simp add: nullMDBNode_def nullPointer_def) + done + +lemma trancl_next[elim]: + "\ ctes_of s \ x \\<^sup>+ y; x \ base_bits \ \ ctes' \ x \\<^sup>+ y" + apply (erule rev_mp, erule converse_trancl_induct) + apply clarsimp + apply (rule r_into_trancl) + apply (simp add: next_unfold' tree_to_ctes) + apply clarsimp + apply (rule_tac b=z in trancl_into_trancl2) + apply (simp add: next_unfold' tree_to_ctes) + apply (case_tac "z = 0") + apply (insert no_0)[1] + apply (erule tranclE2) + apply (simp add: next_unfold') + apply (simp add: next_unfold') + apply (drule(1) mdb_dest) + apply (simp add: next_unfold') + done + +lemma mdb_parent_notrange: + "ctes_of s \ x \ y \ x \ base_bits \ y \ base_bits" + apply (erule subtree.induct) + apply (frule(1) mdb_src, drule(1) mdb_dest, simp) + apply (drule(1) mdb_dest, simp) + done + +lemma mdb_parent: + "ctes_of s \ x \ y \ ctes' \ x \ y" + apply (erule subtree.induct) + apply (frule(1) mdb_src, frule(1) mdb_dest) + apply (rule subtree.direct_parent) + apply (simp add: next_unfold' tree_to_ctes) + apply assumption + apply (simp add: parentOf_def tree_to_ctes) + apply (frule(1) mdb_src, frule(1) mdb_dest) + apply (erule subtree.trans_parent) + apply (simp add: next_unfold' tree_to_ctes) + apply assumption + apply (frule mdb_parent_notrange) + apply (simp add: parentOf_def tree_to_ctes) + done + +lemma trancl_next_rev: + "ctes' \ x \\<^sup>+ y \ ctes_of s \ x \\<^sup>+ y" + apply (erule converse_trancl_induct) + apply (rule r_into_trancl) + apply (clarsimp simp: next_unfold') + apply (rule_tac b=z in trancl_into_trancl2) + apply (clarsimp simp: next_unfold') + apply assumption + done + +lemma is_chunk[elim!]: + "is_chunk (ctes_of s) cap x y \ is_chunk ctes' cap x y" + apply (simp add: is_chunk_def) + apply (erule allEI) + apply (clarsimp dest!: trancl_next_rev) + apply (drule rtranclD, erule disjE) + apply (clarsimp simp: tree_to_ctes) + apply (cut_tac p=y in non_null_present) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply simp + apply (clarsimp dest!: trancl_next_rev simp: trancl_into_rtrancl) + apply (clarsimp simp: tree_to_ctes) + apply (cut_tac p=p'' in non_null_present) + apply (clarsimp simp add: cte_wp_at_ctes_of) + apply simp + done + +end + +lemma exists_disj: + "((\a. P a \ Q a)\(\a. P a \ Q' a)) + = (\a. P a \ (Q a \ Q' a))" + by auto + +lemma (in delete_locale) delete_invs': + "invs' (ksMachineState_update + (\ms. underlying_memory_update + (\m x. 
if base \ x \ x \ base + (2 ^ bits - 1) then 0 else m x) ms) + state')" (is "invs' (?state'')") +using vds +proof (simp add: invs'_def valid_state'_def valid_pspace'_def + valid_mdb'_def valid_mdb_ctes_def, + safe) + interpret Arch . (*FIXME: arch_split*) + let ?s = state' + let ?ran = base_bits + + show "pspace_aligned' ?s" using pa + by (simp add: pspace_aligned'_def dom_def) + + show "pspace_distinct' ?s" using pd + by (clarsimp simp add: pspace_distinct'_def ps_clear_def + dom_if_None Diff_Int_distrib) + + show "valid_objs' ?s" using valid_objs + apply (clarsimp simp: valid_objs'_def ran_def) + apply (rule_tac p=a in valid_obj') + apply fastforce + apply (frule pspace_alignedD'[OF _ pa]) + apply (frule pspace_distinctD'[OF _ pd]) + apply (clarsimp simp: ko_wp_at'_def) + done + + from sym_refs show "sym_refs (state_refs_of' ?s)" + apply - + apply (clarsimp simp: state_refs_ko_wp_at_eq + elim!: rsubst[where P=sym_refs]) + apply (rule ext) + apply safe + apply (simp add: refs_notRange[simplified] state_refs_ko_wp_at_eq) + done + + from sym_hyp_refs show "sym_refs (state_hyp_refs_of' ?s)" + apply - + apply (clarsimp simp: state_hyp_refs_ko_wp_at_eq + elim!: rsubst[where P=sym_refs]) + apply (rule ext) + apply safe + apply (simp add: hyp_refs_notRange[simplified] state_hyp_refs_ko_wp_at_eq) + done + + from vq show "valid_queues ?s" + apply (clarsimp simp: valid_queues_def bitmapQ_defs) + apply (clarsimp simp: valid_queues_no_bitmap_def) + apply (drule spec, drule spec, drule conjunct1, drule(1) bspec) + apply (clarsimp simp: obj_at'_real_def) + apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) + apply (clarsimp simp: inQ_def live'_def) + apply (clarsimp dest!: ex_nonz_cap_notRange) + done + + from vq' show "valid_queues' ?s" + by (simp add: valid_queues'_def) + + show "if_live_then_nonz_cap' ?s" using iflive + apply (clarsimp simp: if_live_then_nonz_cap'_def) + apply (drule spec, drule(1) mp) + apply (clarsimp simp: ex_nonz_cap_to'_def) + apply (rule exI, rule conjI, assumption) + apply (drule non_null_present [OF cte_wp_at_weakenE']) + apply clarsimp + apply simp + done + + from ifunsafe show "if_unsafe_then_cap' ?s" + by (clarsimp simp: if_unsafe_then_cap'_def + intro!: cte_cap) + + from idle_notRange refs + have "ksIdleThread s \ ?ran" + apply (simp add: cte_wp_at_ctes_of valid_global_refs'_def valid_refs'_def) + apply blast + done + with idle show "valid_idle' ?s" + apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def) + apply (clarsimp simp add: ps_clear_def dom_if_None Diff_Int_distrib) + done + + from tcb_at_invs' [OF invs] ct_act + show "cur_tcb' ?s" unfolding cur_tcb'_def + apply (clarsimp simp: cur_tcb'_def ct_in_state'_def) + apply (drule st_tcb) + apply simp + apply simp + apply (simp add: pred_tcb_at'_def) + done + + let ?ctes' = ctes' + + from no_0 show no_0': "no_0 ?ctes'" + by (simp add: no_0_def tree_to_ctes) + + from dlist show "valid_dlist ?ctes'" + apply (simp only: valid_dlist_def3) + apply (rule conjI) + apply (drule conjunct1) + apply (elim allEI) + apply (clarsimp simp: mdb_prev_def next_unfold' + tree_to_ctes) + apply (rule ccontr, clarsimp) + apply (cut_tac p="mdbNext (cteMDBNode cte)" in non_null_present) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) not_nullMDBNode) + apply (simp add: nullMDBNode_def nullPointer_def no_0) + apply simp + apply (drule conjunct2) + apply (elim allEI) + apply (clarsimp simp: mdb_prev_def next_unfold' + tree_to_ctes) + apply (rule ccontr, clarsimp) + apply (cut_tac p="mdbPrev 
(cteMDBNode z)" in non_null_present) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule(1) not_nullMDBNode) + apply (simp add: nullMDBNode_def nullPointer_def no_0) + apply simp + done + + from chain_0 show "mdb_chain_0 ?ctes'" + by (fastforce simp: mdb_chain_0_def Ball_def) + + from umdb show "untyped_mdb' ?ctes'" + apply (simp add: untyped_mdb'_def) + apply (erule allEI)+ + apply (clarsimp simp: descendants_of'_def) + apply (rule mdb_parent) + apply (clarsimp simp: tree_to_ctes split: if_split_asm) + done + + from badges show "valid_badges ?ctes'" + by (simp add: valid_badges_def tree_to_ctes next_unfold') + + from contained show "caps_contained' ?ctes'" + by (simp add: caps_contained'_def tree_to_ctes) + + from chunked show "mdb_chunked ?ctes'" + apply (simp add: mdb_chunked_def) + apply (elim allEI) + apply clarsimp + apply (intro conjI impI) + apply (erule disjEI) + apply fastforce + apply fastforce + apply (clarsimp dest!: trancl_next_rev) + apply (clarsimp dest!: trancl_next_rev) + done + + from uinc show "untyped_inc' ?ctes'" + apply (simp add: untyped_inc'_def) + apply (elim allEI) + apply clarsimp + apply (safe del: impCE, simp_all add: descendants_of'_def + mdb_parent) + done + + from nullcaps show "valid_nullcaps ?ctes'" + by (clarsimp simp: valid_nullcaps_def) + + from ut_rev + show "ut_revocable' ?ctes'" + by (clarsimp simp: ut_revocable'_def) + + show "class_links ?ctes'" using clinks + by (simp add: class_links_def tree_to_ctes mdb_next_unfold) + + show "valid_global_refs' ?s" using refs + by (simp add: valid_global_refs'_def tree_to_ctes valid_cap_sizes'_def + global_refs'_def valid_refs'_def ball_ran_eq) + + show "valid_arch_state' ?s" + using arch global_refs2 + apply (simp add: valid_arch_state'_def global_refs'_def) + apply (case_tac "armHSCurVCPU (ksArchState s)"; clarsimp simp add: split_def) + apply (drule live_notRange, clarsimp, case_tac ko; simp add: is_vcpu'_def live'_def) + done + + show "valid_irq_node' (irq_node' s) ?s" + using virq irq_nodes_range + by (simp add: valid_irq_node'_def mult.commute mult.left_commute ucast_ucast_mask_8) + + show "valid_irq_handlers' ?s" using virqh + apply (simp add: valid_irq_handlers'_def irq_issued'_def + cteCaps_of_def tree_to_ctes Ball_def) + apply (erule allEI) + apply (clarsimp simp: ran_def) + done + + from irq_ctrl + show "irq_control ?ctes'" + by (clarsimp simp: irq_control_def) + + from dist_z + show "distinct_zombies ?ctes'" + apply (simp add: tree_to_ctes distinct_zombies_def + distinct_zombie_caps_def + split del: if_split) + apply (erule allEI, erule allEI) + apply clarsimp + done + + show "reply_masters_rvk_fb ?ctes'" + using rep_r_fb + by (simp add: tree_to_ctes reply_masters_rvk_fb_def + ball_ran_eq) + + from virqs + show "valid_irq_states' s" . 
+ + from no_0_objs + show "no_0_obj' state'" + by (simp add: no_0_obj'_def) + + from irqs_masked + show "irqs_masked' state'" + by (simp add: irqs_masked'_def) + + from sa_simp ct_act + show "sch_act_wf (ksSchedulerAction s) state'" + apply (simp add: sch_act_simple_def) + apply (case_tac "ksSchedulerAction s", simp_all add: ct_in_state'_def) + apply (fastforce dest!: st_tcb elim!: pred_tcb'_weakenE) + done + + from invs + have "pspace_domain_valid s" by (simp add: invs'_def valid_state'_def) + thus "pspace_domain_valid state'" + by (simp add: pspace_domain_valid_def) + + from invs + have "valid_machine_state' s" by (simp add: invs'_def valid_state'_def) + thus "valid_machine_state' ?state''" + apply (clarsimp simp: valid_machine_state'_def) + apply (drule_tac x=p in spec) + apply (simp add: pointerInUserData_def pointerInDeviceData_def typ_at'_def) + apply (simp add: ko_wp_at'_def exists_disj) + apply (elim exE conjE) + apply (cut_tac ptr'=p in mask_in_range) + apply fastforce + using valid_untyped[simplified valid_cap'_def capability.simps] + apply (simp add: valid_untyped'_def capAligned_def) + apply (elim conjE) + apply (drule_tac x="p && ~~ mask pageBits" in spec) + apply (cut_tac x=p in is_aligned_neg_mask[OF le_refl]) + apply (clarsimp simp: mask_2pm1 ko_wp_at'_def obj_range'_def objBitsKO_def) + apply (frule is_aligned_no_overflow'[of base bits]) + apply (frule is_aligned_no_overflow'[of _ pageBits]) + apply (frule (1) aligned_ranges_subset_or_disjoint + [where n=bits and n'=pageBits]) + apply (case_tac ko, simp_all add: objBits_simps) + apply (auto simp add: x_power_minus_1) + done + + from sa_simp ctnotinQ + show "ct_not_inQ state'" + apply (clarsimp simp: ct_not_inQ_def pred_tcb_at'_def) + apply (drule obj_at'_and + [THEN iffD2, OF conjI, + OF ct_act [unfolded ct_in_state'_def pred_tcb_at'_def]]) + apply (clarsimp simp: obj_at'_real_def) + apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) + apply clarsimp + apply (case_tac "tcbState obj"; clarsimp simp: live'_def) + apply (clarsimp dest!: ex_nonz_cap_notRange) + done + + from ctcd show "ct_idle_or_in_cur_domain' state'" + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (intro impI) + apply (elim disjE impE) + apply simp+ + apply (intro impI) + apply (rule disjI2) + apply (drule obj_at'_and + [THEN iffD2, OF conjI, + OF ct_act [unfolded ct_in_state'_def st_tcb_at'_def]]) + apply (clarsimp simp: obj_at'_real_def) + apply (frule if_live_then_nonz_capE'[OF iflive, OF ko_wp_at'_weakenE]) + apply (clarsimp simp: live'_def) + apply (case_tac "tcbState obj"; clarsimp) + apply (clarsimp dest!: ex_nonz_cap_notRange elim!: ko_wp_at'_weakenE) + done + + from cdm show "ksCurDomain s \ maxDomain" . 
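(* untyped_ranges_zero_inv still holds: any cte with a nonempty untypedZeroRange is non-null, so by non_null_present it lies outside the deleted region and its cap is unchanged in state'. *)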
+ + from invs + have urz: "untyped_ranges_zero' s" by (simp add: invs'_def valid_state'_def) + show "untyped_ranges_zero_inv (cteCaps_of state') (gsUntypedZeroRanges s)" + apply (simp add: untyped_zero_ranges_cte_def + urz[unfolded untyped_zero_ranges_cte_def, rule_format, symmetric]) + apply (clarsimp simp: fun_eq_iff intro!: arg_cong[where f=Ex]) + apply safe + apply (drule non_null_present[OF cte_wp_at_weakenE']) + apply (clarsimp simp: untypedZeroRange_def) + apply simp + done + +qed (clarsimp) + +lemma (in delete_locale) delete_ko_wp_at': + assumes objs: "ko_wp_at' P p s \ ex_nonz_cap_to' p s" + shows "ko_wp_at' P p state'" + using objs + by (clarsimp simp: ko_wp_at'_def ps_clear_def dom_if_None Diff_Int_distrib + dest!: ex_nonz_cap_notRange) + +lemma (in delete_locale) null_filter': + assumes descs: "Q (null_filter' (ctes_of s))" + shows "Q (null_filter' (ctes_of state'))" + using descs ifunsafe + apply (clarsimp elim!: rsubst[where P=Q]) + apply (rule ext) + apply (clarsimp simp:null_filter'_def tree_to_ctes) + apply (rule ccontr) + apply (clarsimp) + apply (cut_tac p = x in non_null_present) + apply (simp add:cte_wp_at_ctes_of) + apply (rule ccontr) + apply simp + apply (erule(1) not_nullMDBNode) + apply (case_tac y,simp) + apply simp + done + +lemma (in delete_locale) delete_ex_cte_cap_to': + assumes exc: "ex_cte_cap_to' p s" + shows "ex_cte_cap_to' p state'" + using exc + by (clarsimp elim!: cte_cap) + + +lemma deleteObjects_null_filter: + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and (\s. P (null_filter' (ctes_of s))) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv s. P (null_filter' (ctes_of s))\" + apply (simp add: deleteObjects_def3) + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subgoal_tac "delete_locale s ptr bits p idx d") + apply (drule_tac Q = P in delete_locale.null_filter') + apply assumption + apply (clarsimp simp:p_assoc_help) + apply (simp add: eq_commute field_simps mask_def) + apply (subgoal_tac "ksPSpace (s\ksMachineState := snd ((), b)\) = + ksPSpace s", simp only:, simp) + apply (unfold_locales, simp_all) + done + +lemma deleteObjects_descendants: + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and (\s. descendants_range_in' H p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv s. descendants_range_in' H p (ctes_of s)\" + apply (simp add:descendants_range_in'_def2) + apply (wp deleteObjects_null_filter) + apply fastforce + done + +lemma doMachineOp_modify: + "doMachineOp (modify g) = modify (ksMachineState_update g)" + apply (simp add: doMachineOp_def split_def select_f_returns) + apply (rule ext) + apply (simp add: simpler_gets_def simpler_modify_def bind_def) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma deleteObjects_invs': + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. 
invs'\" +proof - + show ?thesis + apply (rule hoare_pre) + apply (rule_tac G="is_aligned ptr bits \ 3 \ bits \ bits \ word_bits" in hoare_grab_asm) + apply (clarsimp simp add: deleteObjects_def2) + apply (simp add: freeMemory_def bind_assoc doMachineOp_bind) + apply (simp add: bind_assoc[where f="\_. modify f" for f, symmetric]) + apply (simp add: mapM_x_storeWord_step[simplified word_size_bits_def] + doMachineOp_modify modify_modify) + apply (simp add: bind_assoc intvl_range_conv'[where 'a=machine_word_len, folded word_bits_def] mask_def field_simps) + apply (wp) + apply (simp cong: if_cong) + apply (subgoal_tac "is_aligned ptr bits \ 3 \ bits \ bits < word_bits",simp) + apply clarsimp + apply (frule(2) delete_locale.intro, simp_all)[1] + apply (simp add: ksASIDMapSafe_def invs'_gsTypes_update) + apply (rule subst[rotated, where P=invs'], erule delete_locale.delete_invs') + apply (simp add: field_simps mask_def) + apply clarsimp + apply (drule invs_valid_objs') + apply (drule (1) cte_wp_at_valid_objs_valid_cap') + apply (clarsimp simp add: valid_cap'_def capAligned_def untypedBits_defs) + done +qed + +lemma deleteObjects_st_tcb_at': + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and st_tcb_at' (P and (\) Inactive and (\) IdleThreadState) t + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. st_tcb_at' P t\" + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subgoal_tac "delete_locale s ptr bits p idx d") + apply (drule delete_locale.delete_ko_wp_at' + [where p = t and + P="case_option False (P \ tcbState) \ projectKO_opt", + simplified eq_commute]) + apply (simp add: pred_tcb_at'_def obj_at'_real_def) + apply (rule conjI) + apply (fastforce elim: ko_wp_at'_weakenE simp: projectKO_opt_tcb) + apply (erule if_live_then_nonz_capD' [rotated]) + apply (clarsimp simp: live'_def) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def + field_simps ko_wp_at'_def ps_clear_def + cong:if_cong + split: option.splits) + apply (simp add: delete_locale_def) + done + +lemma deleteObjects_cap_to': + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and ex_cte_cap_to' p' + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. ex_cte_cap_to' p'\" + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subgoal_tac "delete_locale s ptr bits p idx d") + apply (drule delete_locale.delete_ex_cte_cap_to', assumption) + apply (simp cong:if_cong) + apply (subgoal_tac + "s\ksMachineState := b, + ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\ = + ksMachineState_update (\_. b) + (s\ksPSpace := \x. 
if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\)",erule ssubst) + apply (simp add: field_simps ex_cte_cap_wp_to'_def cong:if_cong) + apply simp + apply (simp add: delete_locale_def) + done + +lemma valid_untyped_no_overlap: + "\ valid_untyped' d ptr bits idx s; is_aligned ptr bits; valid_pspace' s \ + \ pspace_no_overlap' ptr bits (s\ksPSpace := ksPSpace s |` (- mask_range ptr bits)\)" + apply (clarsimp simp del: atLeastAtMost_iff + simp: pspace_no_overlap'_def valid_cap'_def valid_untyped'_def) + apply (drule_tac x=x in spec) + apply (drule restrict_map_Some_iff[THEN iffD1]) + apply clarsimp + apply (frule pspace_alignedD') + apply (simp add: valid_pspace'_def) + apply (frule pspace_distinctD') + apply (simp add: valid_pspace'_def) + apply (unfold ko_wp_at'_def obj_range'_def) + apply (drule (1) aligned_ranges_subset_or_disjoint) + apply (clarsimp simp del: Int_atLeastAtMost atLeastAtMost_iff atLeastatMost_subset_iff) + apply (elim disjE) + apply (subgoal_tac "ptr \ mask_range x (objBitsKO ko)") + apply (clarsimp simp:p_assoc_help mask_def) + apply (clarsimp simp:p_assoc_help mask_def) + apply (fastforce simp: mask_def add_diff_eq)+ + done + +lemma deleteObject_no_overlap[wp]: + "\valid_cap' (UntypedCap d ptr bits idx) and valid_pspace'\ + deleteObjects ptr bits + \\rv s. pspace_no_overlap' ptr bits s\" + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply (clarsimp simp: valid_cap'_def cong:if_cong) + apply (drule (2) valid_untyped_no_overlap) + apply (subgoal_tac + "s\ksMachineState := b, + ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\ = + ksMachineState_update (\_. b) + (s\ksPSpace := ksPSpace s |` (- mask_range ptr bits)\)", simp) + apply (case_tac s, simp) + apply (rule ext) + apply simp + done + +lemma deleteObjects_cte_wp_at': + "\\s. cte_wp_at' P p s \ p \ mask_range ptr bits + \ s \' (UntypedCap d ptr bits idx) \ valid_pspace' s\ + deleteObjects ptr bits + \\rv s. cte_wp_at' P p s\" + apply (simp add: deleteObjects_def3 doMachineOp_def split_def) + apply wp + apply (clarsimp simp: valid_pspace'_def cong:if_cong) + apply (subgoal_tac + "s\ksMachineState := b, + ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\ = + ksMachineState_update (\_. b) + (s\ksPSpace := \x. if ptr \ x \ x \ ptr + mask bits then None + else ksPSpace s x\)", erule ssubst) + apply (simp add: cte_wp_at_delete' x_power_minus_1) + apply (case_tac s, simp) + done + +lemma deleteObjects_invs_derivatives: + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. valid_pspace'\" + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. valid_mdb'\" + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. pspace_aligned'\" + "\cte_wp_at' (\c. cteCap c = UntypedCap d ptr bits idx) p + and invs' and ct_active' and sch_act_simple + and (\s. 
descendants_range' (UntypedCap d ptr bits idx) p (ctes_of s)) + and K (bits < word_bits \ is_aligned ptr bits)\ + deleteObjects ptr bits + \\rv. pspace_distinct'\" + by (safe intro!: hoare_strengthen_post [OF deleteObjects_invs']) + + lemma deleteObjects_nosch: + "\\s. P (ksSchedulerAction s)\ + deleteObjects ptr sz + \\rv s. P (ksSchedulerAction s)\" + by (simp add: deleteObjects_def3 | wp hoare_drop_imp)+ + +(* Proving the reordering (monad_commute) lemmas here *) + + lemma createObjects'_wp_subst: + "\\P\createObjects a b c d\\r. Q\\ \ \P\createObjects' a b c d\\r. Q\" + apply (clarsimp simp:createObjects_def valid_def return_def bind_def) + apply (drule_tac x = s in spec) + apply (clarsimp simp:split_def) + apply auto + done + + definition pspace_no_overlap_cell' where + "pspace_no_overlap_cell' p \ \kh. + \x ko. kh x = Some ko \ p \ mask_range x (objBitsKO ko)" + + lemma pspace_no_overlap'_lift: + assumes typ_at:"\slot P Q. \\s. P (typ_at' Q slot s)\ f \\r s. P (typ_at' Q slot s) \" + assumes ps :"\Q\ f \\r s. pspace_aligned' s \ pspace_distinct' s \" + shows "\Q and pspace_no_overlap' ptr sz \ f \\r. pspace_no_overlap' ptr sz\" +proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + apply (clarsimp simp:valid_def pspace_no_overlap'_def) + apply (drule_tac x = x in spec) + apply (subgoal_tac "\ko'. ksPSpace s x = Some ko' \ koTypeOf ko = koTypeOf ko'") + apply (clarsimp dest!:objBits_type) + apply (rule ccontr) + apply clarsimp + apply (frule_tac slot1 = x and Q1 = "koTypeOf ko" and P1 = "\a. \ a" in use_valid[OF _ typ_at]) + apply (clarsimp simp:typ_at'_def ko_wp_at'_def)+ + apply (frule(1) use_valid[OF _ ps]) + apply (clarsimp simp:valid_pspace'_def) + apply (frule(1) pspace_alignedD') + apply (drule(1) pspace_distinctD') + apply simp + done +qed + + lemma setCTE_pspace_no_overlap': + "\pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz\ + setCTE cte src + \\r. pspace_no_overlap' ptr sz\" + apply (rule pspace_no_overlap'_lift; wp setCTE_typ_at') + apply auto + done + + lemma getCTE_commute: + assumes cte_at_modify: + "\Q. \\s. P s \ cte_wp_at' Q dest s \ f \\a s. cte_wp_at' Q dest s\" + shows "monad_commute (P and cte_at' dest) (getCTE dest) f" + proof - + have getsame: "\x y s. 
(x,y)\ fst (getCTE dest s) \ y = s" + apply (drule use_valid) + prefer 3 + apply (simp|wp)+ + done + show ?thesis + apply (simp add:monad_commute_def bind_assoc getCTE_def split_def cte_at'_def) + apply (clarsimp simp:bind_def split_def return_def) + apply (rule conjI) + apply (rule set_eqI) + apply (rule iffI) + apply clarsimp + apply (rule bexI[rotated], assumption) + apply (drule_tac Q1 ="(=) cte" in use_valid[OF _ cte_at_modify]) + apply (simp add:cte_wp_at'_def) + apply (simp add:cte_wp_at'_def) + apply clarsimp + apply (rule conjI) + apply (frule_tac Q1 = "(=) cte" in use_valid[OF _ cte_at_modify]) + apply (clarsimp simp:cte_wp_at'_def ko_wp_at'_def) + apply (clarsimp simp:cte_wp_at'_def) + apply (rule bexI[rotated], assumption) + apply (metis fst_eqD getObject_cte_det snd_eqD) + apply (cut_tac no_failD[OF no_fail_getCTE[unfolded getCTE_def]]) + prefer 2 + apply (simp add:cte_wp_at'_def) + apply fastforce + apply simp + apply (rule iffI) + apply clarsimp+ + apply (cut_tac s = b in no_failD[OF no_fail_getCTE[unfolded getCTE_def]]) + prefer 2 + apply fastforce + apply (drule_tac Q1 = "(=) cte" in use_valid[OF _ cte_at_modify]) + apply (simp add:cte_wp_at'_def) + apply (simp add:cte_wp_at_ctes_of) + done +qed + +definition "cte_check \ \b src a next. (case b of + KOTCB tcb \ (is_aligned a (objBits tcb) + \ (case next of None \ True | Some z \ 2^(objBits tcb) \ z - a)) \ + (src - a = tcbVTableSlot << cteSizeBits + \ src - a = tcbCTableSlot << cteSizeBits + \ src - a = tcbReplySlot << cteSizeBits + \ src - a = tcbCallerSlot << cteSizeBits + \ src - a = tcbIPCBufferSlot << cteSizeBits ) + | KOCTE v1 \ ( src = a \ (is_aligned a (objBits (makeObject::cte))) + \ (case next of None \ True | Some z \ 2^(objBits (makeObject::cte)) \ z - a)) + | _ \ False)" + +definition locateCTE where + "locateCTE src \ + (do ps \ gets ksPSpace; + (before, after) \ return (lookupAround2 src ps); + (ptr,val) \ maybeToMonad before; + assert (cte_check val src ptr after); + return ptr + od)" + +definition cte_update where + "cte_update \ \cte b src a. (case b of + KOTCB tcb \ if (src - a = tcbVTableSlot << cteSizeBits) then KOTCB (tcbVTable_update (\_. cte) tcb) + else if (src - a = tcbCTableSlot << cteSizeBits) then KOTCB (tcbCTable_update (\_. cte) tcb) + else if (src - a = tcbReplySlot << cteSizeBits) then KOTCB (tcbReply_update (\_. cte) tcb) + else if (src - a = tcbCallerSlot << cteSizeBits) then KOTCB (tcbCaller_update (\_. cte) tcb) + else if (src - a = tcbIPCBufferSlot << cteSizeBits) then KOTCB (tcbIPCBufferFrame_update (\_. cte) tcb) + else KOTCB tcb + | KOCTE v1 \ KOCTE cte + | x \ x)" + +lemma simpler_updateObject_def: + "updateObject (cte::cte) b src a next = + (\s. (if (cte_check b src a next) then ({(cte_update cte b src a,s)}, False) + else fail s))" + apply (rule ext) + apply (clarsimp simp:ObjectInstances_H.updateObject_cte objBits_simps) + apply (case_tac b) + apply (simp_all add:cte_check_def typeError_def fail_def + tcbIPCBufferSlot_def + tcbCallerSlot_def tcbReplySlot_def + tcbCTableSlot_def tcbVTableSlot_def) + by (intro conjI impI; + clarsimp simp:alignCheck_def unless_def when_def not_less[symmetric] + alignError_def is_aligned_mask magnitudeCheck_def + cte_update_def return_def tcbIPCBufferSlot_def + tcbCallerSlot_def tcbReplySlot_def + tcbCTableSlot_def tcbVTableSlot_def objBits_simps + cteSizeBits_def split:option.splits; + fastforce simp:return_def fail_def bind_def)+ + + +lemma setCTE_def2: + "(setCTE src cte) = + (do ptr \ locateCTE src; + modify (ksPSpace_update (\ps. 
ps(ptr \ (cte_update cte (the (ps ptr)) src ptr )))) od)" + apply (clarsimp simp:setCTE_def setObject_def split_def locateCTE_def bind_assoc) + apply (rule ext) + apply (rule_tac Q = "\r s'. s'= x \ r = ksPSpace x " in monad_eq_split) + apply (rule_tac Q = "\ptr s'. s' = x \ snd ptr = the ((ksPSpace x) (fst ptr) ) " in monad_eq_split) + apply (clarsimp simp:assert_def return_def fail_def bind_def simpler_modify_def) + apply (clarsimp simp:simpler_updateObject_def fail_def) + apply (wp|clarsimp simp:)+ + apply (simp add:lookupAround2_char1) + apply wp + apply simp + done + +lemma singleton_locateCTE: + "a \ fst (locateCTE src s) = ({a} = fst (locateCTE src s))" + apply (clarsimp simp:locateCTE_def assert_opt_def assert_def + gets_def get_def bind_def return_def split_def) + apply (clarsimp simp:return_def fail_def + split:if_splits option.splits)+ + done + +lemma locateCTE_inv: + "\P\locateCTE s\\r. P\" + apply (simp add:locateCTE_def split_def) + apply wp + apply clarsimp + done + +lemma locateCTE_case: + "\\\ locateCTE src + \\r s. \obj. ksPSpace s r = Some obj \ + (case obj of KOTCB tcb \ True | KOCTE v \ True | _ \ False)\" + apply (clarsimp simp:locateCTE_def split_def | wp)+ + apply (clarsimp simp: lookupAround2_char1) + apply (case_tac b) + apply (simp_all add:cte_check_def) + done + +lemma cte_wp_at_top: + "(cte_wp_at' \ src s) + = (\a b. ( fst (lookupAround2 src (ksPSpace s)) = Some (a, b) \ + cte_check b src a (snd (lookupAround2 src (ksPSpace s)))))" + apply (simp add: cte_wp_at'_def getObject_def gets_def get_def bind_def return_def split_def + assert_opt_def fail_def + split: option.splits) + apply (clarsimp simp:loadObject_cte) + apply (rename_tac obj) + apply (case_tac obj; simp) + apply ((simp add: typeError_def fail_def cte_check_def + split: Structures_H.kernel_object.splits)+)[5] + apply (simp add: loadObject_cte cte_check_def tcbIPCBufferSlot_def tcbCallerSlot_def + tcbReplySlot_def tcbCTableSlot_def tcbVTableSlot_def objBits_simps + cteSizeBits_def) + apply (simp add: alignCheck_def bind_def alignError_def fail_def return_def objBits_simps + magnitudeCheck_def in_monad is_aligned_mask when_def unless_def + split: option.splits) + apply (intro conjI impI allI; simp add: not_le) + apply (clarsimp simp:cte_check_def) + apply (simp add: alignCheck_def bind_def alignError_def fail_def return_def objBits_simps + magnitudeCheck_def in_monad is_aligned_mask when_def unless_def + split: option.splits) + apply (intro conjI impI allI; simp add:not_le) + apply (simp add: typeError_def fail_def cte_check_def split: Structures_H.kernel_object.splits) + done + +lemma locateCTE_monad: + assumes ko_wp_at: "\Q dest. + \\s. P1 s \ ko_wp_at' (\obj. Q (objBitsKO obj)) dest s \ f + \\a s. ko_wp_at' (\obj. Q (objBitsKO obj)) dest s\" + assumes cte_wp_at: "\ dest. + \\s. P2 s \ cte_wp_at' \ dest s \ f + \\a s. cte_wp_at' \ dest s\" + assumes psp_distinct: + "\\s. P3 s \ f \\a s. pspace_distinct' s\" + assumes psp_aligned: + "\\s. P4 s \ f \\a s. pspace_aligned' s\" + shows + "\{(ptr, s)} = fst (locateCTE src s); + (r, s') \ fst (f s);pspace_aligned' s;pspace_distinct' s;(P1 and P2 and P3 and P4) s\ + \ {(ptr,s')} = fst (locateCTE src s')" +proof - + + have src_in_range: + "\obj src a m s'. \cte_check obj src a m;ksPSpace s' a = Some obj\ \ src \ {a..a + 2 ^ objBitsKO obj - 1}" + proof - + fix obj src a m + show "\s'. 
\cte_check obj src a m; ksPSpace s' a = Some obj\ \ src \ {a..a + 2 ^ objBitsKO obj - 1}" + by (case_tac obj) + (auto simp add: cte_check_def objBits_simps' diff_eq_eq + add.commute[where b=a] + word_plus_mono_right is_aligned_no_wrap' + tcbVTableSlot_def tcbCTableSlot_def tcbReplySlot_def + tcbCallerSlot_def tcbIPCBufferSlot_def ) + qed + + note blah[simp del] = usableUntypedRange.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + + have step1: + "\(ptr, s) \ fst (locateCTE src s); + (r, s') \ fst (f s); pspace_aligned' s; pspace_distinct' s; (P1 and P2 and P3 and P4) s\ + \ (ptr,s') \ fst (locateCTE src s')" + apply (frule use_valid[OF _ locateCTE_case]) + apply simp + apply (clarsimp simp: locateCTE_def gets_def split_def + get_def bind_def return_def assert_opt_def fail_def assert_def + split: option.splits if_split_asm) + apply (frule_tac dest1 = src in use_valid[OF _ cte_wp_at]) + apply simp + apply (subst cte_wp_at_top) + apply simp + apply (clarsimp simp add:cte_wp_at_top) + apply (clarsimp simp:lookupAround2_char1) + apply (frule_tac dest1 = ptr and Q1 = "\x. x = objBitsKO b" in use_valid[OF _ ko_wp_at]) + apply (frule(1) pspace_alignedD') + apply (frule(1) pspace_distinctD') + apply (auto simp add:ko_wp_at'_def)[1] + apply (clarsimp simp add:ko_wp_at'_def) + apply (rule ccontr) + apply (frule use_valid[OF _ psp_distinct]) + apply simp + apply (frule use_valid[OF _ psp_aligned]) + apply simp + apply (frule_tac x = a in pspace_distinctD') + apply simp + apply (frule_tac s = s' and a = ptr in rule_out_intv[rotated]) + apply simp+ + apply (frule_tac s = s' and b = ptr and a = a in rule_out_intv) + apply simp+ + apply (thin_tac "\x. P x \ Q x" for P Q)+ + apply (drule_tac p = ptr and p' = a in aligned_ranges_subset_or_disjoint) + apply (erule(1) pspace_alignedD') + apply (drule(1) src_in_range)+ + apply (drule base_member_set[OF pspace_alignedD']) + apply simp + apply (simp add:objBitsKO_bounded2[unfolded word_bits_def,simplified]) + apply (drule base_member_set[OF pspace_alignedD']) + apply simp + apply (simp add:objBitsKO_bounded2[unfolded word_bits_def,simplified]) + apply (clarsimp simp:field_simps mask_def) + apply blast + done + assume + "{(ptr, s)} = fst (locateCTE src s)" + "(r, s') \ fst (f s)" + "pspace_aligned' s" + "pspace_distinct' s" + "(P1 and P2 and P3 and P4) s" + thus ?thesis + using assms step1 + by (clarsimp simp:singleton_locateCTE) +qed + +lemma empty_fail_locateCTE: + "empty_fail (locateCTE src)" + by (fastforce simp: locateCTE_def bind_assoc split_def) + +lemma fail_empty_locateCTE: + "snd (locateCTE src s) \ fst (locateCTE src s) = {}" + by (auto simp: assert_def fail_def locateCTE_def bind_assoc return_def split_def gets_def + get_def bind_def assert_opt_def image_def + split:option.splits if_split_asm)+ + +lemma locateCTE_commute: + assumes nf: "no_fail P0 f" "no_fail P1 (locateCTE src)" + and psp_distinct: "\\s. P2 s \ f \\a s. pspace_distinct' s\" + and psp_aligned: "\\s. P3 s \ f \\a s. pspace_aligned' s\" + assumes ko_wp_at: "\Q dest. + \\s. (P0 and P1 and P2 and P3) s \ ko_wp_at' (\obj. Q (objBitsKO obj)) dest s \ f + \\a s. ko_wp_at' (\obj. Q (objBitsKO obj)) dest s\" + and cte_wp_at: "\ dest. + \\s. (P0 and P1 and P2 and P3) s \ cte_wp_at' \ dest s \ f + \\a s. cte_wp_at' \ dest s\" + shows "monad_commute (P0 and P1 and P2 and P3 and P4 and P5 and pspace_aligned' and pspace_distinct') + (locateCTE src) f" +proof - + have same: + "\ptr val next s s'. 
(ptr, s') \ fst (locateCTE src s) + \ s' = s" + by (erule use_valid[OF _ locateCTE_inv],simp) + show ?thesis + apply (clarsimp simp:monad_commute_def) + apply (clarsimp simp:bind_def return_def) + apply (intro conjI iffI set_eqI) + apply (clarsimp) + apply (frule same) + apply (clarsimp) + apply (rule bexI[rotated], assumption) + apply (frule singleton_locateCTE[THEN iffD1]) + apply (frule locateCTE_monad [OF ko_wp_at cte_wp_at psp_distinct psp_aligned]) + apply assumption+ + apply simp + apply (clarsimp) + apply (rule bexI[rotated]) + apply (fastforce) + apply clarsimp + apply clarsimp + apply (frule empty_failD2[OF empty_fail_locateCTE no_failD[OF nf(2)]]) + apply clarsimp + apply (rule bexI[rotated],assumption) + apply (clarsimp) + apply (frule_tac s = bb in same) + apply (frule_tac s = s in same) + apply clarsimp + apply (frule_tac s1 = s in singleton_locateCTE[THEN iffD1]) + apply (frule locateCTE_monad [OF ko_wp_at cte_wp_at psp_distinct psp_aligned]) + apply assumption+ + apply simp + apply (rule bexI[rotated],assumption) + apply (drule sym) + apply (clarsimp simp:singleton_locateCTE singleton_iff) + apply fastforce + apply (clarsimp simp:split_def image_def) + apply (elim disjE) + apply clarsimp + apply (drule same) + apply simp + apply (frule no_failD[OF nf(2)]) + apply simp + apply (clarsimp simp:split_def image_def) + apply (elim disjE) + apply clarsimp + apply (frule empty_failD2[OF empty_fail_locateCTE no_failD[OF nf(2)]]) + apply clarsimp + apply (frule same) + apply simp + apply (frule singleton_locateCTE[THEN iffD1]) + apply (frule locateCTE_monad [OF ko_wp_at cte_wp_at psp_distinct psp_aligned]) + apply assumption+ + apply simp + apply (clarsimp) + apply (simp add: fail_empty_locateCTE) + apply (simp add: no_failD[OF nf(1)]) + done +qed + +lemmas getObjSize_simps = AARCH64_H.getObjectSize_def[split_simps AARCH64_H.object_type.split apiobject_type.split] + +lemma arch_toAPIType_simps: + "toAPIType ty = Some a \ ty = APIObjectType a" + by (case_tac ty,auto simp:AARCH64_H.toAPIType_def) + +lemma createObject_cte_wp_at': + "\\s. Types_H.getObjectSize ty us < word_bits \ + is_aligned ptr (Types_H.getObjectSize ty us) \ + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) s \ + cte_wp_at' (\c. P c) slot s \ pspace_aligned' s \ + pspace_distinct' s\ + RetypeDecls_H.createObject ty ptr us d + \\r s. cte_wp_at' (\c. P c) slot s \" + apply (simp add:createObject_def) + apply (rule hoare_pre) + apply (wpc + | wp createObjects_orig_cte_wp_at'[where sz = "(Types_H.getObjectSize ty us)"] + threadSet_cte_wp_at' + | simp add: AARCH64_H.createObject_def placeNewDataObject_def + unless_def placeNewObject_def2 objBits_simps range_cover_full + curDomain_def bit_simps + getObjSize_simps apiGetObjectSize_def tcbBlockSizeBits_def + epSizeBits_def ntfnSizeBits_def cteSizeBits_def updatePTType_def + | intro conjI impI | clarsimp dest!: arch_toAPIType_simps)+ + done + +lemma createObject_getCTE_commute: + "monad_commute + (cte_wp_at' (\_. 
True) dests and pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + K (ptr \ dests) and K (Types_H.getObjectSize ty us < word_bits) and + K (is_aligned ptr (Types_H.getObjectSize ty us))) + (RetypeDecls_H.createObject ty ptr us d) (getCTE dests)" + apply (rule monad_commute_guard_imp[OF commute_commute]) + apply (rule getCTE_commute) + apply (rule hoare_pre) + apply (wp createObject_cte_wp_at') + apply (clarsimp simp:cte_wp_at_ctes_of) + apply assumption + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma simpler_placeNewObject_def: + "\us < word_bits;is_aligned ptr (objBitsKO (injectKOS val) + us); + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s; pspace_aligned' s \ \ placeNewObject ptr val us s = + modify (ksPSpace_update + (\_. foldr (\addr map. map(addr \ injectKOS val)) (new_cap_addrs (2 ^ us) ptr (injectKOS val)) + (ksPSpace s))) s" + apply (clarsimp simp:placeNewObject_def2) + apply (clarsimp simp:createObjects'_def) + apply (simp add:bind_def in_monad when_def is_aligned_mask[THEN iffD1]) + apply (clarsimp simp:return_def bind_def gets_def assert_def fail_def get_def split_def + split:option.splits) + apply (clarsimp simp: new_cap_addrs_fold' word_1_le_power[where 'a=machine_word_len, folded word_bits_def] lookupAround2_char1 not_less) + apply (drule(1) pspace_no_overlapD'[rotated]) + apply (drule_tac x = a in in_empty_interE) + apply clarsimp + apply (drule(1) pspace_alignedD') + apply (simp add:is_aligned_no_overflow) + apply (clarsimp simp: shiftL_nat p_assoc_help) + apply simp + done + +lemma fail_set: "fst (fail s) = {}" + by (clarsimp simp: fail_def) + +lemma locateCTE_cte_no_fail: + "no_fail (cte_at' src) (locateCTE src)" + apply (clarsimp simp:no_fail_def cte_wp_at'_def getObject_def + locateCTE_def return_def gets_def get_def bind_def split_def + assert_opt_def assert_def in_fail fail_set split:option.splits) + apply (clarsimp simp:cte_check_def ObjectInstances_H.loadObject_cte) + apply (drule in_singleton) + by (auto simp: objBits_simps cteSizeBits_def alignError_def + alignCheck_def in_monad is_aligned_mask magnitudeCheck_def + typeError_def + cong: if_cong split: if_splits option.splits kernel_object.splits) + +lemma not_in_new_cap_addrs: + "\is_aligned ptr (objBitsKO obj + us); + objBitsKO obj + us < word_bits; + pspace_no_overlap' ptr (objBitsKO obj + us) s; + ksPSpace s dest = Some ko;pspace_aligned' s\ + \ dest \ set (new_cap_addrs (2 ^ us) ptr obj)" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (rule ccontr) + apply simp + apply (drule(1) pspace_no_overlapD'[rotated]) + apply (erule_tac x = dest in in_empty_interE) + apply (clarsimp) + apply (erule(1) is_aligned_no_overflow[OF pspace_alignedD']) + apply (erule subsetD[rotated]) + apply (simp add:p_assoc_help) + apply (rule new_cap_addrs_subset[unfolded ptr_add_def,simplified]) + apply (rule range_cover_rel[OF range_cover_full]) + apply simp+ + done + +lemma placeNewObject_pspace_aligned': + "\K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + objBitsKO (injectKOS val) + us < word_bits) and + pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us)\ + placeNewObject ptr val us + \\r s. 
pspace_aligned' s\" + apply (clarsimp simp:valid_def) + apply (simp add:simpler_placeNewObject_def simpler_modify_def) + apply (subst data_map_insert_def[symmetric])+ + apply (erule(2) Retype_R.retype_aligned_distinct' [unfolded data_map_insert_def[symmetric]]) + apply (rule range_cover_rel[OF range_cover_full]) + apply simp+ + done + +lemma placeNewObject_pspace_distinct': + "\\s. objBitsKO (injectKOS val) + us < word_bits \ + is_aligned ptr (objBitsKO (injectKOS val) + us) \ + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s \ + pspace_aligned' s \ pspace_distinct' s\ + placeNewObject ptr val us + \\a. pspace_distinct'\" + apply (clarsimp simp:valid_def) + apply (simp add:simpler_placeNewObject_def simpler_modify_def) + apply (subst data_map_insert_def[symmetric])+ + apply (erule(2) Retype_R.retype_aligned_distinct' + [unfolded data_map_insert_def[symmetric]]) + apply (rule range_cover_rel[OF range_cover_full]) + apply simp+ + done + +lemma placeNewObject_ko_wp_at': + "\\s. (if slot \ set (new_cap_addrs (2 ^ us) ptr (injectKOS val)) + then P (injectKOS val) + else ko_wp_at' P slot s) \ + objBitsKO (injectKOS val) + us < word_bits \ + is_aligned ptr (objBitsKO (injectKOS val) + us) \ + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s \ + pspace_aligned' s \ pspace_distinct' s\ + placeNewObject ptr val us + \\a. ko_wp_at' P slot\" + apply (clarsimp simp:valid_def split del:if_split) + apply (simp add:simpler_placeNewObject_def simpler_modify_def) + apply (subst data_map_insert_def[symmetric])+ + apply (subst retype_ko_wp_at') + apply simp+ + apply (rule range_cover_rel[OF range_cover_full]) + apply simp+ + done + +lemma cte_wp_at_cases_mask': + "cte_wp_at' P p = (\s. + (obj_at' P p s + \ p && mask tcbBlockSizeBits \ dom tcb_cte_cases + \ obj_at' (P \ fst (the (tcb_cte_cases (p && mask tcbBlockSizeBits)))) + (p && ~~ mask tcbBlockSizeBits) s))" + apply (rule ext) + apply (simp add:cte_wp_at_obj_cases_mask) + done + +lemma not_in_new_cap_addrs': + "\dest \ set (new_cap_addrs (2 ^ us) ptr obj); + is_aligned ptr (objBitsKO obj + us); + objBitsKO obj + us < word_bits; + pspace_no_overlap' ptr (objBitsKO obj + us) s; + pspace_aligned' s \ + \ ksPSpace s dest = None" + apply (rule ccontr) + apply clarsimp + apply (drule not_in_new_cap_addrs) + apply simp+ + done + +lemma placeNewObject_cte_wp_at': + "\K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + objBitsKO (injectKOS val) + us < word_bits) and + K (ptr \ src) and cte_wp_at' P src and + pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us)\ + placeNewObject ptr val us + \\r s. cte_wp_at' P src s\" + apply (clarsimp simp:placeNewObject_def2) + apply (wp createObjects_orig_cte_wp_at') + apply (auto simp:range_cover_full) + done + + +lemma placeNewObject_cte_wp_at'': + "\\s. cte_wp_at' P slot s \ + objBitsKO (injectKOS val) + us < word_bits \ + is_aligned ptr (objBitsKO (injectKOS val) + us) \ + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s \ + pspace_aligned' s \ pspace_distinct' s\ + placeNewObject ptr val us \\a s. 
cte_wp_at' P slot s\" + apply (simp add:cte_wp_at_cases_mask' obj_at'_real_def) + apply (wp hoare_vcg_disj_lift placeNewObject_ko_wp_at') + apply (clarsimp simp:conj_comms) + apply (intro conjI impI allI impI) + apply (drule(4) not_in_new_cap_addrs') + apply (clarsimp simp:ko_wp_at'_def) + apply (drule (4)not_in_new_cap_addrs')+ + apply (clarsimp simp:ko_wp_at'_def) + apply (elim disjE) + apply simp + apply clarsimp + apply (drule (4)not_in_new_cap_addrs')+ + apply (clarsimp simp:ko_wp_at'_def) + done + +lemma no_fail_placeNewObject: + "no_fail (\s. us < word_bits \ + is_aligned ptr (objBitsKO (injectKOS val) + us) \ + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) s \ + pspace_aligned' s) + (placeNewObject ptr val us)" + by (clarsimp simp:no_fail_def simpler_modify_def simpler_placeNewObject_def) + +lemma placeNewObject_locateCTE_commute: + "monad_commute + (K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + (objBitsKO (injectKOS val) + us) < word_bits \ ptr \ src) and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) and + pspace_aligned' and pspace_distinct' and cte_at' src) + (placeNewObject ptr val us) (locateCTE src)" + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF locateCTE_commute]) + apply (wp no_fail_placeNewObject locateCTE_cte_no_fail + placeNewObject_pspace_aligned' + placeNewObject_pspace_distinct' + placeNewObject_ko_wp_at' | simp)+ + apply (clarsimp simp:ko_wp_at'_def) + apply (drule(3) not_in_new_cap_addrs) + apply fastforce+ + apply (wp placeNewObject_cte_wp_at'') + apply clarsimp + apply fastforce + done + +lemma update_ksPSpaceI: + "kh = kh' \ s\ksPSpace := kh\ = s\ksPSpace := kh'\" + by simp + +lemma placeNewObject_modify_commute: + "monad_commute + (K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + objBitsKO (injectKOS val) + us < word_bits) and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) and + pspace_aligned' and ko_wp_at' (\a. objBitsKO (f (Some a)) = objBitsKO a) ptr') + (placeNewObject ptr val us) + (modify (ksPSpace_update (\ps. ps(ptr' \ f (ps ptr')))))" + apply (clarsimp simp:monad_commute_def simpler_modify_def bind_def split_def return_def) + apply (subst simpler_placeNewObject_def; (simp add:range_cover_def)?) + apply (clarsimp simp: simpler_modify_def) + apply (frule(1) range_cover_full) + apply (simp add: simpler_placeNewObject_def) + apply (subgoal_tac "pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) + (ksPSpace_update (\ps. ps(ptr' \ f (ps ptr'))) s)") + prefer 2 + apply (clarsimp simp:ko_wp_at'_def) + apply (subst pspace_no_overlap'_def) + apply (intro allI impI) + apply (case_tac "x = ptr'") + apply (subgoal_tac "objBitsKO koa = objBitsKO ko") + apply (drule(1) pspace_no_overlapD') + apply (clarsimp simp:field_simps mask_def) + apply (clarsimp) + apply (drule_tac x = x and s = s in pspace_no_overlapD'[rotated]) + apply (simp) + apply (clarsimp simp:field_simps mask_def) + apply (subgoal_tac "pspace_aligned' (ksPSpace_update (\ps. 
ps(ptr' \ f (ps ptr'))) s)") + prefer 2 + apply (subst pspace_aligned'_def) + apply (rule ballI) + apply (erule domE) + apply (clarsimp simp:ko_wp_at'_def split:if_split_asm) + apply (drule(1) pspace_alignedD')+ + apply simp + apply (simp add:simpler_placeNewObject_def) + apply (clarsimp simp:simpler_modify_def Fun.comp_def singleton_iff image_def) + apply (intro conjI update_ksPSpaceI ext) + apply (clarsimp simp:ko_wp_at'_def foldr_upd_app_if) + apply (frule(1) pspace_no_overlapD') + apply (drule subsetD[rotated]) + apply (rule new_cap_addrs_subset) + apply (erule range_cover_rel) + apply simp + apply simp + apply (drule_tac x = ptr' in in_empty_interE) + apply (clarsimp simp:is_aligned_no_overflow) + apply (clarsimp simp:range_cover_def ptr_add_def obj_range'_def p_assoc_help) + apply simp + done + +lemma cte_update_objBits[simp]: + "(objBitsKO (cte_update cte b src a)) = objBitsKO b" + by (case_tac b, + (simp add:objBits_simps cte_update_def)+) + +lemma locateCTE_ret_neq: + "\ko_wp_at' (\x. koTypeOf x \ TCBT \ koTypeOf x \ CTET) ptr\ + locateCTE src \\r s. ptr \ r\" + apply (clarsimp simp add:valid_def) + apply (frule use_valid[OF _ locateCTE_case]) + apply simp + apply (frule(1) use_valid[OF _ locateCTE_inv]) + apply (clarsimp simp:ko_wp_at'_def koTypeOf_def) + apply (auto split:Structures_H.kernel_object.split_asm) + done + +lemma locateCTE_ko_wp_at': + "\cte_at' src and pspace_distinct' \ + locateCTE src + \\rv. ko_wp_at' \ rv \" + apply (clarsimp simp:locateCTE_def split_def) + apply wp + apply (clarsimp simp: cte_wp_at'_def getObject_def gets_def split_def get_def bind_def return_def + ko_wp_at'_def lookupAround2_char1 assert_opt_def) + apply (clarsimp split:option.splits + simp:fail_def return_def lookupAround2_char1) + apply (rename_tac ko) + apply (case_tac ko; + clarsimp simp: cte_check_def objBits_simps cte_update_def dest!: pspace_distinctD') + done + + +lemma setCTE_placeNewObject_commute: + "monad_commute + (K (is_aligned ptr (objBitsKO (injectKOS val) + us) \ + objBitsKO (injectKOS val) + us < word_bits) and + K(ptr \ src) and cte_wp_at' (\_. 
True) src and + pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us)) + (setCTE src cte) (placeNewObject ptr val us)" + apply (clarsimp simp: setCTE_def2 split_def) + apply (rule commute_commute) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split[OF placeNewObject_modify_commute]) + apply (rule placeNewObject_locateCTE_commute) + apply (wp locateCTE_inv locateCTE_ko_wp_at' | simp)+ + done + +lemma doMachineOp_upd_heap_commute: + "monad_commute \ (doMachineOp x) (modify (ksPSpace_update P))" + apply (clarsimp simp:doMachineOp_def split_def simpler_modify_def + gets_def get_def return_def bind_def select_f_def) + apply (clarsimp simp:monad_commute_def bind_def return_def) + apply fastforce + done + +lemma magnitudeCheck_det: + "\ksPSpace s ptr = Some ko; is_aligned ptr (objBitsKO ko); + ps_clear ptr (objBitsKO ko) s\ + \ magnitudeCheck ptr (snd (lookupAround2 ptr (ksPSpace s))) + (objBitsKO ko) s = + ({((), s)},False)" + apply (frule in_magnitude_check'[THEN iffD2]) + apply (case_tac ko) + apply (simp add: objBits_simps' pageBits_def)+ + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object) + apply (simp add:archObjSize_def pageBits_def vcpuBits_def)+ + apply (subgoal_tac + "\ snd (magnitudeCheck ptr (snd (lookupAround2 ptr (ksPSpace s))) (objBitsKO ko) s)") + apply (drule singleton_in_magnitude_check) + apply (drule_tac x = s in spec) + apply (case_tac + "(magnitudeCheck ptr (snd (lookupAround2 ptr (ksPSpace s))) (objBitsKO ko) s)") + apply simp + apply (rule ccontr) + apply (clarsimp simp:magnitudeCheck_assert assert_def fail_def return_def + split:if_splits option.splits) + done + +lemma getPTE_det: + "ko_wp_at' ((=) (KOArch (KOPTE pte))) p s + \ getObject p s = ({((pte::pte),s)},False)" + apply (clarsimp simp: ko_wp_at'_def getObject_def split_def + bind_def gets_def return_def get_def assert_opt_def + split: if_splits) + apply (clarsimp simp: fail_def return_def lookupAround2_known1) + apply (simp add: loadObject_default_def) + apply (clarsimp simp: projectKO_def projectKO_opt_pte alignCheck_def + objBits_simps unless_def) + apply (clarsimp simp: bind_def return_def is_aligned_mask) + apply (intro conjI) + apply (intro set_eqI iffI) + apply clarsimp + apply (subst (asm) in_magnitude_check') + apply (simp add:archObjSize_def is_aligned_mask)+ + apply (rule bexI[rotated]) + apply (rule in_magnitude_check'[THEN iffD2]) + apply (simp add:is_aligned_mask)+ + apply (clarsimp simp:image_def) + apply (clarsimp simp: magnitudeCheck_assert assert_def objBits_def archObjSize_def + return_def fail_def lookupAround2_char2 + split: option.splits if_split_asm) + apply (rule ccontr) + apply (simp add: ps_clear_def flip: is_aligned_mask) + apply (erule_tac x = x2 in in_empty_interE) + apply (clarsimp simp:less_imp_le) + apply (rule conjI) + apply (subst add.commute) + apply (rule word_diff_ls') + apply (clarsimp simp:field_simps not_le plus_one_helper mask_def) + apply (simp add: is_aligned_no_overflow_mask add_ac) + apply simp + apply blast + done + +lemma in_dom_eq: + "m a = Some obj \ dom (\b. if b = a then Some g else m b) = dom m" + by (rule set_eqI,clarsimp simp:dom_def) + +lemma setCTE_pte_at': + "\ko_wp_at' ((=) (KOArch (KOPTE pte))) ptr and + cte_wp_at' (\_. True) src and pspace_distinct'\ + setCTE src cte + \\x s. 
ko_wp_at' ((=) (KOArch (KOPTE pte))) ptr s\" + apply (clarsimp simp:setCTE_def2) + including no_pre apply wp + apply (simp add:split_def) + apply (clarsimp simp:valid_def) + apply (subgoal_tac "b = s") + prefer 2 + apply (erule use_valid[OF _ locateCTE_inv]) + apply simp + apply (subgoal_tac "ptr \ a") + apply (frule use_valid[OF _ locateCTE_ko_wp_at']) + apply simp + apply (clarsimp simp:ko_wp_at'_def ps_clear_def) + apply (simp add:in_dom_eq) + apply (drule use_valid[OF _ locateCTE_case]) + apply simp + apply (clarsimp simp:ko_wp_at'_def objBits_simps) + done + +lemma storePTE_det: + "ko_wp_at' ((=) (KOArch (KOPTE pte))) ptr s + \ storePTE ptr (new_pte::pte) s = + modify (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPTE new_pte)))) s" + apply (clarsimp simp:ko_wp_at'_def storePTE_def split_def + bind_def gets_def return_def + get_def setObject_def + assert_opt_def split:if_splits) + apply (clarsimp simp:lookupAround2_known1 return_def alignCheck_def + updateObject_default_def split_def + unless_def projectKO_def + projectKO_opt_pte bind_def when_def + is_aligned_mask[symmetric] objBits_simps) + apply (drule magnitudeCheck_det; simp add:objBits_simps) + done + +lemma modify_obj_commute: + "monad_commute (K (ptr\ ptr')) + (modify (ksPSpace_update (\ps. ps(ptr \ ko)))) + (modify (ksPSpace_update (\ps. ps(ptr' \ ko'))))" + apply (clarsimp simp:monad_commute_def return_def bind_def simpler_modify_def) + apply (case_tac s) + apply auto + done + +lemma modify_specify: + "(\s. modify (ksPSpace_update (\_. P (ksPSpace s))) s) = + modify (ksPSpace_update (\ps. P ps))" + by (auto simp: simpler_modify_def) + +lemma modify_specify2: + "(modify (ksPSpace_update (\_. P (ksPSpace s))) >>= g) s = + (modify (ksPSpace_update (\ps. P ps)) >>=g) s" + apply (clarsimp simp:simpler_modify_def bind_def) + apply (rule arg_cong[where f = "\x. g () x"],simp) + done + +lemma modify_pte_pte_at': + "\pte_at' ptr\ + modify (ksPSpace_update (\ps. ps(ptr \ KOArch (KOPTE new_pte)))) + \\a. pte_at' ptr\" + apply wp + apply (clarsimp simp del: fun_upd_apply + simp: typ_at'_def ko_wp_at'_def objBits_simps) + apply (clarsimp simp:ps_clear_def) + apply (case_tac ko,simp_all) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + apply (clarsimp simp:archObjSize_def) + done + +lemma modify_pte_pspace_distinct': + "\pte_at' ptr and pspace_distinct'\ + modify (ksPSpace_update (\ps. ps(ptr \ KOArch (KOPTE new_pte)))) + \\a. pspace_distinct'\" + apply (clarsimp simp: simpler_modify_def ko_wp_at'_def valid_def typ_at'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + apply (subst pspace_distinct'_def) + apply (intro ballI) + apply (erule domE) + apply (clarsimp split:if_splits) + apply (drule(1) pspace_distinctD') + apply (simp add:objBits_simps) + apply (simp add:ps_clear_def) + apply (drule_tac x = x in pspace_distinctD') + apply simp + unfolding ps_clear_def + apply (erule disjoint_subset2[rotated]) + apply clarsimp + done + +lemma modify_pte_pspace_aligned': + "\pte_at' ptr and pspace_aligned'\ + modify (ksPSpace_update (\ps. ps(ptr \ KOArch (KOPTE new_pte)))) + \\a. 
pspace_aligned'\" + apply (clarsimp simp: simpler_modify_def ko_wp_at'_def valid_def typ_at'_def) + apply (case_tac ko,simp_all) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + apply (subst pspace_aligned'_def) + apply (intro ballI) + apply (erule domE) + apply (clarsimp split:if_splits) + apply (drule(1) pspace_alignedD') + apply (simp add:objBits_simps) + apply (simp add:ps_clear_def) + apply (drule_tac x = x in pspace_alignedD') + apply simp + apply simp + done + +lemma modify_pte_psp_no_overlap': + "\pte_at' ptr and pspace_no_overlap' ptr' sz\ + modify (ksPSpace_update (\ps. ps(ptr \ KOArch (KOPTE new_pte)))) + \\a. pspace_no_overlap' ptr' sz\" +proof - + note [simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + apply (clarsimp simp:simpler_modify_def ko_wp_at'_def valid_def typ_at'_def) + apply (case_tac ko,simp_all) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + apply (subst pspace_no_overlap'_def) + apply (intro allI impI) + apply (clarsimp split:if_splits) + apply (drule(1) pspace_no_overlapD') + apply (simp add:objBits_simps field_simps mask_def) + apply (drule(1) pspace_no_overlapD')+ + apply (simp add:field_simps mask_def) + done +qed + +lemma koTypeOf_pte: + "koTypeOf ko = ArchT PTET \ \pte. ko = KOArch (KOPTE pte)" + apply (case_tac ko,simp_all) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object,simp_all) + done + +lemma modify_mapM_x: + "(modify (ksPSpace_update (foldr (\addr map. map(addr \ obj)) list))) = + (mapM_x (\x. modify (ksPSpace_update (\m. m(x\ obj)))) (rev list))" + apply (induct list) + apply (clarsimp simp:mapM_x_Nil) + apply (rule ext) + apply (simp add:simpler_modify_def return_def) + apply (clarsimp simp:mapM_x_append mapM_x_singleton simpler_modify_def) + apply (drule sym) + apply (rule ext) + apply (simp add:Fun.comp_def bind_def) + done + +lemma modify_obj_commute': + "monad_commute (K (ptr\ ptr') and ko_wp_at' \ ptr') + (modify (ksPSpace_update (\ps. ps(ptr \ ko)))) + (modify (ksPSpace_update (\ps. ps(ptr' \ f (the (ps ptr'))))))" + apply (clarsimp simp:monad_commute_def return_def + bind_def simpler_modify_def ko_wp_at'_def) + apply (case_tac s) + apply clarsimp + apply (rule ext) + apply clarsimp + done + +lemma setCTE_doMachineOp_commute: + assumes nf: "no_fail Q (doMachineOp x)" + shows "monad_commute (cte_at' dest and pspace_aligned' and pspace_distinct' and Q) + (setCTE dest cte) + (doMachineOp x)" + apply (simp add:setCTE_def2 split_def) + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF monad_commute_split]) + apply (rule doMachineOp_upd_heap_commute) + apply (rule commute_commute[OF locateCTE_commute]) + apply (wp nf locateCTE_cte_no_fail)+ + apply clarsimp + apply (wp|clarsimp|fastforce)+ + done + +lemma placeNewObject_valid_arch_state: + "\valid_arch_state' and + pspace_no_overlap' ptr (objBitsKO (injectKOS val) + us) and + pspace_aligned' and pspace_distinct' and + K (is_aligned ptr (objBitsKO (injectKOS val) + us)) and + K ( (objBitsKO (injectKOS val)+ us)< word_bits)\ + placeNewObject ptr val us + \\rv s. 
valid_arch_state' s\" + apply (simp add:placeNewObject_def2 split_def) + apply (rule createObjects'_wp_subst) + apply (wp createObjects_valid_arch) + apply clarsimp + apply (intro conjI,simp) + apply (erule(1) range_cover_full) + done + +lemma setCTE_modify_gsCNode_commute: + "monad_commute P (setCTE src (cte::cte)) + (modify (%ks. ks\gsCNodes := f (gsCNodes ks)\))" + by (auto simp: monad_commute_def setCTE_def setObject_def split_def bind_def + return_def simpler_modify_def simpler_gets_def assert_opt_def + fail_def simpler_updateObject_def + split: option.splits if_split_asm) + +lemma setCTE_modify_gsUserPages_commute: + "monad_commute P (setCTE src (cte::cte)) + (modify (%ks. ks\gsUserPages := f (gsUserPages ks)\))" + by (auto simp: monad_commute_def setCTE_def setObject_def split_def bind_def + return_def simpler_modify_def simpler_gets_def assert_opt_def + fail_def simpler_updateObject_def + split: option.splits if_split_asm) + +lemma setCTE_updatePTType_commute: + "monad_commute \ (setCTE src cte) (updatePTType p pt_t)" + unfolding updatePTType_def + apply (clarsimp simp: monad_commute_def) + apply (clarsimp simp: setCTE_def setObject_def bind_assoc exec_gets exec_modify) + apply (case_tac "lookupAround2 src (ksPSpace s)"; clarsimp simp: bind_assoc) + apply (simp add: assert_opt_def bind_assoc simpler_updateObject_def + simpler_modify_def simpler_gets_def return_def split_def fail_def + split: option.splits) + apply (clarsimp simp: bind_def fail_def) + apply (case_tac s, rename_tac arch mach, case_tac arch, simp) + apply fastforce + done + +lemma getTCB_det: + "ko_wp_at' ((=) (KOTCB tcb)) p s + \ getObject p s = ({(tcb,s)},False)" + apply (clarsimp simp:ko_wp_at'_def getObject_def split_def + bind_def gets_def return_def get_def + assert_opt_def split:if_splits) + apply (clarsimp simp: fail_def return_def lookupAround2_known1) + apply (simp add:loadObject_default_def) + apply (clarsimp simp:projectKO_def projectKO_opt_tcb alignCheck_def is_aligned_mask + objBits_simps' unless_def) + apply (clarsimp simp:bind_def return_def) + apply (intro conjI) + apply (intro set_eqI iffI) + apply clarsimp + apply (subst (asm) in_magnitude_check') + apply (simp add:archObjSize_def is_aligned_mask)+ + apply (rule bexI[rotated]) + apply (rule in_magnitude_check'[THEN iffD2]) + apply (simp add:is_aligned_mask)+ + apply (clarsimp simp:image_def) + apply (clarsimp simp: magnitudeCheck_assert assert_def objBits_def archObjSize_def + return_def fail_def lookupAround2_char2 + split:option.splits if_split_asm) + apply (rule ccontr) + apply (simp add:ps_clear_def field_simps) + apply (erule_tac x = x2 in in_empty_interE) + apply (clarsimp simp:less_imp_le) + apply (rule conjI) + apply (subst add.commute) + apply (rule word_diff_ls') + apply (clarsimp simp:field_simps not_le plus_one_helper mask_def) + apply (simp add:field_simps is_aligned_no_overflow_mask flip: is_aligned_mask) + apply simp + apply auto + done + +lemma threadSet_det: + "tcb_at' ptr s + \ threadSet f ptr s = + modify (ksPSpace_update + (\ps. ps(ptr \ (\t. 
case t of Some (KOTCB tcb) \ KOTCB (f tcb)) (ps ptr)))) s" + apply (clarsimp simp add: threadSet_def bind_def obj_at'_def) + apply (subst getTCB_det, simp add: ko_wp_at'_def)+ + apply (clarsimp simp: setObject_def gets_def get_def) + apply (subst bind_def) + apply (clarsimp simp: split_def) + apply (simp add: lookupAround2_known1 bind_assoc projectKO_def assert_opt_def + updateObject_default_def projectKO_opt_tcb) + apply (clarsimp simp: alignCheck_def unless_def when_def is_aligned_mask objBits_simps) + apply (clarsimp simp: magnitudeCheck_det bind_def) + apply (cut_tac ko = "KOTCB obj" in magnitudeCheck_det) + apply (simp add: objBits_simps is_aligned_mask)+ + apply (clarsimp simp: modify_def get_def put_def bind_def) + done + + +lemma setCTE_modify_tcbDomain_commute: + "monad_commute + (tcb_at' ptr and cte_wp_at' (\_. True) src and pspace_distinct' and pspace_aligned') (setCTE src cte) + (threadSet (tcbDomain_update (\_. ra)) ptr)" + proof - + note blah[simp del] = atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + atLeastAtMost_iff + + have hint: + "\P ptr a cte b src ra. monad_commute (tcb_at' ptr and ko_wp_at' P a ) + (threadSet (tcbDomain_update (\_. ra)) ptr) + (modify (ksPSpace_update (\ps. ps(a \ cte_update cte (the (ps a)) src a))))" + apply (clarsimp simp: monad_commute_def bind_def simpler_modify_def return_def) + apply (clarsimp simp: threadSet_det simpler_modify_def) + apply (subgoal_tac "tcb_at' ptr (ksPSpace_update (\ps. ps(a \ cte_update cte (the (ps a)) src a)) s)") + prefer 2 + apply (clarsimp simp:obj_at'_def) + apply (intro conjI impI) + apply simp + apply (clarsimp simp: projectKO_opt_tcb split:Structures_H.kernel_object.split_asm) + apply (simp add:cte_update_def) + apply (clarsimp simp: projectKO_opt_tcb split:Structures_H.kernel_object.split_asm) + apply (simp add:ps_clear_def) + apply (simp add:ps_clear_def) + apply (rule ccontr,simp) + apply (erule in_emptyE) + apply (clarsimp simp:ko_wp_at'_def) + apply blast + apply (simp add:threadSet_det simpler_modify_def) + apply (subst (asm) obj_at'_def) + apply (thin_tac "tcb_at' ptr P" for P) + apply (clarsimp simp: obj_at'_def projectKO_opt_tcb, + simp split: Structures_H.kernel_object.split_asm) + apply (case_tac s,clarsimp) + apply (intro conjI) + apply clarsimp + apply (rule ext,clarsimp) + apply (case_tac obj) + apply (simp add:cte_update_def) + apply clarsimp + apply (rule ext) + apply simp + done + + show ?thesis + apply (rule commute_name_pre_state) + apply (clarsimp simp add: setCTE_def2) + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF monad_commute_split]) + apply (rule hint) + apply (rule commute_commute) + apply (rule locateCTE_commute) + apply (wp locateCTE_cte_no_fail)+ + apply (wp threadSet_ko_wp_at2') + apply (clarsimp simp:objBits_simps) + apply (wp|simp)+ + apply (wp locateCTE_inv locateCTE_ko_wp_at') + apply clarsimp + apply fastforce + done +qed + +lemma curDomain_commute: + assumes cur:"\P. \\s. P (ksCurDomain s)\ f \\r s. P (ksCurDomain s)\" + shows "monad_commute \ f curDomain" + apply (clarsimp simp add:monad_commute_def curDomain_def get_def return_def + gets_def bind_def) + apply (rule conjI) + apply (rule set_eqI) + apply (rule iffI) + apply clarsimp + apply (rule bexI[rotated], assumption) + apply clarsimp + apply (frule_tac P1 = "\x. x = ksCurDomain s" in use_valid[OF _ cur]) + apply simp+ + apply clarsimp + apply (rule bexI[rotated], assumption) + apply clarsimp + apply (frule_tac P1 = "\x. 
x = ksCurDomain s" in use_valid[OF _ cur]) + apply simp+ + apply auto + done + +crunch inv[wp]: curDomain P + +lemma placeNewObject_tcb_at': + notes blah[simp del] = atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + atLeastAtMost_iff + shows + "\pspace_aligned' and pspace_distinct' + and pspace_no_overlap' ptr (objBits (makeObject::tcb)) + and K(is_aligned ptr (objBits (makeObject::tcb))) + \ placeNewObject ptr (makeObject::tcb) 0 + \\rv s. tcb_at' ptr s \" + apply (simp add:placeNewObject_def placeNewObject'_def split_def alignError_def) + apply wpsimp + apply (clarsimp simp: obj_at'_def lookupAround2_None1 objBits_simps + lookupAround2_char1 field_simps projectKO_opt_tcb return_def ps_clear_def + simp flip: is_aligned_mask) + apply (drule (1) pspace_no_overlap_disjoint') + apply (clarsimp intro!: set_eqI; + drule_tac m = "ksPSpace s" in domI, + erule in_emptyE, + fastforce elim!: in_emptyE simp:objBits_simps mask_def add_diff_eq) + done + +lemma monad_commute_if_weak_r: + "\ monad_commute P1 f h1; monad_commute P2 f h2\ \ + monad_commute (P1 and P2) f (if d then h1 else h2)" + apply (clarsimp) + apply (intro conjI impI) + apply (erule monad_commute_guard_imp,simp)+ + done + +lemma createObject_setCTE_commute: + "monad_commute + (cte_wp_at' (\_. True) src and + pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_arch_state' and K (ptr \ src) and + K (is_aligned ptr (Types_H.getObjectSize ty us)) and + K (Types_H.getObjectSize ty us < word_bits)) + (RetypeDecls_H.createObject ty ptr us d) + (setCTE src cte)" + apply (rule commute_grab_asm)+ + apply (subgoal_tac "ptr && mask (Types_H.getObjectSize ty us) = 0") + prefer 2 + apply (clarsimp simp: range_cover_def is_aligned_mask) + apply (clarsimp simp: createObject_def) + apply (case_tac ty, + simp_all add: AARCH64_H.toAPIType_def) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add: + AARCH64_H.getObjectSize_def apiGetObjectSize_def + tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def + cteSizeBits_def) + \ \Untyped\ + apply (simp add: monad_commute_guard_imp[OF return_commute]) + \ \TCB, EP, NTFN\ + apply (rule monad_commute_guard_imp[OF commute_commute]) + apply (rule monad_commute_split[OF monad_commute_split]) + apply (rule monad_commute_split[OF commute_commute[OF return_commute]]) + apply (rule setCTE_modify_tcbDomain_commute) + apply wp + apply (rule curDomain_commute) + apply wp+ + apply (rule setCTE_placeNewObject_commute) + apply (wp placeNewObject_tcb_at' placeNewObject_cte_wp_at' + placeNewObject_pspace_distinct' + placeNewObject_pspace_aligned' + | clarsimp simp: objBits_simps')+ + apply (rule monad_commute_guard_imp[OF commute_commute] + ,rule monad_commute_split[OF commute_commute[OF return_commute]] + ,rule setCTE_placeNewObject_commute + ,(wp|clarsimp simp: objBits_simps')+)+ + \ \CNode\ + apply (rule monad_commute_guard_imp[OF commute_commute]) + apply (rule monad_commute_split)+ + apply (rule return_commute[THEN commute_commute]) + apply (rule setCTE_modify_gsCNode_commute[of \]) + apply (rule hoare_triv[of \]) + apply wp + apply (rule setCTE_placeNewObject_commute) + apply (wp|clarsimp simp: objBits_simps')+ + \ \Arch Objects\ + apply ((rule monad_commute_guard_imp[OF commute_commute] + , rule monad_commute_split[OF commute_commute[OF return_commute]] + , clarsimp simp: AARCH64_H.createObject_def + placeNewDataObject_def bind_assoc + split del: if_split + ,(rule 
monad_commute_split return_commute[THEN commute_commute] + setCTE_modify_gsUserPages_commute[of \] + modify_wp[of "%_. \"] + setCTE_doMachineOp_commute + setCTE_placeNewObject_commute + setCTE_updatePTType_commute + monad_commute_if_weak_r + | wp placeNewObject_pspace_distinct' + placeNewObject_pspace_aligned' + placeNewObject_cte_wp_at' + placeNewObject_valid_arch_state + | erule is_aligned_weaken + | simp add: objBits_simps word_bits_def mult_2 add.assoc + pageBits_less_word_bits[unfolded word_bits_def, simplified])+)+) + apply (simp add: bit_simps) + done + + +lemma createObject_updateMDB_commute: + "monad_commute + ((\s. src \ 0 \ cte_wp_at' (\_. True) src s) and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + pspace_aligned' and pspace_distinct' and valid_arch_state' and + K (ptr \ src) and + K (is_aligned ptr (Types_H.getObjectSize ty us)) and + K ((Types_H.getObjectSize ty us)< word_bits)) + (updateMDB src f) (RetypeDecls_H.createObject ty ptr us d)" + apply (clarsimp simp:updateMDB_def split:if_split_asm) + apply (intro conjI impI) + apply (simp add: monad_commute_guard_imp[OF return_commute]) + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF monad_commute_split]) + apply (rule createObject_setCTE_commute) + apply (rule createObject_getCTE_commute) + apply wp + apply (auto simp:range_cover_full) + done + +lemma updateMDB_pspace_no_overlap': + "\pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz\ + updateMDB slot f + \\rv s. pspace_no_overlap' ptr sz s\" + apply (rule hoare_pre) + apply (clarsimp simp: updateMDB_def split del: if_split) + apply (wp setCTE_pspace_no_overlap') + apply clarsimp + done + +lemma ctes_of_ko_at: + "ctes_of s p = Some a \ + (\ptr ko. (ksPSpace s ptr = Some ko \ p \ obj_range' ptr ko))" + apply (clarsimp simp: map_to_ctes_def Let_def split: if_split_asm) + apply (intro exI conjI, assumption) + apply (simp add: obj_range'_def objBits_simps' is_aligned_no_overflow_mask) + apply (intro exI conjI, assumption) + apply (clarsimp simp: objBits_simps' obj_range'_def word_and_le2) + apply (thin_tac "P" for P)+ + apply (simp add: mask_def) + apply word_bitwise + done + +lemma pspace_no_overlapD2': + "\is_aligned ptr sz; pspace_no_overlap' ptr sz s;sz < word_bits; + ctes_of s slot = Some cte\ + \ slot \ ptr" + apply (drule ctes_of_ko_at) + apply clarsimp + apply (drule(1) pspace_no_overlapD') + apply (erule in_empty_interE) + apply (simp add:obj_range'_def add_mask_fold) + apply clarsimp + apply (subst is_aligned_neg_mask_eq[symmetric]) + apply simp + apply (simp add: is_aligned_no_overflow) + done + +lemma caps_overlap_reserved'_subseteq: + "\caps_overlap_reserved' B s; A\ B\ \ caps_overlap_reserved' A s" + apply (clarsimp simp:caps_overlap_reserved'_def) + apply (drule(1) bspec) + apply (erule disjoint_subset2) + apply simp + done + +definition weak_valid_dlist where + "weak_valid_dlist \ \m. + (\p cte. + m p = Some cte \ + (let next = mdbNext (cteMDBNode cte) + in (next \ 0 \ (\cte'. m next = Some cte' \ cteCap cte'\ capability.NullCap))))" + +lemma valid_arch_state'_updateMDB: + "\valid_arch_state' \ updateMDB a b \\rv. valid_arch_state'\" + by (clarsimp simp:updateMDB_def valid_arch_state_def,wp) + +lemma fail_commute: + "monad_commute \ fail f = empty_fail f" + apply (simp add: monad_commute_def empty_fail_def) + apply (simp add: fail_def bind_def del: split_paired_Ex) + apply blast + done + +lemma modify_commute: + "monad_commute P (modify f) (modify g) + = (\s. 
P s \ f (g s) = g (f s))" + apply (simp add: monad_commute_def exec_modify) + apply (simp add: return_def eq_commute) + done + +lemma createObjects_gsUntypedZeroRanges_commute': + "monad_commute \ + (createObjects' ptr n ko us) + (modify (\s. s \ gsUntypedZeroRanges := f (gsUntypedZeroRanges s) \ ))" + apply (simp add: createObjects'_def unless_def when_def alignError_def + fail_commute) + apply clarsimp + apply (rule commute_commute) + apply (strengthen monad_commute_guard_imp[OF monad_commute_split[where P="\" and Q="\\"], OF _ _ hoare_vcg_prop] + | simp add: modify_commute split: option.split prod.split)+ + apply (simp add: monad_commute_def exec_modify exec_gets assert_def) + done + +lemma assert_commute2: "empty_fail f + \ monad_commute \ (assert G) f" + apply (clarsimp simp:assert_def monad_commute_def) + apply (simp add: fail_def bind_def empty_fail_def del: split_paired_Ex) + apply blast + done + +lemma monad_commute_gsUntyped_updatePTType: + "monad_commute \ (modify (\s. s\gsUntypedZeroRanges := f (gsUntypedZeroRanges s)\)) + (updatePTType ptr pt_t)" + unfolding updatePTType_def + apply (clarsimp simp: monad_commute_def exec_gets exec_modify bind_assoc) + apply (clarsimp simp: return_def) + apply (case_tac s, rename_tac arch mach, case_tac arch) + apply fastforce + done + +lemma threadSet_gsUntypedZeroRanges_commute': + "monad_commute \ + (threadSet fn ptr) + (modify (\s. s \ gsUntypedZeroRanges := f (gsUntypedZeroRanges s) \ ))" + apply (simp add: threadSet_def getObject_def setObject_def) + apply (rule commute_commute) + apply (strengthen monad_commute_guard_imp[OF monad_commute_split[where P="\" and Q="\\"], OF _ _ hoare_vcg_prop] + | simp add: modify_commute updateObject_default_def alignCheck_assert + magnitudeCheck_assert return_commute return_commute[THEN commute_commute] + projectKO_def2 assert_commute2 assert_commute2[THEN commute_commute] + assert_opt_def2 loadObject_default_def + split: option.split prod.split)+ + apply (simp add: monad_commute_def exec_gets exec_modify) + done + +lemma createObject_gsUntypedZeroRanges_commute: + "monad_commute + \ + (RetypeDecls_H.createObject ty ptr us dev) + (modify (\s. s \ gsUntypedZeroRanges := f (gsUntypedZeroRanges s) \ ))" + apply (simp add: createObject_def AARCH64_H.createObject_def + placeNewDataObject_def + placeNewObject_def2 bind_assoc fail_commute + return_commute toAPIType_def + split: option.split apiobject_type.split object_type.split) + apply (strengthen monad_commute_guard_imp[OF monad_commute_split[where P="\" and Q="\\"], + OF _ _ hoare_vcg_prop, THEN commute_commute] + monad_commute_guard_imp[OF monad_commute_split[where P="\" and Q="\\"], + OF _ _ hoare_vcg_prop] + | simp add: modify_commute createObjects_gsUntypedZeroRanges_commute' + createObjects_gsUntypedZeroRanges_commute'[THEN commute_commute] + return_commute return_commute[THEN commute_commute] + threadSet_gsUntypedZeroRanges_commute'[THEN commute_commute] + monad_commute_gsUntyped_updatePTType + split: option.split prod.split cong: if_cong)+ + apply (simp add: curDomain_def monad_commute_def exec_modify exec_gets) + done + +lemma monad_commute_If_rhs: + "monad_commute P a b \ monad_commute Q a c + \ monad_commute (\s. (R \ P s) \ (\ R \ Q s)) a (if R then b else c)" + by simp + +lemma case_eq_if_isUntypedCap: + "(case c of UntypedCap _ _ _ _ \ x | _ \ y) + = (if isUntypedCap c then x else y)" + by (cases c, simp_all add: isCap_simps) + +lemma createObject_updateTrackedFreeIndex_commute: + "monad_commute + (cte_wp_at' (\_. 
True) slot and pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_arch_state' and + K (ptr \ slot) and K (Types_H.getObjectSize ty us < word_bits) and + K (is_aligned ptr (Types_H.getObjectSize ty us))) + (RetypeDecls_H.createObject ty ptr us dev) (updateTrackedFreeIndex slot idx)" + apply (simp add: updateTrackedFreeIndex_def getSlotCap_def updateCap_def) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split[OF _ createObject_getCTE_commute] + monad_commute_split[OF _ createObject_gsUntypedZeroRanges_commute] + createObject_gsUntypedZeroRanges_commute)+ + apply (wp getCTE_wp')+ + apply (clarsimp simp: pspace_no_overlap'_def) + done + +lemma createObject_updateNewFreeIndex_commute: + "monad_commute + (cte_wp_at' (\_. True) slot and pspace_aligned' and pspace_distinct' and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_arch_state' and + K (ptr \ slot) and K (Types_H.getObjectSize ty us < word_bits) and + K (is_aligned ptr (Types_H.getObjectSize ty us))) + (RetypeDecls_H.createObject ty ptr us dev) (updateNewFreeIndex slot)" + apply (simp add: updateNewFreeIndex_def getSlotCap_def case_eq_if_isUntypedCap + updateTrackedFreeIndex_def) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split[OF _ createObject_getCTE_commute]) + apply (rule monad_commute_If_rhs) + apply (rule createObject_updateTrackedFreeIndex_commute) + apply (rule commute_commute[OF return_commute]) + apply (wp getCTE_wp') + apply clarsimp + done + +lemma new_cap_object_comm_helper: + "monad_commute + (pspace_aligned' and pspace_distinct' and (\s. no_0 (ctes_of s)) and + (\s. weak_valid_dlist (ctes_of s)) and + (\s. valid_nullcaps (ctes_of s)) and + cte_wp_at' (\c. isUntypedCap (cteCap c)) parent and + cte_wp_at' (\c. cteCap c = capability.NullCap) slot and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_arch_state' and + K (Types_H.getObjectSize ty us capability.NullCap) and + K (is_aligned ptr (Types_H.getObjectSize ty us) \ ptr \ 0 \ parent \ 0)) + (RetypeDecls_H.createObject ty ptr us d) (insertNewCap parent slot cap)" + apply (clarsimp simp:insertNewCap_def bind_assoc liftM_def) + apply (rule monad_commute_guard_imp) + apply (rule monad_commute_split[OF _ createObject_getCTE_commute])+ + apply (rule monad_commute_split[OF _ commute_commute[OF assert_commute]]) + apply (rule monad_commute_split[OF _ createObject_setCTE_commute]) + apply (rule monad_commute_split[OF _ commute_commute[OF createObject_updateMDB_commute]]) + apply (rule monad_commute_split[OF _ commute_commute[OF createObject_updateMDB_commute]]) + apply (rule createObject_updateNewFreeIndex_commute) + apply (wp getCTE_wp hoare_vcg_imp_lift hoare_vcg_disj_lift valid_arch_state'_updateMDB + updateMDB_pspace_no_overlap' setCTE_pspace_no_overlap' + | clarsimp simp:conj_comms)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (frule_tac slot = slot in pspace_no_overlapD2') + apply simp+ + apply (frule_tac slot = parent in pspace_no_overlapD2') + apply simp+ + apply (case_tac ctea,clarsimp) + apply (frule_tac p = slot in nullcapsD') + apply simp+ + apply (subgoal_tac "(mdbNext (cteMDBNode cte) = 0 \ + (\ctea. 
ctes_of s (mdbNext (cteMDBNode cte)) = Some ctea))") + apply (elim disjE) + apply clarsimp+ + apply (frule_tac slot = "(mdbNext (cteMDBNode cte))" + in pspace_no_overlapD2') + apply simp+ + apply (clarsimp simp:weak_valid_dlist_def) + apply (drule_tac x = "parent " in spec) + apply clarsimp + done + +crunch pspace_aligned'[wp]: updateNewFreeIndex "pspace_aligned'" +crunch pspace_distinct'[wp]: updateNewFreeIndex "pspace_distinct'" +crunch valid_arch_state'[wp]: updateNewFreeIndex "valid_arch_state'" +crunch pspace_no_overlap'[wp]: updateNewFreeIndex "pspace_no_overlap' ptr n" +crunch ctes_of[wp]: updateNewFreeIndex "\s. P (ctes_of s)" + +lemma updateNewFreeIndex_cte_wp_at[wp]: + "\\s. P (cte_wp_at' P' p s)\ updateNewFreeIndex slot \\rv s. P (cte_wp_at' P' p s)\" + by (simp add: cte_wp_at_ctes_of, wp) + +lemma new_cap_object_commute: + "monad_commute + (cte_wp_at' (\c. isUntypedCap (cteCap c)) parent and + (\s. \slot\set list. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s) and + pspace_no_overlap' ptr (Types_H.getObjectSize ty us) and + valid_pspace' and valid_arch_state' and + K (distinct (map fst (zip list caps))) and + K (\cap \ set caps. cap \ capability.NullCap) and + K (Types_H.getObjectSize ty us ptr \ 0)) + (RetypeDecls_H.createObject ty ptr us d) + (zipWithM_x (insertNewCap parent) list caps)" + apply (clarsimp simp:zipWithM_x_mapM_x) + apply (rule monad_commute_guard_imp) + apply (rule mapM_x_commute[where f = fst]) + apply (simp add:split_def) + apply (rule new_cap_object_comm_helper) + apply (clarsimp simp:insertNewCap_def split_def) + apply (wp updateMDB_weak_cte_wp_at updateMDB_pspace_no_overlap' + getCTE_wp valid_arch_state'_updateMDB + setCTE_weak_cte_wp_at setCTE_pspace_no_overlap') + apply (clarsimp simp:cte_wp_at_ctes_of simp del:fun_upd_apply) + apply (case_tac "parent \ aa") + prefer 2 + apply simp + apply (clarsimp simp: conj_comms) + apply (intro conjI exI) + apply (clarsimp simp: no_0_def) + apply (clarsimp simp: weak_valid_dlist_def modify_map_def Let_def) + subgoal by (intro conjI impI; fastforce) + apply (clarsimp simp:valid_nullcaps_def) + apply (frule_tac x = "p" in spec) + apply (case_tac ctec) + apply (case_tac cte) + apply (rename_tac cap' node') + apply (case_tac node') + apply (rename_tac word1 word2 bool1 bool2) + apply (clarsimp simp:modify_map_def split:if_split_asm) + apply (case_tac z) + apply (drule_tac x = word1 in spec) + apply (clarsimp simp:weak_valid_dlist_def) + apply (drule_tac x = parent in spec) + apply clarsimp + apply (clarsimp simp:valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + apply (intro conjI) + apply (clarsimp simp:weak_valid_dlist_def Let_def) + apply (frule(2) valid_dlist_nextD) + apply clarsimp + apply (case_tac cte') + apply clarsimp + apply (drule_tac m = "ctes_of s" in nullcapsD') + apply simp + apply (clarsimp simp: no_0_def nullPointer_def) + apply (erule in_set_zipE) + apply clarsimp + apply (erule in_set_zipE) + apply clarsimp + apply (clarsimp simp:cte_wp_at_ctes_of) + done + +lemma createObjects'_pspace_no_overlap: + "gz = (objBitsKO val) + us \ + \pspace_no_overlap' (ptr + (1 + of_nat n << gz)) gz and + K (range_cover ptr sz gz (Suc (Suc n)) \ ptr \ 0)\ + createObjects' ptr (Suc n) val us + \\addrs s. 
pspace_no_overlap' (ptr + (1 + of_nat n << gz)) gz s\" +proof - + note simps [simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + assume "gz = (objBitsKO val) + us" + thus ?thesis + apply - + apply (rule hoare_gen_asm) + apply (clarsimp simp:createObjects'_def split_def new_cap_addrs_fold') + apply (subst new_cap_addrs_fold') + apply clarsimp + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) + apply simp+ + apply (simp add:word_le_sub1) + apply (wp haskell_assert_wp unless_wp | wpc + | simp add:alignError_def if_apply_def2 del: fun_upd_apply hoare_fail_any)+ + apply (rule impI) + apply (subgoal_tac + "pspace_no_overlap' (ptr + (1 + of_nat n << objBitsKO val + us)) + (objBitsKO val + us) + (s\ksPSpace := foldr (\addr map. map(addr \ val)) + (new_cap_addrs (unat (1 + of_nat n << us)) ptr val) (ksPSpace s)\)") + apply (intro conjI impI allI) + apply assumption+ + apply (subst pspace_no_overlap'_def) + apply (intro allI impI) + apply (subst (asm) foldr_upd_app_if) + apply (subst is_aligned_neg_mask_eq) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def) + apply simp + apply (clarsimp split:if_splits) + apply (drule obj_range'_subset_strong[rotated]) + apply (rule range_cover_rel[OF range_cover_le[where n = "Suc n"]],assumption) + apply simp + apply simp + apply (drule range_cover.unat_of_nat_n_shift + [OF range_cover_le[where n = "Suc n"],where gbits = us]) + apply simp + apply (simp add:shiftl_t2n field_simps)+ + apply (simp add:obj_range'_def) + apply (erule disjoint_subset) + apply (clarsimp simp: simps) + apply (thin_tac "x \ y" for x y) + apply (subst (asm) le_m1_iff_lt[THEN iffD1]) + apply (drule_tac range_cover_no_0[rotated,where p = "Suc n"]) + apply simp + apply simp + apply (simp add:field_simps) + apply (simp add: power_add[symmetric]) + apply (simp add: word_neq_0_conv) + apply (simp add: power_add[symmetric] field_simps) + apply (frule range_cover_subset[where p = "Suc n"]) + apply simp + apply simp + apply (drule(1) pspace_no_overlapD') + apply (subst (asm) is_aligned_neg_mask_eq) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def) + apply simp + apply (simp add:word_le_sub1 shiftl_t2n mask_def field_simps) + done +qed + +lemma createNewCaps_not_nc: + "\\\ + createNewCaps ty ptr (Suc (length as)) us d + \\r s. (\cap\set r. cap \ capability.NullCap)\" + apply (clarsimp simp:simp:createNewCaps_def Arch_createNewCaps_def split del: if_split) + apply (rule hoare_pre) + apply wpc + apply wp + apply (simp add:Arch_createNewCaps_def split del: if_split) + apply (wpc|wp|clarsimp)+ + done + +lemma doMachineOp_psp_no_overlap: + "\\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s \ + doMachineOp f + \\y s. pspace_no_overlap' ptr sz s\" + by (wp pspace_no_overlap'_lift,simp) + +lemma createObjects'_psp_distinct: + "\ pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and + K (range_cover ptr sz ((objBitsKO ko) + us) n \ n \ 0 \ + is_aligned ptr (objBitsKO ko + us) \ objBitsKO ko + us < word_bits) \ + createObjects' ptr n ko us + \\rv s. 
pspace_distinct' s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp:createObjects'_def split_def) + apply (subst new_cap_addrs_fold') + apply (drule range_cover_not_zero_shift[where gbits = us,rotated]) + apply simp+ + apply unat_arith + apply (rule hoare_pre) + apply (wpc|wp|simp add: unless_def alignError_def del: fun_upd_apply hoare_fail_any)+ + apply clarsimp + apply (subst data_map_insert_def[symmetric])+ + apply (simp add: range_cover.unat_of_nat_n_shift) + apply (drule(2) retype_aligned_distinct'(1)[where ko = ko and n= "n*2^us" ]) + apply (erule range_cover_rel) + apply simp + apply clarsimp + apply (simp add: range_cover.unat_of_nat_n_shift) + done + +lemma createObjects'_psp_aligned: + "\ pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and + K (range_cover ptr sz ((objBitsKO ko) + us) n \ n \ 0 \ + is_aligned ptr (objBitsKO ko + us) \ objBitsKO ko + us < word_bits)\ + createObjects' ptr n ko us + \\rv s. pspace_aligned' s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: createObjects'_def split_def) + apply (subst new_cap_addrs_fold') + apply (drule range_cover_not_zero_shift[where gbits = us,rotated]) + apply simp+ + apply unat_arith + apply (rule hoare_pre) + apply (wpc|wp|simp add: unless_def alignError_def del: fun_upd_apply hoare_fail_any)+ + apply clarsimp + apply (frule(2) retype_aligned_distinct'(2)[where ko = ko and n= "n*2^us" ]) + apply (erule range_cover_rel) + apply simp + apply clarsimp + apply (subst data_map_insert_def[symmetric])+ + apply (simp add: range_cover.unat_of_nat_n_shift) + done + +lemma pspace_no_overlap'_le: + assumes psp: "pspace_no_overlap' ptr sz s" "sz'\ sz" + assumes b: "sz < word_bits" + shows "pspace_no_overlap' ptr sz' s" +proof - + note no_simps [simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + have diff_cancel: "\a b c. (a::machine_word) + b - c = b + (a - c)" + by simp + have bound: "(ptr && ~~ mask sz') - (ptr && ~~ mask sz) \ mask sz - mask sz'" + using neg_mask_diff_bound[OF psp(2)] + by (simp add: mask_def) + show ?thesis + using psp + apply (clarsimp simp:pspace_no_overlap'_def) + apply (drule_tac x = x in spec) + apply clarsimp + apply (erule disjoint_subset2[rotated]) + apply (clarsimp simp: no_simps) + apply (rule word_plus_mcs[OF _ is_aligned_no_overflow_mask]) + apply (simp add:diff_cancel p_assoc_help) + apply (rule le_plus) + apply (rule bound) + apply (erule mask_mono) + apply simp + done +qed + +lemma pspace_no_overlap'_le2: + assumes "pspace_no_overlap' ptr sz s" "ptr \ ptr'" "ptr' &&~~ mask sz = ptr && ~~ mask sz" + shows "pspace_no_overlap' ptr' sz s" + proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + using assms + apply (clarsimp simp:pspace_no_overlap'_def) + apply (drule_tac x = x in spec) + apply clarsimp + apply (erule disjoint_subset2[rotated]) + apply (clarsimp simp:blah) + done +qed + +lemma pspace_no_overlap'_tail: + "\range_cover ptr sz us (Suc (Suc n)); pspace_aligned' s; pspace_distinct' s; + pspace_no_overlap' ptr sz s; ptr \ 0\ + \ pspace_no_overlap' (ptr + (1 + of_nat n << us)) sz s" + apply (erule pspace_no_overlap'_le2) + apply (erule(1) range_cover_ptr_le) + apply (erule(1) range_cover_tail_mask) + done + +lemma createNewCaps_pspace_no_overlap': + "\\s. 
range_cover ptr sz (Types_H.getObjectSize ty us) (Suc (Suc n)) \ + pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s \ + ptr \ 0\ + createNewCaps ty ptr (Suc n) us d + \\r s. pspace_no_overlap' + (ptr + (1 + of_nat n << Types_H.getObjectSize ty us)) + (Types_H.getObjectSize ty us) s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: createNewCaps_def) + apply (subgoal_tac "pspace_no_overlap' (ptr + (1 + of_nat n << (Types_H.getObjectSize ty us))) + (Types_H.getObjectSize ty us) s") + prefer 2 + apply (rule pspace_no_overlap'_le[where sz = sz]) + apply (rule pspace_no_overlap'_tail) + apply simp+ + apply (simp add:range_cover_def) + apply (simp add:range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]) + apply (rule_tac Q = "\r. pspace_no_overlap' (ptr + (1 + of_nat n << Types_H.getObjectSize ty us)) + (Types_H.getObjectSize ty us) and + pspace_aligned' and pspace_distinct'" in hoare_strengthen_post) + apply (case_tac ty) + apply (simp_all add: apiGetObjectSize_def + AARCH64_H.toAPIType_def + AARCH64_H.getObjectSize_def objBits_simps objBits_defs + pageBits_def ptBits_def + createObjects_def) + apply (rule hoare_pre) + apply wpc + apply (clarsimp simp: apiGetObjectSize_def curDomain_def + AARCH64_H.toAPIType_def + AARCH64_H.getObjectSize_def objBits_simps objBits_defs + pageBits_def ptBits_def + createObjects_def Arch_createNewCaps_def + split: apiobject_type.splits + | wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap[where sz = sz] + createObjects'_psp_aligned[where sz = sz] createObjects'_psp_distinct[where sz = sz] + mapM_x_wp_inv + | assumption)+ + apply (intro conjI range_cover_le[where n = "Suc n"] | simp)+ + apply ((simp add:objBits_simps pageBits_def range_cover_def word_bits_def)+)[5] + by ((clarsimp simp: apiGetObjectSize_def bit_simps toAPIType_def + getObjectSize_def objBits_simps + createObjects_def Arch_createNewCaps_def unless_def + split: apiobject_type.splits + | wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap + createObjects'_psp_aligned createObjects'_psp_distinct + mapM_x_wp_inv + | assumption | clarsimp simp: word_bits_def + | intro conjI range_cover_le[where n = "Suc n"] range_cover.aligned)+) + +lemma objSize_eq_capBits: + "Types_H.getObjectSize ty us = APIType_capBits ty us" + by (cases ty; + clarsimp simp: getObjectSize_def objBits_simps bit_simps + APIType_capBits_def apiGetObjectSize_def ptBits_def + split: apiobject_type.splits) + +lemma createNewCaps_ret_len: + "\K (n < 2 ^ word_bits \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv s. n = length rv\" + including no_pre + apply (rule hoare_name_pre_state) + apply clarsimp + apply (case_tac ty) + apply (simp_all add:createNewCaps_def AARCH64_H.toAPIType_def) + apply (rule hoare_pre) + apply wpc + apply ((wp+)|simp add:Arch_createNewCaps_def AARCH64_H.toAPIType_def + unat_of_nat_minus_1 + [where 'a=machine_word_len, folded word_bits_def] | + erule hoare_strengthen_post[OF createObjects_ret],clarsimp+ | intro conjI impI)+ + apply (rule hoare_pre, + ((wp+) + | simp add: Arch_createNewCaps_def toAPIType_def unat_of_nat_minus_1 + | erule hoare_strengthen_post[OF createObjects_ret],clarsimp+ + | intro conjI impI)+)+ + done + +lemma no_overlap_check: + "\range_cover ptr sz bits n; pspace_no_overlap' ptr sz s; + pspace_aligned' s;n\ 0\ + \ case_option (return ()) + (\x. 
assert (fst x < ptr)) + (fst (lookupAround2 (ptr + of_nat (shiftL n bits - Suc 0)) + (ksPSpace s))) s = + return () s" + apply (clarsimp split:option.splits simp:assert_def lookupAround2_char1 not_less) + apply (rule ccontr) + apply (frule(1) pspace_no_overlapD') + apply (erule_tac x = a in in_empty_interE) + apply clarsimp + apply (drule(1) pspace_alignedD') + apply (erule is_aligned_no_overflow) + apply clarsimp + apply (erule order_trans) + apply (frule range_cover_cell_subset[where x = "of_nat n - 1"]) + apply (rule gt0_iff_gem1[THEN iffD1]) + apply (simp add:word_gt_0) + apply (rule range_cover_not_zero) + apply simp + apply assumption + apply (clarsimp simp:shiftL_nat field_simps) + apply (erule impE) + apply (frule range_cover_subset_not_empty[rotated,where x = "of_nat n - 1"]) + apply (rule gt0_iff_gem1[THEN iffD1]) + apply (simp add:word_gt_0) + apply (rule range_cover_not_zero) + apply simp + apply assumption + apply (clarsimp simp:field_simps) + apply simp + done + +lemma new_caps_addrs_append: + "\range_cover ptr sz (objBitsKO va + us) (Suc n)\ \ + new_cap_addrs (unat (of_nat n + (1::machine_word) << us)) ptr val = + new_cap_addrs (unat (((of_nat n)::machine_word) << us)) ptr val @ + new_cap_addrs (unat ((2::machine_word) ^ us)) + ((((of_nat n)::machine_word) << objBitsKO val + us) + ptr) val" + apply (subst add.commute) + apply (clarsimp simp:new_cap_addrs_def) + apply (subst upt_add_eq_append'[where j="unat (((of_nat n)::machine_word) << us)"]) + prefer 3 + apply simp + apply (subst upt_lhs_sub_map) + apply (simp add:Fun.comp_def field_simps) + apply (subst unat_sub[symmetric]) + apply (simp add:shiftl_t2n) + apply (subst mult.commute) + apply (subst mult.commute[where a = "2 ^ us"])+ + apply (rule word_mult_le_mono1) + apply (simp add:word_le_nat_alt) + apply (subst of_nat_Suc[symmetric]) + apply (frule range_cover.unat_of_nat_n) + apply (drule range_cover.unat_of_nat_n[OF range_cover_le[where n = n]]) + apply simp + apply simp + apply (simp add: p2_gt_0) + apply (simp add:range_cover_def word_bits_def) + apply (subst word_bits_def[symmetric]) + apply (subst of_nat_Suc[symmetric]) + apply (subst range_cover.unat_of_nat_n) + apply simp + apply (subst unat_power_lower) + apply (simp add:range_cover_def) + apply (frule range_cover.range_cover_n_le(2)) + apply (subst mult.commute) + apply (rule le_less_trans[OF nat_le_power_trans[where m = sz]]) + apply (erule le_trans) + apply simp + apply (simp add:range_cover_def) + apply (simp add:range_cover_def[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: power_add [symmetric] shiftl_t2n field_simps) + apply simp + apply (frule range_cover_le[where n = n]) + apply simp + apply (drule range_cover_rel[where sbit'= "objBitsKO va"]) + apply simp+ + apply (drule range_cover_rel[where sbit'= "objBitsKO va"]) + apply simp+ + apply (drule range_cover.unat_of_nat_n)+ + apply (simp add:shiftl_t2n) + apply (clarsimp simp: power_add[symmetric] shiftl_t2n field_simps ) + done + +lemma modify_comp: + "modify (ksPSpace_update (\a. f (g a))) = + (do modify (ksPSpace_update (\a. (g a))); + modify (ksPSpace_update (\a. f a)) + od)" + by (clarsimp simp:simpler_modify_def bind_def Fun.comp_def) + +lemma modify_objs_commute: + "monad_commute (K ((set lst1) \ (set lst2) = {})) + (modify (ksPSpace_update (foldr (\addr map. map(addr \ val)) lst1))) + (modify (ksPSpace_update (foldr (\addr map. 
map(addr \ val)) lst2)))" + apply (clarsimp simp:monad_commute_def simpler_modify_def bind_def return_def) + apply (case_tac s,simp) + apply (rule ext) + apply (clarsimp simp:foldr_upd_app_if) + done + +lemma new_cap_addrs_disjoint: + "\range_cover ptr sz (objBitsKO val + us) (Suc (Suc n))\ + \ set (new_cap_addrs (2^us) + (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) val) \ + set (new_cap_addrs (unat ((1::machine_word) + of_nat n << us)) ptr val) = {}" + apply (frule range_cover.unat_of_nat_n_shift[where gbits = us,symmetric]) + apply simp + apply (frule range_cover_rel[where sbit' = "objBitsKO val"]) + apply (simp add:field_simps)+ + apply (frule new_cap_addrs_distinct) + apply (subst (asm) add.commute[where b = 2])+ + apply (subst (asm) new_caps_addrs_append[where n = "Suc n",simplified]) + apply (simp add:field_simps) + apply (clarsimp simp:field_simps Int_ac range_cover_def) + done + +lemma pspace_no_overlap'_modify: + "\K (range_cover ptr sz (objBitsKO val + us) (Suc (Suc n)) \ ptr \ 0) and + pspace_no_overlap' (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) + (objBitsKO val + us)\ + modify (ksPSpace_update + (foldr (\addr map. map(addr \ val)) + (new_cap_addrs (unat ((1::machine_word) + of_nat n << us)) ptr val))) + \\r. pspace_no_overlap' + (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) + (objBitsKO val + us)\" + proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + apply (clarsimp simp:simpler_modify_def valid_def pspace_no_overlap'_def) + apply (frule(1) range_cover_tail_mask) + apply (simp add:field_simps) + apply (drule_tac x = x in spec) + apply (clarsimp simp:foldr_upd_app_if split:if_splits) + apply (frule obj_range'_subset_strong[rotated]) + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (rule range_cover_rel,assumption) + apply simp + apply clarsimp + apply (frule range_cover.unat_of_nat_n_shift[where gbits = us,symmetric]) + apply simp+ + apply (simp add:field_simps) + apply (simp add:obj_range'_def) + apply (erule disjoint_subset) + apply (frule(1) range_cover_ptr_le) + apply (subgoal_tac + "\ ptr + (1 + of_nat n << us + objBitsKO val) \ ptr + (1 + of_nat n << us) * 2 ^ objBitsKO val - 1") + apply (clarsimp simp:blah field_simps) + apply (clarsimp simp: not_le) + apply (rule word_leq_le_minus_one) + apply (clarsimp simp: power_add[symmetric] shiftl_t2n field_simps objSize_eq_capBits ) + apply (rule neq_0_no_wrap) + apply (clarsimp simp: power_add[symmetric] shiftl_t2n field_simps objSize_eq_capBits ) + apply simp + done +qed + +lemma createObjects_Cons: + "\range_cover ptr sz (objBitsKO val + us) (Suc (Suc n)); + pspace_distinct' s;pspace_aligned' s; + pspace_no_overlap' ptr sz s;pspace_aligned' s; ptr \ 0\ + \ createObjects' ptr (Suc (Suc n)) val us s = + (do createObjects' ptr (Suc n) val us; + createObjects' (((1 + of_nat n) << (objBitsKO val + us)) + ptr) + (Suc 0) val us + od) s" + supply option.case_cong[cong] subst_all [simp del] + apply (clarsimp simp:createObjects'_def split_def bind_assoc) + apply (subgoal_tac "is_aligned (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) (objBitsKO val + us)") + prefer 2 + apply (clarsimp simp:field_simps) + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def) + apply (rule monad_eq_split[where Q ="\x s'. 
s' = s \ ptr && mask (objBitsKO val + us) = 0"]) + apply (clarsimp simp:is_aligned_mask[symmetric]) + apply (subst new_cap_addrs_fold') + apply (drule range_cover_not_zero_shift[rotated,where gbits = us]) + apply simp+ + apply (simp add:word_le_sub1) + apply (subst new_cap_addrs_fold') + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (drule range_cover_not_zero_shift[rotated,where gbits = us]) + apply simp+ + apply (simp add:word_le_sub1) + apply (subst new_cap_addrs_fold') + apply (rule word_1_le_power) + apply (simp add:range_cover_def) + apply (rule monad_eq_split[where Q ="\r s'. r = ksPSpace s \ s' = s"]) + apply (rule monad_eq_split2[where Q = "\r s'. s' = s"]) + apply (simp add:field_simps) + apply (subst no_overlap_check) + apply (erule range_cover_le) + apply simp+ + apply (subst no_overlap_check) + apply (erule range_cover_le) + apply simp+ + apply clarsimp + apply (simp add:new_caps_addrs_append[where n = "Suc n",simplified]) + apply (subst modify_specify2[where g = return,simplified]) + apply (subst modify_specify2) + apply (subst modify_specify) + apply (simp add:modify_comp) + apply (subst monad_commute_simple[OF modify_objs_commute,where g= "\x y. return ()",simplified]) + apply (frule range_cover.sz(1)) + apply (frule range_cover.sz(2)) + apply clarsimp + apply (erule new_cap_addrs_disjoint) + apply (rule monad_eq_split2[where Q = + "\r. pspace_no_overlap' (((1::machine_word) + of_nat n << objBitsKO val + us) + ptr) + (objBitsKO val + us) and pspace_aligned'"]) + apply (simp add:shiftl_t2n field_simps) + apply (clarsimp) + apply (rule sym) + apply (clarsimp simp:gets_def get_def) + apply (subst bind_def,simp) + apply (subst monad_eq) + apply (rule no_overlap_check) + apply (erule range_cover_full) + apply (simp add:range_cover_def word_bits_def) + apply (simp add:field_simps) + apply simp+ + apply (clarsimp simp:simpler_modify_def) + apply wp + apply (clarsimp simp del:fun_upd_apply) + apply (rule conjI) + apply (rule use_valid[OF _ pspace_no_overlap'_modify[where sz = sz]]) + apply (simp add:simpler_modify_def) + apply (clarsimp simp:field_simps) + apply (rule pspace_no_overlap'_le) + apply (erule pspace_no_overlap'_tail) + apply simp+ + apply (simp add:range_cover_def) + apply (erule range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]) + apply (subst data_map_insert_def[symmetric]) + apply (drule(2) retype_aligned_distinct'(2)) + prefer 2 + apply (simp cong: kernel_state.fold_congs) + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (rule range_cover_le[OF range_cover_rel,OF _ _ _ le_refl]) + apply simp+ + apply (drule range_cover.unat_of_nat_n_shift[where gbits = us]) + apply simp + apply simp + apply (wp haskell_assert_wp | wpc)+ + apply simp + apply (wp unless_wp |clarsimp)+ + apply (drule range_cover.aligned) + apply (simp add:is_aligned_mask) + done + +lemma doMachineOp_ksArchState_commute: + "monad_commute \ (doMachineOp f) (gets (g \ ksArchState))" + apply (clarsimp simp:monad_commute_def gets_def return_def get_def bind_def) + apply (intro conjI set_eqI iffI) + apply (clarsimp simp: doMachineOp_def select_f_def gets_def get_def bind_def + return_def simpler_modify_def) + apply (erule bexI[rotated]) + apply clarsimp + apply (clarsimp simp: doMachineOp_def select_f_def gets_def get_def bind_def return_def + simpler_modify_def) + apply (erule bexI[rotated]) + apply clarsimp+ + done + +lemma gsCNodes_upd_createObjects'_comm: + "do _ \ modify (gsCNodes_update f); + x \ createObjects' ptr n obj us; + m x + od = + 
do x \ createObjects' ptr n obj us; + _ \ modify (gsCNodes_update f); + m x + od" + apply (rule ext) + apply (case_tac x) + by (auto simp: createObjects'_def split_def bind_assoc return_def unless_def + when_def simpler_gets_def alignError_def fail_def assert_def + simpler_modify_def bind_def + split: option.splits) + +lemma gsUserPages_upd_createObjects'_comm: + "do _ \ modify (gsUserPages_update f); + x \ createObjects' ptr n obj us; + m x + od = + do x \ createObjects' ptr n obj us; + _ \ modify (gsUserPages_update f); + m x + od" + apply (rule ext) + apply (case_tac x) + by (auto simp: createObjects'_def split_def bind_assoc return_def unless_def + when_def simpler_gets_def alignError_def fail_def assert_def + simpler_modify_def bind_def + split: option.splits) + +lemma ksArchState_upd_createObjects'_comm: + "do _ \ modify (\s. ksArchState_update (f (ksArchState s)) s); + x \ createObjects' ptr n obj us; + m x + od = + do x \ createObjects' ptr n obj us; + _ \ modify (\s. ksArchState_update (f (ksArchState s)) s); + m x + od" + apply (rule ext) + apply (case_tac x) + by (auto simp: createObjects'_def split_def bind_assoc return_def unless_def + when_def simpler_gets_def alignError_def fail_def assert_def + simpler_modify_def bind_def + split: option.splits) + +(* FIXME: move *) +lemma ef_dmo': + "empty_fail f \ empty_fail (doMachineOp f)" + by (auto simp: empty_fail_def doMachineOp_def split_def select_f_def + simpler_modify_def simpler_gets_def return_def bind_def image_def) + +(* FIXME: move *) +lemma dmo'_when_fail_comm: + assumes "empty_fail f" + shows "doMachineOp f >>= (\x. when P fail >>= (\_. m x)) = + when P fail >>= (\_. doMachineOp f >>= m)" + apply (rule ext) + apply (cut_tac ef_dmo'[OF assms]) + apply (auto simp add: empty_fail_def when_def fail_def return_def + bind_def split_def image_def, fastforce) + done + +(* FIXME: move *) +lemma dmo'_gets_ksPSpace_comm: + "doMachineOp f >>= (\_. gets ksPSpace >>= m) = + gets ksPSpace >>= (\x. doMachineOp f >>= (\_. m x))" + apply (rule ext) + apply (clarsimp simp: doMachineOp_def simpler_modify_def simpler_gets_def + return_def select_f_def bind_def split_def image_def) + apply (rule conjI) + apply (rule set_eqI, clarsimp) + apply (rule iffI; clarsimp) + apply (metis eq_singleton_redux prod_injects(2)) + apply (intro exI conjI bexI[rotated], simp+)[1] + apply (rule iffI; clarsimp; intro exI conjI bexI[rotated], simp+)[1] + done + +lemma dmo'_ksPSpace_update_comm': + assumes "empty_fail f" + shows "doMachineOp f >>= (\x. modify (ksPSpace_update g) >>= (\_. m x)) = + modify (ksPSpace_update g) >>= (\_. doMachineOp f >>= m)" +proof - + have ksMachineState_ksPSpace_update: + "\s. ksMachineState (ksPSpace_update g s) = ksMachineState s" + by simp + have updates_independent: + "\f. 
ksPSpace_update g \ ksMachineState_update f = + ksMachineState_update f \ ksPSpace_update g" + by (rule ext) simp + from assms + show ?thesis + apply (simp add: doMachineOp_def split_def bind_assoc) + apply (simp add: gets_modify_comm2[OF ksMachineState_ksPSpace_update]) + apply (rule arg_cong_bind1) + apply (simp add: empty_fail_def select_f_walk[OF empty_fail_modify] + modify_modify_bind updates_independent) + done +qed + +lemma dmo'_createObjects'_comm: + assumes ef: "empty_fail f" + shows "do _ \ doMachineOp f; x \ createObjects' ptr n obj us; m x od = + do x \ createObjects' ptr n obj us; _ \ doMachineOp f; m x od" + apply (simp add: createObjects'_def bind_assoc split_def unless_def + alignError_def dmo'_when_fail_comm[OF ef] + dmo'_gets_ksPSpace_comm + dmo'_ksPSpace_update_comm'[OF ef, symmetric]) + apply (rule arg_cong_bind1) + apply (rule arg_cong_bind1) + apply (rename_tac u w) + apply (case_tac "fst (lookupAround2 (ptr + of_nat (shiftL n (objBitsKO obj + + us) - Suc 0)) w)", clarsimp+) + apply (simp add: assert_into_when dmo'_when_fail_comm[OF ef]) + done + +lemma dmo'_gsUserPages_upd_comm: + assumes "empty_fail f" + shows "doMachineOp f >>= (\x. modify (gsUserPages_update g) >>= (\_. m x)) = + modify (gsUserPages_update g) >>= (\_. doMachineOp f >>= m)" +proof - + have ksMachineState_ksPSpace_update: + "\s. ksMachineState (gsUserPages_update g s) = ksMachineState s" + by simp + have updates_independent: + "\f. gsUserPages_update g \ ksMachineState_update f = + ksMachineState_update f \ gsUserPages_update g" + by (rule ext) simp + from assms + show ?thesis + apply (simp add: doMachineOp_def split_def bind_assoc) + apply (simp add: gets_modify_comm2[OF ksMachineState_ksPSpace_update]) + apply (rule arg_cong_bind1) + apply (simp add: empty_fail_def select_f_walk[OF empty_fail_modify] + modify_modify_bind updates_independent) + done +qed + +lemma rewrite_step: + assumes rewrite: "\s. P s \ f s = f' s" + shows "P s \ ( f >>= g ) s = (f' >>= g ) s" + by (simp add:bind_def rewrite) + +lemma rewrite_through_step: + assumes rewrite: "\s r. P s \ f r s = f' r s" + assumes hoare: "\Q\ g \\r. P\" + shows "Q s \ + (do x \ g; + y \ f x; + h x y od) s = + (do x \ g; + y \ f' x; + h x y od) s" + apply (rule monad_eq_split[where Q = "\r. P"]) + apply (simp add:bind_def rewrite) + apply (rule hoare) + apply simp + done + +lemma threadSet_commute: + assumes preserve: "\P and tcb_at' ptr \ f \\r. tcb_at' ptr\" + assumes commute: "monad_commute P' f + ( modify (ksPSpace_update + (\ps. ps(ptr \ + case ps ptr of Some (KOTCB tcb) \ KOTCB (tcbDomain_update (\_. r) tcb)))))" + shows "monad_commute (tcb_at' ptr and P and P') f (threadSet (tcbDomain_update (\_. r)) ptr)" + apply (clarsimp simp add: monad_commute_def) + apply (subst rewrite_through_step[where h = "\x y. return (x,())",simplified bind_assoc]) + apply (erule threadSet_det) + apply (rule preserve) + apply simp + apply (subst rewrite_step[OF threadSet_det]) + apply assumption + apply simp + using commute + apply (simp add:monad_commute_def) + done + +lemma createObjects_setDomain_commute: + "monad_commute + (\s. range_cover ptr' (objBitsKO (KOTCB makeObject)) + (objBitsKO (KOTCB makeObject) + 0) (Suc 0) \ + pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr' (objBitsKO (KOTCB makeObject)) s \ + tcb_at' ptr s \ is_aligned ptr' (objBitsKO (KOTCB makeObject))) + (createObjects' ptr' (Suc 0) (KOTCB makeObject) 0) + (threadSet (tcbDomain_update (\_. 
r)) ptr)" + apply (rule monad_commute_guard_imp) + apply (rule threadSet_commute) + apply (wp createObjects_orig_obj_at'[where sz = "(objBitsKO (KOTCB makeObject))"]) + apply clarsimp + apply assumption + apply (simp add:placeNewObject_def2[where val = "makeObject::tcb",simplified,symmetric]) + apply (rule placeNewObject_modify_commute) + apply (clarsimp simp: objBits_simps' typ_at'_def word_bits_def + obj_at'_def ko_wp_at'_def projectKO_opt_tcb) + apply (clarsimp split:Structures_H.kernel_object.splits) + done + + +lemma createObjects_setDomains_commute: + "monad_commute + (\s. \x\ set xs. tcb_at' (f x) s \ + range_cover ptr (objBitsKO (KOTCB makeObject)) (objBitsKO (KOTCB makeObject)) (Suc 0) \ + pspace_aligned' s \ + pspace_distinct' s \ + pspace_no_overlap' ptr (objBitsKO (KOTCB makeObject)) s \ + is_aligned ptr (objBitsKO (KOTCB makeObject))) + (mapM_x (threadSet (tcbDomain_update (\_. r))) (map f xs)) + (createObjects' ptr (Suc 0) (KOTCB makeObject) 0)" +proof (induct xs) + case Nil + show ?case + apply (simp add:monad_commute_def mapM_x_Nil) + done +next + case (Cons x xs) + show ?case + apply (simp add:mapM_x_Cons) + apply (rule monad_commute_guard_imp) + apply (rule commute_commute[OF monad_commute_split]) + apply (rule commute_commute[OF Cons.hyps]) + apply (rule createObjects_setDomain_commute) + apply (wp hoare_vcg_ball_lift) + apply clarsimp + done +qed + +lemma createObjects'_pspace_no_overlap2: + "\pspace_no_overlap' (ptr + (1 + of_nat n << gz)) sz + and K (gz = (objBitsKO val) + us) + and K (range_cover ptr sz gz (Suc (Suc n)) \ ptr \ 0)\ + createObjects' ptr (Suc n) val us + \\addrs s. pspace_no_overlap' (ptr + (1 + of_nat n << gz)) sz s\" +proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + show ?thesis + apply (rule hoare_gen_asm)+ + apply (clarsimp simp:createObjects'_def split_def new_cap_addrs_fold') + apply (subst new_cap_addrs_fold') + apply clarsimp + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (drule_tac gbits = us in range_cover_not_zero_shift[rotated]) + apply simp+ + apply (simp add:word_le_sub1) + apply (wp haskell_assert_wp unless_wp |wpc + |simp add:alignError_def del:fun_upd_apply)+ + apply (rule conjI) + apply (rule impI) + apply (subgoal_tac + "pspace_no_overlap' (ptr + (1 + of_nat n << objBitsKO val + us)) + sz + (s\ksPSpace := foldr (\addr map. 
map(addr \ val)) + (new_cap_addrs (unat (1 + of_nat n << us)) ptr val) (ksPSpace s)\)") + apply (intro conjI impI allI) + apply assumption+ + apply (subst pspace_no_overlap'_def) + apply (intro allI impI) + apply (subst (asm) foldr_upd_app_if) + apply (subst range_cover_tail_mask) + apply simp+ + apply (clarsimp split:if_splits) + apply (drule obj_range'_subset_strong[rotated]) + apply (rule range_cover_rel[OF range_cover_le[where n = "Suc n"]],assumption) + apply simp+ + apply (drule range_cover.unat_of_nat_n_shift + [OF range_cover_le[where n = "Suc n"],where gbits = us]) + apply simp+ + apply (simp add:shiftl_t2n field_simps)+ + apply (simp add:obj_range'_def) + apply (erule disjoint_subset) + apply (clarsimp simp:blah) + apply (thin_tac "x \ y" for x y) + apply (subst (asm) le_m1_iff_lt[THEN iffD1]) + apply (drule_tac range_cover_no_0[rotated,where p = "Suc n"]) + apply simp + apply simp + apply (simp add:field_simps) + apply (simp add: power_add[symmetric]) + apply (simp add: word_neq_0_conv) + apply (simp add: power_add[symmetric] field_simps) + apply (frule range_cover_subset[where p = "Suc n"]) + apply simp + apply simp + apply (drule(1) pspace_no_overlapD') + apply (subst (asm) range_cover_tail_mask) + apply simp+ + apply (simp add:word_le_sub1 shiftl_t2n field_simps mask_def) + apply auto + done +qed + +lemma new_cap_addrs_def2: + "n < 2^64 \ new_cap_addrs (Suc n) ptr obj = map (\n. ptr + (n << objBitsKO obj)) [0.e.of_nat n]" + by (simp add:new_cap_addrs_def upto_enum_word unat_of_nat Fun.comp_def) + +lemma createTCBs_tcb_at': + "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz + (objBitsKO (KOTCB makeObject)) (Suc n) \ + createObjects' ptr (Suc n) (KOTCB makeObject) 0 + \\rv s. + (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s)\" + apply (simp add:createObjects'_def split_def alignError_def) + apply (wp unless_wp |wpc)+ + apply (subst data_map_insert_def[symmetric])+ + apply clarsimp + apply (subgoal_tac "(\x\of_nat n. + tcb_at' (ptr + x * 2^tcbBlockSizeBits) (s\ksPSpace := + foldr (\addr. 
data_map_insert addr (KOTCB makeObject)) + (new_cap_addrs (Suc n) ptr (KOTCB makeObject)) + (ksPSpace s)\))") + apply (subst (asm) new_cap_addrs_def2) + apply (drule range_cover.weak) + apply simp + apply simp + apply (clarsimp simp: retype_obj_at_disj') + apply (clarsimp simp: new_cap_addrs_def image_def) + apply (drule_tac x = "unat x" in bspec) + apply (simp add:objBits_simps' shiftl_t2n) + apply (rule unat_less_helper) + apply (rule ccontr) + apply simp + apply (simp add: objBits_simps shiftl_t2n) + done + +lemma createNewCaps_Cons: + assumes cover:"range_cover ptr sz (Types_H.getObjectSize ty us) (Suc (Suc n))" + and "valid_pspace' s" "valid_arch_state' s" + and "pspace_no_overlap' ptr sz s" + and "ptr \ 0" + shows "createNewCaps ty ptr (Suc (Suc n)) us d s + = (do x \ createNewCaps ty ptr (Suc n) us d; + r \ RetypeDecls_H.createObject ty + (((1 + of_nat n) << Types_H.getObjectSize ty us) + ptr) us d; + return (x @ [r]) + od) s" +proof - + have append :"[0.e.(1::machine_word) + of_nat n] = [0.e.of_nat n] @ [1 + of_nat n]" + using cover + apply - + apply (frule range_cover_not_zero[rotated]) + apply simp + apply (frule range_cover.unat_of_nat_n) + apply (drule range_cover_le[where n = "Suc n"]) + apply simp + apply (frule range_cover_not_zero[rotated]) + apply simp + apply (frule range_cover.unat_of_nat_n) + apply (subst upto_enum_red'[where X = "2 + of_nat n",simplified]) + apply (simp add:field_simps word_le_sub1) + apply clarsimp + apply (subst upto_enum_red'[where X = "1 + of_nat n",simplified]) + apply (simp add:field_simps word_le_sub1) + apply simp + done + + have conj_impI: + "\A B C. \C;C\B\ \ B \ C" + by simp + + have suc_of_nat: "(1::machine_word) + of_nat n = of_nat (1 + n)" + by simp + + have gsUserPages_update[simp]: + "\f. (\ks. ks \gsUserPages := f (gsUserPages ks)\) = gsUserPages_update f" + by (rule ext) simp + have gsCNodes_update[simp]: + "\f. (\ks. ks \gsCNodes := f (gsCNodes ks)\) = gsCNodes_update f" + by (rule ext) simp + have ksArchState_update[simp]: + "\f. (\ks. ks \ksArchState := f (ksArchState ks)\) = ksArchState_update f" + by (rule ext) simp + + have if_eq[simp]: + "!!x a b pgsz. (if a = ptr + (1 + of_nat n << b) then Some pgsz + else if a \ (\n. ptr + (n << b)) ` {x. x \ of_nat n} + then Just pgsz else x a) = + (if a \ (\n. ptr + (n << b)) ` {x. x \ 1 + of_nat n} + then Just pgsz else x a)" + apply (simp only: Just_def if3_fold2) + apply (rule_tac x="x a" in fun_cong) + apply (rule arg_cong2[where f=If, OF _ refl]) + apply (subgoal_tac "{x. x \ (1::machine_word) + of_nat n} = + {1 + of_nat n} \ {x. 
x \ of_nat n}") + apply (simp add: add.commute) + apply safe + apply (clarsimp simp: word_le_less_eq[of _ "1 + of_nat n"]) + apply (metis plus_one_helper add.commute) + using cover + apply - + apply (drule range_cover_le[where n = "Suc n"], simp) + apply (simp only: suc_of_nat word_le_nat_alt Suc_eq_plus1) + apply (frule range_cover.unat_of_nat_n) + apply simp + apply (drule range_cover_le[where n=n], simp) + apply (frule range_cover.unat_of_nat_n, simp) + done + + show ?thesis + using assms + apply (clarsimp simp:valid_pspace'_def) + apply (frule range_cover.aligned) + apply (frule(3) pspace_no_overlap'_tail) + apply simp + apply (drule_tac ptr = "ptr + x" for x + in pspace_no_overlap'_le[where sz' = "Types_H.getObjectSize ty us"]) + apply (simp add:range_cover_def word_bits_def) + apply (erule range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]) + apply (simp add: createNewCaps_def) + apply (case_tac ty) + apply (simp add: AARCH64_H.toAPIType_def Arch_createNewCaps_def) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add: bind_assoc AARCH64_H.toAPIType_def) + \ \Untyped\ + apply (simp add: bind_assoc AARCH64_H.getObjectSize_def + mapM_def sequence_def Retype_H.createObject_def + AARCH64_H.toAPIType_def + createObjects_def AARCH64_H.createObject_def + Arch_createNewCaps_def comp_def + apiGetObjectSize_def shiftl_t2n field_simps + shiftL_nat mapM_x_def sequence_x_def append + fromIntegral_def integral_inv[unfolded Fun.comp_def]) + \ \TCB, EP, NTFN\ + apply (simp add: bind_assoc + AARCH64_H.getObjectSize_def + sequence_def Retype_H.createObject_def + AARCH64_H.toAPIType_def + createObjects_def AARCH64_H.createObject_def + Arch_createNewCaps_def comp_def + apiGetObjectSize_def shiftl_t2n field_simps + shiftL_nat append mapM_x_append2 + fromIntegral_def integral_inv[unfolded Fun.comp_def])+ + apply (subst monad_eq) + apply (rule createObjects_Cons) + apply (simp add: field_simps shiftl_t2n bind_assoc pageBits_def + objBits_simps placeNewObject_def2)+ + apply (rule_tac Q = "\r s. pspace_aligned' s \ + pspace_distinct' s \ + pspace_no_overlap' (ptr + (2^tcbBlockSizeBits + of_nat n * 2^tcbBlockSizeBits)) (objBitsKO (KOTCB makeObject)) s \ + range_cover (ptr + 2^tcbBlockSizeBits) sz + (objBitsKO (KOTCB makeObject)) (Suc n) + \ (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s)" + in monad_eq_split2) + apply simp + apply (subst monad_commute_simple[symmetric]) + apply (rule commute_commute[OF curDomain_commute]) + apply wpsimp+ + apply (rule_tac Q = "\r s. r = (ksCurDomain s) \ + pspace_aligned' s \ + pspace_distinct' s \ + pspace_no_overlap' (ptr + (2^tcbBlockSizeBits + of_nat n * 2^tcbBlockSizeBits)) (objBitsKO (KOTCB makeObject)) s \ + range_cover (ptr + 2^tcbBlockSizeBits) sz + (objBitsKO (KOTCB makeObject)) (Suc n) + \ (\x\set [0.e.of_nat n]. tcb_at' (ptr + x * 2^tcbBlockSizeBits) s) + " in monad_eq_split) + apply (subst monad_commute_simple[symmetric]) + apply (rule createObjects_setDomains_commute) + apply (clarsimp simp:objBits_simps) + apply (rule conj_impI) + apply (erule aligned_add_aligned) + apply (rule aligned_add_aligned[where n = tcbBlockSizeBits]) + apply (simp add:is_aligned_def objBits_defs) + apply (cut_tac is_aligned_shift[where m = tcbBlockSizeBits and k = "of_nat n", + unfolded shiftl_t2n,simplified]) + apply (simp add:field_simps)+ + apply (erule range_cover_full) + apply (simp add: word_bits_conv objBits_defs) + apply (rule_tac Q = "\x s. 
(ksCurDomain s) = r" in monad_eq_split2) + apply simp + apply (rule_tac Q = "\x s. (ksCurDomain s) = r" in monad_eq_split) + apply (subst rewrite_step[where f = curDomain and + P ="\s. ksCurDomain s = r" and f' = "return r"]) + apply (simp add:curDomain_def bind_def gets_def get_def) + apply simp + apply (simp add:mapM_x_singleton) + apply wp + apply simp + apply (wp mapM_x_wp') + apply simp + apply (simp add:curDomain_def,wp) + apply simp + apply (wp createObjects'_psp_aligned[where sz = sz] + createObjects'_psp_distinct[where sz = sz]) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_post_imp[OF _ + createObjects'_pspace_no_overlap[unfolded shiftl_t2n, + where gz = tcbBlockSizeBits and sz = sz, simplified]]) + apply (simp add:objBits_simps field_simps) + apply (simp add: objBits_simps) + apply (wp createTCBs_tcb_at'[where sz = sz]) + apply (clarsimp simp:objBits_simps word_bits_def field_simps) + apply (frule range_cover_le[where n = "Suc n"],simp+) + apply (drule range_cover_offset[where p = 1,rotated]) + apply simp + apply (simp add: objBits_defs) + apply (((simp add: bind_assoc + AARCH64_H.getObjectSize_def + mapM_def sequence_def Retype_H.createObject_def + AARCH64_H.toAPIType_def + createObjects_def AARCH64_H.createObject_def + Arch_createNewCaps_def comp_def + apiGetObjectSize_def shiftl_t2n field_simps + shiftL_nat mapM_x_def sequence_x_def append + fromIntegral_def integral_inv[unfolded Fun.comp_def])+ + , subst monad_eq, rule createObjects_Cons + , (simp add: field_simps shiftl_t2n bind_assoc pageBits_def + objBits_simps placeNewObject_def2)+)+)[2] + + apply (in_case "CapTableObject") + apply (simp add: bind_assoc + AARCH64_H.getObjectSize_def + mapM_def sequence_def Retype_H.createObject_def + AARCH64_H.toAPIType_def + createObjects_def AARCH64_H.createObject_def + Arch_createNewCaps_def comp_def + apiGetObjectSize_def shiftl_t2n field_simps + shiftL_nat mapM_x_def sequence_x_def append + fromIntegral_def integral_inv[unfolded Fun.comp_def])+ + apply (subst monad_eq, rule createObjects_Cons) + apply (simp add: field_simps shiftl_t2n bind_assoc pageBits_def + objBits_simps placeNewObject_def2)+ + apply (subst gsCNodes_update gsCNodes_upd_createObjects'_comm)+ + apply (simp add: modify_modify_bind) + apply (rule fun_cong[where x=s]) + apply (rule arg_cong_bind1)+ + apply (rule arg_cong_bind[OF _ refl]) + apply (rule arg_cong[where f=modify, OF ext], simp) + apply (rule arg_cong2[where f=gsCNodes_update, OF ext refl]) + apply (rule ext) + apply simp + + apply (in_case "HugePageObject") + apply (simp add: Arch_createNewCaps_def + Retype_H.createObject_def createObjects_def bind_assoc + AARCH64_H.toAPIType_def + AARCH64_H.createObject_def placeNewDataObject_def) + apply (intro conjI impI) + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps + getObjectSize_def bit_simps + add.commute append) + apply ((subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+)[1] + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps + getObjectSize_def + pageBits_def add.commute append) + apply (subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm 
+ dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+ + + apply (in_case "VSpaceObject") + apply (simp add: Arch_createNewCaps_def Retype_H.createObject_def createObjects_def + bind_assoc AARCH64_H.toAPIType_def AARCH64_H.createObject_def) + apply (subst monad_eq, rule createObjects_Cons) + apply ((simp add: field_simps shiftl_t2n + getObjectSize_def bit_simps objBits_simps ptBits_def)+)[6] + apply (simp add: bind_assoc placeNewObject_def2) + apply (simp add: field_simps updatePTType_def bind_assoc gets_modify_def + getObjectSize_def placeNewObject_def2 objBits_simps append) + apply (subst ksArchState_update ksArchState_upd_createObjects'_comm + | simp add: modify_modify_bind o_def + | simp only: o_def cong: if_cong)+ + apply (rule bind_apply_cong, simp) + apply (rule bind_apply_cong, simp) + apply (rule monad_eq_split_tail, simp) + apply (rule fun_cong, rule arg_cong[where f=modify]) + apply (simp flip: if_eq) + apply (simp cong: if_cong del: if_eq) + apply (rule ext) + apply (rename_tac s', case_tac s') + apply (rename_tac ksArch ksMachine, case_tac ksArch) + apply fastforce + + apply (in_case "SmallPageObject") + apply (simp add: Arch_createNewCaps_def + Retype_H.createObject_def createObjects_def bind_assoc + toAPIType_def + AARCH64_H.createObject_def placeNewDataObject_def) + apply (intro conjI impI) + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n bit_simps + getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps bit_simps + getObjectSize_def add.commute append) + apply ((subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+)[1] + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + AARCH64_H.getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps + AARCH64_H.getObjectSize_def + pageBits_def add.commute append) + apply (subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+ + + apply (in_case "LargePageObject") + apply (simp add: Arch_createNewCaps_def + Retype_H.createObject_def createObjects_def bind_assoc + toAPIType_def AARCH64_H.createObject_def placeNewDataObject_def) + apply (intro conjI impI) + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps bit_simps + getObjectSize_def add.commute append) + apply ((subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+)[1] + apply (subst monad_eq, rule createObjects_Cons) + apply (simp_all add: field_simps shiftl_t2n pageBits_def + AARCH64_H.getObjectSize_def objBits_simps)[6] + apply (simp add: bind_assoc placeNewObject_def2 objBits_simps + getObjectSize_def bit_simps add.commute append) + apply (subst gsUserPages_update gsCNodes_update + gsUserPages_upd_createObjects'_comm + dmo'_createObjects'_comm dmo'_gsUserPages_upd_comm + | simp add: modify_modify_bind o_def)+ + + apply (in_case "PageTableObject") + apply (simp add: Arch_createNewCaps_def Retype_H.createObject_def createObjects_def bind_assoc + 
AARCH64_H.toAPIType_def AARCH64_H.createObject_def) + apply (subst monad_eq, rule createObjects_Cons) + apply ((simp add: field_simps shiftl_t2n + getObjectSize_def bit_simps objBits_simps ptBits_def)+)[6] + apply (simp add: bind_assoc placeNewObject_def2) + apply (simp add: field_simps updatePTType_def bind_assoc gets_modify_def + getObjectSize_def placeNewObject_def2 objBits_simps append) + apply (subst ksArchState_update ksArchState_upd_createObjects'_comm + | simp add: modify_modify_bind o_def + | simp only: o_def cong: if_cong)+ + apply (rule bind_apply_cong, simp) + apply (rule bind_apply_cong, simp) + apply (rule monad_eq_split_tail, simp) + apply (rule fun_cong, rule arg_cong[where f=modify]) + apply (simp flip: if_eq) + apply (simp cong: if_cong del: if_eq) + apply (rule ext) + apply (rename_tac s', case_tac s') + apply (rename_tac ksArch ksMachine, case_tac ksArch) + apply fastforce + apply (in_case "VCPUObject") + apply (simp add: Arch_createNewCaps_def Retype_H.createObject_def + createObjects_def bind_assoc AARCH64_H.toAPIType_def + AARCH64_H.createObject_def) + apply (subst monad_eq, rule createObjects_Cons) + apply ((simp add: field_simps shiftl_t2n getObjectSize_def + bit_simps objBits_simps ptBits_def)+)[6] + apply (simp add: bind_assoc placeNewObject_def2) + apply (simp add: add_ac bit_simps getObjectSize_def objBits_simps append) + done +qed + +lemma createObject_def2: + "(RetypeDecls_H.createObject ty ptr us dev >>= (\x. return [x])) = + createNewCaps ty ptr (Suc 0) us dev" + apply (clarsimp simp:createObject_def createNewCaps_def placeNewObject_def2) + apply (case_tac ty; simp add: toAPIType_def) + defer + apply ((clarsimp simp: Arch_createNewCaps_def createObjects_def shiftL_nat + AARCH64_H.createObject_def placeNewDataObject_def + placeNewObject_def2 objBits_simps bind_assoc + clearMemory_def clearMemoryVM_def fun_upd_def[symmetric] + word_size mapM_x_singleton storeWordVM_def + updatePTType_def gets_modify_def)+)[6] + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (clarsimp simp: Arch_createNewCaps_def createObjects_def shiftL_nat + AARCH64_H.createObject_def placeNewObject_def2 objBits_simps bind_assoc + clearMemory_def clearMemoryVM_def word_size mapM_x_singleton + storeWordVM_def)+ + done + + +lemma createNewObjects_def2: + "\dslots \ []; length ( dslots ) < 2^word_bits; + cte_wp_at' (\c. isUntypedCap (cteCap c)) parent s; + \slot \ set dslots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s; + pspace_no_overlap' ptr sz s; + caps_no_overlap'' ptr sz s; + caps_overlap_reserved' + {ptr..ptr + of_nat (length dslots) * 2 ^ Types_H.getObjectSize ty us - 1} s; + valid_pspace' s; + distinct dslots; + valid_arch_state' s; + range_cover ptr sz (Types_H.getObjectSize ty us) (length dslots); + ptr \ 0; sz \ maxUntypedSizeBits; + ksCurDomain s \ maxDomain\ + \ createNewObjects ty parent dslots ptr us d s = + insertNewCaps ty parent dslots ptr us d s" + apply (clarsimp simp:insertNewCaps_def createNewObjects_def neq_Nil_conv) + proof - + fix y ys + have list_inc: "\n. [0.e.Suc n] = [0 .e. n] @ [n+1]" + by simp + assume le: "Suc (length (ys::machine_word list)) < 2 ^ word_bits" + assume list_nc: "\slot \ set ys. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s" + assume dist: "distinct ys" + assume extra: "y\ set ys" "cte_wp_at' (\c. 
cteCap c = capability.NullCap) y s" + assume not_0: "ptr \ 0" + assume sz_limit: "sz \ maxUntypedSizeBits" + assume kscd: "ksCurDomain s \ maxDomain" + assume valid_psp: "valid_pspace' s" + assume valid_arch_state: "valid_arch_state' s" + assume psp_no_overlap: "pspace_no_overlap' ptr sz s" + assume caps_no_overlap: "caps_no_overlap'' ptr sz s" + assume caps_reserved: "caps_overlap_reserved' + {ptr..ptr + (1 + of_nat (length ys)) * 2 ^ (Types_H.getObjectSize ty us) - 1} s" + assume range_cover: "range_cover ptr sz (Types_H.getObjectSize ty us) (Suc (length ys))" + assume unt_at: "cte_wp_at' (\c. isUntypedCap (cteCap c)) parent s" + show "zipWithM_x + (\num slot. + RetypeDecls_H.createObject ty ((num << Types_H.getObjectSize ty us) + ptr) us d >>= + insertNewCap parent slot) + [0.e.of_nat (length ys)] (y # ys) s = + (createNewCaps ty ptr (Suc (length ys)) us d >>= zipWithM_x (insertNewCap parent) (y # ys)) s" + using le list_nc dist extra range_cover not_0 sz_limit caps_reserved + proof (induct ys arbitrary: y rule:rev_induct) + case Nil + show ?case + by (clarsimp simp:zipWithM_x_def zipWith_def + sequence_x_def createObject_def2[symmetric]) + next + case (snoc a as b) + have caps_r:"caps_overlap_reserved' + {ptr..ptr + (1 + of_nat (length as)) * 2 ^ Types_H.getObjectSize ty us - 1} s" + using snoc.prems + apply - + apply (erule caps_overlap_reserved'_subseteq) + apply (cut_tac is_aligned_no_overflow + [where ptr = "ptr + ((1 + of_nat (length as)) << APIType_capBits ty us)" + and sz = " Types_H.getObjectSize ty us"]) + apply (clarsimp simp: power_add[symmetric] shiftl_t2n field_simps objSize_eq_capBits ) + apply (rule order_trans[OF word_sub_1_le]) + apply (drule(1) range_cover_no_0[where p = "Suc (length as)"]) + apply simp + apply (simp add:word_arith_nat_Suc power_add[symmetric] field_simps) + apply (simp add:shiftl_t2n) + apply (rule aligned_add_aligned[OF range_cover.aligned]) + apply (simp add:objSize_eq_capBits)+ + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def objSize_eq_capBits)+ + done + show ?case + apply simp + using snoc.prems + apply (subst upto_enum_inc_1_len) + apply (rule word_of_nat_less) + apply (simp add:word_bits_def minus_one_norm) + apply (subst append_Cons[symmetric]) + apply (subst zipWithM_x_append1) + apply (clarsimp simp:unat_of_nat64 bind_assoc) + apply (subst monad_eq) + apply (rule snoc.hyps) + apply (simp add:caps_r | rule range_cover_le)+ + apply (simp add:snoc.hyps bind_assoc) + apply (rule sym) + apply (subst monad_eq) + apply (erule createNewCaps_Cons[OF _ valid_psp valid_arch_state psp_no_overlap not_0]) + apply (rule sym) + apply (simp add:bind_assoc del:upto_enum_nat) + apply (rule_tac Q = "(\r s. (\cap\set r. cap \ capability.NullCap) \ + cte_wp_at' (\c. isUntypedCap (cteCap c)) parent s \ + cte_wp_at' (\c. cteCap c = capability.NullCap) b s \ + (\slot\set as. cte_wp_at' (\c. 
cteCap c = capability.NullCap) slot s) \ + pspace_no_overlap' (ptr + (1 + of_nat (length as) << Types_H.getObjectSize ty us)) + (Types_H.getObjectSize ty us) s + \ valid_pspace' s \ valid_arch_state' s \ Q r s)" for Q in monad_eq_split) + apply (subst append_Cons[symmetric]) + apply (subst zipWithM_x_append1) + apply clarsimp + apply assumption + apply (clarsimp simp:field_simps) + apply (subst monad_commute_simple[OF commute_commute]) + apply (rule new_cap_object_commute) + apply (clarsimp) + apply (frule_tac p = "1 + length as" in range_cover_no_0[rotated]) + apply clarsimp + apply simp + apply (subst (asm) Abs_fnat_hom_add[symmetric]) + apply (intro conjI) + apply (simp add:range_cover_def word_bits_def) + apply (rule aligned_add_aligned[OF range_cover.aligned],simp) + apply (rule is_aligned_shiftl_self) + apply (simp add:range_cover_def) + apply (simp add:range_cover_def) + apply (clarsimp simp:field_simps shiftl_t2n) + apply (clarsimp simp:createNewCaps_def) + apply (wp createNewCaps_not_nc createNewCaps_pspace_no_overlap'[where sz = sz] + createNewCaps_cte_wp_at'[where sz = sz] hoare_vcg_ball_lift + createNewCaps_valid_pspace[where sz = sz] + createNewCaps_obj_at'[where sz=sz]) + apply simp + apply (rule range_cover_le) + apply (simp add:objSize_eq_capBits caps_r)+ + apply (wp createNewCaps_ret_len createNewCaps_valid_arch_state) + apply (frule range_cover_le[where n = "Suc (length as)"]) + apply simp+ + using psp_no_overlap caps_r valid_psp unt_at caps_no_overlap valid_arch_state + apply (clarsimp simp: valid_pspace'_def objSize_eq_capBits) + apply (auto simp: kscd) + done + qed +qed + +lemma createNewObjects_corres_helper: +assumes check: "distinct dslots" + and cover: "range_cover ptr sz (Types_H.getObjectSize ty us) (length dslots)" + and not_0: "ptr \ 0" "length dslots \ 0" + and sz_limit: "sz \ maxUntypedSizeBits" + and c: "corres r P P' f (insertNewCaps ty parent dslots ptr us d)" + and imp: "\s. P' s \ (cte_wp_at' (\c. isUntypedCap (cteCap c)) parent s + \ (\slot \ set dslots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s) + \ pspace_no_overlap' ptr sz s + \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr..ptr + of_nat (length dslots) * + 2^ (Types_H.getObjectSize ty us) - 1} s + \ valid_pspace' s \ valid_arch_state' s \ ksCurDomain s \ maxDomain)" + shows "corres r P P' f (createNewObjects ty parent dslots ptr us d)" + using check cover not_0 sz_limit + apply (clarsimp simp:corres_underlying_def) + apply (frule imp) + apply (frule range_cover.range_cover_le_n_less(1)[where 'a=machine_word_len, folded word_bits_def, OF _ le_refl]) + apply clarsimp + apply (simp add:createNewObjects_def2) + using c + apply (clarsimp simp:corres_underlying_def) + apply (drule(1) bspec) + apply clarsimp + done + +lemma createNewObjects_wp_helper: + assumes check: "distinct dslots" + and cover: "range_cover ptr sz (Types_H.getObjectSize ty us) (length dslots)" + and not_0: "ptr \ 0" "length dslots \ 0" + and sz_limit: "sz \ maxUntypedSizeBits" + shows "\P\ insertNewCaps ty parent dslots ptr us d \Q\ + \ \P and (cte_wp_at' (\c. isUntypedCap (cteCap c)) parent + and (\s. \slot \ set dslots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s) + and pspace_no_overlap' ptr sz + and caps_no_overlap'' ptr sz + and valid_pspace' + and valid_arch_state' + and caps_overlap_reserved' + {ptr..ptr + of_nat (length dslots) * 2^ (Types_H.getObjectSize ty us) - 1} and (\s. 
ksCurDomain s \ maxDomain)) + \ (createNewObjects ty parent dslots ptr us d) \Q\" + using assms + apply (clarsimp simp:valid_def) + apply (drule_tac x = s in spec) + apply (frule range_cover.range_cover_le_n_less(1)[where 'a=machine_word_len, folded word_bits_def, OF _ le_refl]) + apply (simp add:createNewObjects_def2[symmetric]) + apply (drule(1) bspec) + apply clarsimp + done + +lemma createObject_def3: + "createObject = + (\ty ptr us d. createNewCaps ty ptr (Suc 0) us d >>= (\m. return (hd m)))" + apply (rule ext)+ + apply (simp add:createObject_def2[symmetric]) + done + +crunches updatePTType + for pspace_no_overlap'[wp]: "pspace_no_overlap' p n" + +lemma ArchCreateObject_pspace_no_overlap': + "\\s. pspace_no_overlap' + (ptr + (of_nat n << APIType_capBits ty userSize)) sz s \ + pspace_aligned' s \ pspace_distinct' s \ + range_cover ptr sz (APIType_capBits ty userSize) (n + 2) \ ptr \ 0\ + AARCH64_H.createObject ty + (ptr + (of_nat n << APIType_capBits ty userSize)) userSize d + \\archCap. pspace_no_overlap' + (ptr + (1 + of_nat n << APIType_capBits ty userSize)) sz\" + apply (rule hoare_pre) + apply (clarsimp simp:AARCH64_H.createObject_def) + apply wpc + apply (wp doMachineOp_psp_no_overlap + createObjects'_pspace_no_overlap2 hoare_when_weak_wp + createObjects'_psp_aligned[where sz = sz] + createObjects'_psp_distinct[where sz = sz] + | simp add: placeNewObject_def2 word_shiftl_add_distrib + | simp add: placeNewObject_def2 word_shiftl_add_distrib + | simp add: placeNewDataObject_def placeNewObject_def2 word_shiftl_add_distrib + field_simps split del: if_split + | clarsimp simp add: add.assoc[symmetric],wp createObjects'_pspace_no_overlap2[where n =0 and sz = sz,simplified] + | clarsimp simp add: APIType_capBits_def objBits_simps pageBits_def)+ + apply (clarsimp simp: conj_comms) + apply (frule(1) range_cover_no_0[where p = n]) + apply simp + apply (subgoal_tac "is_aligned (ptr + (of_nat n << APIType_capBits ty userSize)) + (APIType_capBits ty userSize) ") + prefer 2 + apply (rule aligned_add_aligned[OF range_cover.aligned],assumption) + apply (simp add:is_aligned_shiftl_self range_cover_sz') + apply (simp add: APIType_capBits_def) + apply (frule range_cover_offset[rotated,where p = n]) + apply simp+ + apply (frule range_cover_le[where n = "Suc (Suc 0)"]) + apply simp + apply (frule pspace_no_overlap'_le2) + apply (rule range_cover_compare_offset) + apply simp+ + apply (clarsimp simp:word_shiftl_add_distrib + ,simp add:field_simps) + apply (clarsimp simp:add.assoc[symmetric]) + apply (rule range_cover_tail_mask[where n =0,simplified]) + apply (drule range_cover_offset[rotated,where p = n]) + apply simp + apply (clarsimp simp:shiftl_t2n field_simps) + apply (metis numeral_2_eq_2) + apply (simp add:shiftl_t2n field_simps) + apply (intro conjI allI) + apply (clarsimp simp: field_simps word_bits_conv + APIType_capBits_def shiftl_t2n objBits_simps bit_simps + | rule conjI | erule range_cover_le,simp)+ + done + +lemma to_from_apiTypeD: "toAPIType ty = Some x \ ty = fromAPIType x" + by (cases ty) (auto simp add: fromAPIType_def + toAPIType_def) + +lemma createObject_pspace_no_overlap': + "\\s. pspace_no_overlap' + (ptr + (of_nat n << APIType_capBits ty userSize)) sz s \ + pspace_aligned' s \ pspace_distinct' s + \ range_cover ptr sz (APIType_capBits ty userSize) (n + 2) + \ ptr \ 0\ + createObject ty (ptr + (of_nat n << APIType_capBits ty userSize)) userSize d + \\rv s. 
pspace_no_overlap' + (ptr + (1 + of_nat n << APIType_capBits ty userSize)) sz s\" + apply (rule hoare_pre) + apply (clarsimp simp:createObject_def) + apply wpc + apply (wp ArchCreateObject_pspace_no_overlap') + apply wpc + apply wp + apply (simp add:placeNewObject_def2) + apply (wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap2 + | simp add: placeNewObject_def2 curDomain_def word_shiftl_add_distrib + field_simps)+ + apply (simp add:add.assoc[symmetric]) + apply (wp createObjects'_pspace_no_overlap2 + [where n =0 and sz = sz,simplified]) + apply (simp add:placeNewObject_def2) + apply (wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap2 + | simp add: placeNewObject_def2 word_shiftl_add_distrib + field_simps)+ + apply (simp add:add.assoc[symmetric]) + apply (wp createObjects'_pspace_no_overlap2 + [where n =0 and sz = sz,simplified]) + apply (simp add:placeNewObject_def2) + apply (wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap2 + | simp add: placeNewObject_def2 word_shiftl_add_distrib + field_simps)+ + apply (simp add:add.assoc[symmetric]) + apply (wp createObjects'_pspace_no_overlap2 + [where n =0 and sz = sz,simplified]) + apply (simp add:placeNewObject_def2) + apply (wp doMachineOp_psp_no_overlap createObjects'_pspace_no_overlap2 + | simp add: placeNewObject_def2 word_shiftl_add_distrib + field_simps)+ + apply (simp add:add.assoc[symmetric]) + apply (wp createObjects'_pspace_no_overlap2 + [where n =0 and sz = sz,simplified]) + apply clarsimp + apply (frule(1) range_cover_no_0[where p = n]) + apply simp + apply (frule pspace_no_overlap'_le2) + apply (rule range_cover_compare_offset) + apply simp+ + apply (clarsimp simp:word_shiftl_add_distrib + ,simp add:field_simps) + apply (clarsimp simp:add.assoc[symmetric]) + apply (rule range_cover_tail_mask[where n =0,simplified]) + apply (drule range_cover_offset[rotated,where p = n]) + apply simp + apply (clarsimp simp:shiftl_t2n field_simps) + apply (metis numeral_2_eq_2) + apply (simp add:shiftl_t2n field_simps) + apply (frule range_cover_offset[rotated,where p = n]) + apply simp+ + apply (auto simp: word_shiftl_add_distrib field_simps shiftl_t2n elim: range_cover_le, + auto simp add: APIType_capBits_def fromAPIType_def objBits_def + dest!: to_from_apiTypeD) + done + +crunches updatePTType + for aligned'[wp]: pspace_aligned' + and distinct'[wp]: pspace_distinct' + +lemma createObject_pspace_aligned_distinct': + "\pspace_aligned' and K (is_aligned ptr (APIType_capBits ty us)) + and pspace_distinct' and pspace_no_overlap' ptr (APIType_capBits ty us) + and K (ty = APIObjectType apiobject_type.CapTableObject \ us < 28)\ + createObject ty ptr us d + \\xa s. 
pspace_aligned' s \ pspace_distinct' s\" + apply (rule hoare_pre) + apply (wp placeNewObject_pspace_aligned' unless_wp + placeNewObject_pspace_distinct' + | simp add: AARCH64_H.createObject_def Retype_H.createObject_def objBits_simps + curDomain_def placeNewDataObject_def + split del: if_split + | wpc | intro conjI impI)+ + apply (auto simp: APIType_capBits_def objBits_simps' bit_simps word_bits_def + AARCH64_H.toAPIType_def + split: AARCH64_H.object_type.splits apiobject_type.splits) + done + +declare objSize_eq_capBits [simp] + +lemma createNewObjects_Cons: + assumes dlength: "length dest < 2 ^ word_bits" + shows "createNewObjects ty src (dest @ [lt]) ptr us d = + do createNewObjects ty src dest ptr us d; + (RetypeDecls_H.createObject ty ((of_nat (length dest) << APIType_capBits ty us) + ptr) us d + >>= insertNewCap src lt) + od" + proof - + from dlength + have expand:"dest\[] \ [(0::machine_word) .e. of_nat (length dest)] + = [0.e.of_nat (length dest - 1)] @ [of_nat (length dest)]" + apply (cases dest) + apply clarsimp+ + apply (rule upto_enum_inc_1_len) + apply (rule word_of_nat_less) + apply (simp add: word_bits_conv minus_one_norm) + done + + have length:"\length dest < 2 ^ word_bits;dest \ []\ + \ length [(0::machine_word) .e. of_nat (length dest - 1)] = length dest" + proof (induct dest) + case Nil thus ?case by simp + next + case (Cons x xs) + thus ?case by (simp add:unat_of_nat64) + qed + + show ?thesis + using dlength + apply (case_tac "dest = []") + apply (simp add: zipWithM_x_def createNewObjects_def + sequence_x_def zipWith_def) + apply (clarsimp simp:createNewObjects_def) + apply (subst expand) + apply simp + apply (subst zipWithM_x_append1) + apply (rule length) + apply (simp add:field_simps)+ + done +qed + +lemma updateNewFreeIndex_cteCaps_of[wp]: + "\\s. P (cteCaps_of s)\ updateNewFreeIndex slot \\rv s. P (cteCaps_of s)\" + by (simp add: cteCaps_of_def, wp) + +lemma insertNewCap_wps[wp]: + "\pspace_aligned'\ insertNewCap parent slot cap \\rv. pspace_aligned'\" + "\pspace_distinct'\ insertNewCap parent slot cap \\rv. pspace_distinct'\" + "\\s. P ((cteCaps_of s)(slot \ cap))\ + insertNewCap parent slot cap + \\rv s. P (cteCaps_of s)\" + apply (simp_all add: insertNewCap_def) + apply (wp hoare_drop_imps + | simp add: o_def)+ + apply (fastforce elim!: rsubst[where P=P]) + done + +crunch typ_at'[wp]: insertNewCap "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +end +end diff --git a/proof/refine/AARCH64/EmptyFail.thy b/proof/refine/AARCH64/EmptyFail.thy new file mode 100644 index 0000000000..3744f2b4a7 --- /dev/null +++ b/proof/refine/AARCH64/EmptyFail.thy @@ -0,0 +1,137 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory EmptyFail +imports Bits_R +begin + +(* Collect empty_fail lemmas here. naming convention is emtpy_fail_NAME. 
+ Unless there is a good reason, they should all be [intro!, wp, simp] *) + +lemma empty_fail_projectKO [simp, intro!]: + "empty_fail (projectKO v)" + unfolding empty_fail_def projectKO_def + by (simp add: return_def fail_def split: option.splits) + +lemma empty_fail_alignCheck [intro!, wp, simp]: + "empty_fail (alignCheck a b)" + unfolding alignCheck_def + by (fastforce simp: alignError_def) + +lemma empty_fail_magnitudeCheck [intro!, wp, simp]: + "empty_fail (magnitudeCheck a b c)" + unfolding magnitudeCheck_def + by (fastforce split: option.splits) + +lemma empty_fail_loadObject_default [intro!, wp, simp]: + shows "empty_fail (loadObject_default x b c d)" + by (auto simp: loadObject_default_def + split: option.splits) + +lemma empty_fail_threadGet [intro!, wp, simp]: + "empty_fail (threadGet f p)" + by (fastforce simp: threadGet_def getObject_def split_def) + +lemma empty_fail_getCTE [intro!, wp, simp]: + "empty_fail (getCTE slot)" + apply (simp add: getCTE_def getObject_def split_def) + apply (intro empty_fail_bind, simp_all) + apply (simp add: loadObject_cte typeError_def alignCheck_def alignError_def + magnitudeCheck_def + split: Structures_H.kernel_object.split) + apply (auto split: option.split) + done + +lemma empty_fail_updateObject_cte [intro!, wp, simp]: + "empty_fail (updateObject (v :: cte) ko a b c)" + by (fastforce simp: updateObject_cte typeError_def unless_def split: kernel_object.splits ) + +lemma empty_fail_setCTE [intro!, wp, simp]: + "empty_fail (setCTE p cte)" + unfolding setCTE_def + by (fastforce simp: setObject_def split_def) + +lemma empty_fail_updateCap [intro!, wp, simp]: + "empty_fail (updateCap p f)" + unfolding updateCap_def by auto + +lemma empty_fail_updateMDB [intro!, wp, simp]: + "empty_fail (updateMDB a b)" + unfolding updateMDB_def Let_def by auto + +lemma empty_fail_getSlotCap [intro!, wp, simp]: + "empty_fail (getSlotCap a)" + unfolding getSlotCap_def by fastforce + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma empty_fail_getObject: + assumes "\b c d. 
empty_fail (loadObject x b c d::'a :: pspace_storable kernel)" + shows "empty_fail (getObject x :: 'a :: pspace_storable kernel)" + apply (simp add: getObject_def split_def) + apply (safe intro!: assms) + done + +lemma empty_fail_updateTrackedFreeIndex [intro!, wp, simp]: + shows "empty_fail (updateTrackedFreeIndex p idx)" + by (fastforce simp add: updateTrackedFreeIndex_def) + +lemma empty_fail_updateNewFreeIndex [intro!, wp, simp]: + shows "empty_fail (updateNewFreeIndex p)" + apply (simp add: updateNewFreeIndex_def) + apply safe + apply (simp split: capability.split) + done + +lemma empty_fail_insertNewCap [intro!, wp, simp]: + "empty_fail (insertNewCap p p' cap)" + unfolding insertNewCap_def by fastforce + +lemma empty_fail_getIRQSlot [intro!, wp, simp]: + "empty_fail (getIRQSlot irq)" + by (fastforce simp: getIRQSlot_def getInterruptState_def locateSlot_conv) + +lemma empty_fail_getObject_ntfn [intro!, wp, simp]: + "empty_fail (getObject p :: Structures_H.notification kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_getNotification [intro!, wp, simp]: + "empty_fail (getNotification ep)" + by (simp add: getNotification_def) + +lemma empty_fail_lookupIPCBuffer [intro!, wp, simp]: + "empty_fail (lookupIPCBuffer a b)" + by (clarsimp simp: lookupIPCBuffer_def Let_def getThreadBufferSlot_def locateSlot_conv + split: capability.splits arch_capability.splits | wp | wpc | safe)+ + +lemma empty_fail_updateObject_default [intro!, wp, simp]: + "empty_fail (updateObject_default v ko a b c)" + by (fastforce simp: updateObject_default_def typeError_def unless_def split: kernel_object.splits ) + +lemma empty_fail_threadSet [intro!, wp, simp]: + "empty_fail (threadSet f p)" + by (fastforce simp: threadSet_def getObject_def setObject_def split_def) + +lemma empty_fail_getThreadState[iff]: + "empty_fail (getThreadState t)" + by (simp add: getThreadState_def) + +declare empty_fail_stateAssert [wp] + +lemma empty_fail_getSchedulerAction [intro!, wp, simp]: + "empty_fail getSchedulerAction" + by (simp add: getSchedulerAction_def getObject_def split_def) + +lemma empty_fail_scheduleSwitchThreadFastfail [intro!, wp, simp]: + "empty_fail (scheduleSwitchThreadFastfail a b c d)" + by (simp add: scheduleSwitchThreadFastfail_def split: if_splits) + +lemma empty_fail_curDomain [intro!, wp, simp]: + "empty_fail curDomain" + by (simp add: curDomain_def) + +end +end diff --git a/proof/refine/AARCH64/EmptyFail_H.thy b/proof/refine/AARCH64/EmptyFail_H.thy new file mode 100644 index 0000000000..7b60006d01 --- /dev/null +++ b/proof/refine/AARCH64/EmptyFail_H.thy @@ -0,0 +1,317 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory EmptyFail_H +imports Refine +begin + +crunch_ignore (empty_fail) + (add: handleE' getCTE getObject updateObject + CSpaceDecls_H.resolveAddressBits + doMachineOp suspend restart schedule) + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemmas forM_empty_fail[intro!, wp, simp] = empty_fail_mapM[simplified forM_def[symmetric]] +lemmas forM_x_empty_fail[intro!, wp, simp] = empty_fail_mapM_x[simplified forM_x_def[symmetric]] +lemmas forME_x_empty_fail[intro!, wp, simp] = empty_fail_mapME_x[simplified forME_x_def[symmetric]] + +lemma withoutPreemption_empty_fail[intro!, wp, simp]: + "empty_fail m \ empty_fail (withoutPreemption m)" + by simp + +lemma withoutFailure_empty_fail[intro!, wp, simp]: + "empty_fail m \ empty_fail (withoutFailure m)" + by simp + +lemma catchFailure_empty_fail[intro!, wp, simp]: + "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catchFailure f g)" + by (simp add: empty_fail_catch) + +lemma emptyOnFailure_empty_fail[intro!, wp, simp]: + "empty_fail m \ empty_fail (emptyOnFailure m)" + by (simp add: emptyOnFailure_def empty_fail_catch) + +lemma rethrowFailure_empty_fail [intro!, wp, simp]: + "empty_fail m \ empty_fail (rethrowFailure f m)" + by (wpsimp simp:rethrowFailure_def o_def) + +lemma unifyFailure_empty_fail [intro!, wp, simp]: + "empty_fail f \ empty_fail (unifyFailure f)" + by (simp add: unifyFailure_def) + +lemma lookupErrorOnFailure_empty_fail [intro!, wp, simp]: + "empty_fail f \ empty_fail (lookupErrorOnFailure isSource f)" + by (simp add: lookupErrorOnFailure_def) + +lemma setObject_empty_fail [intro!, wp, simp]: + assumes x: "(\a b c. empty_fail (updateObject v a x b c))" + shows "empty_fail (setObject x v)" + by (wpsimp simp: setObject_def split_def wp: x) + +lemma asUser_empty_fail [intro!, wp, simp]: + "empty_fail f \ empty_fail (asUser t f)" + unfolding asUser_def + by (wpsimp | simp add: empty_fail_def)+ + +lemma capFaultOnFailure_empty_fail [intro!, wp, simp]: + "empty_fail m \ empty_fail (capFaultOnFailure cptr rp m)" + by (simp add: capFaultOnFailure_def) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: locateSlotCap + +lemma resolveAddressBits_spec_empty_fail: + notes spec_empty_fail_bindE'[wp_split] + shows + "spec_empty_fail (CSpace_H.resolveAddressBits a b c) s" +proof (induct arbitrary: s rule: resolveAddressBits.induct) + case (1 a b c s) + show ?case + apply (simp add: resolveAddressBits.simps) + apply (wp | simp | wpc | intro impI conjI | rule drop_spec_empty_fail)+ + apply (rule use_spec_empty_fail) + apply (rule 1 | simp add: in_monad | rule drop_spec_empty_fail | force)+ + done + qed + +lemmas resolveAddressBits_empty_fail[intro!, wp, simp] = + resolveAddressBits_spec_empty_fail[THEN use_spec_empty_fail] + +declare ef_dmo'[intro!, wp, simp] + +lemma empty_fail_getObject_ep [intro!, wp, simp]: + "empty_fail (getObject p :: endpoint kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_getObject_tcb [intro!, wp, simp]: + shows "empty_fail (getObject x :: tcb kernel)" + by (auto intro: empty_fail_getObject) + +lemma getEndpoint_empty_fail [intro!, wp, simp]: + "empty_fail (getEndpoint ep)" + by (simp add: getEndpoint_def) + +lemma constOnFailure_empty_fail[intro!, wp, simp]: + "empty_fail m \ empty_fail (constOnFailure x m)" + by (simp add: constOnFailure_def const_def empty_fail_catch) + +lemma ArchRetypeDecls_H_deriveCap_empty_fail[intro!, wp, simp]: + "isPageTableCap y \ isFrameCap y \ isASIDControlCap y \ isASIDPoolCap y \ isVCPUCap y + \ empty_fail (Arch.deriveCap x y)" + apply (simp add: AARCH64_H.deriveCap_def) + by (auto simp: isCap_simps) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: ensureNoChildren + +lemma deriveCap_empty_fail[intro!, wp, simp]: + "empty_fail (RetypeDecls_H.deriveCap slot y)" + apply (simp 
add: Retype_H.deriveCap_def) + apply (clarsimp simp: empty_fail_bindE) + apply (case_tac "capCap y") + apply (simp_all add: isCap_simps) + done + +crunch (empty_fail) empty_fail[intro!, wp, simp]: setExtraBadge, cteInsert + +lemma transferCapsToSlots_empty_fail[intro!, wp, simp]: + "empty_fail (transferCapsToSlots ep buffer n caps slots mi)" + apply (induct caps arbitrary: slots n mi) + apply simp + apply (simp add: Let_def split_def + split del: if_split) + apply (simp | wp | wpc | safe)+ + done + +crunch (empty_fail) empty_fail[intro!, wp, simp]: lookupTargetSlot, ensureEmptySlot, lookupSourceSlot, lookupPivotSlot + +lemma decodeCNodeInvocation_empty_fail[intro!, wp, simp]: + "empty_fail (decodeCNodeInvocation label args cap exs)" + apply (rule_tac label=label and args=args and exs=exs in decode_cnode_cases2) + apply (simp_all add: decodeCNodeInvocation_def + split_def cnode_invok_case_cleanup unlessE_whenE + cong: if_cong bool.case_cong list.case_cong) + by (simp | wp | wpc | safe)+ + +lemma empty_fail_getObject_ap [intro!, wp, simp]: + "empty_fail (getObject p :: asidpool kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_getObject_pte [intro!, wp, simp]: + "empty_fail (getObject p :: pte kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_getObject_vcpu [intro!, wp, simp]: + "empty_fail (getObject p :: vcpu kernel)" + by (simp add: empty_fail_getObject) + +lemma empty_fail_lookupPTSlotFromLevel[intro!, wp, simp]: + "empty_fail (lookupPTSlotFromLevel level pt vPtr)" +proof (induct level arbitrary: pt) + case 0 + then show ?case by (subst lookupPTSlotFromLevel.simps, simp) +next + case (Suc level) + then show ?case + by (subst lookupPTSlotFromLevel.simps) (wpsimp simp: checkPTAt_def pteAtIndex_def) +qed + +(* FIXME AARCH64 this and empty_fail_pt_type_exhausted are needed to effectively crunch decodeARMMMUInvocation, + so should be moved much higher and then deployed to other crunches of decodeARMMMUInvocation, + which are hand-held at present *) +lemma empty_fail_arch_cap_exhausted: + "\\ isFrameCap cap; \ isPageTableCap cap; \ isASIDControlCap cap; \ isASIDPoolCap cap; + \ isVCPUCap cap\ + \ empty_fail undefined" + by (cases cap; simp add: isCap_simps) + +(* FIXME AARCH64 move somewhere high up, see empty_fail_arch_cap_exhausted *) +lemma empty_fail_pt_type_exhausted: + "\ pt_t \ NormalPT_T; pt_t \ VSRootPT_T \ + \ False" + by (case_tac pt_t; simp) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: decodeARMMMUInvocation + (simp: Let_def pteAtIndex_def + wp: empty_fail_catch empty_fail_pt_type_exhausted empty_fail_arch_cap_exhausted) + +lemma ignoreFailure_empty_fail[intro!, wp, simp]: + "empty_fail x \ empty_fail (ignoreFailure x)" + by (simp add: ignoreFailure_def empty_fail_catch) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: cancelIPC, setThreadState, tcbSchedDequeue, setupReplyMaster, isStopped, possibleSwitchTo, tcbSchedAppend +(simp: Let_def setNotification_def setBoundNotification_def) + +crunch (empty_fail) "_H_empty_fail"[intro!, wp, simp]: "ThreadDecls_H.suspend" + (ignore_del: ThreadDecls_H.suspend) + +lemma ThreadDecls_H_restart_empty_fail[intro!, wp, simp]: + "empty_fail (ThreadDecls_H.restart target)" + by (fastforce simp: restart_def) + +lemma vcpuUpdate_empty_fail[intro!, wp, simp]: + "empty_fail (vcpuUpdate p f)" + by (fastforce simp: vcpuUpdate_def) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: vcpuEnable, vcpuRestore + (simp: uncurry_def) + +lemma empty_fail_lookupPTFromLevel[intro!, wp, simp]: + "empty_fail 
(lookupPTFromLevel level ptPtr vPtr target)" + by (induct level arbitrary: ptPtr; subst lookupPTFromLevel.simps; simp; wpsimp) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: finaliseCap, preemptionPoint, capSwapForDelete +(wp: empty_fail_catch simp: Let_def ignore: lookupPTFromLevel) + +lemmas finalise_spec_empty_fail_induct = finaliseSlot'.induct[where P= + "\sl exp s. spec_empty_fail (finaliseSlot' sl exp) s"] + +lemma spec_empty_fail_If: + "\ P \ spec_empty_fail f s; \ P \ spec_empty_fail g s \ + \ spec_empty_fail (if P then f else g) s" + by (simp split: if_split) + +lemma spec_empty_whenE': + "\ P \ spec_empty_fail f s \ \ spec_empty_fail (whenE P f) s" + by (simp add: whenE_def spec_empty_returnOk) + +lemma finaliseSlot_spec_empty_fail: + notes spec_empty_fail_bindE'[rotated, wp_split] + shows "spec_empty_fail (finaliseSlot x b) s" +unfolding finaliseSlot_def +proof (induct rule: finalise_spec_empty_fail_induct) + case (1 x b s) + show ?case + apply (subst finaliseSlot'_simps_ext) + apply (simp only: split_def Let_def K_bind_def fun_app_def) + apply (wp spec_empty_whenE' spec_empty_fail_If | wpc + | rule 1[unfolded Let_def K_bind_def split_def fun_app_def, + simplified], (simp | intro conjI)+ + | rule drop_spec_empty_fail | simp)+ + done +qed + +lemmas finaliseSlot_empty_fail[intro!, wp, simp] = + finaliseSlot_spec_empty_fail[THEN use_spec_empty_fail] + +lemma checkCapAt_empty_fail[intro!, wp, simp]: + "empty_fail action \ empty_fail (checkCapAt cap ptr action)" + by (fastforce simp: checkCapAt_def) + +lemma assertDerived_empty_fail[intro!, wp, simp]: + "empty_fail f \ empty_fail (assertDerived src cap f)" + by (fastforce simp: assertDerived_def) + +crunch (empty_fail) empty_fail[intro!, wp, simp]: cteDelete + +lemma spec_empty_fail_unlessE': + "\ \ P \ spec_empty_fail f s \ \ spec_empty_fail (unlessE P f) s" + by (simp add:unlessE_def spec_empty_returnOk) + +lemma cteRevoke_spec_empty_fail: + notes spec_empty_fail_bindE'[wp_split] + shows "spec_empty_fail (cteRevoke p) s" +proof (induct rule: cteRevoke.induct) + case (1 p s) + show ?case + apply (simp add: cteRevoke.simps) + apply (wp spec_empty_whenE' spec_empty_fail_unlessE' | rule drop_spec_empty_fail, wp)+ + apply (rule 1, auto simp add: in_monad) + done +qed + +lemmas cteRevoke_empty_fail[intro!, wp, simp] = + cteRevoke_spec_empty_fail[THEN use_spec_empty_fail] + +lemma Syscall_H_syscall_empty_fail[intro!, wp, simp]: + "\empty_fail a; \x. empty_fail (b x); \x. empty_fail (c x); + \x. empty_fail (d x); \x. empty_fail (e x)\ + \ empty_fail (syscall a b c d e)" + apply (simp add:syscall_def) + apply (wp | wpc | simp)+ + done + +lemma catchError_empty_fail[intro!, wp, simp]: + "\ empty_fail f; \x. empty_fail (g x) \ \ empty_fail (catchError f g)" + by fastforce + +crunch (empty_fail) empty_fail[intro!, wp, simp]: + chooseThread, getDomainTime, nextDomain, isHighestPrio + (wp: empty_fail_catch) + +lemma ThreadDecls_H_schedule_empty_fail[intro!, wp, simp]: + "empty_fail schedule" + apply (simp add: schedule_def) + apply (clarsimp simp: scheduleChooseNewThread_def split: if_split | wp | wpc)+ + done + +crunch (empty_fail) empty_fail[wp, simp]: setMRs, setMessageInfo +(wp: empty_fail_catch simp: const_def Let_def) + +crunch (empty_fail) empty_fail: decodeVCPUInjectIRQ, decodeVCPUWriteReg, decodeVCPUReadReg, doFlush, + decodeVCPUAckVPPI + (simp: Let_def) + +crunch (empty_fail) empty_fail: callKernel + (wp: empty_fail_catch) + +theorem call_kernel_serial: + "\ (einvs and (\s. 
event \ Interrupt \ ct_running s) and (ct_running or ct_idle) and + (\s. scheduler_action s = resume_cur_thread) and + (\s. 0 < domain_time s \ valid_domain_list s)) s; + \s'. (s, s') \ state_relation \ + (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread)) s' \ + \ fst (call_kernel event s) \ {}" + apply (cut_tac m = "call_kernel event" in corres_underlying_serial) + apply (rule kernel_corres) + apply (rule callKernel_empty_fail) + apply auto + done + +end + +end diff --git a/proof/refine/AARCH64/Finalise_R.thy b/proof/refine/AARCH64/Finalise_R.thy new file mode 100644 index 0000000000..363adae063 --- /dev/null +++ b/proof/refine/AARCH64/Finalise_R.thy @@ -0,0 +1,4451 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Finalise_R +imports + IpcCancel_R + InterruptAcc_R + Retype_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare doUnbindNotification_def[simp] + +text \Properties about empty_slot/emptySlot\ + +lemma case_Null_If: + "(case c of NullCap \ a | _ \ b) = (if c = NullCap then a else b)" + by (case_tac c, simp_all) + +crunches emptySlot + for aligned'[wp]: pspace_aligned' + and distinct'[wp]: pspace_distinct' + (simp: case_Null_If) + +lemma updateCap_cte_wp_at_cases: + "\\s. (ptr = ptr' \ cte_wp_at' (P \ cteCap_update (K cap)) ptr' s) \ (ptr \ ptr' \ cte_wp_at' P ptr' s)\ + updateCap ptr cap + \\rv. cte_wp_at' P ptr'\" + apply (clarsimp simp: valid_def) + apply (drule updateCap_stuff) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def) + done + +crunches postCapDeletion, updateTrackedFreeIndex + for cte_wp_at'[wp]: "cte_wp_at' P p" + +lemma updateFreeIndex_cte_wp_at: + "\\s. cte_at' p s \ P (cte_wp_at' (if p = p' then P' + o (cteCap_update (capFreeIndex_update (K idx))) else P') p' s)\ + updateFreeIndex p idx + \\rv s. P (cte_wp_at' P' p' s)\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def + split del: if_split) + apply (rule hoare_pre) + apply (wp updateCap_cte_wp_at' getSlotCap_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (cases "p' = p", simp_all) + apply (case_tac cte, simp) + done + +lemma emptySlot_cte_wp_cap_other: + "\(\s. cte_wp_at' (\c. P (cteCap c)) p s) and K (p \ p')\ + emptySlot p' opt + \\rv s. cte_wp_at' (\c. P (cteCap c)) p s\" + apply (rule hoare_gen_asm) + apply (simp add: emptySlot_def clearUntypedFreeIndex_def getSlotCap_def) + apply (rule hoare_pre) + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases + updateFreeIndex_cte_wp_at getCTE_wp' hoare_vcg_all_lift + | simp add: | wpc + | wp (once) hoare_drop_imps)+ + done + +lemmas clearUntypedFreeIndex_typ_ats[wp] = typ_at_lifts[OF clearUntypedFreeIndex_typ_at'] + +crunch tcb_at'[wp]: postCapDeletion "tcb_at' t" +crunch ct[wp]: emptySlot "\s. P (ksCurThread s)" +crunch cur_tcb'[wp]: clearUntypedFreeIndex "cur_tcb'" + (wp: cur_tcb_lift) + +crunch ksRQ[wp]: emptySlot "\s. P (ksReadyQueues s)" +crunch ksRQL1[wp]: emptySlot "\s. P (ksReadyQueuesL1Bitmap s)" +crunch ksRQL2[wp]: emptySlot "\s. P (ksReadyQueuesL2Bitmap s)" +crunch obj_at'[wp]: postCapDeletion "obj_at' P p" + +lemmas postCapDeletion_valid_queues[wp] = + valid_queues_lift [OF postCapDeletion_obj_at' + postCapDeletion_pred_tcb_at' + postCapDeletion_ksRQ] + +crunch inQ[wp]: clearUntypedFreeIndex "\s. P (obj_at' (inQ d p) t s)" +crunch tcbDomain[wp]: clearUntypedFreeIndex "obj_at' (\tcb. 
P (tcbDomain tcb)) t" +crunch tcbPriority[wp]: clearUntypedFreeIndex "obj_at' (\tcb. P (tcbPriority tcb)) t" + +lemma emptySlot_queues [wp]: + "\Invariants_H.valid_queues\ emptySlot sl opt \\rv. Invariants_H.valid_queues\" + unfolding emptySlot_def + by (wp | wpcw | wp valid_queues_lift | simp)+ + +crunch nosch[wp]: emptySlot "\s. P (ksSchedulerAction s)" +crunch ksCurDomain[wp]: emptySlot "\s. P (ksCurDomain s)" + +lemma emptySlot_sch_act_wf [wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + emptySlot sl opt + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: emptySlot_def case_Null_If) + apply (wp sch_act_wf_lift tcb_in_cur_domain'_lift | wpcw | simp)+ + done + +lemma updateCap_valid_objs' [wp]: + "\valid_objs' and valid_cap' cap\ + updateCap ptr cap \\r. valid_objs'\" + unfolding updateCap_def + by (wp setCTE_valid_objs getCTE_wp) (clarsimp dest!: cte_at_cte_wp_atD) + +lemma updateFreeIndex_valid_objs' [wp]: + "\valid_objs'\ clearUntypedFreeIndex ptr \\r. valid_objs'\" + apply (simp add: clearUntypedFreeIndex_def getSlotCap_def) + apply (wp getCTE_wp' | wpc | simp add: updateTrackedFreeIndex_def)+ + done + +crunch valid_objs'[wp]: emptySlot "valid_objs'" + +crunch state_refs_of'[wp]: setInterruptState "\s. P (state_refs_of' s)" + (simp: state_refs_of'_pspaceI) +crunch state_refs_of'[wp]: emptySlot "\s. P (state_refs_of' s)" + (wp: crunch_wps) +crunch state_hyp_refs_of'[wp]: setInterruptState "\s. P (state_hyp_refs_of' s)" + (simp: state_hyp_refs_of'_pspaceI) +crunch state_hyp_refs_of'[wp]: emptySlot "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps) + +lemma mdb_chunked2D: + "\ mdb_chunked m; m \ p \ p'; m \ p' \ p''; + m p = Some (CTE cap nd); m p'' = Some (CTE cap'' nd''); + sameRegionAs cap cap''; p \ p'' \ + \ \cap' nd'. m p' = Some (CTE cap' nd') \ sameRegionAs cap cap'" + apply (subgoal_tac "\cap' nd'. m p' = Some (CTE cap' nd')") + apply (clarsimp simp add: mdb_chunked_def) + apply (drule spec[where x=p]) + apply (drule spec[where x=p'']) + apply clarsimp + apply (drule mp, erule trancl_into_trancl2) + apply (erule trancl.intros(1)) + apply (simp add: is_chunk_def) + apply (drule spec, drule mp, erule trancl.intros(1)) + apply (drule mp, rule trancl_into_rtrancl) + apply (erule trancl.intros(1)) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold) + apply (case_tac z, simp) + done + +lemma nullPointer_eq_0_simp[simp]: + "(nullPointer = 0) = True" + "(0 = nullPointer) = True" + by (simp add: nullPointer_def)+ + +lemma no_0_no_0_lhs_trancl [simp]: + "no_0 m \ \ m \ 0 \\<^sup>+ x" + by (rule, drule tranclD, clarsimp simp: next_unfold') + +lemma no_0_no_0_lhs_rtrancl [simp]: + "\ no_0 m; x \ 0 \ \ \ m \ 0 \\<^sup>* x" + by (clarsimp dest!: rtranclD) + +end +locale mdb_empty = + mdb_ptr?: mdb_ptr m _ _ slot s_cap s_node + for m slot s_cap s_node + + + fixes n + defines "n \ + modify_map + (modify_map + (modify_map + (modify_map m (mdbPrev s_node) + (cteMDBNode_update (mdbNext_update (%_. (mdbNext s_node))))) + (mdbNext s_node) + (cteMDBNode_update + (\mdb. mdbFirstBadged_update (%_. (mdbFirstBadged mdb \ mdbFirstBadged s_node)) + (mdbPrev_update (%_. (mdbPrev s_node)) mdb)))) + slot (cteCap_update (%_. capability.NullCap))) + slot (cteMDBNode_update (const nullMDBNode))" +begin +interpretation Arch . (*FIXME: arch_split*) + +lemmas m_slot_prev = m_p_prev +lemmas m_slot_next = m_p_next +lemmas prev_slot_next = prev_p_next +lemmas next_slot_prev = next_p_prev + +lemma n_revokable: + "n p = Some (CTE cap node) \ + (\cap' node'. 
m p = Some (CTE cap' node') \ + (if p = slot + then \ mdbRevocable node + else mdbRevocable node = mdbRevocable node'))" + by (auto simp add: n_def modify_map_if nullMDBNode_def split: if_split_asm) + +lemma m_revokable: + "m p = Some (CTE cap node) \ + (\cap' node'. n p = Some (CTE cap' node') \ + (if p = slot + then \ mdbRevocable node' + else mdbRevocable node' = mdbRevocable node))" + apply (clarsimp simp add: n_def modify_map_if nullMDBNode_def split: if_split_asm) + apply (cases "p=slot", simp) + apply (cases "p=mdbNext s_node", simp) + apply (cases "p=mdbPrev s_node", simp) + apply clarsimp + apply simp + apply (cases "p=mdbPrev s_node", simp) + apply simp + done + +lemma no_0_n: + "no_0 n" + using no_0 by (simp add: n_def) + +lemma n_next: + "n p = Some (CTE cap node) \ + (\cap' node'. m p = Some (CTE cap' node') \ + (if p = slot + then mdbNext node = 0 + else if p = mdbPrev s_node + then mdbNext node = mdbNext s_node + else mdbNext node = mdbNext node'))" + apply (subgoal_tac "p \ 0") + prefer 2 + apply (insert no_0_n)[1] + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + apply (cases "p = mdbPrev s_node") + apply (auto simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + done + +lemma n_prev: + "n p = Some (CTE cap node) \ + (\cap' node'. m p = Some (CTE cap' node') \ + (if p = slot + then mdbPrev node = 0 + else if p = mdbNext s_node + then mdbPrev node = mdbPrev s_node + else mdbPrev node = mdbPrev node'))" + apply (subgoal_tac "p \ 0") + prefer 2 + apply (insert no_0_n)[1] + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + apply (cases "p = mdbNext s_node") + apply (auto simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + done + +lemma n_cap: + "n p = Some (CTE cap node) \ + \cap' node'. m p = Some (CTE cap' node') \ + (if p = slot + then cap = NullCap + else cap' = cap)" + apply (clarsimp simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + apply (cases node) + apply auto + done + +lemma m_cap: + "m p = Some (CTE cap node) \ + \cap' node'. n p = Some (CTE cap' node') \ + (if p = slot + then cap' = NullCap + else cap' = cap)" + apply (clarsimp simp: n_def modify_map_cases initMDBNode_def) + apply (cases node) + apply clarsimp + apply (cases "p=slot", simp) + apply clarsimp + apply (cases "mdbNext s_node = p", simp) + apply fastforce + apply simp + apply (cases "mdbPrev s_node = p", simp) + apply fastforce + done + +lemma n_badged: + "n p = Some (CTE cap node) \ + \cap' node'. m p = Some (CTE cap' node') \ + (if p = slot + then \ mdbFirstBadged node + else if p = mdbNext s_node + then mdbFirstBadged node = (mdbFirstBadged node' \ mdbFirstBadged s_node) + else mdbFirstBadged node = mdbFirstBadged node')" + apply (subgoal_tac "p \ 0") + prefer 2 + apply (insert no_0_n)[1] + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + apply (cases "p = mdbNext s_node") + apply (auto simp: n_def modify_map_if nullMDBNode_def split: if_split_asm) + done + +lemma m_badged: + "m p = Some (CTE cap node) \ + \cap' node'. 
n p = Some (CTE cap' node') \ + (if p = slot + then \ mdbFirstBadged node' + else if p = mdbNext s_node + then mdbFirstBadged node' = (mdbFirstBadged node \ mdbFirstBadged s_node) + else mdbFirstBadged node' = mdbFirstBadged node)" + apply (subgoal_tac "p \ 0") + prefer 2 + apply (insert no_0_n)[1] + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if nullMDBNode_def split: if_split_asm) + apply (cases "p = mdbNext s_node") + apply (clarsimp simp: n_def modify_map_if nullMDBNode_def split: if_split_asm) + apply clarsimp + apply (cases "p = mdbPrev s_node") + apply (auto simp: n_def modify_map_if initMDBNode_def split: if_split_asm) + done + +lemmas slot = m_p + +lemma m_next: + "m p = Some (CTE cap node) \ + \cap' node'. n p = Some (CTE cap' node') \ + (if p = slot + then mdbNext node' = 0 + else if p = mdbPrev s_node + then mdbNext node' = mdbNext s_node + else mdbNext node' = mdbNext node)" + apply (subgoal_tac "p \ 0") + prefer 2 + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if) + apply (cases "p = mdbPrev s_node") + apply (simp add: n_def modify_map_if) + apply simp + apply (simp add: n_def modify_map_if) + apply (cases "mdbNext s_node = p") + apply fastforce + apply fastforce + done + +lemma m_prev: + "m p = Some (CTE cap node) \ + \cap' node'. n p = Some (CTE cap' node') \ + (if p = slot + then mdbPrev node' = 0 + else if p = mdbNext s_node + then mdbPrev node' = mdbPrev s_node + else mdbPrev node' = mdbPrev node)" + apply (subgoal_tac "p \ 0") + prefer 2 + apply clarsimp + apply (cases "p = slot") + apply (clarsimp simp: n_def modify_map_if) + apply (cases "p = mdbPrev s_node") + apply (simp add: n_def modify_map_if) + apply simp + apply (simp add: n_def modify_map_if) + apply (cases "mdbNext s_node = p") + apply fastforce + apply fastforce + done + +lemma n_nextD: + "n \ p \ p' \ + if p = slot then p' = 0 + else if p = mdbPrev s_node + then m \ p \ slot \ p' = mdbNext s_node + else m \ p \ p'" + apply (clarsimp simp: mdb_next_unfold split del: if_split cong: if_cong) + apply (case_tac z) + apply (clarsimp split del: if_split) + apply (drule n_next) + apply (elim exE conjE) + apply (simp split: if_split_asm) + apply (frule dlist_prevD [OF m_slot_prev]) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma n_next_eq: + "n \ p \ p' = + (if p = slot then p' = 0 + else if p = mdbPrev s_node + then m \ p \ slot \ p' = mdbNext s_node + else m \ p \ p')" + apply (rule iffI) + apply (erule n_nextD) + apply (clarsimp simp: mdb_next_unfold split: if_split_asm) + apply (simp add: n_def modify_map_if slot) + apply hypsubst_thin + apply (case_tac z) + apply simp + apply (drule m_next) + apply clarsimp + apply (case_tac z) + apply simp + apply (drule m_next) + apply clarsimp + done + +lemma n_prev_eq: + "n \ p \ p' = + (if p' = slot then p = 0 + else if p' = mdbNext s_node + then m \ slot \ p' \ p = mdbPrev s_node + else m \ p \ p')" + apply (rule iffI) + apply (clarsimp simp: mdb_prev_def split del: if_split cong: if_cong) + apply (case_tac z) + apply (clarsimp split del: if_split) + apply (drule n_prev) + apply (elim exE conjE) + apply (simp split: if_split_asm) + apply (frule dlist_nextD [OF m_slot_next]) + apply (clarsimp simp: mdb_prev_def) + apply (clarsimp simp: mdb_prev_def split: if_split_asm) + apply (simp add: n_def modify_map_if slot) + apply hypsubst_thin + apply (case_tac z) + apply clarsimp + apply (drule m_prev) + apply clarsimp + apply (case_tac z) + apply simp + apply (drule m_prev) + apply clarsimp + 
done + +lemma valid_dlist_n: + "valid_dlist n" using dlist + apply (clarsimp simp: valid_dlist_def2 [OF no_0_n]) + apply (simp add: n_next_eq n_prev_eq m_slot_next m_slot_prev cong: if_cong) + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp simp: next_slot_prev prev_slot_next) + apply (fastforce dest!: dlist_prev_src_unique) + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: valid_dlist_def2 [OF no_0]) + apply (case_tac "mdbNext s_node = 0") + apply simp + apply (subgoal_tac "m \ slot \ c'") + prefer 2 + apply fastforce + apply (clarsimp simp: mdb_next_unfold slot) + apply (frule next_slot_prev) + apply (drule (1) dlist_prev_src_unique, simp) + apply simp + apply clarsimp + apply (rule conjI, clarsimp) + apply (fastforce dest: dlist_next_src_unique) + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp simp: valid_dlist_def2 [OF no_0]) + apply (clarsimp simp: mdb_prev_def slot) + apply (clarsimp simp: valid_dlist_def2 [OF no_0]) + done + +lemma caps_contained_n: + "caps_contained' n" + using valid + apply (clarsimp simp: valid_mdb_ctes_def caps_contained'_def) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (erule disjE, clarsimp) + apply clarsimp + apply fastforce + done + +lemma chunked: + "mdb_chunked m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma valid_badges: + "valid_badges m" + using valid .. + +lemma valid_badges_n: + "valid_badges n" +proof - + from valid_badges + show ?thesis + apply (simp add: valid_badges_def2) + apply clarsimp + apply (drule_tac p=p in n_cap) + apply (frule n_cap) + apply (drule n_badged) + apply (clarsimp simp: n_next_eq) + apply (case_tac "p=slot", simp) + apply clarsimp + apply (case_tac "p'=slot", simp) + apply clarsimp + apply (case_tac "p = mdbPrev s_node") + apply clarsimp + apply (insert slot)[1] + (* using mdb_chunked to show cap in between is same as on either side *) + apply (subgoal_tac "capMasterCap s_cap = capMasterCap cap'") + prefer 2 + apply (thin_tac "\p. 
P p" for P) + apply (drule mdb_chunked2D[OF chunked]) + apply (fastforce simp: mdb_next_unfold) + apply assumption+ + apply (simp add: sameRegionAs_def3) + apply (intro disjI1) + apply (fastforce simp:isCap_simps capMasterCap_def split:capability.splits) + apply clarsimp + apply clarsimp + apply (erule sameRegionAsE, auto simp: isCap_simps capMasterCap_def split:capability.splits)[1] + (* instantiating known valid_badges on both sides to transitively + give the link we need *) + apply (frule_tac x="mdbPrev s_node" in spec) + apply simp + apply (drule spec, drule spec, drule spec, + drule(1) mp, drule(1) mp) + apply simp + apply (drule_tac x=slot in spec) + apply (drule_tac x="mdbNext s_node" in spec) + apply simp + apply (drule mp, simp(no_asm) add: mdb_next_unfold) + apply simp + apply (cases "capBadge s_cap", simp_all)[1] + apply clarsimp + apply (case_tac "p' = mdbNext s_node") + apply clarsimp + apply (frule vdlist_next_src_unique[where y=slot]) + apply (simp add: mdb_next_unfold slot) + apply clarsimp + apply (rule dlist) + apply clarsimp + apply clarsimp + apply fastforce + done +qed + +lemma to_slot_eq [simp]: + "m \ p \ slot = (p = mdbPrev s_node \ p \ 0)" + apply (rule iffI) + apply (frule dlist_nextD0, simp) + apply (clarsimp simp: mdb_prev_def slot mdb_next_unfold) + apply (clarsimp intro!: prev_slot_next) + done + +lemma n_parent_of: + "\ n \ p parentOf p'; p \ slot; p' \ slot \ \ m \ p parentOf p'" + apply (clarsimp simp: parentOf_def) + apply (case_tac cte, case_tac cte') + apply clarsimp + apply (frule_tac p=p in n_cap) + apply (frule_tac p=p in n_badged) + apply (drule_tac p=p in n_revokable) + apply (clarsimp) + apply (frule_tac p=p' in n_cap) + apply (frule_tac p=p' in n_badged) + apply (drule_tac p=p' in n_revokable) + apply (clarsimp split: if_split_asm; + clarsimp simp: isMDBParentOf_def isCap_simps split: if_split_asm cong: if_cong) + done + +lemma m_parent_of: + "\ m \ p parentOf p'; p \ slot; p' \ slot; p\p'; p'\mdbNext s_node \ \ n \ p parentOf p'" + apply (clarsimp simp add: parentOf_def) + apply (case_tac cte, case_tac cte') + apply clarsimp + apply (frule_tac p=p in m_cap) + apply (frule_tac p=p in m_badged) + apply (drule_tac p=p in m_revokable) + apply clarsimp + apply (frule_tac p=p' in m_cap) + apply (frule_tac p=p' in m_badged) + apply (drule_tac p=p' in m_revokable) + apply clarsimp + apply (simp split: if_split_asm; + clarsimp simp: isMDBParentOf_def isCap_simps split: if_split_asm cong: if_cong) + done + +lemma m_parent_of_next: + "\ m \ p parentOf mdbNext s_node; m \ p parentOf slot; p \ slot; p\mdbNext s_node \ + \ n \ p parentOf mdbNext s_node" + using slot + apply (clarsimp simp add: parentOf_def) + apply (case_tac cte'a, case_tac cte) + apply clarsimp + apply (frule_tac p=p in m_cap) + apply (frule_tac p=p in m_badged) + apply (drule_tac p=p in m_revokable) + apply (frule_tac p="mdbNext s_node" in m_cap) + apply (frule_tac p="mdbNext s_node" in m_badged) + apply (drule_tac p="mdbNext s_node" in m_revokable) + apply (frule_tac p="slot" in m_cap) + apply (frule_tac p="slot" in m_badged) + apply (drule_tac p="slot" in m_revokable) + apply (clarsimp simp: isMDBParentOf_def isCap_simps split: if_split_asm cong: if_cong) + done + +lemma parency_n: + assumes "n \ p \ p'" + shows "m \ p \ p' \ p \ slot \ p' \ slot" +using assms +proof induct + case (direct_parent c') + moreover + hence "p \ slot" + by (clarsimp simp: n_next_eq) + moreover + from direct_parent + have "c' \ slot" + by (clarsimp simp add: n_next_eq split: if_split_asm) + ultimately + show 
?case + apply simp + apply (simp add: n_next_eq split: if_split_asm) + prefer 2 + apply (erule (1) subtree.direct_parent) + apply (erule (2) n_parent_of) + apply clarsimp + apply (frule n_parent_of, simp, simp) + apply (rule subtree.trans_parent[OF _ m_slot_next], simp_all) + apply (rule subtree.direct_parent) + apply (erule prev_slot_next) + apply simp + apply (clarsimp simp: parentOf_def slot) + apply (case_tac cte'a) + apply (case_tac ctea) + apply clarsimp + apply (frule(2) mdb_chunked2D [OF chunked prev_slot_next m_slot_next]) + apply (clarsimp simp: isMDBParentOf_CTE) + apply simp + apply (simp add: slot) + apply (clarsimp simp add: isMDBParentOf_CTE) + apply (insert valid_badges) + apply (simp add: valid_badges_def2) + apply (drule spec[where x=slot]) + apply (drule spec[where x="mdbNext s_node"]) + apply (simp add: slot m_slot_next) + apply (insert valid_badges) + apply (simp add: valid_badges_def2) + apply (drule spec[where x="mdbPrev s_node"]) + apply (drule spec[where x=slot]) + apply (simp add: slot prev_slot_next) + apply (case_tac cte, case_tac cte') + apply (rename_tac cap'' node'') + apply (clarsimp simp: isMDBParentOf_CTE) + apply (frule n_cap, drule n_badged) + apply (frule n_cap, drule n_badged) + apply clarsimp + apply (case_tac cap'', simp_all add: isCap_simps)[1] + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + done +next + case (trans_parent c c') + moreover + hence "p \ slot" + by (clarsimp simp: n_next_eq) + moreover + from trans_parent + have "c' \ slot" + by (clarsimp simp add: n_next_eq split: if_split_asm) + ultimately + show ?case + apply clarsimp + apply (simp add: n_next_eq split: if_split_asm) + prefer 2 + apply (erule (2) subtree.trans_parent) + apply (erule n_parent_of, simp, simp) + apply clarsimp + apply (rule subtree.trans_parent) + apply (rule subtree.trans_parent, assumption) + apply (rule prev_slot_next) + apply clarsimp + apply clarsimp + apply (frule n_parent_of, simp, simp) + apply (clarsimp simp: parentOf_def slot) + apply (case_tac cte'a) + apply (rename_tac cap node) + apply (case_tac ctea) + apply clarsimp + apply (subgoal_tac "sameRegionAs cap s_cap") + prefer 2 + apply (insert chunked)[1] + apply (simp add: mdb_chunked_def) + apply (erule_tac x="p" in allE) + apply (erule_tac x="mdbNext s_node" in allE) + apply simp + apply (drule isMDBParent_sameRegion)+ + apply clarsimp + apply (subgoal_tac "m \ p \\<^sup>+ slot") + prefer 2 + apply (rule trancl_trans) + apply (erule subtree_mdb_next) + apply (rule r_into_trancl) + apply (rule prev_slot_next) + apply clarsimp + apply (subgoal_tac "m \ p \\<^sup>+ mdbNext s_node") + prefer 2 + apply (erule trancl_trans) + apply fastforce + apply simp + apply (erule impE) + apply clarsimp + apply clarsimp + apply (thin_tac "s \ t" for s t) + apply (simp add: is_chunk_def) + apply (erule_tac x=slot in allE) + apply (erule impE, fastforce) + apply (erule impE, fastforce) + apply (clarsimp simp: slot) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (insert valid_badges, simp add: valid_badges_def2) + apply (drule spec[where x=slot], drule spec[where x="mdbNext s_node"]) + apply (simp add: slot m_slot_next) + apply (case_tac cte, case_tac cte') + apply (rename_tac cap'' node'') + apply (clarsimp simp: isMDBParentOf_CTE) + apply (frule n_cap, drule n_badged) + apply (frule n_cap, drule n_badged) + apply (clarsimp split: if_split_asm) + apply (drule subtree_mdb_next) + apply (drule no_loops_tranclE[OF no_loops]) + apply (erule notE, rule 
trancl_into_rtrancl) + apply (rule trancl.intros(2)[OF _ m_slot_next]) + apply (rule trancl.intros(1), rule prev_slot_next) + apply simp + apply (case_tac cap'', simp_all add: isCap_simps)[1] + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply (rule m_slot_next) + apply simp + apply (erule n_parent_of, simp, simp) + done +qed + +lemma parency_m: + assumes "m \ p \ p'" + shows "p \ slot \ (if p' \ slot then n \ p \ p' else m \ p \ mdbNext s_node \ n \ p \ mdbNext s_node)" +using assms +proof induct + case (direct_parent c) + thus ?case + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_next_eq) + apply clarsimp + apply (subgoal_tac "mdbPrev s_node \ 0") + prefer 2 + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (erule m_parent_of, simp, simp) + apply clarsimp + apply clarsimp + apply (drule dlist_next_src_unique) + apply fastforce + apply clarsimp + apply simp + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_next_eq) + apply (drule subtree_parent) + apply (clarsimp simp: parentOf_def) + apply (drule subtree_parent) + apply (erule (1) m_parent_of_next) + apply clarsimp + apply clarsimp + done +next + case (trans_parent c c') + thus ?case + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (cases "c=slot") + apply simp + apply (erule impE) + apply (erule subtree.trans_parent) + apply fastforce + apply (clarsimp simp: slot mdb_next_unfold) + apply (clarsimp simp: slot mdb_next_unfold) + apply (clarsimp simp: slot mdb_next_unfold) + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n_next_eq) + apply clarsimp + apply (subgoal_tac "mdbPrev s_node \ 0") + prefer 2 + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + apply (erule m_parent_of, simp, simp) + apply clarsimp + apply (drule subtree_mdb_next) + apply (drule trancl_trans) + apply (erule r_into_trancl) + apply simp + apply clarsimp + apply (drule dlist_next_src_unique) + apply fastforce + apply clarsimp + apply simp + apply clarsimp + apply (erule subtree.trans_parent) + apply (simp add: n_next_eq) + apply clarsimp + apply (rule m_parent_of_next, erule subtree_parent, assumption, assumption) + apply clarsimp + done +qed + +lemma parency: + "n \ p \ p' = (p \ slot \ p' \ slot \ m \ p \ p')" + by (auto dest!: parency_n parency_m) + +lemma descendants: + "descendants_of' p n = + (if p = slot then {} else descendants_of' p m - {slot})" + by (auto simp add: parency descendants_of'_def) + +lemma n_tranclD: + "n \ p \\<^sup>+ p' \ m \ p \\<^sup>+ p' \ p' \ slot" + apply (erule trancl_induct) + apply (clarsimp simp add: n_next_eq split: if_split_asm) + apply (rule mdb_chain_0D) + apply (rule chain) + apply (clarsimp simp: slot) + apply (blast intro: trancl_trans prev_slot_next) + apply fastforce + apply (clarsimp simp: n_next_eq split: if_split_asm) + apply (erule trancl_trans) + apply (blast intro: trancl_trans prev_slot_next) + apply (fastforce intro: trancl_trans) + done + +lemma m_tranclD: + "m \ p \\<^sup>+ p' \ + if p = slot then n \ mdbNext s_node \\<^sup>* p' + else if p' = slot then n \ p \\<^sup>+ mdbNext s_node + else n \ p \\<^sup>+ p'" + using no_0_n + apply - + apply (erule trancl_induct) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule r_into_trancl) + apply 
(clarsimp simp: n_next_eq) + apply clarsimp + apply (rule conjI) + apply (insert m_slot_next)[1] + apply (clarsimp simp: mdb_next_unfold) + apply clarsimp + apply (rule r_into_trancl) + apply (clarsimp simp: n_next_eq) + apply (rule context_conjI) + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule prev_slot_next) + apply (drule trancl_trans, erule r_into_trancl) + apply simp + apply clarsimp + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n_next_eq) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule rtrancl_trans) + apply (rule r_into_rtrancl) + apply (simp add: n_next_eq) + apply (rule conjI) + apply clarsimp + apply (rule context_conjI) + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + apply clarsimp + apply clarsimp + apply (simp split: if_split_asm) + apply (clarsimp simp: mdb_next_unfold slot) + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (clarsimp simp add: n_next_eq) + apply (rule context_conjI) + apply (clarsimp simp: mdb_next_unfold) + apply (drule prev_slot_next) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma n_trancl_eq: + "n \ p \\<^sup>+ p' = (m \ p \\<^sup>+ p' \ (p = slot \ p' = 0) \ p' \ slot)" + using no_0_n + apply - + apply (rule iffI) + apply (frule n_tranclD) + apply clarsimp + apply (drule tranclD) + apply (clarsimp simp: n_next_eq) + apply (simp add: rtrancl_eq_or_trancl) + apply clarsimp + apply (drule m_tranclD) + apply (simp split: if_split_asm) + apply (rule r_into_trancl) + apply (simp add: n_next_eq) + done + +lemma n_rtrancl_eq: + "n \ p \\<^sup>* p' = + (m \ p \\<^sup>* p' \ + (p = slot \ p' = 0 \ p' = slot) \ + (p' = slot \ p = slot))" + by (auto simp: rtrancl_eq_or_trancl n_trancl_eq) + +lemma mdb_chain_0_n: + "mdb_chain_0 n" + using chain + apply (clarsimp simp: mdb_chain_0_def) + apply (drule bspec) + apply (fastforce simp: n_def modify_map_if split: if_split_asm) + apply (simp add: n_trancl_eq) + done + +lemma mdb_chunked_n: + "mdb_chunked n" + using chunked + apply (clarsimp simp: mdb_chunked_def) + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (case_tac "p=slot", clarsimp) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply (clarsimp simp: is_chunk_def) + apply (simp add: n_trancl_eq n_rtrancl_eq) + apply (rule conjI) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + done + +lemma untyped_mdb_n: + "untyped_mdb' n" + using untyped_mdb + apply (simp add: untyped_mdb'_def descendants_of'_def parency) + apply clarsimp + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (case_tac "p=slot", simp) + apply clarsimp + done + +lemma untyped_inc_n: + "untyped_inc' n" + using untyped_inc + apply (simp add: untyped_inc'_def descendants_of'_def parency) + apply clarsimp + apply (drule n_cap)+ + apply (clarsimp split: if_split_asm) + apply (case_tac "p=slot", simp) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply simp + done + +lemmas vn_prev [dest!] = valid_nullcaps_prev [OF _ slot no_0 dlist nullcaps] +lemmas vn_next [dest!] 
= valid_nullcaps_next [OF _ slot no_0 dlist nullcaps] + +lemma nullcaps_n: "valid_nullcaps n" +proof - + from valid have "valid_nullcaps m" .. + thus ?thesis + apply (clarsimp simp: valid_nullcaps_def nullMDBNode_def nullPointer_def) + apply (frule n_cap) + apply (frule n_next) + apply (frule n_badged) + apply (frule n_revokable) + apply (drule n_prev) + apply (case_tac n) + apply (insert slot) + apply (fastforce split: if_split_asm) + done +qed + +lemma ut_rev_n: "ut_revocable' n" + apply(insert valid) + apply(clarsimp simp: ut_revocable'_def) + apply(frule n_cap) + apply(drule n_revokable) + apply(clarsimp simp: isCap_simps split: if_split_asm) + apply(simp add: valid_mdb_ctes_def ut_revocable'_def) + done + +lemma class_links_n: "class_links n" + using valid slot + apply (clarsimp simp: valid_mdb_ctes_def class_links_def) + apply (case_tac cte, case_tac cte') + apply (drule n_nextD) + apply (clarsimp simp: split: if_split_asm) + apply (simp add: no_0_n) + apply (drule n_cap)+ + apply clarsimp + apply (frule spec[where x=slot], + drule spec[where x="mdbNext s_node"], + simp, simp add: m_slot_next) + apply (drule spec[where x="mdbPrev s_node"], + drule spec[where x=slot], simp) + apply (drule n_cap)+ + apply clarsimp + apply (fastforce split: if_split_asm) + done + +lemma distinct_zombies_m: "distinct_zombies m" + using valid by (simp add: valid_mdb_ctes_def) + +lemma distinct_zombies_n[simp]: + "distinct_zombies n" + using distinct_zombies_m + apply (simp add: n_def distinct_zombies_nonCTE_modify_map) + apply (subst modify_map_apply[where p=slot]) + apply (simp add: modify_map_def slot) + apply simp + apply (rule distinct_zombies_sameMasterE) + apply (simp add: distinct_zombies_nonCTE_modify_map) + apply (simp add: modify_map_def slot) + apply simp + done + +lemma irq_control_n [simp]: "irq_control n" + using slot + apply (clarsimp simp: irq_control_def) + apply (frule n_revokable) + apply (drule n_cap) + apply (clarsimp split: if_split_asm) + apply (frule irq_revocable, rule irq_control) + apply clarsimp + apply (drule n_cap) + apply (clarsimp simp: if_split_asm) + apply (erule (1) irq_controlD, rule irq_control) + done + +lemma reply_masters_rvk_fb_m: "reply_masters_rvk_fb m" + using valid by auto + +lemma reply_masters_rvk_fb_n [simp]: "reply_masters_rvk_fb n" + using reply_masters_rvk_fb_m + apply (simp add: reply_masters_rvk_fb_def n_def + ball_ran_modify_map_eq + modify_map_comp[symmetric]) + apply (subst ball_ran_modify_map_eq) + apply (frule bspec, rule ranI, rule slot) + apply (simp add: nullMDBNode_def isCap_simps modify_map_def + slot) + apply (subst ball_ran_modify_map_eq) + apply (clarsimp simp add: modify_map_def) + apply fastforce + apply (simp add: ball_ran_modify_map_eq) + done + +lemma vmdb_n: "valid_mdb_ctes n" + by (simp add: valid_mdb_ctes_def valid_dlist_n + no_0_n mdb_chain_0_n valid_badges_n + caps_contained_n mdb_chunked_n + untyped_mdb_n untyped_inc_n + nullcaps_n ut_rev_n class_links_n) + +end + +context begin interpretation Arch . +crunches postCapDeletion, clearUntypedFreeIndex + for ctes_of[wp]: "\s. P (ctes_of s)" + +lemma emptySlot_mdb [wp]: + "\valid_mdb'\ + emptySlot sl opt + \\_. 
valid_mdb'\" + unfolding emptySlot_def + apply (simp only: case_Null_If valid_mdb'_def) + apply (wp updateCap_ctes_of_wp getCTE_wp' + opt_return_pres_lift | simp add: cte_wp_at_ctes_of)+ + apply (clarsimp) + apply (case_tac cte) + apply (rename_tac cap node) + apply (simp) + apply (subgoal_tac "mdb_empty (ctes_of s) sl cap node") + prefer 2 + apply (rule mdb_empty.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro) + apply (simp add: valid_mdb_ctes_def) + apply (rule mdb_ptr_axioms.intro) + apply (simp add: cte_wp_at_ctes_of) + apply (rule conjI, clarsimp simp: valid_mdb_ctes_def) + apply (erule mdb_empty.vmdb_n[unfolded const_def]) + done +end + +lemma if_live_then_nonz_cap'_def2: + "if_live_then_nonz_cap' = + (\s. \ptr. ko_wp_at' live' ptr s \ + (\p zr. (option_map zobj_refs' o cteCaps_of s) p = Some zr \ ptr \ zr))" + by (fastforce simp: if_live_then_nonz_cap'_def ex_nonz_cap_to'_def cte_wp_at_ctes_of + cteCaps_of_def) + +lemma updateMDB_ko_wp_at_live[wp]: + "\\s. P (ko_wp_at' live' p' s)\ + updateMDB p m + \\rv s. P (ko_wp_at' live' p' s)\" + unfolding updateMDB_def Let_def + apply (rule hoare_pre, wp) + apply simp + done + +lemma updateCap_ko_wp_at_live[wp]: + "\\s. P (ko_wp_at' live' p' s)\ + updateCap p cap + \\rv s. P (ko_wp_at' live' p' s)\" + unfolding updateCap_def + by wp + +primrec + threadCapRefs :: "capability \ machine_word set" +where + "threadCapRefs (ThreadCap r) = {r}" +| "threadCapRefs (ReplyCap t m x) = {}" +| "threadCapRefs NullCap = {}" +| "threadCapRefs (UntypedCap d r n i) = {}" +| "threadCapRefs (EndpointCap r badge x y z t) = {}" +| "threadCapRefs (NotificationCap r badge x y) = {}" +| "threadCapRefs (CNodeCap r b g gsz) = {}" +| "threadCapRefs (Zombie r b n) = {}" +| "threadCapRefs (ArchObjectCap ac) = {}" +| "threadCapRefs (IRQHandlerCap irq) = {}" +| "threadCapRefs (IRQControlCap) = {}" +| "threadCapRefs (DomainCap) = {}" + +definition + "isFinal cap p m \ + \isUntypedCap cap \ + (\p' c. m p' = Some c \ + p \ p' \ \isUntypedCap c \ + \ sameObjectAs cap c)" + +lemma not_FinalE: + "\ \ isFinal cap sl cps; isUntypedCap cap \ P; + \p c. \ cps p = Some c; p \ sl; \ isUntypedCap c; sameObjectAs cap c \ \ P + \ \ P" + by (fastforce simp: isFinal_def) + +definition + "removeable' sl \ \s cap. + (\p. p \ sl \ cte_wp_at' (\cte. capMasterCap (cteCap cte) = capMasterCap cap) p s) + \ ((\p \ cte_refs' cap (irq_node' s). p \ sl \ cte_wp_at' (\cte. cteCap cte = NullCap) p s) + \ (\p \ zobj_refs' cap. ko_wp_at' (Not \ live') p s) + \ (\t \ threadCapRefs cap. \p. t \ set (ksReadyQueues s p)))" + +lemma not_Final_removeable: + "\ isFinal cap sl (cteCaps_of s) + \ removeable' sl s cap" + apply (erule not_FinalE) + apply (clarsimp simp: removeable'_def isCap_simps) + apply (clarsimp simp: cteCaps_of_def sameObjectAs_def2 removeable'_def + cte_wp_at_ctes_of) + apply fastforce + done + +context begin interpretation Arch . +crunch ko_wp_at'[wp]: postCapDeletion "\s. P (ko_wp_at' P' p s)" +crunch cteCaps_of[wp]: postCapDeletion "\s. P (cteCaps_of s)" + (simp: cteCaps_of_def o_def) +end + +crunch ko_at_live[wp]: clearUntypedFreeIndex "\s. P (ko_wp_at' live' ptr s)" + +lemma clearUntypedFreeIndex_cteCaps_of[wp]: + "\\s. P (cteCaps_of s)\ + clearUntypedFreeIndex sl \\y s. P (cteCaps_of s)\" + by (simp add: cteCaps_of_def, wp) + +lemma emptySlot_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s \ cte_wp_at' (\cte. removeable' sl s (cteCap cte)) sl s\ + emptySlot sl opt + \\rv. 
if_live_then_nonz_cap'\" + apply (simp add: emptySlot_def case_Null_If if_live_then_nonz_cap'_def2 + del: comp_apply) + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + getCTE_wp opt_return_pres_lift + clearUntypedFreeIndex_ctes_of + clearUntypedFreeIndex_cteCaps_of + hoare_vcg_ex_lift + | wp (once) hoare_vcg_imp_lift + | simp add: cte_wp_at_ctes_of del: comp_apply)+ + apply (clarsimp simp: modify_map_same imp_conjR[symmetric]) + apply (drule spec, drule(1) mp) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def split: if_split_asm) + apply (case_tac "p \ sl") + apply blast + apply (simp add: removeable'_def cteCaps_of_def) + apply (erule disjE) + apply (clarsimp simp: cte_wp_at_ctes_of modify_map_def + dest!: capMaster_same_refs) + apply fastforce + apply clarsimp + apply (drule(1) bspec) + apply (clarsimp simp: ko_wp_at'_def) + done + +lemma setIRQState_irq_node'[wp]: + "\\s. P (irq_node' s)\ setIRQState state irq \\_ s. P (irq_node' s)\" + apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) + apply wp + apply simp + done + +context begin interpretation Arch . +crunch irq_node'[wp]: emptySlot "\s. P (irq_node' s)" +end + +lemma emptySlot_ifunsafe'[wp]: + "\\s. if_unsafe_then_cap' s \ cte_wp_at' (\cte. removeable' sl s (cteCap cte)) sl s\ + emptySlot sl opt + \\rv. if_unsafe_then_cap'\" + apply (simp add: ifunsafe'_def3) + apply (rule hoare_pre, rule hoare_use_eq_irq_node'[OF emptySlot_irq_node']) + apply (simp add: emptySlot_def case_Null_If) + apply (wp opt_return_pres_lift | simp add: o_def)+ + apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of)+ + apply (clarsimp simp: tree_cte_cteCap_eq[unfolded o_def] + modify_map_same + modify_map_comp[symmetric] + split: option.split_asm if_split_asm + dest!: modify_map_K_D) + apply (clarsimp simp: modify_map_def) + apply (drule_tac x=cref in spec, clarsimp) + apply (case_tac "cref' \ sl") + apply (rule_tac x=cref' in exI) + apply (clarsimp simp: modify_map_def) + apply (simp add: removeable'_def) + apply (erule disjE) + apply (clarsimp simp: modify_map_def) + apply (subst(asm) tree_cte_cteCap_eq[unfolded o_def]) + apply (clarsimp split: option.split_asm dest!: capMaster_same_refs) + apply fastforce + apply clarsimp + apply (drule(1) bspec) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + done + +lemmas ctes_of_valid'[elim] = ctes_of_valid_cap''[where r=cte for cte] + +crunch valid_idle'[wp]: setInterruptState "valid_idle'" + (simp: valid_idle'_def) + +context begin interpretation Arch . +crunch valid_idle'[wp]: emptySlot "valid_idle'" +crunches deletedIRQHandler, getSlotCap, clearUntypedFreeIndex, updateMDB, getCTE, updateCap + for ksArch[wp]: "\s. P (ksArchState s)" +crunch ksIdle[wp]: emptySlot "\s. P (ksIdleThread s)" +crunch gsMaxObjectSize[wp]: emptySlot "\s. P (gsMaxObjectSize s)" +end + +lemma emptySlot_cteCaps_of: + "\\s. P ((cteCaps_of s)(p \ NullCap))\ + emptySlot p opt + \\rv s. P (cteCaps_of s)\" + apply (simp add: emptySlot_def case_Null_If) + apply (wp opt_return_pres_lift getCTE_cteCap_wp + clearUntypedFreeIndex_cteCaps_of) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of) + apply (auto elim!: rsubst[where P=P] + simp: modify_map_def fun_upd_def[symmetric] o_def + fun_upd_idem cteCaps_of_def + split: option.splits) + done + +context begin interpretation Arch . + +crunch cteCaps_of[wp]: deletedIRQHandler "\s. P (cteCaps_of s)" + +lemma deletedIRQHandler_valid_global_refs[wp]: + "\valid_global_refs'\ deletedIRQHandler irq \\rv. 
valid_global_refs'\" + apply (clarsimp simp: valid_global_refs'_def global_refs'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF deletedIRQHandler_irq_node']) + apply (rule hoare_use_eq [where f=ksIdleThread, OF deletedIRQHandler_ksIdle]) + apply (rule hoare_use_eq [where f=ksArchState, OF deletedIRQHandler_ksArch]) + apply (rule hoare_use_eq[where f="gsMaxObjectSize"], wp) + apply (simp add: valid_refs'_cteCaps valid_cap_sizes_cteCaps) + apply (rule deletedIRQHandler_cteCaps_of) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: valid_refs'_cteCaps valid_cap_sizes_cteCaps ball_ran_eq) + done + +lemma clearUntypedFreeIndex_valid_global_refs[wp]: + "\valid_global_refs'\ clearUntypedFreeIndex irq \\rv. valid_global_refs'\" + apply (clarsimp simp: valid_global_refs'_def global_refs'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF clearUntypedFreeIndex_irq_node']) + apply (rule hoare_use_eq [where f=ksIdleThread, OF clearUntypedFreeIndex_ksIdle]) + apply (rule hoare_use_eq [where f=ksArchState, OF clearUntypedFreeIndex_ksArch]) + apply (rule hoare_use_eq[where f="gsMaxObjectSize"], wp) + apply (simp add: valid_refs'_cteCaps valid_cap_sizes_cteCaps) + apply (rule clearUntypedFreeIndex_cteCaps_of) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: valid_refs'_cteCaps valid_cap_sizes_cteCaps ball_ran_eq) + done + +crunch valid_global_refs[wp]: global.postCapDeletion "valid_global_refs'" + +lemma emptySlot_valid_global_refs[wp]: + "\valid_global_refs' and cte_at' sl\ emptySlot sl opt \\rv. valid_global_refs'\" + apply (clarsimp simp: emptySlot_def) + apply (wpsimp wp: getCTE_wp hoare_drop_imps hoare_vcg_ex_lift simp: cte_wp_at_ctes_of) + apply (clarsimp simp: valid_global_refs'_def global_refs'_def) + apply (frule(1) cte_at_valid_cap_sizes_0) + apply (clarsimp simp: valid_refs'_cteCaps valid_cap_sizes_cteCaps ball_ran_eq) + done +end + +lemmas doMachineOp_irq_handlers[wp] + = valid_irq_handlers_lift'' [OF doMachineOp_ctes doMachineOp_ksInterruptState] + +lemma deletedIRQHandler_irq_handlers'[wp]: + "\\s. valid_irq_handlers' s \ (IRQHandlerCap irq \ ran (cteCaps_of s))\ + deletedIRQHandler irq + \\rv. valid_irq_handlers'\" + apply (simp add: deletedIRQHandler_def setIRQState_def setInterruptState_def getInterruptState_def) + apply wp + apply (clarsimp simp: valid_irq_handlers'_def irq_issued'_def ran_def cteCaps_of_def) + done + +context begin interpretation Arch . + +lemma postCapDeletion_irq_handlers'[wp]: + "\\s. valid_irq_handlers' s \ (cap \ NullCap \ cap \ ran (cteCaps_of s))\ + postCapDeletion cap + \\rv. valid_irq_handlers'\" + by (wpsimp simp: Retype_H.postCapDeletion_def AARCH64_H.postCapDeletion_def) + +definition + "post_cap_delete_pre' cap sl cs \ case cap of + IRQHandlerCap irq \ irq \ maxIRQ \ (\sl'. sl \ sl' \ cs sl' \ Some cap) + | _ \ False" + +end + +crunch ksInterruptState[wp]: clearUntypedFreeIndex "\s. P (ksInterruptState s)" + +lemma emptySlot_valid_irq_handlers'[wp]: + "\\s. valid_irq_handlers' s + \ (\sl'. info \ NullCap \ sl' \ sl \ cteCaps_of s sl' \ Some info)\ + emptySlot sl info + \\rv. 
valid_irq_handlers'\" + apply (simp add: emptySlot_def case_Null_If) + apply (wp | wpc)+ + apply (unfold valid_irq_handlers'_def irq_issued'_def) + apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of + | wps clearUntypedFreeIndex_ksInterruptState)+ + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of ran_def modify_map_def + split: option.split) + apply auto + done + +declare setIRQState_irq_states' [wp] + +context begin interpretation Arch . +crunch irq_states' [wp]: emptySlot valid_irq_states' + +crunch no_0_obj' [wp]: emptySlot no_0_obj' + (wp: crunch_wps) + +crunch valid_queues'[wp]: setInterruptState "valid_queues'" + (simp: valid_queues'_def) + +crunch valid_queues'[wp]: emptySlot "valid_queues'" + +end + +lemma deletedIRQHandler_irqs_masked'[wp]: + "\irqs_masked'\ deletedIRQHandler irq \\_. irqs_masked'\" + apply (simp add: deletedIRQHandler_def setIRQState_def getInterruptState_def setInterruptState_def) + apply (wp dmo_maskInterrupt) + apply (simp add: irqs_masked'_def) + done + +context begin interpretation Arch . (*FIXME: arch_split*) +crunch irqs_masked'[wp]: emptySlot "irqs_masked'" + +lemma setIRQState_umm: + "\\s. P (underlying_memory (ksMachineState s))\ + setIRQState irqState irq + \\_ s. P (underlying_memory (ksMachineState s))\" + by (simp add: setIRQState_def maskInterrupt_def + setInterruptState_def getInterruptState_def + | wp dmo_lift')+ + +crunch umm[wp]: emptySlot "\s. P (underlying_memory (ksMachineState s))" + (wp: setIRQState_umm) + +lemma emptySlot_vms'[wp]: + "\valid_machine_state'\ emptySlot slot irq \\_. valid_machine_state'\" + by (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + +crunch pspace_domain_valid[wp]: emptySlot "pspace_domain_valid" + +crunch ksDomSchedule[wp]: emptySlot "\s. P (ksDomSchedule s)" +crunch ksDomScheduleIdx[wp]: emptySlot "\s. P (ksDomScheduleIdx s)" + +lemma deletedIRQHandler_ct_not_inQ[wp]: + "\ct_not_inQ\ deletedIRQHandler irq \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF deletedIRQHandler_nosch]) + apply (rule hoare_weaken_pre) + apply (wps deletedIRQHandler_ct) + apply (simp add: deletedIRQHandler_def setIRQState_def) + apply (wp) + apply (simp add: comp_def) + done + +crunch ct_not_inQ[wp]: emptySlot "ct_not_inQ" + +crunch tcbDomain[wp]: emptySlot "obj_at' (\tcb. P (tcbDomain tcb)) t" + +lemma emptySlot_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ emptySlot sl opt \\_. ct_idle_or_in_cur_domain'\" + by (wp ct_idle_or_in_cur_domain'_lift2 tcb_in_cur_domain'_lift | simp)+ + +crunch gsUntypedZeroRanges[wp]: postCapDeletion "\s. P (gsUntypedZeroRanges s)" + (wp: crunch_wps simp: crunch_simps) + +lemma untypedZeroRange_modify_map_isUntypedCap: + "m sl = Some v \ \ isUntypedCap v \ \ isUntypedCap (f v) + \ (untypedZeroRange \\<^sub>m modify_map m sl f) = (untypedZeroRange \\<^sub>m m)" + by (simp add: modify_map_def map_comp_def fun_eq_iff untypedZeroRange_def) + +lemma emptySlot_untyped_ranges[wp]: + "\untyped_ranges_zero' and valid_objs' and valid_mdb'\ + emptySlot sl opt \\rv. 
untyped_ranges_zero'\" + apply (simp add: emptySlot_def case_Null_If) + apply (rule hoare_pre) + apply (rule hoare_seq_ext) + apply (rule untyped_ranges_zero_lift) + apply (wp getCTE_cteCap_wp clearUntypedFreeIndex_cteCaps_of + | wpc | simp add: clearUntypedFreeIndex_def updateTrackedFreeIndex_def + getSlotCap_def + split: option.split)+ + apply (clarsimp simp: modify_map_comp[symmetric] modify_map_same) + apply (case_tac "\ isUntypedCap (the (cteCaps_of s sl))") + apply (case_tac "the (cteCaps_of s sl)", + simp_all add: untyped_ranges_zero_inv_def + untypedZeroRange_modify_map_isUntypedCap isCap_simps)[1] + apply (clarsimp simp: isCap_simps untypedZeroRange_def modify_map_def) + apply (strengthen untyped_ranges_zero_fun_upd[mk_strg I E]) + apply simp + apply (simp add: untypedZeroRange_def isCap_simps) + done + +crunch valid_arch'[wp]: emptySlot valid_arch_state' + (wp: crunch_wps) + +crunches deletedIRQHandler, updateMDB, updateCap, clearUntypedFreeIndex + for valid_arch'[wp]: valid_arch_state' + (wp: valid_arch_state_lift' crunch_wps) + +crunches global.postCapDeletion + for valid_arch'[wp]: valid_arch_state' + +lemma emptySlot_invs'[wp]: + "\\s. invs' s \ cte_wp_at' (\cte. removeable' sl s (cteCap cte)) sl s + \ (info \ NullCap \ post_cap_delete_pre' info sl (cteCaps_of s) )\ + emptySlot sl info + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift cur_tcb_lift) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: post_cap_delete_pre'_def cteCaps_of_def + split: capability.split_asm arch_capability.split_asm) + by auto + +lemma deletedIRQHandler_corres: + "corres dc \ \ + (deleted_irq_handler irq) + (deletedIRQHandler irq)" + apply (simp add: deleted_irq_handler_def deletedIRQHandler_def) + apply (rule setIRQState_corres) + apply (simp add: irq_state_relation_def) + done + +lemma arch_postCapDeletion_corres: + "acap_relation cap cap' \ corres dc \ \ (arch_post_cap_deletion cap) (AARCH64_H.postCapDeletion cap')" + by (clarsimp simp: arch_post_cap_deletion_def AARCH64_H.postCapDeletion_def) + +lemma postCapDeletion_corres: + "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" + apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) + +lemma set_cap_trans_state: + "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" + apply (cases p) + apply (clarsimp simp add: set_cap_def in_monad set_object_def get_object_def) + apply (case_tac y) + apply (auto simp add: in_monad set_object_def well_formed_cnode_n_def split: if_split_asm) + done + +lemma clearUntypedFreeIndex_noop_corres: + "corres dc \ (cte_at' (cte_map slot)) + (return ()) (clearUntypedFreeIndex (cte_map slot))" + apply (simp add: clearUntypedFreeIndex_def) + apply (rule corres_guard_imp) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_r_conj[where P'="cte_at' (cte_map slot)"]) + apply (rule corres_trivial, simp) + apply (wp getCTE_wp' | wpc + | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ + apply (clarsimp simp: state_relation_def) + apply (rule no_fail_pre) + apply (wp no_fail_getSlotCap getCTE_wp' + | wpc | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ + done + +lemma clearUntypedFreeIndex_valid_pspace'[wp]: + "\valid_pspace'\ clearUntypedFreeIndex slot \\rv. 
valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (rule hoare_pre) + apply (wp | simp add: valid_mdb'_def)+ + done + +lemma emptySlot_corres: + "cap_relation info info' \ corres dc (einvs and cte_at slot) (invs' and cte_at' (cte_map slot)) + (empty_slot slot info) (emptySlot (cte_map slot) info')" + unfolding emptySlot_def empty_slot_def + apply (simp add: case_Null_If) + apply (rule corres_guard_imp) + apply (rule corres_split_noop_rhs[OF clearUntypedFreeIndex_noop_corres]) + apply (rule_tac R="\cap. einvs and cte_wp_at ((=) cap) slot" and + R'="\cte. valid_pspace' and cte_wp_at' ((=) cte) (cte_map slot)" in + corres_split[OF get_cap_corres]) + defer + apply (wp get_cap_wp getCTE_wp')+ + apply (simp add: cte_wp_at_ctes_of) + apply (wp hoare_vcg_imp_lift' clearUntypedFreeIndex_valid_pspace') + apply fastforce + apply (fastforce simp: cte_wp_at_ctes_of) + apply simp + apply (rule conjI, clarsimp) + defer + apply clarsimp + apply (rule conjI, clarsimp) + apply clarsimp + apply (simp only: bind_assoc[symmetric]) + apply (rule corres_underlying_split[where r'=dc, OF _ postCapDeletion_corres]) + defer + apply wpsimp+ + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) + apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (rule conjI, clarsimp) + apply (erule (2) valid_dlistEp) + apply simp + apply clarsimp + apply (erule (2) valid_dlistEn) + apply simp + apply (clarsimp simp: in_monad bind_assoc exec_gets) + apply (subgoal_tac "mdb_empty_abs a") + prefer 2 + apply (rule mdb_empty_abs.intro) + apply (rule vmdb_abs.intro) + apply fastforce + apply (frule mdb_empty_abs'.intro) + apply (simp add: mdb_empty_abs'.empty_slot_ext_det_def2 update_cdt_list_def set_cdt_list_def exec_gets set_cdt_def bind_assoc exec_get exec_put set_original_def modify_def del: fun_upd_apply | subst bind_def, simp, simp add: mdb_empty_abs'.empty_slot_ext_det_def2)+ + apply (simp add: put_def) + apply (simp add: exec_gets exec_get exec_put del: fun_upd_apply | subst bind_def)+ + apply (clarsimp simp: state_relation_def) + apply (drule updateMDB_the_lot, fastforce simp: pspace_relations_def, fastforce, fastforce) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + valid_mdb'_def valid_mdb_ctes_def) + apply (elim conjE) + apply (drule (4) updateMDB_the_lot, elim conjE) + apply clarsimp + apply (drule_tac s'=s''a and c=cap.NullCap in set_cap_not_quite_corres) + subgoal by simp + subgoal by simp + subgoal by simp + subgoal by fastforce + subgoal by fastforce + subgoal by fastforce + subgoal by fastforce + subgoal by fastforce + apply fastforce + subgoal by fastforce + subgoal by fastforce + subgoal by fastforce + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption + subgoal by simp + subgoal by simp + subgoal by simp + subgoal by simp + apply (rule refl) + apply clarsimp + apply (drule updateCap_stuff, elim conjE, erule (1) impE) + apply clarsimp + apply (drule updateMDB_the_lot, force simp: pspace_relations_def, assumption+, simp) + apply (rule bexI) + prefer 2 + apply (simp only: trans_state_update[symmetric]) + apply (rule set_cap_trans_state) + apply (rule set_cap_revokable_update) + apply (erule set_cap_cdt_update) + apply clarsimp + apply (thin_tac "ctes_of t = s" for t s)+ + apply (thin_tac "ksMachineState t = p" for t p)+ + apply (thin_tac "ksCurThread t = p" for t p)+ + apply (thin_tac "ksReadyQueues t = p" for t p)+ + apply (thin_tac "ksSchedulerAction t = p" for t p)+ + 
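+  (* Editorial annotation, added; not part of the original patch. At this point the remaining
+     goal is the state relation between the abstract empty_slot result and the concrete
+     emptySlot update. The steps below instantiate the mdb_empty locale for the deleted slot,
+     so that its n_next, n_revokable and descendants lemmas can discharge the cdt, cdt_list
+     and revokable relation obligations that follow. *)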
apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac rv') + apply (rename_tac s_cap s_node) + apply (subgoal_tac "cte_at slot a") + prefer 2 + apply (fastforce elim: cte_wp_at_weakenE) + apply (subgoal_tac "mdb_empty (ctes_of b) (cte_map slot) s_cap s_node") + prefer 2 + apply (rule mdb_empty.intro) + apply (rule mdb_ptr.intro) + apply (rule vmdb.intro) + subgoal by (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def) + apply (rule mdb_ptr_axioms.intro) + subgoal by simp + apply (clarsimp simp: ghost_relation_typ_at set_cap_a_type_inv) + apply (simp add: pspace_relations_def) + apply (rule conjI) + apply (clarsimp simp: data_at_def ghost_relation_typ_at set_cap_a_type_inv) + apply (rule conjI) + prefer 2 + apply (rule conjI) + apply (clarsimp simp: cdt_list_relation_def) + apply(frule invs_valid_pspace, frule invs_mdb) + apply(subgoal_tac "no_mloop (cdt a) \ finite_depth (cdt a)") + prefer 2 + subgoal by(simp add: finite_depth valid_mdb_def) + apply(subgoal_tac "valid_mdb_ctes (ctes_of b)") + prefer 2 + subgoal by(simp add: mdb_empty_def mdb_ptr_def vmdb_def) + apply(clarsimp simp: valid_pspace_def) + + apply(case_tac "cdt a slot") + apply(simp add: next_slot_eq[OF mdb_empty_abs'.next_slot_no_parent]) + apply(case_tac "next_slot (aa, bb) (cdt_list a) (cdt a)") + subgoal by (simp) + apply(clarsimp) + apply(frule(1) mdb_empty.n_next) + apply(clarsimp) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(simp split: if_split_asm) + apply(drule cte_map_inj_eq) + apply(drule cte_at_next_slot) + apply(assumption)+ + apply(simp) + apply(subgoal_tac "(ab, bc) = slot") + prefer 2 + apply(drule_tac cte="CTE s_cap s_node" in valid_mdbD2') + subgoal by (clarsimp simp: valid_mdb_ctes_def no_0_def) + subgoal by (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply(clarsimp) + apply(rule cte_map_inj_eq) + apply(assumption) + apply(drule(3) cte_at_next_slot', assumption) + apply(assumption)+ + apply(simp) + apply(drule_tac p="(aa, bb)" in no_parent_not_next_slot) + apply(assumption)+ + apply(clarsimp) + + apply(simp add: next_slot_eq[OF mdb_empty_abs'.next_slot] split del: if_split) + apply(case_tac "next_slot (aa, bb) (cdt_list a) (cdt a)") + subgoal by (simp) + apply(case_tac "(aa, bb) = slot", simp) + apply(case_tac "next_slot (aa, bb) (cdt_list a) (cdt a) = Some slot") + apply(simp) + apply(case_tac "next_slot ac (cdt_list a) (cdt a)", simp) + apply(simp) + apply(frule(1) mdb_empty.n_next) + apply(clarsimp) + apply(erule_tac x=aa in allE', erule_tac x=bb in allE) + apply(erule_tac x=ac in allE, erule_tac x=bd in allE) + apply(clarsimp split: if_split_asm) + apply(drule(1) no_self_loop_next) + apply(simp) + apply(drule_tac cte="CTE cap' node'" in valid_mdbD1') + apply(fastforce simp: valid_mdb_ctes_def no_0_def) + subgoal by (simp add: valid_mdb'_def) + apply(clarsimp) + apply(simp) + apply(frule(1) mdb_empty.n_next) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(clarsimp split: if_split_asm) + apply(drule(1) no_self_loop_prev) + apply(clarsimp) + apply(drule_tac cte="CTE s_cap s_node" in valid_mdbD2') + apply(clarsimp simp: valid_mdb_ctes_def no_0_def) + apply clarify + apply(clarsimp) + apply(drule cte_map_inj_eq) + apply(drule(3) cte_at_next_slot') + apply(assumption)+ + apply(simp) + apply(erule disjE) + apply(drule cte_map_inj_eq) + apply(drule(3) cte_at_next_slot) + apply(assumption)+ + apply(simp) + subgoal by (simp) + apply (simp add: revokable_relation_def) + apply (clarsimp simp: in_set_cap_cte_at) + apply (rule conjI) + 
apply clarsimp + apply (drule(1) mdb_empty.n_revokable) + subgoal by clarsimp + apply clarsimp + apply (drule (1) mdb_empty.n_revokable) + apply (subgoal_tac "null_filter (caps_of_state a) (aa,bb) \ None") + prefer 2 + apply (drule set_cap_caps_of_state_monad) + subgoal by (force simp: null_filter_def) + apply clarsimp + apply (subgoal_tac "cte_at (aa, bb) a") + prefer 2 + apply (drule null_filter_caps_of_stateD, erule cte_wp_cte_at) + apply (drule (2) cte_map_inj_ps, fastforce) + subgoal by simp + apply (clarsimp simp add: cdt_relation_def) + apply (subst mdb_empty_abs.descendants, assumption) + apply (subst mdb_empty.descendants, assumption) + apply clarsimp + apply (frule_tac p="(aa, bb)" in in_set_cap_cte_at) + apply clarsimp + apply (frule (2) cte_map_inj_ps, fastforce) + apply simp + apply (case_tac "slot \ descendants_of (aa,bb) (cdt a)") + apply (subst inj_on_image_set_diff) + apply (rule inj_on_descendants_cte_map) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + apply fastforce + subgoal by simp + subgoal by simp + apply simp + apply (subgoal_tac "cte_map slot \ descendants_of' (cte_map (aa,bb)) (ctes_of b)") + subgoal by simp + apply (erule_tac x=aa in allE, erule allE, erule (1) impE) + apply (drule_tac s="cte_map ` u" for u in sym) + apply clarsimp + apply (drule cte_map_inj_eq, assumption) + apply (erule descendants_of_cte_at, fastforce) + apply fastforce + apply fastforce + apply fastforce + apply simp + done + + + +text \Some facts about is_final_cap/isFinalCapability\ + +lemma isFinalCapability_inv: + "\P\ isFinalCapability cap \\_. P\" + apply (simp add: isFinalCapability_def Let_def + split del: if_split cong: if_cong) + apply (rule hoare_pre, wp) + apply (rule hoare_post_imp [where Q="\s. P"], simp) + apply wp + apply simp + done + +definition + final_matters' :: "capability \ bool" +where + "final_matters' cap \ case cap of + EndpointCap ref bdg s r g gr \ True + | NotificationCap ref bdg s r \ True + | ThreadCap ref \ True + | CNodeCap ref bits gd gs \ True + | Zombie ptr zb n \ True + | IRQHandlerCap irq \ True + | ArchObjectCap acap \ (case acap of + FrameCap ref rghts sz d mapdata \ False + | ASIDControlCap \ False + | _ \ True) + | _ \ False" + +lemma final_matters_Master: + "final_matters' (capMasterCap cap) = final_matters' cap" + by (simp add: capMasterCap_def split: capability.split arch_capability.split, + simp add: final_matters'_def) + +lemma final_matters_sameRegion_sameObject: + "final_matters' cap \ sameRegionAs cap cap' = sameObjectAs cap cap'" + apply (rule iffI) + apply (erule sameRegionAsE) + apply (simp add: sameObjectAs_def3) + apply (clarsimp simp: isCap_simps sameObjectAs_sameRegionAs final_matters'_def + split:capability.splits arch_capability.splits)+ + done + +lemma final_matters_sameRegion_sameObject2: + "\ final_matters' cap'; \ isUntypedCap cap; \ isIRQHandlerCap cap'; \ isArchIOPortCap cap' \ + \ sameRegionAs cap cap' = sameObjectAs cap cap'" + apply (rule iffI) + apply (erule sameRegionAsE) + apply (simp add: sameObjectAs_def3) + apply (fastforce simp: isCap_simps final_matters'_def) + apply simp + apply (clarsimp simp: final_matters'_def isCap_simps) + apply (clarsimp simp: final_matters'_def isCap_simps) + apply (clarsimp simp: final_matters'_def isCap_simps) + apply (erule sameObjectAs_sameRegionAs) + done + +lemma notFinal_prev_or_next: + "\ \ isFinal cap x (cteCaps_of s); mdb_chunked (ctes_of s); + valid_dlist (ctes_of s); no_0 (ctes_of s); + ctes_of s x = Some (CTE cap node); final_matters' cap \ + \ (\cap' 
node'. ctes_of s (mdbPrev node) = Some (CTE cap' node') + \ sameObjectAs cap cap') + \ (\cap' node'. ctes_of s (mdbNext node) = Some (CTE cap' node') + \ sameObjectAs cap cap')" + apply (erule not_FinalE) + apply (clarsimp simp: isCap_simps final_matters'_def) + apply (clarsimp simp: mdb_chunked_def cte_wp_at_ctes_of cteCaps_of_def + del: disjCI) + apply (erule_tac x=x in allE, erule_tac x=p in allE) + apply simp + apply (case_tac z, simp add: sameObjectAs_sameRegionAs) + apply (elim conjE disjE, simp_all add: is_chunk_def) + apply (rule disjI2) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + apply (drule spec[where x="mdbNext node"]) + apply simp + apply (drule mp[where P="ctes_of s \ x \\<^sup>+ mdbNext node"]) + apply (rule trancl.intros(1), simp add: mdb_next_unfold) + apply clarsimp + apply (drule rtranclD) + apply (erule disjE, clarsimp+) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold final_matters_sameRegion_sameObject) + apply (rule disjI1) + apply clarsimp + apply (drule tranclD2) + apply clarsimp + apply (frule vdlist_nextD0) + apply clarsimp + apply assumption + apply (clarsimp simp: mdb_prev_def) + apply (drule rtranclD) + apply (erule disjE, clarsimp+) + apply (drule spec, drule(1) mp) + apply (drule mp, rule trancl_into_rtrancl, erule trancl.intros(1)) + apply clarsimp + apply (drule iffD1 [OF final_matters_sameRegion_sameObject, rotated]) + apply (subst final_matters_Master[symmetric]) + apply (subst(asm) final_matters_Master[symmetric]) + apply (clarsimp simp: sameObjectAs_def3) + apply (clarsimp simp: sameObjectAs_def3) + done + +lemma isFinal: + "\\s. valid_mdb' s \ cte_wp_at' ((=) cte) x s + \ final_matters' (cteCap cte) + \ Q (isFinal (cteCap cte) x (cteCaps_of s)) s\ + isFinalCapability cte + \Q\" + unfolding isFinalCapability_def + apply (cases cte) + apply (rename_tac cap node) + apply (unfold Let_def) + apply (simp only: if_False) + apply (wp getCTE_wp') + apply (cases "mdbPrev (cteMDBNode cte) = nullPointer") + apply simp + apply (clarsimp simp: valid_mdb_ctes_def valid_mdb'_def + cte_wp_at_ctes_of) + apply (rule conjI, clarsimp simp: nullPointer_def) + apply (erule rsubst[where P="\x. Q x s" for s], simp) + apply (rule classical) + apply (drule(5) notFinal_prev_or_next) + apply clarsimp + apply (clarsimp simp: nullPointer_def) + apply (erule rsubst[where P="\x. Q x s" for s]) + apply (rule sym, rule iffI) + apply (rule classical) + apply (drule(5) notFinal_prev_or_next) + apply clarsimp + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (case_tac cte) + apply clarsimp + apply (clarsimp simp add: isFinal_def) + apply (erule_tac x="mdbNext node" in allE) + apply simp + apply (erule impE) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (drule (1) mdb_chain_0_no_loops) + apply simp + apply (clarsimp simp: sameObjectAs_def3 isCap_simps) + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of + valid_mdb_ctes_def valid_mdb'_def) + apply (case_tac cte) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule rsubst[where P="\x. 
Q x s" for s]) + apply clarsimp + apply (clarsimp simp: isFinal_def cteCaps_of_def) + apply (erule_tac x="mdbPrev node" in allE) + apply simp + apply (erule impE) + apply clarsimp + apply (drule (1) mdb_chain_0_no_loops) + apply (subgoal_tac "ctes_of s (mdbNext node) = Some (CTE cap node)") + apply clarsimp + apply (erule (1) valid_dlistEp) + apply clarsimp + apply (case_tac cte') + apply clarsimp + apply (clarsimp simp add: sameObjectAs_def3 isCap_simps) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule rsubst[where P="\x. Q x s" for s], simp) + apply (rule classical, drule(5) notFinal_prev_or_next) + apply (clarsimp simp: sameObjectAs_sym nullPointer_def) + apply (clarsimp simp: nullPointer_def) + apply (erule rsubst[where P="\x. Q x s" for s]) + apply (rule sym, rule iffI) + apply (rule classical, drule(5) notFinal_prev_or_next) + apply (clarsimp simp: sameObjectAs_sym) + apply auto[1] + apply (clarsimp simp: isFinal_def cteCaps_of_def) + apply (case_tac cte) + apply (erule_tac x="mdbNext node" in allE) + apply simp + apply (erule impE) + apply clarsimp + apply (drule (1) mdb_chain_0_no_loops) + apply simp + apply clarsimp + apply (clarsimp simp: isCap_simps sameObjectAs_def3) + done +end + +lemma (in vmdb) isFinal_no_subtree: + "\ m \ sl \ p; isFinal cap sl (option_map cteCap o m); + m sl = Some (CTE cap n); final_matters' cap \ \ False" + apply (erule subtree.induct) + apply (case_tac "c'=sl", simp) + apply (clarsimp simp: isFinal_def parentOf_def mdb_next_unfold cteCaps_of_def) + apply (erule_tac x="mdbNext n" in allE) + apply simp + apply (clarsimp simp: isMDBParentOf_CTE final_matters_sameRegion_sameObject) + apply (clarsimp simp: isCap_simps sameObjectAs_def3) + apply clarsimp + done + +lemma isFinal_no_descendants: + "\ isFinal cap sl (cteCaps_of s); ctes_of s sl = Some (CTE cap n); + valid_mdb' s; final_matters' cap \ + \ descendants_of' sl (ctes_of s) = {}" + apply (clarsimp simp add: descendants_of'_def cteCaps_of_def) + apply (erule(3) vmdb.isFinal_no_subtree[rotated]) + apply unfold_locales[1] + apply (simp add: valid_mdb'_def) + done + +lemma (in vmdb) isFinal_untypedParent: + assumes x: "m slot = Some cte" "isFinal (cteCap cte) slot (option_map cteCap o m)" + "final_matters' (cteCap cte) \ \ isIRQHandlerCap (cteCap cte)" + shows + "m \ x \ slot \ + (\cte'. m x = Some cte' \ isUntypedCap (cteCap cte') \ RetypeDecls_H.sameRegionAs (cteCap cte') (cteCap cte))" + apply (cases "x=slot", simp) + apply (insert x) + apply (frule subtree_mdb_next) + apply (drule subtree_parent) + apply (drule tranclD) + apply clarsimp + apply (clarsimp simp: mdb_next_unfold parentOf_def isFinal_def) + apply (case_tac cte') + apply (rename_tac c' n') + apply (cases cte) + apply (rename_tac c n) + apply simp + apply (erule_tac x=x in allE) + apply clarsimp + apply (drule isMDBParent_sameRegion) + apply simp + apply (rule classical, simp) + apply (simp add: final_matters_sameRegion_sameObject2 + sameObjectAs_sym) + done + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma no_fail_isFinalCapability [wp]: + "no_fail (valid_mdb' and cte_wp_at' ((=) cte) p) (isFinalCapability cte)" + apply (simp add: isFinalCapability_def) + apply (clarsimp simp: Let_def split del: if_split) + apply (rule no_fail_pre, wp getCTE_wp') + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def cte_wp_at_ctes_of nullPointer_def) + apply (rule conjI) + apply clarsimp + apply (erule (2) valid_dlistEp) + apply simp + apply clarsimp + apply (rule conjI) + apply (erule (2) valid_dlistEn) + apply simp + apply clarsimp + apply (rule valid_dlistEn, assumption+) + apply (erule (2) valid_dlistEp) + apply simp + done + +lemma corres_gets_lift: + assumes inv: "\P. \P\ g \\_. P\" + assumes res: "\Q'\ g \\r s. r = g' s\" + assumes Q: "\s. Q s \ Q' s" + assumes nf: "no_fail Q g" + shows "corres r P Q f (gets g') \ corres r P Q f g" + apply (clarsimp simp add: corres_underlying_def simpler_gets_def) + apply (drule (1) bspec) + apply (rule conjI) + apply clarsimp + apply (rule bexI) + prefer 2 + apply assumption + apply simp + apply (frule in_inv_by_hoareD [OF inv]) + apply simp + apply (drule use_valid, rule res) + apply (erule Q) + apply simp + apply (insert nf) + apply (clarsimp simp: no_fail_def) + done + +lemma obj_refs_Master: + "\ cap_relation cap cap'; P cap \ + \ obj_refs cap = + (if capClass (capMasterCap cap') = PhysicalClass + \ \ isUntypedCap (capMasterCap cap') + then {capUntypedPtr (capMasterCap cap')} else {})" + by (clarsimp simp: isCap_simps + split: cap_relation_split_asm arch_cap.split_asm) + +lemma isFinalCapability_corres': + "final_matters' (cteCap cte) \ + corres (=) (invs and cte_wp_at ((=) cap) ptr) + (invs' and cte_wp_at' ((=) cte) (cte_map ptr)) + (is_final_cap cap) (isFinalCapability cte)" + apply (rule corres_gets_lift) + apply (rule isFinalCapability_inv) + apply (rule isFinal[where x="cte_map ptr"]) + apply clarsimp + apply (rule conjI, clarsimp) + apply (rule refl) + apply (rule no_fail_pre, wp, fastforce) + apply (simp add: is_final_cap_def) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def state_relation_def) + apply (frule (1) pspace_relation_ctes_ofI) + apply fastforce + apply fastforce + apply clarsimp + apply (rule iffI) + apply (simp add: is_final_cap'_def2 isFinal_def) + apply clarsimp + apply (subgoal_tac "obj_refs cap \ {} \ cap_irqs cap \ {} \ arch_gen_refs cap \ {}") + prefer 2 + apply (erule_tac x=a in allE) + apply (erule_tac x=b in allE) + apply (clarsimp simp: cte_wp_at_def gen_obj_refs_Int) + apply (subgoal_tac "ptr = (a,b)") + prefer 2 + apply (erule_tac x="fst ptr" in allE) + apply (erule_tac x="snd ptr" in allE) + apply (clarsimp simp: cte_wp_at_def gen_obj_refs_Int) + apply clarsimp + apply (rule context_conjI) + apply (clarsimp simp: isCap_simps) + apply (cases cap, auto)[1] + apply clarsimp + apply (drule_tac x=p' in pspace_relation_cte_wp_atI, assumption) + apply fastforce + apply clarsimp + apply (erule_tac x=aa in allE) + apply (erule_tac x=ba in allE) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (clarsimp simp: sameObjectAs_def3 obj_refs_Master cap_irqs_relation_Master + arch_gen_refs_relation_Master gen_obj_refs_Int + cong: if_cong + split: capability.split_asm) + apply (clarsimp simp: isFinal_def is_final_cap'_def3) + apply (rule_tac x="fst ptr" in exI) + apply (rule_tac x="snd ptr" in exI) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_def final_matters'_def + gen_obj_refs_Int + split: cap_relation_split_asm arch_cap.split_asm) + apply clarsimp + apply (drule_tac p="(a,b)" in 
cte_wp_at_norm) + apply clarsimp + apply (frule_tac slot="(a,b)" in pspace_relation_ctes_ofI, assumption) + apply fastforce + apply fastforce + apply clarsimp + apply (frule_tac p="(a,b)" in cte_wp_valid_cap, fastforce) + apply (erule_tac x="cte_map (a,b)" in allE) + apply simp + apply (erule impCE, simp, drule cte_map_inj_eq) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply fastforce + apply fastforce + apply (erule invs_distinct) + apply simp + apply (frule_tac p=ptr in cte_wp_valid_cap, fastforce) + apply (clarsimp simp: cte_wp_at_def gen_obj_refs_Int) + apply (rule conjI) + apply (rule classical) + apply (frule(1) zombies_finalD2[OF _ _ _ invs_zombies], + simp?, clarsimp, assumption+) + subgoal by (clarsimp simp: sameObjectAs_def3 isCap_simps valid_cap_def valid_arch_cap_def + valid_arch_cap_ref_def obj_at_def is_obj_defs a_type_def + final_matters'_def + split: cap.split_asm arch_cap.split_asm option.split_asm if_split_asm, + simp_all add: is_cap_defs) + apply (rule classical) + apply (clarsimp simp: cap_irqs_def cap_irq_opt_def sameObjectAs_def3 isCap_simps + acap_relation_def + split: cap.split_asm arch_cap.split_asm) + done + +lemma isFinalCapability_corres: + "corres (\rv rv'. final_matters' (cteCap cte) \ rv = rv') + (invs and cte_wp_at ((=) cap) ptr) + (invs' and cte_wp_at' ((=) cte) (cte_map ptr)) + (is_final_cap cap) (isFinalCapability cte)" + apply (cases "final_matters' (cteCap cte)") + apply simp + apply (erule isFinalCapability_corres') + apply (subst bind_return[symmetric], + rule corres_symb_exec_r) + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp: in_monad is_final_cap_def simpler_gets_def) + apply (wp isFinalCapability_inv)+ + apply fastforce + done + +text \Facts about finalise_cap/finaliseCap and + cap_delete_one/cteDelete in no particular order\ + + +definition + finaliseCapTrue_standin_simple_def: + "finaliseCapTrue_standin cap fin \ finaliseCap cap fin True" + +context +begin + +declare if_cong [cong] + +lemmas finaliseCapTrue_standin_def + = finaliseCapTrue_standin_simple_def + [unfolded finaliseCap_def, simplified] + +lemmas cteDeleteOne_def' + = eq_reflection [OF cteDeleteOne_def] +lemmas cteDeleteOne_def + = cteDeleteOne_def'[folded finaliseCapTrue_standin_simple_def] + +crunches cteDeleteOne, suspend, prepareThreadDelete + for typ_at'[wp]: "\s. P (typ_at' T p s)" + (wp: crunch_wps getObject_inv loadObject_default_inv + simp: crunch_simps unless_def o_def + ignore_del: setObject) + +end + +lemmas cancelAllIPC_typs[wp] = typ_at_lifts [OF cancelAllIPC_typ_at'] +lemmas cancelAllSignals_typs[wp] = typ_at_lifts [OF cancelAllSignals_typ_at'] +lemmas suspend_typs[wp] = typ_at_lifts [OF suspend_typ_at'] + +definition + cap_has_cleanup' :: "capability \ bool" +where + "cap_has_cleanup' cap \ case cap of + IRQHandlerCap _ \ True + | ArchObjectCap acap \ False + | _ \ False" + +lemmas cap_has_cleanup'_simps[simp] = cap_has_cleanup'_def[split_simps capability.split] + +lemma finaliseCap_cases[wp]: + "\\\ + finaliseCap cap final flag + \\rv s. 
fst rv = NullCap \ (snd rv \ NullCap \ final \ cap_has_cleanup' cap \ snd rv = cap) + \ + isZombie (fst rv) \ final \ \ flag \ snd rv = NullCap + \ capUntypedPtr (fst rv) = capUntypedPtr cap + \ (isThreadCap cap \ isCNodeCap cap \ isZombie cap)\" + apply (simp add: finaliseCap_def AARCH64_H.finaliseCap_def Let_def + getThreadCSpaceRoot + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply ((wp | simp add: isCap_simps split del: if_split + | wpc + | simp only: valid_NullCap fst_conv snd_conv)+)[1] + apply (simp only: simp_thms fst_conv snd_conv option.simps if_cancel + o_def) + apply (intro allI impI conjI TrueI) + apply (auto simp add: isCap_simps cap_has_cleanup'_def) + done + +crunch aligned'[wp]: finaliseCap "pspace_aligned'" + (simp: crunch_simps assertE_def unless_def o_def + wp: getObject_inv loadObject_default_inv crunch_wps) + +crunch distinct'[wp]: finaliseCap "pspace_distinct'" + (simp: crunch_simps assertE_def unless_def o_def + wp: getObject_inv loadObject_default_inv crunch_wps) + +crunch typ_at'[wp]: finaliseCap "\s. P (typ_at' T p s)" + (simp: crunch_simps assertE_def + wp: getObject_inv loadObject_default_inv crunch_wps) +lemmas finaliseCap_typ_ats[wp] = typ_at_lifts[OF finaliseCap_typ_at'] + +lemma unmapPageTable_it'[wp]: + "unmapPageTable asid vaddr pt \\s. P (ksIdleThread s)\" + unfolding unmapPageTable_def by wpsimp + +crunch it'[wp]: finaliseCap "\s. P (ksIdleThread s)" + (wp: mapM_x_wp_inv mapM_wp' hoare_drop_imps getObject_inv loadObject_default_inv + simp: crunch_simps updateObject_default_def o_def) + +lemma ntfn_q_refs_of'_mult: + "ntfn_q_refs_of' ntfn = (case ntfn of Structures_H.WaitingNtfn q \ set q | _ \ {}) \ {NTFNSignal}" + by (cases ntfn, simp_all) + +lemma tcb_st_not_Bound: + "(p, NTFNBound) \ tcb_st_refs_of' ts" + "(p, TCBBound) \ tcb_st_refs_of' ts" + by (auto simp: tcb_st_refs_of'_def split: Structures_H.thread_state.split) + +lemma unbindNotification_invs[wp]: + "\invs'\ unbindNotification tcb \\rv. 
invs'\" + apply (simp add: unbindNotification_def invs'_def valid_state'_def) + apply (rule hoare_seq_ext[OF _ gbn_sp']) + apply (case_tac ntfnPtr, clarsimp, wp, clarsimp) + apply clarsimp + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift + irqs_masked_lift setBoundNotification_ct_not_inQ + untyped_ranges_zero_lift | clarsimp simp: cteCaps_of_def o_def)+ + apply (rule conjI) + apply (clarsimp elim!: obj_atE' + dest!: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at' conj_comms) + apply (frule bound_tcb_ex_cap'', clarsimp+) + apply (frule(1) sym_refs_bound_tcb_atD') + apply (frule(1) sym_refs_obj_atD') + apply (clarsimp simp: refs_of_rev') + apply normalise_obj_at' + apply (subst delta_sym_refs, assumption) + apply (auto split: if_split_asm)[1] + apply (auto simp: tcb_st_not_Bound ntfn_q_refs_of'_mult split: if_split_asm)[1] + apply (frule obj_at_valid_objs', clarsimp+) + apply (simp add: valid_ntfn'_def valid_obj'_def live'_def + split: ntfn.splits) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: obj_at'_def ko_wp_at'_def live'_def) + done + +lemma ntfn_bound_tcb_at': + "\sym_refs (state_refs_of' s); valid_objs' s; ko_at' ntfn ntfnptr s; + ntfnBoundTCB ntfn = Some tcbptr; P (Some ntfnptr)\ + \ bound_tcb_at' P tcbptr s" + apply (drule_tac x=ntfnptr in sym_refsD[rotated]) + apply (clarsimp simp: obj_at'_def) + apply (fastforce simp: state_refs_of'_def) + apply (auto simp: pred_tcb_at'_def obj_at'_def valid_obj'_def valid_ntfn'_def + state_refs_of'_def refs_of_rev' + simp del: refs_of_simps + split: option.splits if_split_asm) + done + + +lemma unbindMaybeNotification_invs[wp]: + "\invs'\ unbindMaybeNotification ntfnptr \\rv. invs'\" + apply (simp add: unbindMaybeNotification_def invs'_def valid_state'_def) + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp sbn'_valid_pspace'_inv sbn_sch_act' sbn_valid_queues valid_irq_node_lift + irqs_masked_lift setBoundNotification_ct_not_inQ + untyped_ranges_zero_lift + | wpc | clarsimp simp: cteCaps_of_def o_def)+ + apply safe[1] + defer 3 + defer 7 + apply (fold_subgoals (prefix))[8] + subgoal premises prems using prems + by (auto simp: pred_tcb_at' valid_pspace'_def valid_obj'_def valid_ntfn'_def + ko_wp_at'_def live'_def + elim!: obj_atE' valid_objsE' if_live_then_nonz_capE' + split: option.splits ntfn.splits) + apply (rule delta_sym_refs, assumption) + apply (fold_subgoals (prefix))[2] + subgoal premises prems using prems by (fastforce simp: symreftype_inverse' ntfn_q_refs_of'_def + split: ntfn.splits if_split_asm + dest!: ko_at_state_refs_ofD')+ + apply (rule delta_sym_refs, assumption) + apply (clarsimp split: if_split_asm) + apply (frule ko_at_state_refs_ofD', simp) + apply (clarsimp split: if_split_asm) + apply (frule_tac P="(=) (Some ntfnptr)" in ntfn_bound_tcb_at', simp_all add: valid_pspace'_def)[1] + subgoal by (fastforce simp: ntfn_q_refs_of'_def state_refs_of'_def tcb_ntfn_is_bound'_def + tcb_st_refs_of'_def + dest!: bound_tcb_at_state_refs_ofD' + split: ntfn.splits thread_state.splits) + apply (frule ko_at_state_refs_ofD', simp) + done + +(* Ugh, required to be able to split out the abstract invs *) +lemma finaliseCap_True_invs[wp]: + "\invs'\ finaliseCap cap final True \\rv. 
invs'\" + apply (simp add: finaliseCap_def Let_def) + apply safe + apply (wp irqs_masked_lift| simp | wpc)+ + done + +lemma invalidateASIDEntry_invs'[wp]: + "invalidateASIDEntry asid \invs'\" + unfolding invalidateASIDEntry_def + by wpsimp + +lemma invs_asid_update_strg': + "invs' s \ tab = armKSASIDTable (ksArchState s) \ + invs' (s\ksArchState := armKSASIDTable_update + (\_. tab (asid := None)) (ksArchState s)\)" + apply (simp add: invs'_def) + apply (simp add: valid_state'_def) + apply (simp add: valid_global_refs'_def global_refs'_def valid_arch_state'_def + valid_asid_table'_def valid_machine_state'_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def + cong: option.case_cong) + apply (auto simp add: ran_def split: if_split_asm) + done + +crunches invalidateTLBByASID + for asidTable[wp]: "\s. P (armKSASIDTable (ksArchState s))" + +lemma deleteASIDPool_invs[wp]: + "\invs'\ deleteASIDPool asid pool \\rv. invs'\" + apply (simp add: deleteASIDPool_def) + apply wp + apply (simp del: fun_upd_apply) + apply (strengthen invs_asid_update_strg') + apply (wp mapM_wp' getObject_inv loadObject_default_inv + | simp)+ + done + +lemma deleteASID_invs'[wp]: + "deleteASID asid pd \invs'\" + unfolding deleteASID_def + by (wpsimp wp: getASID_wp hoare_drop_imps simp: getPoolPtr_def) + +lemma valid_objs_valid_tcb': + "\ valid_objs' s ; ko_at' (t :: tcb) p s \ \ valid_tcb' t s" + by (fastforce simp add: obj_at'_def ran_def valid_obj'_def valid_objs'_def) + +lemmas archThreadSet_typ_ats[wp] = typ_at_lifts [OF archThreadSet_typ_at'] + +lemma archThreadSet_valid_objs'[wp]: + "\valid_objs' and (\s. \tcb. ko_at' tcb t s \ valid_arch_tcb' (f (tcbArch tcb)) s)\ + archThreadSet f t \\_. valid_objs'\" + unfolding archThreadSet_def + apply (wp setObject_tcb_valid_objs getObject_tcb_wp) + apply clarsimp + apply normalise_obj_at' + apply (drule (1) valid_objs_valid_tcb') + apply (clarsimp simp: valid_obj'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +crunch no_0_obj'[wp]: archThreadSet no_0_obj' + +lemma archThreadSet_ctes_of[wp]: + "archThreadSet f t \\s. P (ctes_of s)\" + unfolding archThreadSet_def + apply (wpsimp wp: getObject_tcb_wp) + apply normalise_obj_at' + apply (auto simp: tcb_cte_cases_def cteSizeBits_def) + done + +crunch ksCurDomain[wp]: archThreadSet "\s. P (ksCurDomain s)" + (wp: setObject_cd_inv) + +lemma archThreadSet_obj_at': + "(\tcb. P tcb \ P (tcb \ tcbArch:= f (tcbArch tcb)\)) \ archThreadSet f t \obj_at' P t'\" + unfolding archThreadSet_def + apply (wpsimp wp: getObject_tcb_wp setObject_tcb_strongest) + apply normalise_obj_at' + apply auto + done + +lemma archThreadSet_tcbDomain[wp]: + "archThreadSet f t \obj_at' (\tcb. x = tcbDomain tcb) t'\" + by (wpsimp wp: archThreadSet_obj_at') + +lemma archThreadSet_inQ[wp]: + "archThreadSet f t' \\s. P (obj_at' (inQ d p) t s)\" + unfolding obj_at'_real_def archThreadSet_def + apply (wpsimp wp: setObject_ko_wp_at getObject_tcb_wp + simp: objBits_simps' archObjSize_def vcpuBits_def pageBits_def + | simp)+ + apply (auto simp: obj_at'_def ko_wp_at'_def) + done + +crunch ct[wp]: archThreadSet "\s. P (ksCurThread s)" + (wp: setObject_ct_inv) + +crunch sched[wp]: archThreadSet "\s. P (ksSchedulerAction s)" + (wp: setObject_sa_unchanged) + +crunch L1[wp]: archThreadSet "\s. P (ksReadyQueuesL1Bitmap s)" + (wp: setObject_sa_unchanged) + +crunch L2[wp]: archThreadSet "\s. P (ksReadyQueuesL2Bitmap s)" + (wp: setObject_sa_unchanged) + +crunch ksArch[wp]: archThreadSet "\s. P (ksArchState s)" + +crunch ksDomSchedule[wp]: archThreadSet "\s. 
P (ksDomSchedule s)" + (wp: setObject_ksDomSchedule_inv) + +crunch ksDomScheduleIdx[wp]: archThreadSet "\s. P (ksDomScheduleIdx s)" + +lemma setObject_tcb_ksInterruptState[wp]: + "setObject t (v :: tcb) \\s. P (ksInterruptState s)\" + by (wpsimp simp: setObject_def wp: updateObject_default_inv) + +lemma setObject_tcb_gsMaxObjectSize[wp]: + "setObject t (v :: tcb) \\s. P (gsMaxObjectSize s)\" + by (wpsimp simp: setObject_def wp: updateObject_default_inv) + +crunch ksInterruptState[wp]: archThreadSet "\s. P (ksInterruptState s)" + +crunch gsMaxObjectSize[wp]: archThreadSet "\s. P (gsMaxObjectSize s)" + +crunch ksMachineState[wp]: archThreadSet "\s. P (ksMachineState s)" + (wp: setObject_ksMachine updateObject_default_inv) + +lemma archThreadSet_state_refs_of'[wp]: + "archThreadSet f t \\s. P (state_refs_of' s)\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_state_refs_of' getObject_tcb_wp) + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply (rule ext) + apply (auto simp: state_refs_of'_def obj_at'_def) + done + +lemma archThreadSet_state_hyp_refs_of'[wp]: + "\\s. \tcb. ko_at' tcb t s \ P ((state_hyp_refs_of' s)(t := tcb_hyp_refs' (f (tcbArch tcb))))\ + archThreadSet f t \\_ s. P (state_hyp_refs_of' s)\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_state_hyp_refs_of' getObject_tcb_wp simp: objBits_simps') + apply normalise_obj_at' + apply (erule rsubst[where P=P]) + apply auto + done + +lemma archThreadSet_if_live'[wp]: + "\\s. if_live_then_nonz_cap' s \ + (\tcb. ko_at' tcb t s \ atcbVCPUPtr (f (tcbArch tcb)) \ None \ ex_nonz_cap_to' t s)\ + archThreadSet f t \\_. if_live_then_nonz_cap'\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_iflive' getObject_tcb_wp) + apply normalise_obj_at' + apply (clarsimp simp: tcb_cte_cases_def if_live_then_nonz_cap'_def cteSizeBits_def) + apply (erule_tac x=t in allE) + apply (erule impE) + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def live'_def hyp_live'_def) + apply simp + done + +lemma archThreadSet_ifunsafe'[wp]: + "archThreadSet f t \if_unsafe_then_cap'\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_ifunsafe' getObject_tcb_wp) + apply normalise_obj_at' + apply (auto simp: tcb_cte_cases_def if_live_then_nonz_cap'_def cteSizeBits_def) + done + +lemma archThreadSet_valid_idle'[wp]: + "archThreadSet f t \valid_idle'\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_idle' getObject_tcb_wp) + apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + done + +lemma archThreadSet_ko_wp_at_no_vcpu[wp]: + "archThreadSet f t \ko_wp_at' (is_vcpu' and hyp_live') p\" + unfolding archThreadSet_def + apply (wpsimp wp: getObject_tcb_wp setObject_ko_wp_at simp: objBits_simps' | rule refl)+ + apply normalise_obj_at' + apply (auto simp: ko_wp_at'_def obj_at'_real_def is_vcpu'_def) + done + +lemma archThreadSet_valid_arch_state'[wp]: + "archThreadSet f t \valid_arch_state'\" + unfolding valid_arch_state'_def valid_asid_table'_def option_case_all_conv split_def + apply (rule hoare_lift_Pf[where f=ksArchState]; wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift) + apply (clarsimp simp: pred_conj_def) + done + +lemma archThreadSet_valid_queues'[wp]: + "archThreadSet f t \valid_queues'\" + unfolding valid_queues'_def + apply (rule hoare_lift_Pf[where f=ksReadyQueues]; wp?) 
+ apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift) + apply auto + done + +lemma archThreadSet_ct_not_inQ[wp]: + "archThreadSet f t \ct_not_inQ\" + unfolding ct_not_inQ_def + apply (rule hoare_lift_Pf[where f=ksCurThread]; wp?) + apply (wpsimp wp: hoare_vcg_imp_lift simp: o_def) + done + +lemma archThreadSet_obj_at'_pte[wp]: + "archThreadSet f t \obj_at' (P::pte \ bool) p\" + unfolding archThreadSet_def + by (wpsimp wp: obj_at_setObject2 simp: updateObject_default_def in_monad) + +crunch pspace_domain_valid[wp]: archThreadSet pspace_domain_valid + +lemma setObject_tcb_gsUntypedZeroRanges[wp]: + "setObject ptr (tcb::tcb) \\s. P (gsUntypedZeroRanges s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +crunch gsUntypedZeroRanges[wp]: archThreadSet "\s. P (gsUntypedZeroRanges s)" + +lemma archThreadSet_untyped_ranges_zero'[wp]: + "archThreadSet f t \untyped_ranges_zero'\" + by (rule hoare_lift_Pf[where f=cteCaps_of]; wp cteCaps_of_ctes_of_lift) + +lemma archThreadSet_tcb_at'[wp]: + "\\\ archThreadSet f t \\_. tcb_at' t\" + unfolding archThreadSet_def + by (wpsimp wp: getObject_tcb_wp simp: obj_at'_def) + +lemma dissoc_invs': + "\invs' and (\s. \p. (\a. armHSCurVCPU (ksArchState s) = Some (p, a)) \ p \ v) and + ko_at' vcpu v and K (vcpuTCBPtr vcpu = Some t) and + obj_at' (\tcb. atcbVCPUPtr (tcbArch tcb) = Some v) t\ + do + archThreadSet (atcbVCPUPtr_update (\_. Nothing)) t; + setObject v $ vcpuTCBPtr_update (\_. Nothing) vcpu + od \\_. invs' and tcb_at' t\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_tcb_valid_objs setObject_vcpu_valid_objs' + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_valid_arch' archThreadSet_if_live' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb valid_arch_tcb'_def + | clarsimp simp: live'_def hyp_live'_def arch_live'_def)+ + supply fun_upd_apply[simp] + apply (clarsimp simp: state_hyp_refs_of'_def obj_at'_def tcb_vcpu_refs'_def + split: option.splits if_split_asm) + apply safe + apply (rule_tac rfs'="state_hyp_refs_of' s" in delta_sym_refs) + apply (clarsimp simp: state_hyp_refs_of'_def obj_at'_def tcb_vcpu_refs'_def + split: option.splits if_split_asm)+ + done + +lemma setVCPU_archThreadSet_None_eq: + "do + archThreadSet (atcbVCPUPtr_update (\_. Nothing)) t; + setObject v $ vcpuTCBPtr_update (\_. Nothing) vcpu; + f + od = do + do + archThreadSet (atcbVCPUPtr_update (\_. Nothing)) t; + setObject v $ vcpuTCBPtr_update (\_. Nothing) vcpu + od; + f + od" by (simp add: bind_assoc) + +lemma vcpuInvalidateActive_inactive[wp]: + "\\\ vcpuInvalidateActive \\rv s. \p. (\a. armHSCurVCPU (ksArchState s) = Some (p, a)) \ P p rv s\" + unfolding vcpuInvalidateActive_def modifyArchState_def by wpsimp + +lemma vcpuDisableNone_obj_at'[wp]: + "vcpuDisable None \\s. P (obj_at' P' p s)\" + unfolding vcpuDisable_def by wpsimp + +lemma vcpuInvalidateActive_obj_at'[wp]: + "vcpuInvalidateActive \\s. 
P (obj_at' P' p s)\" + unfolding vcpuInvalidateActive_def modifyArchState_def by wpsimp + +lemma when_assert_eq: + "(when P $ haskell_fail xs) = assert (\P)" + by (simp add: assert_def when_def) + +lemma dissociateVCPUTCB_invs'[wp]: + "dissociateVCPUTCB vcpu tcb \invs'\" + unfolding dissociateVCPUTCB_def setVCPU_archThreadSet_None_eq when_assert_eq + apply ( wpsimp wp: dissoc_invs' getVCPU_wp | wpsimp wp: getObject_tcb_wp simp: archThreadGet_def)+ + apply (drule tcb_ko_at') + apply clarsimp + apply (rule exI, rule conjI, assumption) + apply clarsimp + apply (rule conjI) + apply normalise_obj_at' + apply (rule conjI) + apply normalise_obj_at' + apply (clarsimp simp: obj_at'_def) + done + +lemma vcpuFinalise_invs'[wp]: "vcpuFinalise vcpu \invs'\" + unfolding vcpuFinalise_def by wpsimp + +lemma arch_finaliseCap_invs[wp]: + "\invs' and valid_cap' (ArchObjectCap cap)\ Arch.finaliseCap cap fin \\rv. invs'\" + unfolding AARCH64_H.finaliseCap_def Let_def by wpsimp + +lemma setObject_tcb_unlive[wp]: + "\\s. vr \ t \ ko_wp_at' (Not \ live') vr s\ + setObject t (tcbArch_update (\_. atcbVCPUPtr_update Map.empty (tcbArch tcb)) tcb) + \\_. ko_wp_at' (Not \ live') vr\" + apply (rule wp_pre) + apply (wpsimp wp: setObject_ko_wp_at simp: objBits_simps', simp+) + apply (clarsimp simp: tcb_at_typ_at' typ_at'_def ko_wp_at'_def ) + done + +lemma setVCPU_unlive[wp]: + "\\\ setObject vr (vcpuTCBPtr_update Map.empty vcpu) \\_. ko_wp_at' (Not \ live') vr\" + apply (rule wp_pre) + apply (wpsimp wp: setObject_ko_wp_at + simp: objBits_def objBitsKO_def archObjSize_def vcpuBits_def pageBits_def) + apply simp+ + apply (clarsimp simp: live'_def hyp_live'_def arch_live'_def ko_wp_at'_def obj_at'_def) + done + +lemma asUser_unlive[wp]: + "\ko_wp_at' (Not \ live') vr\ asUser t f \\_. ko_wp_at' (Not \ live') vr\" + unfolding asUser_def + apply (wpsimp simp: threadSet_def atcbContextSet_def objBits_simps' split_def + wp: setObject_ko_wp_at) + apply (rule refl, simp) + apply (wpsimp simp: atcbContextGet_def wp: getObject_tcb_wp threadGet_wp)+ + apply (clarsimp simp: tcb_at_typ_at' typ_at'_def ko_wp_at'_def[where p=t]) + apply (case_tac ko; simp) + apply (rename_tac tcb) + apply (rule_tac x=tcb in exI) + apply (clarsimp simp: obj_at'_def) + apply (rule_tac x=tcb in exI, rule conjI; clarsimp simp: o_def) + apply (clarsimp simp: ko_wp_at'_def live'_def hyp_live'_def) + done + +lemma dissociateVCPUTCB_unlive: + "\ \ \ dissociateVCPUTCB vcpu tcb \ \_. ko_wp_at' (Not o live') vcpu \" + unfolding dissociateVCPUTCB_def setVCPU_archThreadSet_None_eq when_assert_eq + by (wpsimp wp: getVCPU_wp[where p=vcpu] | + wpsimp wp: getObject_tcb_wp hoare_vcg_conj_lift hoare_vcg_ex_lift + getVCPU_wp[where p=vcpu] setVCPU_unlive[simplified o_def] + setObject_tcb_unlive hoare_drop_imp setObject_tcb_strongest + simp: archThreadGet_def archThreadSet_def)+ + +lemma vcpuFinalise_unlive[wp]: + "\ \ \ vcpuFinalise v \ \_. ko_wp_at' (Not o live') v \" + apply (wpsimp simp: vcpuFinalise_def wp: dissociateVCPUTCB_unlive getVCPU_wp) + apply (frule state_hyp_refs_of'_vcpu_absorb) + apply (auto simp: ko_wp_at'_def) + apply (rule_tac x="KOArch (KOVCPU ko)" in exI) + apply (clarsimp simp: live'_def hyp_live'_def arch_live'_def obj_at'_def) + done + +crunches setVMRoot, deleteASIDPool, invalidateTLBByASID, invalidateASIDEntry, vcpuFinalise + for ctes_of[wp]: "\s. P (ctes_of s)" + (wp: crunch_wps getObject_inv loadObject_default_inv getASID_wp simp: crunch_simps) + +lemma deleteASID_ctes_of[wp]: + "deleteASID a ptr \\s. 
P (ctes_of s)\" + unfolding deleteASID_def by (wpsimp wp: getASID_wp hoare_drop_imps hoare_vcg_all_lift) + +lemma arch_finaliseCap_removeable[wp]: + "\\s. s \' ArchObjectCap cap \ invs' s + \ (final_matters' (ArchObjectCap cap) + \ (final = isFinal (ArchObjectCap cap) slot (cteCaps_of s))) \ + Arch.finaliseCap cap final + \\rv s. isNullCap (fst rv) \ removeable' slot s (ArchObjectCap cap) \ isNullCap (snd rv)\" + unfolding AARCH64_H.finaliseCap_def + apply (wpsimp wp: hoare_vcg_op_lift simp: removeable'_def isCap_simps cte_wp_at_ctes_of) + apply (fastforce simp: final_matters'_def isFinal_def cte_wp_at_ctes_of cteCaps_of_def + sameObjectAs_def3) + done + +lemma isZombie_Null: + "\ isZombie NullCap" + by (simp add: isCap_simps) + +lemma prepares_delete_helper'': + assumes x: "\P\ f \\rv. ko_wp_at' (Not \ live') p\" + shows "\P and K ((\x. cte_refs' cap x = {}) + \ zobj_refs' cap = {p} + \ threadCapRefs cap = {})\ + f \\rv s. removeable' sl s cap\" + apply (rule hoare_gen_asm) + apply (rule hoare_strengthen_post [OF x]) + apply (clarsimp simp: removeable'_def) + done + +crunches finaliseCapTrue_standin, unbindNotification + for ctes_of[wp]: "\s. P (ctes_of s)" + (wp: crunch_wps getObject_inv loadObject_default_inv simp: crunch_simps) + +lemma cteDeleteOne_cteCaps_of: + "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ + P ((cteCaps_of s)(p \ NullCap)))\ + cteDeleteOne p + \\rv s. P (cteCaps_of s)\" + apply (simp add: cteDeleteOne_def unless_def split_def) + apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") + apply (simp add: finaliseCapTrue_standin_simple_def) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def + finaliseCap_def isCap_simps) + apply (drule_tac x=s in fun_cong) + apply (simp add: return_def fail_def) + apply (wp emptySlot_cteCaps_of) + apply (simp add: cteCaps_of_def) + apply (wp (once) hoare_drop_imps) + apply (wp isFinalCapability_inv getCTE_wp')+ + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of) + apply (auto simp: fun_upd_idem fun_upd_def[symmetric] o_def) + done + +lemma cteDeleteOne_isFinal: + "\\s. isFinal cap slot (cteCaps_of s)\ + cteDeleteOne p + \\rv s. isFinal cap slot (cteCaps_of s)\" + apply (wp cteDeleteOne_cteCaps_of) + apply (clarsimp simp: isFinal_def sameObjectAs_def2) + done + +lemmas setEndpoint_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ep_ctes_of] +lemmas setNotification_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF set_ntfn_ctes_of] +lemmas setQueue_cteCaps_of[wp] = cteCaps_of_ctes_of_lift [OF setQueue_ctes_of] +lemmas threadSet_cteCaps_of = cteCaps_of_ctes_of_lift [OF threadSet_ctes_of] + +crunches archThreadSet, vcpuUpdate, dissociateVCPUTCB + for isFinal: "\s. isFinal cap slot (cteCaps_of s)" + (wp: cteCaps_of_ctes_of_lift) + +crunch isFinal: suspend, prepareThreadDelete "\s. isFinal cap slot (cteCaps_of s)" + (ignore: threadSet + wp: threadSet_cteCaps_of crunch_wps + simp: crunch_simps unless_def o_def) + +lemma isThreadCap_threadCapRefs_tcbptr: + "isThreadCap cap \ threadCapRefs cap = {capTCBPtr cap}" + by (clarsimp simp: isCap_simps) + +lemma isArchObjectCap_Cap_capCap: + "isArchObjectCap cap \ ArchObjectCap (capCap cap) = cap" + by (clarsimp simp: isCap_simps) + +lemma cteDeleteOne_deletes[wp]: + "\\\ cteDeleteOne p \\rv s. cte_wp_at' (\c. cteCap c = NullCap) p s\" + apply (subst tree_cte_cteCap_eq[unfolded o_def]) + apply (wp cteDeleteOne_cteCaps_of) + apply clarsimp + done + +crunch irq_node'[wp]: finaliseCap "\s. 
P (irq_node' s)" + (wp: crunch_wps getObject_inv loadObject_default_inv + updateObject_default_inv setObject_ksInterrupt + simp: crunch_simps o_def) + +lemma deletingIRQHandler_removeable': + "\invs' and (\s. isFinal (IRQHandlerCap irq) slot (cteCaps_of s)) + and K (cap = IRQHandlerCap irq)\ + deletingIRQHandler irq + \\rv s. removeable' slot s cap\" + apply (rule hoare_gen_asm) + apply (simp add: deletingIRQHandler_def getIRQSlot_def locateSlot_conv + getInterruptState_def getSlotCap_def) + apply (simp add: removeable'_def tree_cte_cteCap_eq[unfolded o_def]) + apply (subst tree_cte_cteCap_eq[unfolded o_def])+ + apply (wp hoare_use_eq_irq_node' [OF cteDeleteOne_irq_node' cteDeleteOne_cteCaps_of] + getCTE_wp') + apply (clarsimp simp: cte_level_bits_def ucast_nat_def shiftl_t2n mult_ac cteSizeBits_def + split: option.split_asm) + done + +lemma finaliseCap_cte_refs: + "\\s. s \' cap\ + finaliseCap cap final flag + \\rv s. fst rv \ NullCap \ cte_refs' (fst rv) = cte_refs' cap\" + apply (simp add: finaliseCap_def Let_def getThreadCSpaceRoot + AARCH64_H.finaliseCap_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp | wpc | simp only: o_def)+ + apply (frule valid_capAligned) + apply (cases cap, simp_all add: isCap_simps) + apply (clarsimp simp: tcb_cte_cases_def word_count_from_top objBits_defs) + apply clarsimp + apply (rule ext, simp) + apply (rule image_cong [OF _ refl]) + apply (fastforce simp: mask_def capAligned_def objBits_simps shiftL_nat) + done + +lemma deletingIRQHandler_final: + "\\s. isFinal cap slot (cteCaps_of s) + \ (\final. finaliseCap cap final True = fail)\ + deletingIRQHandler irq + \\rv s. isFinal cap slot (cteCaps_of s)\" + apply (simp add: deletingIRQHandler_def isFinal_def getIRQSlot_def + getInterruptState_def locateSlot_conv getSlotCap_def) + apply (wp cteDeleteOne_cteCaps_of getCTE_wp') + apply (auto simp: sameObjectAs_def3) + done + +declare suspend_unqueued [wp] + +lemma unbindNotification_valid_objs'_helper: + "valid_tcb' tcb s \ valid_tcb' (tcbBoundNotification_update (\_. None) tcb) s " + by (clarsimp simp: valid_bound_ntfn'_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def + split: option.splits ntfn.splits) + +lemma unbindNotification_valid_objs'_helper': + "valid_ntfn' tcb s \ valid_ntfn' (ntfnBoundTCB_update (\_. None) tcb) s " + by (clarsimp simp: valid_bound_tcb'_def valid_ntfn'_def + split: option.splits ntfn.splits) + +lemma typ_at'_valid_tcb'_lift: + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + shows "\\s. valid_tcb' tcb s\ f \\rv s. valid_tcb' tcb s\" + including no_pre + apply (simp add: valid_tcb'_def) + apply (case_tac "tcbState tcb", simp_all add: valid_tcb_state'_def split_def valid_bound_ntfn'_def) + apply (wp hoare_vcg_const_Ball_lift typ_at_lifts[OF P] + | case_tac "tcbBoundNotification tcb", simp_all)+ + done + +lemmas setNotification_valid_tcb' = typ_at'_valid_tcb'_lift [OF setNotification_typ_at'] + +lemma unbindNotification_valid_objs'[wp]: + "\valid_objs'\ + unbindNotification t + \\rv. 
valid_objs'\" + apply (simp add: unbindNotification_def) + apply (rule hoare_pre) + apply (wp threadSet_valid_objs' gbn_wp' set_ntfn_valid_objs' hoare_vcg_all_lift + setNotification_valid_tcb' getNotification_wp + | wpc | clarsimp simp: setBoundNotification_def unbindNotification_valid_objs'_helper)+ + apply (clarsimp elim!: obj_atE') + apply (rule valid_objsE', assumption+) + apply (clarsimp simp: valid_obj'_def unbindNotification_valid_objs'_helper') + done + +lemma unbindMaybeNotification_valid_objs'[wp]: + "\valid_objs'\ + unbindMaybeNotification t + \\rv. valid_objs'\" + apply (simp add: unbindMaybeNotification_def) + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp threadSet_valid_objs' gbn_wp' set_ntfn_valid_objs' hoare_vcg_all_lift + setNotification_valid_tcb' getNotification_wp + | wpc | clarsimp simp: setBoundNotification_def unbindNotification_valid_objs'_helper)+ + apply (clarsimp elim!: obj_atE') + apply (rule valid_objsE', assumption+) + apply (clarsimp simp: valid_obj'_def unbindNotification_valid_objs'_helper') + done + +lemma unbindMaybeNotification_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ unbindMaybeNotification t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: unbindMaybeNotification_def) + apply (rule hoare_pre) + apply (wp sbn_sch_act' | wpc | simp)+ + done + +lemma valid_cong: + "\ \s. P s = P' s; \s. P' s \ f s = f' s; + \rv s' s. \ (rv, s') \ fst (f' s); P' s \ \ Q rv s' = Q' rv s' \ + \ \P\ f \Q\ = \P'\ f' \Q'\" + by (clarsimp simp add: valid_def, blast) + +lemma sym_refs_ntfn_bound_eq: "sym_refs (state_refs_of' s) + \ obj_at' (\ntfn. ntfnBoundTCB ntfn = Some t) x s + = bound_tcb_at' (\st. st = Some x) t s" + apply (rule iffI) + apply (drule (1) sym_refs_obj_atD') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def refs_of_rev') + apply (drule(1) sym_refs_bound_tcb_atD') + apply (clarsimp simp: obj_at'_def ko_wp_at'_def refs_of_rev') + done + +lemma unbindMaybeNotification_obj_at'_bound: + "\\\ + unbindMaybeNotification r + \\_ s. obj_at' (\ntfn. ntfnBoundTCB ntfn = None) r s\" + apply (simp add: unbindMaybeNotification_def) + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp obj_at_setObject2 + | wpc + | simp add: setBoundNotification_def threadSet_def updateObject_default_def in_monad)+ + apply (simp add: setNotification_def obj_at'_real_def cong: valid_cong) + apply (wp setObject_ko_wp_at, (simp add: objBits_simps')+) + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + done + +crunches unbindNotification, unbindMaybeNotification + for isFinal[wp]: "\s. isFinal cap slot (cteCaps_of s)" + (wp: sts_bound_tcb_at' threadSet_cteCaps_of crunch_wps getObject_inv + loadObject_default_inv + ignore: threadSet + simp: setBoundNotification_def) + +crunches cancelSignal, cancelAllIPC + for bound_tcb_at'[wp]: "bound_tcb_at' P t" + (wp: sts_bound_tcb_at' threadSet_cteCaps_of crunch_wps getObject_inv + loadObject_default_inv + ignore: threadSet) + +lemma finaliseCapTrue_standin_bound_tcb_at': + "\\s. bound_tcb_at' P t s \ (\tt b r. cap = ReplyCap tt b r) \ + finaliseCapTrue_standin cap final + \\_. bound_tcb_at' P t\" + apply (case_tac cap, simp_all add:finaliseCapTrue_standin_def) + apply (clarsimp simp: isNotificationCap_def) + apply (wp, clarsimp) + done + +lemma capDeleteOne_bound_tcb_at': + "\bound_tcb_at' P tptr and cte_wp_at' (isReplyCap \ cteCap) callerCap\ + cteDeleteOne callerCap \\rv. 
bound_tcb_at' P tptr\" + apply (simp add: cteDeleteOne_def unless_def) + apply (rule hoare_pre) + apply (wp finaliseCapTrue_standin_bound_tcb_at' hoare_vcg_all_lift + hoare_vcg_if_lift2 getCTE_cteCap_wp + | wpc | simp | wp (once) hoare_drop_imp)+ + apply (clarsimp simp: cteCaps_of_def isReplyCap_def cte_wp_at_ctes_of + split: option.splits) + apply (case_tac "cteCap cte", simp_all) + done + +lemma cancelIPC_bound_tcb_at'[wp]: + "\bound_tcb_at' P tptr\ cancelIPC t \\rv. bound_tcb_at' P tptr\" + apply (simp add: cancelIPC_def Let_def) + apply (rule hoare_seq_ext[OF _ gts_sp']) + apply (case_tac "state", simp_all) + defer 2 + apply (rule hoare_pre) + apply ((wp sts_bound_tcb_at' getEndpoint_wp | wpc | simp)+)[8] + apply (simp add: getThreadReplySlot_def locateSlot_conv liftM_def) + apply (rule hoare_pre) + apply (wp capDeleteOne_bound_tcb_at' getCTE_ctes_of) + apply (rule_tac Q="\_. bound_tcb_at' P tptr" in hoare_post_imp) + apply (clarsimp simp: capHasProperty_def cte_wp_at_ctes_of) + apply (wp threadSet_pred_tcb_no_state | simp)+ + done + +lemma archThreadSet_bound_tcb_at'[wp]: + "archThreadSet f t \bound_tcb_at' P t'\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_tcb_strongest getObject_tcb_wp simp: pred_tcb_at'_def) + by (auto simp: obj_at'_def objBits_simps) + +lemmas asUser_bound_obj_at'[wp] = asUser_pred_tcb_at' [of itcbBoundNotification] + +lemmas setObject_vcpu_pred_tcb_at'[wp] = + setObject_vcpu_obj_at'_no_vcpu [of _ "\ko. tst (pr (tcb_to_itcb' ko))" for tst pr, folded pred_tcb_at'_def] + +crunches dissociateVCPUTCB, vgicUpdateLR + for bound_tcb_at'[wp]: "bound_tcb_at' P t" + (wp: sts_bound_tcb_at' getVCPU_wp crunch_wps hoare_vcg_all_lift hoare_vcg_if_lift3 + ignore: archThreadSet) + +crunches suspend, prepareThreadDelete + for bound_tcb_at'[wp]: "bound_tcb_at' P t" + (wp: sts_bound_tcb_at' cancelIPC_bound_tcb_at' + ignore: threadSet) + +lemma unbindNotification_bound_tcb_at': + "\\_. True\ unbindNotification t \\rv. bound_tcb_at' ((=) None) t\" + apply (simp add: unbindNotification_def) + apply (wp setBoundNotification_bound_tcb gbn_wp' | wpc | simp)+ + done + +crunches unbindNotification, unbindMaybeNotification + for valid_queues[wp]: "Invariants_H.valid_queues" + (wp: sbn_valid_queues) + +crunches unbindNotification, unbindMaybeNotification + for weak_sch_act_wf[wp]: "\s. weak_sch_act_wf (ksSchedulerAction s) s" + +lemma unbindNotification_tcb_at'[wp]: + "\tcb_at' t'\ unbindNotification t \\rv. tcb_at' t'\" + apply (simp add: unbindNotification_def) + apply (wp gbn_wp' | wpc | simp)+ + done + +lemma unbindMaybeNotification_tcb_at'[wp]: + "\tcb_at' t'\ unbindMaybeNotification t \\rv. tcb_at' t'\" + apply (simp add: unbindMaybeNotification_def) + apply (wp gbn_wp' | wpc | simp)+ + done + +lemma dissociateVCPUTCB_cte_wp_at'[wp]: + "dissociateVCPUTCB v t \cte_wp_at' P p\" + unfolding cte_wp_at_ctes_of by wp + +lemmas dissociateVCPUTCB_typ_ats'[wp] = typ_at_lifts[OF dissociateVCPUTCB_typ_at'] + +crunch cte_wp_at'[wp]: prepareThreadDelete "cte_wp_at' P p" +crunch valid_cap'[wp]: prepareThreadDelete "valid_cap' cap" + +lemma unset_vcpu_hyp_unlive[wp]: + "\\\ archThreadSet (atcbVCPUPtr_update Map.empty) t \\_. ko_wp_at' (Not \ hyp_live') t\" + unfolding archThreadSet_def + apply (wpsimp wp: setObject_ko_wp_at' getObject_tcb_wp; (simp add: objBits_simps')?)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def hyp_live'_def) + done + + lemma unset_tcb_hyp_unlive[wp]: + "\\\ setObject vr (vcpuTCBPtr_update Map.empty vcpu) \\_. 
ko_wp_at' (Not \ hyp_live') vr\" + apply (wpsimp wp: setObject_ko_wp_at' getObject_tcb_wp + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + | simp)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def hyp_live'_def arch_live'_def) + done + +lemma setObject_vcpu_hyp_unlive[wp]: + "\\s. t \ vr \ ko_wp_at' (Not \ hyp_live') t s\ + setObject vr (vcpuTCBPtr_update Map.empty vcpu) + \\_. ko_wp_at' (Not \ hyp_live') t\" + apply (rule wp_pre) + apply (wpsimp wp: setObject_ko_wp_at + simp: objBits_def objBitsKO_def archObjSize_def vcpuBits_def pageBits_def + | simp)+ + apply (clarsimp simp: tcb_at_typ_at' typ_at'_def ko_wp_at'_def ) + done + +lemma asUser_hyp_unlive[wp]: + "asUser f t \ko_wp_at' (Not \ hyp_live') t'\" + unfolding asUser_def + apply (wpsimp wp: threadSet_ko_wp_at2' threadGet_wp) + apply (clarsimp simp: ko_wp_at'_def obj_at'_def hyp_live'_def atcbContextSet_def) + done + +lemma dissociateVCPUTCB_hyp_unlive[wp]: + "\\\ dissociateVCPUTCB v t \\_. ko_wp_at' (Not \ hyp_live') t\" + unfolding dissociateVCPUTCB_def + by (cases "v = t"; wpsimp wp: unset_tcb_hyp_unlive unset_vcpu_hyp_unlive[simplified comp_def]) + +lemma prepareThreadDelete_hyp_unlive[wp]: + "\\\ prepareThreadDelete t \\_. ko_wp_at' (Not \ hyp_live') t\" + unfolding prepareThreadDelete_def archThreadGet_def fpuThreadDelete_def + apply (wpsimp wp: getObject_tcb_wp hoare_vcg_imp_lift' hoare_vcg_ex_lift) + apply (auto simp: ko_wp_at'_def obj_at'_def hyp_live'_def) + done + +lemma fpuThreadDeleteOp_invs'[wp]: + "\invs'\ doMachineOp (fpuThreadDeleteOp t) \\rv. invs'\" + apply (wp dmo_invs' no_irq_fpuThreadDeleteOp no_irq) + apply clarsimp + apply (drule_tac Q="\_ m'. underlying_memory m' p = underlying_memory m p" + in use_valid) + apply wpsimp+ + done + +crunch invs[wp]: prepareThreadDelete "invs'" (ignore: doMachineOp) + +end + +lemma (in delete_one_conc_pre) finaliseCap_replaceable: + "\\s. invs' s \ cte_wp_at' (\cte. cteCap cte = cap) slot s + \ (final_matters' cap \ (final = isFinal cap slot (cteCaps_of s))) + \ weak_sch_act_wf (ksSchedulerAction s) s\ + finaliseCap cap final flag + \\rv s. (isNullCap (fst rv) \ removeable' slot s cap + \ (snd rv \ NullCap \ snd rv = cap \ cap_has_cleanup' cap + \ isFinal cap slot (cteCaps_of s))) + \ + (isZombie (fst rv) \ snd rv = NullCap + \ isFinal cap slot (cteCaps_of s) + \ capClass cap = capClass (fst rv) + \ capUntypedPtr (fst rv) = capUntypedPtr cap + \ capBits (fst rv) = capBits cap + \ capRange (fst rv) = capRange cap + \ (isThreadCap cap \ isCNodeCap cap \ isZombie cap) + \ (\p \ threadCapRefs cap. st_tcb_at' ((=) Inactive) p s + \ obj_at' (Not \ tcbQueued) p s + \ bound_tcb_at' ((=) None) p s + \ ko_wp_at' (Not \ hyp_live') p s + \ (\pr. 
p \ set (ksReadyQueues s pr))))\" + apply (simp add: finaliseCap_def Let_def getThreadCSpaceRoot + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp prepares_delete_helper'' [OF cancelAllIPC_unlive] + prepares_delete_helper'' [OF cancelAllSignals_unlive] + suspend_isFinal prepareThreadDelete_unqueued prepareThreadDelete_nonq + prepareThreadDelete_inactive prepareThreadDelete_isFinal + suspend_makes_inactive suspend_nonq + deletingIRQHandler_removeable' + deletingIRQHandler_final[where slot=slot ] + unbindMaybeNotification_obj_at'_bound + getNotification_wp + suspend_bound_tcb_at' + unbindNotification_bound_tcb_at' + | simp add: isZombie_Null isThreadCap_threadCapRefs_tcbptr + isArchObjectCap_Cap_capCap + | (rule hoare_strengthen_post [OF arch_finaliseCap_removeable[where slot=slot]], + clarsimp simp: isCap_simps) + | wpc)+ + apply clarsimp + apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+) + apply (case_tac "cteCap cte", + simp_all add: isCap_simps capRange_def cap_has_cleanup'_def + final_matters'_def objBits_simps + not_Final_removeable finaliseCap_def, + simp_all add: removeable'_def) + (* thread *) + apply (frule capAligned_capUntypedPtr [OF valid_capAligned], simp) + apply (clarsimp simp: valid_cap'_def) + apply (drule valid_globals_cte_wpD'[rotated], clarsimp) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (clarsimp simp: obj_at'_def | rule conjI)+ + done + +lemma cteDeleteOne_cte_wp_at_preserved: + assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" + shows "\\s. cte_wp_at' (\cte. P (cteCap cte)) p s\ + cteDeleteOne ptr + \\rv s. cte_wp_at' (\cte. P (cteCap cte)) p s\" + apply (simp add: tree_cte_cteCap_eq[unfolded o_def]) + apply (rule hoare_pre, wp cteDeleteOne_cteCaps_of) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of x) + done + +crunch ctes_of[wp]: cancelSignal "\s. P (ctes_of s)" + (simp: crunch_simps wp: crunch_wps) + +lemma cancelIPC_cteCaps_of: + "\\s. (\p. cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ + P ((cteCaps_of s)(p \ NullCap))) \ + P (cteCaps_of s)\ + cancelIPC t + \\rv s. P (cteCaps_of s)\" + apply (simp add: cancelIPC_def Let_def capHasProperty_def + getThreadReplySlot_def locateSlot_conv) + apply (rule hoare_pre) + apply (wp cteDeleteOne_cteCaps_of getCTE_wp' | wpcw + | simp add: cte_wp_at_ctes_of + | wp (once) hoare_drop_imps cteCaps_of_ctes_of_lift)+ + apply (wp hoare_convert_imp hoare_vcg_all_lift + threadSet_ctes_of threadSet_cteCaps_of + | clarsimp)+ + apply (wp cteDeleteOne_cteCaps_of getCTE_wp' | wpcw | simp + | wp (once) hoare_drop_imps cteCaps_of_ctes_of_lift)+ + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + apply (drule_tac x="mdbNext (cteMDBNode x)" in spec) + apply clarsimp + apply (auto simp: o_def map_option_case fun_upd_def[symmetric]) + done + +lemma cancelIPC_cte_wp_at': + assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" + shows "\\s. cte_wp_at' (\cte. P (cteCap cte)) p s\ + cancelIPC t + \\rv s. cte_wp_at' (\cte. P (cteCap cte)) p s\" + apply (simp add: tree_cte_cteCap_eq[unfolded o_def]) + apply (rule hoare_pre, wp cancelIPC_cteCaps_of) + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of x) + done + +crunch cte_wp_at'[wp]: tcbSchedDequeue "cte_wp_at' P p" + +lemma suspend_cte_wp_at': + assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" + shows "\cte_wp_at' (\cte. P (cteCap cte)) p\ + suspend t + \\rv. cte_wp_at' (\cte. 
P (cteCap cte)) p\" + apply (simp add: suspend_def updateRestartPC_def) + apply (rule hoare_pre) + apply (wp threadSet_cte_wp_at' cancelIPC_cte_wp_at' + | simp add: x)+ + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunch cte_wp_at'[wp]: deleteASIDPool "cte_wp_at' P p" + (simp: crunch_simps assertE_def + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma deleteASID_cte_wp_at'[wp]: + "\cte_wp_at' P p\ deleteASID param_a param_b \\_. cte_wp_at' P p\" + apply (simp add: deleteASID_def + cong: option.case_cong) + apply (wp setObject_cte_wp_at'[where Q="\"] getObject_inv + loadObject_default_inv setVMRoot_cte_wp_at' + | clarsimp simp: updateObject_default_def in_monad + | rule equals0I + | wpc)+ + done + +crunches unmapPageTable, unmapPage, unbindNotification, finaliseCapTrue_standin + for cte_wp_at'[wp]: "cte_wp_at' P p" + (simp: crunch_simps wp: crunch_wps getObject_inv loadObject_default_inv) + +crunch cte_wp_at'[wp]: vcpuFinalise "cte_wp_at' P p" + (wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma arch_finaliseCap_cte_wp_at[wp]: + "\cte_wp_at' P p\ Arch.finaliseCap cap fin \\rv. cte_wp_at' P p\" + apply (simp add: AARCH64_H.finaliseCap_def) + apply (wpsimp wp: unmapPage_cte_wp_at') + done + +lemma deletingIRQHandler_cte_preserved: + assumes x: "\cap final. P cap \ finaliseCap cap final True = fail" + shows "\cte_wp_at' (\cte. P (cteCap cte)) p\ + deletingIRQHandler irq + \\rv. cte_wp_at' (\cte. P (cteCap cte)) p\" + apply (simp add: deletingIRQHandler_def getSlotCap_def + getIRQSlot_def locateSlot_conv getInterruptState_def) + apply (wpsimp wp: cteDeleteOne_cte_wp_at_preserved getCTE_wp' simp: x) + done + +lemma finaliseCap_equal_cap[wp]: + "\cte_wp_at' (\cte. cteCap cte = cap) sl\ + finaliseCap cap fin flag + \\rv. cte_wp_at' (\cte. cteCap cte = cap) sl\" + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp suspend_cte_wp_at' deletingIRQHandler_cte_preserved + | clarsimp simp: finaliseCap_def | wpc)+ + apply (case_tac cap) + apply auto + done + +lemma setThreadState_st_tcb_at_simplish': + "simple' st \ + \st_tcb_at' (P or simple') t\ + setThreadState st t' + \\rv. st_tcb_at' (P or simple') t\" + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + done + +lemmas setThreadState_st_tcb_at_simplish + = setThreadState_st_tcb_at_simplish'[unfolded pred_disj_def] + +crunch st_tcb_at_simplish: cteDeleteOne + "st_tcb_at' (\st. P st \ simple' st) t" + (wp: crunch_wps getObject_inv loadObject_default_inv threadSet_pred_tcb_no_state + simp: crunch_simps unless_def ignore: threadSet) + +lemma cteDeleteOne_st_tcb_at[wp]: + assumes x[simp]: "\st. simple' st \ P st" shows + "\st_tcb_at' P t\ cteDeleteOne slot \\rv. st_tcb_at' P t\" + apply (subgoal_tac "\Q. P = (Q or simple')") + apply (clarsimp simp: pred_disj_def) + apply (rule cteDeleteOne_st_tcb_at_simplish) + apply (rule_tac x=P in exI) + apply auto + done + +lemma cteDeleteOne_reply_pred_tcb_at: + "\\s. pred_tcb_at' proj P t s \ (\t' r. cte_wp_at' (\cte. cteCap cte = ReplyCap t' False r) slot s)\ + cteDeleteOne slot + \\rv. 
pred_tcb_at' proj P t\" + apply (simp add: cteDeleteOne_def unless_def isFinalCapability_def) + apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule hoare_assume_pre) + apply (clarsimp simp: cte_wp_at_ctes_of when_def isCap_simps + Let_def finaliseCapTrue_standin_def) + apply (intro impI conjI, (wp | simp)+) + done + +lemmas setNotification_typ_at'[wp] = typ_at_lifts[OF setNotification_typ_at'] + +crunches setBoundNotification, setNotification + for sch_act_simple[wp]: sch_act_simple + (wp: sch_act_simple_lift) + +crunches cteDeleteOne, unbindNotification + for sch_act_simple[wp]: sch_act_simple + (wp: crunch_wps ssa_sch_act_simple sts_sch_act_simple getObject_inv + loadObject_default_inv + simp: crunch_simps + rule: sch_act_simple_lift) + +lemma rescheduleRequired_sch_act_not[wp]: + "\\\ rescheduleRequired \\rv. sch_act_not t\" + apply (simp add: rescheduleRequired_def setSchedulerAction_def) + apply (wp hoare_post_taut | simp)+ + done + +crunch sch_act_not[wp]: cteDeleteOne "sch_act_not t" + (simp: crunch_simps case_Null_If unless_def + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma cancelAllIPC_mapM_x_valid_queues: + "\Invariants_H.valid_queues and valid_objs' and (\s. \t\set q. tcb_at' t s)\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\rv. Invariants_H.valid_queues\" + apply (rule_tac R="\_ s. (\t\set q. tcb_at' t s) \ valid_objs' s" + in hoare_post_add) + apply (rule hoare_pre) + apply (rule mapM_x_wp') + apply (rule hoare_name_pre_state) + apply (wp hoare_vcg_const_Ball_lift + tcbSchedEnqueue_valid_queues tcbSchedEnqueue_not_st + sts_valid_queues sts_st_tcb_at'_cases setThreadState_not_st + | simp + | ((elim conjE)?, drule (1) bspec, clarsimp elim!: obj_at'_weakenE simp: valid_tcb_state'_def))+ + done + +lemma cancelAllIPC_mapM_x_weak_sch_act: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (rule mapM_x_wp_inv) + apply (wp) + apply (clarsimp) + done + +lemma cancelAllIPC_mapM_x_valid_objs': + "\valid_objs'\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\_. valid_objs'\" + apply (wp mapM_x_wp' sts_valid_objs') + apply (clarsimp simp: valid_tcb_state'_def)+ + done + +lemma cancelAllIPC_mapM_x_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + by (wp mapM_x_wp' tcbSchedEnqueue_not_st setThreadState_oa_queued | simp)+ + +lemma rescheduleRequired_oa_queued': + "\obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\ + rescheduleRequired + \\_. obj_at' (\tcb. Q (tcbDomain tcb) (tcbPriority tcb)) t'\" + apply (simp add: rescheduleRequired_def) + apply (wp tcbSchedEnqueue_not_st + | wpc + | simp)+ + done + +lemma cancelAllIPC_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ + cancelAllIPC epptr + \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: cancelAllIPC_def) + apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift + rescheduleRequired_oa_queued' cancelAllIPC_mapM_x_tcbDomain_obj_at' + getEndpoint_wp + | wpc + | simp)+ + done + +lemma cancelAllIPC_valid_queues[wp]: + "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ + cancelAllIPC ep_ptr + \\rv. 
Invariants_H.valid_queues\" + apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) + apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift + cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act + set_ep_valid_objs' getEndpoint_wp) + apply (clarsimp simp: valid_ep'_def) + apply (drule (1) ko_at_valid_objs') + apply (auto simp: valid_obj'_def valid_ep'_def valid_tcb'_def + split: endpoint.splits + elim: valid_objs_valid_tcbE) + done + +lemma cancelAllSignals_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ + cancelAllSignals epptr + \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" +apply (simp add: cancelAllSignals_def) +apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift + rescheduleRequired_oa_queued' cancelAllIPC_mapM_x_tcbDomain_obj_at' + getNotification_wp + | wpc + | simp)+ +done + +lemma unbindMaybeNotification_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ + unbindMaybeNotification r + \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: unbindMaybeNotification_def) + apply (wp setBoundNotification_oa_queued getNotification_wp gbn_wp' | wpc | simp)+ + done + +lemma cancelAllSignals_valid_queues[wp]: + "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ + cancelAllSignals ntfn + \\rv. Invariants_H.valid_queues\" + apply (simp add: cancelAllSignals_def) + apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj ntfna", simp_all) + apply (wp, simp)+ + apply (wp hoare_vcg_conj_lift hoare_vcg_const_Ball_lift + cancelAllIPC_mapM_x_valid_queues cancelAllIPC_mapM_x_valid_objs' cancelAllIPC_mapM_x_weak_sch_act + set_ntfn_valid_objs' + | simp)+ + apply (clarsimp simp: valid_ep'_def) + apply (drule (1) ko_at_valid_objs') + apply (auto simp: valid_obj'_def valid_ntfn'_def valid_tcb'_def + split: endpoint.splits + elim: valid_objs_valid_tcbE) + done + +lemma finaliseCapTrue_standin_valid_queues[wp]: + "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ + finaliseCapTrue_standin cap final + \\_. Invariants_H.valid_queues\" + apply (simp add: finaliseCapTrue_standin_def Let_def) + apply (safe) + apply (wp | clarsimp | wpc)+ + done + + +crunch valid_queues[wp]: isFinalCapability "Invariants_H.valid_queues" + (simp: crunch_simps) + +crunch sch_act[wp]: isFinalCapability "\s. sch_act_wf (ksSchedulerAction s) s" + (simp: crunch_simps) + +crunch weak_sch_act[wp]: + isFinalCapability "\s. weak_sch_act_wf (ksSchedulerAction s) s" + (simp: crunch_simps) + +lemma cteDeleteOne_queues[wp]: + "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ + cteDeleteOne sl + \\_. Invariants_H.valid_queues\" (is "\?PRE\ _ \_\") + apply (simp add: cteDeleteOne_def unless_def split_def) + apply (wp isFinalCapability_inv getCTE_wp | rule hoare_drop_imps | simp)+ + apply (clarsimp simp: cte_wp_at'_def) + done + +lemma valid_inQ_queues_lift: + assumes tat: "\d p tcb. \obj_at' (inQ d p) tcb\ f \\_. obj_at' (inQ d p) tcb\" + and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" + shows "\valid_inQ_queues\ f \\_. 
valid_inQ_queues\" + proof - + show ?thesis + apply (clarsimp simp: valid_def valid_inQ_queues_def) + apply safe + apply (rule use_valid [OF _ tat], assumption) + apply (drule spec, drule spec, erule conjE, erule bspec) + apply (rule ccontr) + apply (erule notE[rotated], erule(1) use_valid [OF _ prq]) + apply (erule use_valid [OF _ prq]) + apply simp + done + qed + +lemma emptySlot_valid_inQ_queues [wp]: + "\valid_inQ_queues\ emptySlot sl opt \\rv. valid_inQ_queues\" + unfolding emptySlot_def + by (wp opt_return_pres_lift | wpcw | wp valid_inQ_queues_lift | simp)+ + +lemma cancelAllIPC_mapM_x_valid_inQ_queues: + "\valid_inQ_queues\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\rv. valid_inQ_queues\" + apply (rule mapM_x_wp_inv) + apply (wp sts_valid_queues [where st="Structures_H.thread_state.Restart", simplified] + setThreadState_st_tcb) + done + +lemma cancelAllIPC_valid_inQ_queues[wp]: + "\valid_inQ_queues\ + cancelAllIPC ep_ptr + \\rv. valid_inQ_queues\" + apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) + apply (wp cancelAllIPC_mapM_x_valid_inQ_queues) + apply (wp hoare_conjI hoare_drop_imp | simp)+ + done + +lemma cancelAllSignals_valid_inQ_queues[wp]: + "\valid_inQ_queues\ + cancelAllSignals ntfn + \\rv. valid_inQ_queues\" + apply (simp add: cancelAllSignals_def) + apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj ntfna", simp_all) + apply (wp, simp)+ + apply (wp cancelAllIPC_mapM_x_valid_inQ_queues)+ + apply (simp) + done + +crunches unbindNotification, unbindMaybeNotification + for valid_inQ_queues[wp]: "valid_inQ_queues" + +lemma finaliseCapTrue_standin_valid_inQ_queues[wp]: + "\valid_inQ_queues\ + finaliseCapTrue_standin cap final + \\_. valid_inQ_queues\" + apply (simp add: finaliseCapTrue_standin_def Let_def) + apply (safe) + apply (wp | clarsimp | wpc)+ + done + +crunch valid_inQ_queues[wp]: isFinalCapability valid_inQ_queues + (simp: crunch_simps) + +lemma cteDeleteOne_valid_inQ_queues[wp]: + "\valid_inQ_queues\ + cteDeleteOne sl + \\_. valid_inQ_queues\" + apply (simp add: cteDeleteOne_def unless_def) + apply (wpsimp wp: hoare_drop_imp hoare_vcg_all_lift) + done + +crunch ksCurDomain[wp]: cteDeleteOne "\s. P (ksCurDomain s)" + (wp: crunch_wps simp: crunch_simps unless_def) + +lemma cteDeleteOne_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cteDeleteOne slot \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: cteDeleteOne_def unless_def split_def) + apply (wp emptySlot_tcbDomain cancelAllIPC_tcbDomain_obj_at' cancelAllSignals_tcbDomain_obj_at' + isFinalCapability_inv getCTE_wp + unbindMaybeNotification_tcbDomain_obj_at' + | rule hoare_drop_imp + | simp add: finaliseCapTrue_standin_def Let_def + split del: if_split + | wpc)+ + apply (clarsimp simp: cte_wp_at'_def) + done + +end + +global_interpretation delete_one_conc_pre + by (unfold_locales, wp) + (wp cteDeleteOne_tcbDomain_obj_at' cteDeleteOne_typ_at' cteDeleteOne_reply_pred_tcb_at | simp)+ + +lemma cteDeleteOne_invs[wp]: + "\invs'\ cteDeleteOne ptr \\rv. 
invs'\" + apply (simp add: cteDeleteOne_def unless_def + split_def finaliseCapTrue_standin_simple_def) + apply wp + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_True_invs) + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_replaceable[where slot=ptr]) + apply (rule hoare_vcg_conj_lift) + apply (rule finaliseCap_cte_refs) + apply (rule finaliseCap_equal_cap[where sl=ptr]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule disjE) + apply simp + apply (clarsimp dest!: isCapDs simp: capRemovable_def) + apply (clarsimp simp: removeable'_def fun_eq_iff[where f="cte_refs' cap" for cap] + del: disjCI) + apply (rule disjI2) + apply (rule conjI) + subgoal by auto + subgoal by (auto dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def + live'_def hyp_live'_def ko_wp_at'_def) + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp + | wp (once) isFinal[where x=ptr])+ + apply (fastforce simp: cte_wp_at_ctes_of) + done + +global_interpretation delete_one_conc_fr: delete_one_conc + by unfold_locales wp + +declare cteDeleteOne_invs[wp] + +lemma deletingIRQHandler_invs' [wp]: + "\invs'\ deletingIRQHandler i \\_. invs'\" + apply (simp add: deletingIRQHandler_def getSlotCap_def + getIRQSlot_def locateSlot_conv getInterruptState_def) + apply (wp getCTE_wp') + apply simp + done + +lemma finaliseCap_invs: + "\invs' and sch_act_simple and valid_cap' cap + and cte_wp_at' (\cte. cteCap cte = cap) sl\ + finaliseCap cap fin flag + \\rv. invs'\" + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp hoare_drop_imps hoare_vcg_all_lift | simp only: o_def | wpc)+ + apply clarsimp + apply (intro conjI impI) + apply (clarsimp dest!: isCapDs simp: valid_cap'_def) + apply (drule invs_valid_global', drule(1) valid_globals_cte_wpD') + apply (drule valid_capAligned, drule capAligned_capUntypedPtr) + apply (clarsimp dest!: isCapDs) + apply (clarsimp dest!: isCapDs) + apply (clarsimp dest!: isCapDs) + done + +lemma finaliseCap_zombie_cap[wp]: + "\cte_wp_at' (\cte. (P and isZombie) (cteCap cte)) sl\ + finaliseCap cap fin flag + \\rv. cte_wp_at' (\cte. (P and isZombie) (cteCap cte)) sl\" + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp suspend_cte_wp_at' + deletingIRQHandler_cte_preserved + | clarsimp simp: finaliseCap_def isCap_simps | wpc)+ + done + +lemma finaliseCap_zombie_cap': + "\cte_wp_at' (\cte. (P and isZombie) (cteCap cte)) sl\ + finaliseCap cap fin flag + \\rv. cte_wp_at' (\cte. P (cteCap cte)) sl\" + apply (rule hoare_strengthen_post) + apply (rule finaliseCap_zombie_cap) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma finaliseCap_cte_cap_wp_to[wp]: + "\ex_cte_cap_wp_to' P sl\ finaliseCap cap fin flag \\rv. ex_cte_cap_wp_to' P sl\" + apply (simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre, rule hoare_use_eq_irq_node' [OF finaliseCap_irq_node']) + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (wp suspend_cte_wp_at' + deletingIRQHandler_cte_preserved + hoare_vcg_ex_lift + | clarsimp simp: finaliseCap_def isCap_simps + | rule conjI + | wpc)+ + apply fastforce + done + +crunch valid_cap'[wp]: unbindNotification "valid_cap' cap" + +lemma finaliseCap_valid_cap[wp]: + "\valid_cap' cap\ finaliseCap cap final flag \\rv. 
valid_cap' (fst rv)\" + apply (simp add: finaliseCap_def Let_def + getThreadCSpaceRoot + AARCH64_H.finaliseCap_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp | simp only: valid_NullCap o_def fst_conv | wpc)+ + apply simp + apply (intro conjI impI) + apply (clarsimp simp: valid_cap'_def isCap_simps capAligned_def + objBits_simps shiftL_nat)+ + done + +lemma no_idle_thread_cap: + "\ cte_wp_at ((=) (cap.ThreadCap (idle_thread s))) sl s; valid_global_refs s \ \ False" + apply (cases sl) + apply (clarsimp simp: valid_global_refs_def valid_refs_def cte_wp_at_caps_of_state) + apply ((erule allE)+, erule (1) impE) + apply (clarsimp simp: cap_range_def) + done + +lemmas getCTE_no_0_obj'_helper + = getCTE_inv + hoare_strengthen_post[where Q="\_. no_0_obj'" and P=no_0_obj' and a="getCTE slot" for slot] + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunches invalidateTLBByASID + for nosch[wp]: "\s. P (ksSchedulerAction s)" + +crunch nosch[wp]: dissociateVCPUTCB, unmapPageTable "\s. P (ksSchedulerAction s)" + (wp: crunch_wps getVCPU_wp getObject_inv hoare_vcg_all_lift hoare_vcg_if_lift3 + simp: loadObject_default_def updateObject_default_def) + +crunch nosch[wp]: "Arch.finaliseCap" "\s. P (ksSchedulerAction s)" + (wp: crunch_wps getObject_inv simp: loadObject_default_def updateObject_default_def) + +crunch sch_act_simple[wp]: finaliseCap sch_act_simple + (simp: crunch_simps + rule: sch_act_simple_lift + wp: getObject_inv loadObject_default_inv crunch_wps) + +end + + +lemma interrupt_cap_null_or_ntfn: + "invs s + \ cte_wp_at (\cp. is_ntfn_cap cp \ cp = cap.NullCap) (interrupt_irq_node s irq, []) s" + apply (frule invs_valid_irq_node) + apply (clarsimp simp: valid_irq_node_def) + apply (drule_tac x=irq in spec) + apply (drule cte_at_0) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (drule caps_of_state_cteD) + apply (frule if_unsafe_then_capD, clarsimp+) + apply (clarsimp simp: ex_cte_cap_wp_to_def cte_wp_at_caps_of_state) + apply (frule cte_refs_obj_refs_elem, erule disjE) + apply (clarsimp | drule caps_of_state_cteD valid_global_refsD[rotated] + | rule irq_node_global_refs[where irq=irq])+ + apply (simp add: cap_range_def) + apply (clarsimp simp: appropriate_cte_cap_def + split: cap.split_asm) + done + +lemma (in delete_one) deletingIRQHandler_corres: + "corres dc (einvs) (invs') + (deleting_irq_handler irq) (deletingIRQHandler irq)" + apply (simp add: deleting_irq_handler_def deletingIRQHandler_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQSlot_corres]) + apply simp + apply (rule_tac P'="cte_at' (cte_map slot)" in corres_symb_exec_r_conj) + apply (rule_tac F="isNotificationCap rv \ rv = capability.NullCap" + and P="cte_wp_at (\cp. is_ntfn_cap cp \ cp = cap.NullCap) slot + and einvs" + and P'="invs' and cte_wp_at' (\cte. 
cteCap cte = rv) + (cte_map slot)" in corres_req) + apply (clarsimp simp: cte_wp_at_caps_of_state state_relation_def) + apply (drule caps_of_state_cteD) + apply (drule(1) pspace_relation_cte_wp_at, clarsimp+) + apply (auto simp: cte_wp_at_ctes_of is_cap_simps isCap_simps)[1] + apply simp + apply (rule corres_guard_imp, rule delete_one_corres[unfolded dc_def]) + apply (auto simp: cte_wp_at_caps_of_state is_cap_simps can_fast_finalise_def)[1] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp getCTE_wp' | simp add: getSlotCap_def)+ + apply (wp | simp add: get_irq_slot_def getIRQSlot_def + locateSlot_conv getInterruptState_def)+ + apply (clarsimp simp: ex_cte_cap_wp_to_def interrupt_cap_null_or_ntfn) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma sym_refs_vcpu_tcb: (* FIXME: move to AInvs *) + "\ vcpus_of s v = Some vcpu; vcpu_tcb vcpu = Some t; sym_refs (state_hyp_refs_of s) \ \ + \tcb. ko_at (TCB tcb) t s \ tcb_vcpu (tcb_arch tcb) = Some v" + apply (frule hyp_sym_refs_obj_atD[where p=v and P="(=) (ArchObj (VCPU vcpu))", rotated]) + apply (fastforce simp: obj_at_def in_omonad) + apply (clarsimp simp: obj_at_def hyp_refs_of_def) + apply (rename_tac ko) + apply (case_tac ko; simp add: tcb_vcpu_refs_def split: option.splits) + apply (rename_tac koa) + apply (case_tac koa; simp add: refs_of_ao_def vcpu_tcb_refs_def split: option.splits) + done + +lemma vcpuFinalise_corres[corres]: + "vcpu' = vcpu \ + corres dc (invs and vcpu_at vcpu) no_0_obj' (vcpu_finalise vcpu) (vcpuFinalise vcpu')" + apply (simp add: vcpuFinalise_def vcpu_finalise_def) + apply (corres corres: getObject_vcpu_corres + simp: vcpu_relation_def + wp: get_vcpu_wp getVCPU_wp + | corres_cases_both)+ + apply (fastforce simp: obj_at_def in_omonad dest: sym_refs_vcpu_tcb) + apply (fastforce elim: vcpu_at_cross) + done + +lemma return_NullCap_pair_corres[corres]: + "corres (\r r'. cap_relation (fst r) (fst r') \ cap_relation (snd r) (snd r')) + \ \ + (return (cap.NullCap, cap.NullCap)) (return (NullCap, NullCap))" + by (corres corres: corres_returnTT) + +lemma arch_finaliseCap_corres: + "\ final_matters' (ArchObjectCap cap') \ final = final'; acap_relation cap cap' \ + \ corres (\r r'. cap_relation (fst r) (fst r') \ cap_relation (snd r) (snd r')) + (\s. invs s \ valid_etcbs s + \ s \ cap.ArchObjectCap cap + \ (final_matters (cap.ArchObjectCap cap) + \ final = is_final_cap' (cap.ArchObjectCap cap) s) + \ cte_wp_at ((=) (cap.ArchObjectCap cap)) sl s) + (\s. 
invs' s \ s \' ArchObjectCap cap' \ + (final_matters' (ArchObjectCap cap') \ + final' = isFinal (ArchObjectCap cap') (cte_map sl) (cteCaps_of s))) + (arch_finalise_cap cap final) (Arch.finaliseCap cap' final')" + apply (simp add: arch_finalise_cap_def AARCH64_H.finaliseCap_def) + apply (corres_cases_both simp: final_matters'_def acap_relation_def mdata_map_def | + corres corres: deleteASIDPool_corres[@lift_corres_args] unmapPageTable_corres)+ + apply (clarsimp simp: valid_cap_def) + apply (rule conjI, clarsimp simp: wellformed_mapdata_def valid_unmap_def vmsz_aligned_def)+ + apply (fastforce dest: vspace_for_asid_not_normal_pt simp: wellformed_mapdata_def) + apply (clarsimp simp: cap_aligned_def cte_wp_at_caps_of_state) + apply fastforce + done + +lemma unbindNotification_corres: + "corres dc + (invs and tcb_at t) + invs' + (unbind_notification t) + (unbindNotification t)" + apply (simp add: unbind_notification_def unbindNotification_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getBoundNotification_corres]) + apply (rule corres_option_split) + apply simp + apply (rule corres_return_trivial) + apply (rule corres_split[OF getNotification_corres]) + apply clarsimp + apply (rule corres_split[OF setNotification_corres]) + apply (clarsimp simp: ntfn_relation_def split:Structures_A.ntfn.splits) + apply (rule setBoundNotification_corres) + apply (wp gbn_wp' gbn_wp)+ + apply (clarsimp elim!: obj_at_valid_objsE + dest!: bound_tcb_at_state_refs_ofD invs_valid_objs + simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def obj_at_def + valid_tcb_def valid_bound_ntfn_def invs_psp_aligned invs_distinct + split: option.splits) + apply (clarsimp dest!: obj_at_valid_objs' bound_tcb_at_state_refs_ofD' invs_valid_objs' + simp: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def tcb_ntfn_is_bound'_def + split: option.splits) + done + +lemma unbindMaybeNotification_corres: + "corres dc + (invs and ntfn_at ntfnptr) (invs' and ntfn_at' ntfnptr) + (unbind_maybe_notification ntfnptr) + (unbindMaybeNotification ntfnptr)" + apply (simp add: unbind_maybe_notification_def unbindMaybeNotification_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getNotification_corres]) + apply (rule corres_option_split) + apply (clarsimp simp: ntfn_relation_def split: Structures_A.ntfn.splits) + apply (rule corres_return_trivial) + apply simp + apply (rule corres_split[OF setNotification_corres]) + apply (clarsimp simp: ntfn_relation_def split: Structures_A.ntfn.splits) + apply (rule setBoundNotification_corres) + apply (wp get_simple_ko_wp getNotification_wp)+ + apply (clarsimp elim!: obj_at_valid_objsE + dest!: bound_tcb_at_state_refs_ofD invs_valid_objs + simp: valid_obj_def is_tcb tcb_ntfn_is_bound_def invs_psp_aligned invs_distinct + valid_tcb_def valid_bound_ntfn_def valid_ntfn_def + split: option.splits) + apply (clarsimp dest!: obj_at_valid_objs' bound_tcb_at_state_refs_ofD' invs_valid_objs' + simp: valid_obj'_def valid_tcb'_def valid_bound_ntfn'_def + tcb_ntfn_is_bound'_def valid_ntfn'_def + split: option.splits) + done + +lemma fast_finaliseCap_corres: + "\ final_matters' cap' \ final = final'; cap_relation cap cap'; + can_fast_finalise cap \ + \ corres dc + (\s. invs s \ valid_sched s \ s \ cap + \ cte_wp_at ((=) cap) sl s) + (\s. 
invs' s \ s \' cap') + (fast_finalise cap final) + (do + p \ finaliseCap cap' final' True; + assert (capRemovable (fst p) (cte_map ptr) \ snd p = NullCap) + od)" + apply (cases cap, simp_all add: finaliseCap_def isCap_simps + corres_liftM2_simp[unfolded liftM_def] + o_def dc_def[symmetric] when_def + can_fast_finalise_def capRemovable_def + split del: if_split cong: if_cong) + apply (clarsimp simp: final_matters'_def) + apply (rule corres_guard_imp) + apply (rule corres_rel_imp) + apply (rule ep_cancel_corres) + apply simp + apply (simp add: valid_cap_def) + apply (simp add: valid_cap'_def) + apply (clarsimp simp: final_matters'_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF unbindMaybeNotification_corres]) + apply (rule cancelAllSignals_corres) + apply (wp abs_typ_at_lifts unbind_maybe_notification_invs typ_at_lifts hoare_drop_imps getNotification_wp + | wpc)+ + apply (clarsimp simp: valid_cap_def) + apply (clarsimp simp: valid_cap'_def valid_obj'_def + dest!: invs_valid_objs' obj_at_valid_objs' ) + done + +lemma cap_delete_one_corres: + "corres dc (einvs and cte_wp_at can_fast_finalise ptr) + (invs' and cte_at' (cte_map ptr)) + (cap_delete_one ptr) (cteDeleteOne (cte_map ptr))" + apply (simp add: cap_delete_one_def cteDeleteOne_def' + unless_def when_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac F="can_fast_finalise cap" in corres_gen_asm) + apply (rule corres_if) + apply fastforce + apply (rule corres_split[OF isFinalCapability_corres[where ptr=ptr]]) + apply (simp add: split_def bind_assoc [THEN sym]) + apply (rule corres_split[OF fast_finaliseCap_corres[where sl=ptr]]) + apply simp+ + apply (rule emptySlot_corres, simp) + apply (wp hoare_drop_imps)+ + apply (wp isFinalCapability_inv | wp (once) isFinal[where x="cte_map ptr"])+ + apply (rule corres_trivial, simp) + apply (wp get_cap_wp getCTE_wp)+ + apply (clarsimp simp: cte_wp_at_caps_of_state can_fast_finalise_Null + elim!: caps_of_state_valid_cap) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply fastforce + done + +context +notes option.case_cong_weak[cong] +begin +crunches ThreadDecls_H.suspend, unbindNotification + for no_0_obj'[wp]: no_0_obj' + (simp: crunch_simps wp: crunch_wps getCTE_no_0_obj'_helper) +end + +end +(* FIXME: strengthen locale instead *) + +global_interpretation delete_one + apply unfold_locales + apply (rule corres_guard_imp) + apply (rule cap_delete_one_corres) + apply auto + done + +lemma finaliseCap_corres: + "\ final_matters' cap' \ final = final'; cap_relation cap cap'; + flag \ can_fast_finalise cap \ + \ corres (\x y. cap_relation (fst x) (fst y) \ cap_relation (snd x) (snd y)) + (\s. einvs s \ s \ cap \ (final_matters cap \ final = is_final_cap' cap s) + \ cte_wp_at ((=) cap) sl s) + (\s. 
invs' s \ s \' cap' \ sch_act_simple s \ + (final_matters' cap' \ + final' = isFinal cap' (cte_map sl) (cteCaps_of s))) + (finalise_cap cap final) (finaliseCap cap' final' flag)" + supply invs_no_0_obj'[simp] + apply (cases cap, simp_all add: finaliseCap_def isCap_simps + corres_liftM2_simp[unfolded liftM_def] + o_def dc_def[symmetric] when_def + can_fast_finalise_def + split del: if_split cong: if_cong) + apply (clarsimp simp: final_matters'_def) + apply (rule corres_guard_imp) + apply (rule ep_cancel_corres) + apply (simp add: valid_cap_def) + apply (simp add: valid_cap'_def) + apply (clarsimp simp add: final_matters'_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF unbindMaybeNotification_corres]) + apply (rule cancelAllSignals_corres) + apply (wp abs_typ_at_lifts unbind_maybe_notification_invs typ_at_lifts hoare_drop_imps hoare_vcg_all_lift | wpc)+ + apply (clarsimp simp: valid_cap_def) + apply (clarsimp simp: valid_cap'_def) + apply (fastforce simp: final_matters'_def shiftL_nat zbits_map_def) + apply (clarsimp simp add: final_matters'_def getThreadCSpaceRoot + liftM_def[symmetric] o_def zbits_map_def + dc_def[symmetric]) + apply (rename_tac t) + apply (rule_tac P="\s. t \ idle_thread s" and P'="\s. t \ ksIdleThread s" in corres_add_guard) + apply clarsimp + apply (rule context_conjI) + apply (clarsimp dest!: no_idle_thread_cap) + apply (clarsimp simp: state_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF unbindNotification_corres]) + apply (rule corres_split[OF suspend_corres]) + apply (clarsimp simp: liftM_def[symmetric] o_def dc_def[symmetric] zbits_map_def) + apply (rule prepareThreadDelete_corres, simp) + apply (wp unbind_notification_invs unbind_notification_simple_sched_action + delete_one_conc_fr.suspend_objs')+ + apply (clarsimp simp add: valid_cap_def) + apply (clarsimp simp add: valid_cap'_def) + apply (simp add: final_matters'_def liftM_def[symmetric] + o_def dc_def[symmetric]) + apply (intro impI, rule corres_guard_imp) + apply (rule deletingIRQHandler_corres) + apply simp + apply simp + apply (clarsimp simp: final_matters'_def) + apply (rule_tac F="False" in corres_req) + apply clarsimp + apply (frule zombies_finalD, (clarsimp simp: is_cap_simps)+) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply simp + apply (clarsimp split del: if_split simp: o_def) + apply (rule corres_guard_imp [OF arch_finaliseCap_corres], (fastforce simp: valid_sched_def)+) + done + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma threadSet_ct_idle_or_in_cur_domain': + "\ct_idle_or_in_cur_domain' and (\s. \tcb. tcbDomain tcb = ksCurDomain s \ tcbDomain (F tcb) = ksCurDomain s)\ + threadSet F t + \\_. ct_idle_or_in_cur_domain'\" + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (wp hoare_vcg_disj_lift hoare_vcg_imp_lift) + apply wps + apply wp + apply wps + apply wp + apply (auto simp: obj_at'_def) + done + +lemma cte_wp_at_norm_eq': + "cte_wp_at' P p s = (\cte. cte_wp_at' ((=) cte) p s \ P cte)" + by (simp add: cte_wp_at_ctes_of) + +lemma isFinal_cte_wp_def: + "isFinal cap p (cteCaps_of s) = + (\isUntypedCap cap \ + (\p'. p \ p' \ + cte_at' p' s \ + cte_wp_at' (\cte'. \ isUntypedCap (cteCap cte') \ + \ sameObjectAs cap (cteCap cte')) p' s))" + apply (simp add: isFinal_def cte_wp_at_ctes_of cteCaps_of_def) + apply (rule iffI) + apply clarsimp + apply (case_tac cte) + apply fastforce + apply fastforce + done + +lemma valid_cte_at_neg_typ': + assumes T: "\P T p. \\s. P (typ_at' T p s)\ f \\_ s. 
P (typ_at' T p s)\" + shows "\\s. \ cte_at' p' s\ f \\rv s. \ cte_at' p' s\" + apply (simp add: cte_at_typ') + apply (rule hoare_vcg_conj_lift [OF T]) + apply (simp only: imp_conv_disj) + apply (rule hoare_vcg_all_lift) + apply (rule hoare_vcg_disj_lift [OF T]) + apply (rule hoare_vcg_prop) + done + +lemma isFinal_lift: + assumes x: "\P p. \cte_wp_at' P p\ f \\_. cte_wp_at' P p\" + assumes y: "\P T p. \\s. P (typ_at' T p s)\ f \\_ s. P (typ_at' T p s)\" + shows "\\s. cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s\ + f + \\r s. cte_wp_at' (\cte. isFinal (cteCap cte) sl (cteCaps_of s)) sl s\" + apply (subst cte_wp_at_norm_eq') + apply (subst cte_wp_at_norm_eq' [where P="\cte. isFinal (cteCap cte) sl m" for sl m]) + apply (simp only: isFinal_cte_wp_def imp_conv_disj de_Morgan_conj) + apply (wp hoare_vcg_ex_lift hoare_vcg_all_lift x hoare_vcg_disj_lift + valid_cte_at_neg_typ' [OF y]) + done + +lemmas final_matters'_simps = final_matters'_def [split_simps capability.split arch_capability.split] + +definition set_thread_all :: "obj_ref \ Structures_A.tcb \ etcb + \ unit det_ext_monad" where + "set_thread_all ptr tcb etcb \ + do s \ get; + kh \ return $ (kheap s)(ptr \ (TCB tcb)); + ekh \ return $ (ekheap s)(ptr \ etcb); + put (s\kheap := kh, ekheap := ekh\) + od" + +definition thread_gets_the_all :: "obj_ref \ (Structures_A.tcb \ etcb) det_ext_monad" where + "thread_gets_the_all tptr \ + do tcb \ gets_the $ get_tcb tptr; + etcb \ gets_the $ get_etcb tptr; + return $ (tcb, etcb) od" + +definition thread_set_all :: "(Structures_A.tcb \ Structures_A.tcb) \ (etcb \ etcb) + \ obj_ref \ unit det_ext_monad" where + "thread_set_all f g tptr \ + do (tcb, etcb) \ thread_gets_the_all tptr; + set_thread_all tptr (f tcb) (g etcb) + od" + +lemma set_thread_all_corres: + fixes ob' :: "'a :: pspace_storable" + assumes x: "updateObject ob' = updateObject_default ob'" + assumes z: "\s. obj_at' P ptr s + \ map_to_ctes ((ksPSpace s) (ptr \ injectKO ob')) = map_to_ctes (ksPSpace s)" + assumes b: "\ko. P ko \ objBits ko = objBits ob'" + assumes P: "\(v::'a::pspace_storable). 
(1 :: machine_word) < 2 ^ (objBits v)" + assumes e: "etcb_relation etcb tcb'" + assumes is_t: "injectKO (ob' :: 'a :: pspace_storable) = KOTCB tcb'" + shows "other_obj_relation (TCB tcb) (injectKO (ob' :: 'a :: pspace_storable)) \ + corres dc (obj_at (same_caps (TCB tcb)) ptr and is_etcb_at ptr) + (obj_at' (P :: 'a \ bool) ptr) + (set_thread_all ptr tcb etcb) (setObject ptr ob')" + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (rule x) + apply (clarsimp simp: b elim!: obj_at'_weakenE) + apply (unfold set_thread_all_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def x + updateObject_default_def in_magnitude_check [OF _ P]) + apply (clarsimp simp add: state_relation_def z) + apply (simp flip: trans_state_update) + apply (clarsimp simp add: swp_def fun_upd_def obj_at_def is_etcb_at_def) + apply (subst cte_wp_at_after_update,fastforce simp add: obj_at_def) + apply (subst caps_of_state_after_update,fastforce simp add: obj_at_def) + apply clarsimp + apply (subst conj_assoc[symmetric]) + apply (rule conjI[rotated]) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x=ptr in allE)+ + apply (clarsimp simp: obj_at_def + split: Structures_A.kernel_object.splits if_split_asm) + + apply (fold fun_upd_def) + apply (simp only: pspace_relation_def dom_fun_upd2 simp_thms) + apply (subst pspace_dom_update) + apply assumption + apply simp + apply (simp only: dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply (rule conjI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: is_other_obj_relation_type) + apply (drule(1) bspec) + apply clarsimp + apply (frule_tac ko'="TCB tcb'" and x'=ptr in obj_relation_cut_same_type, + (fastforce simp add: is_other_obj_relation_type)+)[1] + apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: obj_at'_def) + apply (insert e is_t) + by (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type + split: Structures_A.kernel_object.splits kernel_object.splits arch_kernel_obj.splits) + +lemma tcb_update_all_corres': + assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" + assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" + assumes r: "r () ()" + assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" + shows "corres r (ko_at (TCB tcb) add and (\s. 
ekheap s add = Some etcb)) + (ko_at' tcb' add) + (set_thread_all add tcbu etcbu) (setObject add tcbu')" + apply (rule_tac F="tcb_relation tcb tcb' \ etcb_relation etcbu tcbu'" in corres_req) + apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) + apply (frule(1) pspace_relation_absD) + apply (force simp: other_obj_relation_def ekheap_relation_def e) + apply (erule conjE) + apply (rule corres_guard_imp) + apply (rule corres_rel_imp) + apply (rule set_thread_all_corres[where P="(=) tcb'"]) + apply (rule ext)+ + apply simp + defer + apply (simp add: is_other_obj_relation_type_def + objBits_simps' other_obj_relation_def tcbs r)+ + apply (fastforce simp: is_etcb_at_def elim!: obj_at_weakenE dest: bspec[OF tables]) + apply (subst(asm) eq_commute, assumption) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (subst map_to_ctes_upd_tcb, assumption+) + apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) + apply (subst if_not_P) + apply (fastforce dest: bspec [OF tables', OF ranI]) + apply simp + done + +lemma thread_gets_the_all_corres: + shows "corres (\(tcb, etcb) tcb'. tcb_relation tcb tcb' \ etcb_relation etcb tcb') + (tcb_at t and is_etcb_at t) (tcb_at' t) + (thread_gets_the_all t) (getObject t)" + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp: gets_def get_def return_def bind_def get_tcb_def thread_gets_the_all_def + threadGet_def ethread_get_def gets_the_def assert_opt_def get_etcb_def + is_etcb_at_def tcb_at_def liftM_def + split: option.splits Structures_A.kernel_object.splits) + apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) + apply (clarsimp simp add: obj_at_def is_tcb obj_at'_def projectKO_def + projectKO_opt_tcb split_def + getObject_def loadObject_default_def in_monad) + apply (case_tac ko) + apply (simp_all add: fail_def return_def) + apply (clarsimp simp add: state_relation_def pspace_relation_def ekheap_relation_def) + apply (drule bspec) + apply clarsimp + apply blast + apply (drule bspec, erule domI) + apply (clarsimp simp add: other_obj_relation_def + lookupAround2_known1) + done + +lemma thread_set_all_corresT: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation (f tcb) (f' tcb')" + assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (f' tcb) = getF tcb" + assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ + etcb_relation (g etcb) (f' tcb')" + shows "corres dc (tcb_at t and valid_etcbs) + (tcb_at' t) + (thread_set_all f g t) (threadSet f' t)" + apply (simp add: thread_set_all_def threadSet_def bind_assoc) + apply (rule corres_guard_imp) + apply (rule corres_split[OF thread_gets_the_all_corres]) + apply (simp add: split_def) + apply (rule tcb_update_all_corres') + apply (erule x) + apply (rule y) + apply (clarsimp simp: bspec_split [OF spec [OF z]]) + apply fastforce + apply (erule e) + apply (simp add: thread_gets_the_all_def, wp+) + apply clarsimp + apply (frule(1) tcb_at_is_etcb_at) + apply (clarsimp simp add: tcb_at_def get_etcb_def obj_at_def) + apply (drule get_tcb_SomeD) + apply fastforce + apply simp + done + +lemmas thread_set_all_corres = + thread_set_all_corresT [OF _ _ all_tcbI, OF _ ball_tcb_cap_casesI ball_tcb_cte_casesI] + +crunch idle_thread[wp]: deleteCallerCap "\s. P (ksIdleThread s)" + (wp: crunch_wps) +crunch sch_act_simple: deleteCallerCap sch_act_simple + (wp: crunch_wps) +crunch sch_act_not[wp]: deleteCallerCap "sch_act_not t" + (wp: crunch_wps) +crunch typ_at'[wp]: deleteCallerCap "\s. 
P (typ_at' T p s)" + (wp: crunch_wps) +lemmas deleteCallerCap_typ_ats[wp] = typ_at_lifts [OF deleteCallerCap_typ_at'] + +lemma setEndpoint_sch_act_not_ct[wp]: + "\\s. sch_act_not (ksCurThread s) s\ + setEndpoint ptr val \\_ s. sch_act_not (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps setEndpoint_ct', wp, simp) + +lemma cancelAll_ct_not_ksQ_helper: + "\(\s. ksCurThread s \ set (ksReadyQueues s p)) and (\s. ksCurThread s \ set q) \ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" + apply (rule mapM_x_inv_wp2, simp) + apply (wp) + apply (wps tcbSchedEnqueue_ct') + apply (wp tcbSchedEnqueue_ksQ) + apply (wps setThreadState_ct') + apply (wp sts_ksQ') + apply (clarsimp) + done + +lemma cancelAllIPC_ct_not_ksQ: + "\invs' and ct_in_state' simple' and sch_act_sane + and (\s. ksCurThread s \ set (ksReadyQueues s p))\ + cancelAllIPC epptr + \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" + (is "\?PRE\ _ \\_. ?POST\") + apply (simp add: cancelAllIPC_def) + apply (wp, wpc, wp) + apply (wps rescheduleRequired_ct') + apply (wp rescheduleRequired_ksQ') + apply (clarsimp) + apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) + apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ + apply (wps rescheduleRequired_ct') + apply (wp rescheduleRequired_ksQ') + apply (clarsimp) + apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) + apply (wp hoare_lift_Pf2 [OF setEndpoint_ksQ setEndpoint_ct'])+ + prefer 2 + apply assumption + apply (rule_tac Q="\ep. ?PRE and ko_at' ep epptr" in hoare_post_imp) + apply (clarsimp) + apply (rule conjI) + apply ((clarsimp simp: invs'_def valid_state'_def + sch_act_sane_def + | drule(1) ct_not_in_epQueue)+)[2] + apply (wp get_ep_sp') + done + +lemma cancelAllSignals_ct_not_ksQ: + "\invs' and ct_in_state' simple' and sch_act_sane + and (\s. ksCurThread s \ set (ksReadyQueues s p))\ + cancelAllSignals ntfnptr + \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" + (is "\?PRE\ _ \\_. ?POST\") + apply (simp add: cancelAllSignals_def) + apply (wp, wpc, wp+) + apply (wps rescheduleRequired_ct') + apply (wp rescheduleRequired_ksQ') + apply clarsimp + apply (wp cancelAll_ct_not_ksQ_helper mapM_x_wp_inv) + apply (wp hoare_lift_Pf2 [OF setNotification_ksQ setNotification_ksCurThread]) + apply (wps setNotification_ksCurThread, wp) + prefer 2 + apply assumption + apply (rule_tac Q="\ep. ?PRE and ko_at' ep ntfnptr" in hoare_post_imp) + apply ((clarsimp simp: invs'_def valid_state'_def sch_act_sane_def + | drule(1) ct_not_in_ntfnQueue)+)[1] + apply (wp get_ntfn_sp') + done + +lemma unbindMaybeNotification_ct_not_ksQ: + "\invs' and ct_in_state' simple' and sch_act_sane + and (\s. ksCurThread s \ set (ksReadyQueues s p))\ + unbindMaybeNotification t + \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" + apply (simp add: unbindMaybeNotification_def) + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (case_tac "ntfnBoundTCB ntfn", simp, wp, simp+) + apply (rule hoare_pre) + apply wp + apply (wps setBoundNotification_ct') + apply (wp sbn_ksQ) + apply (wps setNotification_ksCurThread, wp) + apply clarsimp + done + +lemma sbn_ct_in_state'[wp]: + "\ct_in_state' P\ setBoundNotification ntfn t \\_. ct_in_state' P\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_pre) + apply (wps setBoundNotification_ct') + apply (wp sbn_st_tcb', clarsimp) + done + +lemma set_ntfn_ct_in_state'[wp]: + "\ct_in_state' P\ setNotification a ntfn \\_. 
ct_in_state' P\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_pre) + apply (wps setNotification_ksCurThread, wp, clarsimp) + done + +lemma unbindMaybeNotification_ct_in_state'[wp]: + "\ct_in_state' P\ unbindMaybeNotification t \\_. ct_in_state' P\" + apply (simp add: unbindMaybeNotification_def) + apply (wp | wpc | simp)+ + done + +lemma setNotification_sch_act_sane: + "\sch_act_sane\ setNotification a ntfn \\_. sch_act_sane\" + by (wp sch_act_sane_lift) + + +lemma unbindMaybeNotification_sch_act_sane[wp]: + "\sch_act_sane\ unbindMaybeNotification t \\_. sch_act_sane\" + apply (simp add: unbindMaybeNotification_def) + apply (wp setNotification_sch_act_sane sbn_sch_act_sane | wpc | clarsimp)+ + done + +lemma finaliseCapTrue_standin_ct_not_ksQ: + "\invs' and ct_in_state' simple' and sch_act_sane + and (\s. ksCurThread s \ set (ksReadyQueues s p))\ + finaliseCapTrue_standin cap final + \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" + apply (simp add: finaliseCapTrue_standin_def Let_def) + apply (safe) + apply (wp cancelAllIPC_ct_not_ksQ cancelAllSignals_ct_not_ksQ + hoare_drop_imps unbindMaybeNotification_ct_not_ksQ + | wpc + | clarsimp simp: isNotificationCap_def isReplyCap_def split:capability.splits)+ + done + +lemma cteDeleteOne_ct_not_ksQ: + "\invs' and ct_in_state' simple' and sch_act_sane + and (\s. ksCurThread s \ set (ksReadyQueues s p))\ + cteDeleteOne slot + \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" + apply (simp add: cteDeleteOne_def unless_def split_def) + apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (case_tac "\final. finaliseCap (cteCap cte) final True = fail") + apply (simp add: finaliseCapTrue_standin_simple_def) + apply wp + apply (clarsimp) + apply (wp emptySlot_cteCaps_of hoare_lift_Pf2 [OF emptySlot_ksRQ emptySlot_ct]) + apply (simp add: cteCaps_of_def) + apply (wp (once) hoare_drop_imps) + apply (wp finaliseCapTrue_standin_ct_not_ksQ isFinalCapability_inv)+ + apply (clarsimp) + done + +end + +end diff --git a/proof/refine/AARCH64/IncKernelInit.thy b/proof/refine/AARCH64/IncKernelInit.thy new file mode 100644 index 0000000000..93c1390f7a --- /dev/null +++ b/proof/refine/AARCH64/IncKernelInit.thy @@ -0,0 +1,13 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory IncKernelInit +imports ADT_H Tcb_R Arch_R +begin + +(* Dummy include file for kernel init *) + +end diff --git a/proof/refine/AARCH64/InitLemmas.thy b/proof/refine/AARCH64/InitLemmas.thy new file mode 100644 index 0000000000..d469761d28 --- /dev/null +++ b/proof/refine/AARCH64/InitLemmas.thy @@ -0,0 +1,28 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* General lemmas removed from KernelInit *) + +theory InitLemmas +imports IncKernelInit +begin + +declare headM_tailM_Cons[simp] + +declare cart_singletons[simp] + +declare less_1_simp[simp] + +declare is_aligned_no_overflow[simp] + +declare unless_True[simp] + +declare maybe_fail_bind_fail[simp] + +crunch cte_wp_at'[wp]: setPriority "cte_wp_at' P p" (simp: crunch_simps) +crunch irq_node'[wp]: setPriority "\s. 
P (irq_node' s)" (simp: crunch_simps) + +end diff --git a/proof/refine/AARCH64/Init_R.thy b/proof/refine/AARCH64/Init_R.thy new file mode 100644 index 0000000000..9b15030e68 --- /dev/null +++ b/proof/refine/AARCH64/Init_R.thy @@ -0,0 +1,134 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2021, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Init_R +imports + KHeap_R + +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* + This provides a very simple witness that the state relation used in the first refinement proof is + non-trivial, by exhibiting a pair of related states. This helps guard against silly mistakes in + the state relation, since we currently assume that the system starts in a state satisfying + invariants and state relations. + + Note that the states we exhibit are not intended to be useful states. They are just the simplest + possible states that prove non-triviality of the state relation. In particular, these states do + not satisfy the respective invariant conditions. In future, this could be improved by exhibiting + a tuple of more realistic states that are related across all levels of the refinement, and that + also satisfy respective invariant. Ultimately, we would like to prove functional correctness of + kernel initialisation. That would allow us to start from a minimal but real configuration that + would allow us to make a much smaller set of assumptions about the initial configuration of the + system. +*) + +definition zeroed_arch_abstract_state :: + arch_state + where + "zeroed_arch_abstract_state \ \ + arm_asid_table = Map.empty, + arm_kernel_vspace = K ArmVSpaceUserRegion, + arm_vmid_table = Map.empty, + arm_next_vmid = 0, + arm_us_global_vspace = 0, + arm_current_vcpu = None, + arm_gicvcpu_numlistregs = 0 + \" + +definition zeroed_main_abstract_state :: + abstract_state + where + "zeroed_main_abstract_state \ \ + kheap = Map.empty, + cdt = Map.empty, + is_original_cap = \, + cur_thread = 0, + idle_thread = 0, + machine_state = init_machine_state, + interrupt_irq_node = (\irq. ucast irq << cte_level_bits), + interrupt_states = (K irq_state.IRQInactive), + arch_state = zeroed_arch_abstract_state + \" + +definition zeroed_extended_state :: + det_ext + where + "zeroed_extended_state \ \ + work_units_completed_internal = 0, + scheduler_action_internal = resume_cur_thread, + ekheap_internal = Map.empty, + domain_list_internal = [], + domain_index_internal = 0, + cur_domain_internal = 0, + domain_time_internal = 0, + ready_queues_internal = (\_ _. 
[]), + cdt_list_internal = K [] + \" + +definition zeroed_abstract_state :: + det_state + where + "zeroed_abstract_state \ abstract_state.extend zeroed_main_abstract_state + (state.fields zeroed_extended_state)" + +definition zeroed_arch_intermediate_state :: + Arch.kernel_state + where + "zeroed_arch_intermediate_state \ + ARMKernelState Map.empty (K ArmVSpaceUserRegion) + Map.empty 0 0 None 0 Map.empty" + +definition zeroed_intermediate_state :: + global.kernel_state + where + "zeroed_intermediate_state \ \ + ksPSpace = Map.empty, + gsUserPages = Map.empty, + gsCNodes = Map.empty, + gsUntypedZeroRanges = {}, + gsMaxObjectSize = 0, + ksDomScheduleIdx = 0, + ksDomSchedule = [], + ksCurDomain = 0, + ksDomainTime = 0, + ksReadyQueues = K [], + ksReadyQueuesL1Bitmap = K 0, + ksReadyQueuesL2Bitmap = K 0, + ksCurThread = 0, + ksIdleThread = 0, + ksSchedulerAction = ResumeCurrentThread, + ksInterruptState = (InterruptState 0 (K IRQInactive)), + ksWorkUnitsCompleted = 0, + ksArchState = zeroed_arch_intermediate_state, + ksMachineState = init_machine_state + \" + +lemmas zeroed_state_defs = zeroed_main_abstract_state_def zeroed_abstract_state_def + zeroed_arch_abstract_state_def zeroed_extended_state_def + zeroed_intermediate_state_def abstract_state.defs + zeroed_arch_intermediate_state_def + +lemma non_empty_refine_state_relation: + "(zeroed_abstract_state, zeroed_intermediate_state) \ state_relation" + apply (clarsimp simp: state_relation_def zeroed_state_defs state.defs) + apply (intro conjI) + apply (clarsimp simp: pspace_relation_def pspace_dom_def) + apply (clarsimp simp: ekheap_relation_def) + apply (clarsimp simp: ready_queues_relation_def) + apply (clarsimp simp: ghost_relation_def) + apply (fastforce simp: cdt_relation_def swp_def dest: cte_wp_at_domI) + apply (clarsimp simp: cdt_list_relation_def map_to_ctes_def) + apply (clarsimp simp: revokable_relation_def map_to_ctes_def) + apply (clarsimp simp: zeroed_state_defs arch_state_relation_def) + apply (clarsimp simp: interrupt_state_relation_def irq_state_relation_def cte_level_bits_def) + done + +end +end diff --git a/proof/refine/AARCH64/InterruptAcc_R.thy b/proof/refine/AARCH64/InterruptAcc_R.thy new file mode 100644 index 0000000000..3f046d309e --- /dev/null +++ b/proof/refine/AARCH64/InterruptAcc_R.thy @@ -0,0 +1,171 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory InterruptAcc_R +imports TcbAcc_R +begin + +lemma getIRQSlot_corres: + "corres (\sl sl'. sl' = cte_map sl) \ \ (get_irq_slot irq) (getIRQSlot irq)" + apply (simp add: getIRQSlot_def get_irq_slot_def locateSlot_conv + liftM_def[symmetric]) + apply (simp add: getInterruptState_def) + apply (clarsimp simp: state_relation_def interrupt_state_relation_def) + apply (simp add: cte_map_def cte_level_bits_def + ucast_nat_def shiftl_t2n) + done + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma setIRQState_corres: + "irq_state_relation state state' \ + corres dc \ \ (set_irq_state state irq) (setIRQState state' irq)" + apply (simp add: set_irq_state_def setIRQState_def + bind_assoc[symmetric]) + apply (subgoal_tac "(state = irq_state.IRQInactive) = (state' = irqstate.IRQInactive)") + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (simp add: getInterruptState_def setInterruptState_def + simpler_gets_def simpler_modify_def bind_def) + apply (simp add: simpler_modify_def[symmetric]) + apply (rule corres_trivial, rule corres_modify) + apply (simp add: state_relation_def swp_def) + apply (clarsimp simp: interrupt_state_relation_def) + apply (rule corres_machine_op) + apply (rule corres_Id | simp)+ + apply wpsimp+ + apply (clarsimp simp: irq_state_relation_def + split: irq_state.split_asm irqstate.split_asm) + done + +lemma setIRQState_invs[wp]: + "\\s. invs' s \ (state \ IRQSignal \ IRQHandlerCap irq \ ran (cteCaps_of s)) \ + (state \ IRQInactive \ irq \ maxIRQ)\ + setIRQState state irq + \\rv. invs'\" + apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: invs'_def valid_state'_def cur_tcb'_def + Invariants_H.valid_queues_def valid_queues'_def + valid_idle'_def valid_irq_node'_def + valid_arch_state'_def valid_global_refs'_def + global_refs'_def valid_machine_state'_def + if_unsafe_then_cap'_def ex_cte_cap_to'_def + valid_irq_handlers'_def irq_issued'_def + cteCaps_of_def valid_irq_masks'_def + bitmapQ_defs valid_queues_no_bitmap_def split: option.splits) + apply (rule conjI, clarsimp) + apply (clarsimp simp: irqs_masked'_def ct_not_inQ_def) + apply (rule conjI, fastforce) + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (rule conjI, clarsimp) + apply (clarsimp simp: irqs_masked'_def ct_not_inQ_def) + apply (rule conjI) + apply fastforce + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + done + +lemma getIRQSlot_real_cte[wp]: + "\invs'\ getIRQSlot irq \real_cte_at'\" + apply (simp add: getIRQSlot_def getInterruptState_def locateSlot_conv) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def valid_irq_node'_def + cte_level_bits_def ucast_nat_def cteSizeBits_def shiftl_t2n) + done + +lemma getIRQSlot_cte_at[wp]: + "\invs'\ getIRQSlot irq \cte_at'\" + apply (rule hoare_strengthen_post [OF getIRQSlot_real_cte]) + apply (clarsimp simp: real_cte_at') + done + +lemma work_units_updated_state_relationI[intro!]: + "(s,s') \ state_relation \ + (work_units_completed_update (\_. 
work_units_completed s + 1) s, s'\ksWorkUnitsCompleted := ksWorkUnitsCompleted s' + 1\) \ state_relation" + apply (simp add: state_relation_def) + done + +lemma work_units_and_irq_state_state_relationI [intro!]: + "(s, s') \ state_relation \ + (s \ work_units_completed := n, machine_state := machine_state s \ irq_state := f (irq_state (machine_state s)) \\, + s' \ ksWorkUnitsCompleted := n, ksMachineState := ksMachineState s' \ irq_state := f (irq_state (ksMachineState s')) \\) + \ state_relation" + by (simp add: state_relation_def swp_def) + +lemma preemptionPoint_corres: + "corres (dc \ dc) \ \ preemption_point preemptionPoint" + apply (simp add: preemption_point_def preemptionPoint_def) + by (auto simp: preemption_point_def preemptionPoint_def o_def gets_def liftE_def whenE_def getActiveIRQ_def + corres_underlying_def select_def bind_def get_def bindE_def select_f_def modify_def + alternative_def throwError_def returnOk_def return_def lift_def doMachineOp_def split_def + put_def getWorkUnits_def setWorkUnits_def modifyWorkUnits_def do_machine_op_def + update_work_units_def wrap_ext_bool_det_ext_ext_def work_units_limit_def workUnitsLimit_def + work_units_limit_reached_def OR_choiceE_def reset_work_units_def mk_ef_def + elim: state_relationE) + (* what? *) + (* who says our proofs are not automatic.. *) + +lemma preemptionPoint_inv: + assumes "(\f s. P (ksWorkUnitsCompleted_update f s) = P s)" + "irq_state_independent_H P" + shows "\P\ preemptionPoint \\_. P\" using assms + apply (simp add: preemptionPoint_def setWorkUnits_def getWorkUnits_def modifyWorkUnits_def) + apply (wpc + | wp whenE_wp hoare_seq_ext [OF _ select_inv] hoare_drop_imps + | simp)+ + done + +lemma ct_running_irq_state_independent[intro!, simp]: + "ct_running (s \machine_state := machine_state s \irq_state := f (irq_state (machine_state s)) \ \) + = ct_running s" + by (simp add: ct_in_state_def) + +lemma ct_idle_irq_state_independent[intro!, simp]: + "ct_idle (s \machine_state := machine_state s \irq_state := f (irq_state (machine_state s)) \ \) + = ct_idle s" + by (simp add: ct_in_state_def) + +lemma typ_at'_irq_state_independent[simp, intro!]: + "P (typ_at' T p (s \ksMachineState := ksMachineState s \ irq_state := f (irq_state (ksMachineState s)) \\)) + = P (typ_at' T p s)" + by (simp add: typ_at'_def) + +lemma sch_act_simple_irq_state_independent[intro!, simp]: + "sch_act_simple (s \ ksMachineState := ksMachineState s \ irq_state := f (irq_state (ksMachineState s)) \ \) = + sch_act_simple s" + by (simp add: sch_act_simple_def) + +lemma invs'_irq_state_independent [simp, intro!]: + "invs' (s\ksMachineState := ksMachineState s + \irq_state := f (irq_state (ksMachineState s))\\) = + invs' s" + apply (clarsimp simp: irq_state_independent_H_def invs'_def valid_state'_def + valid_pspace'_def sch_act_wf_def + valid_queues_def sym_refs_def state_refs_of'_def + if_live_then_nonz_cap'_def if_unsafe_then_cap'_def + valid_idle'_def valid_global_refs'_def + valid_arch_state'_def valid_irq_node'_def + valid_irq_handlers'_def valid_irq_states'_def + irqs_masked'_def bitmapQ_defs valid_queues_no_bitmap_def + valid_queues'_def + pspace_domain_valid_def cur_tcb'_def + valid_machine_state'_def tcb_in_cur_domain'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def + cong: if_cong option.case_cong) + apply (rule iffI[rotated]) + apply (clarsimp) + apply (case_tac "ksSchedulerAction s", simp_all) + apply clarsimp + apply (case_tac "ksSchedulerAction s", simp_all) + done + +lemma preemptionPoint_invs [wp]: + "\invs'\ preemptionPoint \\_. 
invs'\" + by (wp preemptionPoint_inv | clarsimp)+ + +end +end diff --git a/proof/refine/AARCH64/Interrupt_R.thy b/proof/refine/AARCH64/Interrupt_R.thy new file mode 100644 index 0000000000..c93c7b9f24 --- /dev/null +++ b/proof/refine/AARCH64/Interrupt_R.thy @@ -0,0 +1,1178 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Refinement for interrupt controller operations +*) + +theory Interrupt_R +imports Ipc_R Invocations_R +begin + +context Arch begin + +(*FIXME: arch_split: move up *) +requalify_types + irqcontrol_invocation + +lemmas [crunch_def] = decodeIRQControlInvocation_def performIRQControl_def + +context begin global_naming global + +(*FIXME: arch_split: move up *) +requalify_types + Invocations_H.irqcontrol_invocation + +(*FIXME: arch_split*) +requalify_facts + Interrupt_H.decodeIRQControlInvocation_def + Interrupt_H.performIRQControl_def + +end +end + +primrec + irq_handler_inv_relation :: "irq_handler_invocation \ irqhandler_invocation \ bool" +where + "irq_handler_inv_relation (Invocations_A.ACKIrq irq) x = (x = AckIRQ irq)" +| "irq_handler_inv_relation (Invocations_A.ClearIRQHandler irq) x = (x = ClearIRQHandler irq)" +| "irq_handler_inv_relation (Invocations_A.SetIRQHandler irq cap ptr) x = + (\cap'. x = SetIRQHandler irq cap' (cte_map ptr) \ cap_relation cap cap')" + +primrec + arch_irq_control_inv_relation :: "arch_irq_control_invocation \ Arch.irqcontrol_invocation \ bool" +where + "arch_irq_control_inv_relation (AARCH64_A.ARMIRQControlInvocation i ptr ptr' t) x = + (x = AARCH64_H.IssueIRQHandler i (cte_map ptr) (cte_map ptr') t)" + +primrec + irq_control_inv_relation :: "irq_control_invocation \ irqcontrol_invocation \ bool" +where + "irq_control_inv_relation (Invocations_A.IRQControl irq slot slot') x + = (x = IssueIRQHandler irq (cte_map slot) (cte_map slot'))" +| "irq_control_inv_relation (Invocations_A.ArchIRQControl ivk) x + = (\ivk'. x = ArchIRQControl ivk' \ arch_irq_control_inv_relation ivk ivk')" + +primrec + irq_handler_inv_valid' :: "irqhandler_invocation \ kernel_state \ bool" +where + "irq_handler_inv_valid' (AckIRQ irq) = (\s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive)" +| "irq_handler_inv_valid' (ClearIRQHandler irq) = \" +| "irq_handler_inv_valid' (SetIRQHandler irq cap cte_ptr) + = (valid_cap' cap and valid_cap' (IRQHandlerCap irq) + and K (isNotificationCap cap) + and cte_wp_at' (badge_derived' cap \ cteCap) cte_ptr + and (\s. \ptr'. cte_wp_at' (\cte. cteCap cte = IRQHandlerCap irq) ptr' s) + and ex_cte_cap_wp_to' isCNodeCap cte_ptr)" + +primrec + arch_irq_control_inv_valid' :: "Arch.irqcontrol_invocation \ kernel_state \ bool" +where + "arch_irq_control_inv_valid' (AARCH64_H.IssueIRQHandler irq ptr ptr' t) = + (cte_wp_at' (\cte. cteCap cte = NullCap) ptr and + cte_wp_at' (\cte. cteCap cte = IRQControlCap) ptr' and + ex_cte_cap_to' ptr and real_cte_at' ptr and + (Not o irq_issued' irq) and K (irq \ maxIRQ))" + +primrec + irq_control_inv_valid' :: "irqcontrol_invocation \ kernel_state \ bool" +where + "irq_control_inv_valid' (ArchIRQControl ivk) = arch_irq_control_inv_valid' ivk" +| "irq_control_inv_valid' (IssueIRQHandler irq ptr ptr') = + (cte_wp_at' (\cte. cteCap cte = NullCap) ptr and + cte_wp_at' (\cte. cteCap cte = IRQControlCap) ptr' and + ex_cte_cap_to' ptr and real_cte_at' ptr and + (Not o irq_issued' irq) and K (irq \ maxIRQ))" + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma decodeIRQHandlerInvocation_corres: + "\ list_all2 cap_relation (map fst caps) (map fst caps'); + list_all2 (\p pa. snd pa = cte_map (snd p)) caps caps' \ \ + corres (ser \ irq_handler_inv_relation) invs invs' + (decode_irq_handler_invocation label irq caps) + (decodeIRQHandlerInvocation label irq caps')" + apply (simp add: decode_irq_handler_invocation_def decodeIRQHandlerInvocation_def + split del: if_split) + apply (cases caps) + apply (simp add: returnOk_def split: invocation_label.split gen_invocation_labels.split list.splits split del: if_split) + defer + apply (clarsimp simp: list_all2_Cons1 split del: if_split) + apply (simp add: returnOk_def split: invocation_label.split gen_invocation_labels.split list.splits) + apply (clarsimp split: cap_relation_split_asm arch_cap.split_asm simp: returnOk_def) + done + +crunch inv[wp]: decodeIRQHandlerInvocation "P" + (simp: crunch_simps) + +lemma decode_irq_handler_valid'[wp]: + "\\s. invs' s \ (\cap \ set caps. s \' fst cap) + \ (\ptr'. cte_wp_at' (\cte. cteCap cte = IRQHandlerCap irq) ptr' s) + \ (\cap \ set caps. \r \ cte_refs' (fst cap) (irq_node' s). ex_cte_cap_to' r s) + \ (\cap \ set caps. ex_cte_cap_wp_to' isCNodeCap (snd cap) s) + \ (\cap \ set caps. cte_wp_at' (badge_derived' (fst cap) \ cteCap) (snd cap) s) + \ s \' IRQHandlerCap irq\ + decodeIRQHandlerInvocation label irq caps + \irq_handler_inv_valid'\,-" + apply (simp add: decodeIRQHandlerInvocation_def Let_def split_def + split del: if_split) + apply (rule hoare_pre) + apply (wp | wpc | simp)+ + apply (clarsimp simp: neq_Nil_conv isCap_simps) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule (1) valid_irq_handlers_ctes_ofD) + apply (simp add: invs'_def valid_state'_def) + apply (simp add: irq_issued'_def) + apply clarsimp + done + +lemma is_irq_active_corres: + "corres (=) \ \ (is_irq_active irq) (isIRQActive irq)" + apply (simp add: is_irq_active_def isIRQActive_def get_irq_state_def + getIRQState_def getInterruptState_def) + apply (clarsimp simp: state_relation_def interrupt_state_relation_def) + apply (drule_tac x=irq in spec)+ + apply (simp add: irq_state_relation_def + split: irqstate.split_asm irq_state.split_asm) + done + +crunch inv: isIRQActive "P" + +lemma isIRQActive_wp: + "\\s. \rv. (irq_issued' irq s \ rv) \ Q rv s\ isIRQActive irq \Q\" + apply (simp add: isIRQActive_def getIRQState_def + getInterruptState_def) + apply wp + apply (clarsimp simp: irq_issued'_def) + done + +lemma checkIRQ_corres: + "corres (ser \ dc) \ \ (arch_check_irq irq) (checkIRQ irq)" + unfolding arch_check_irq_def checkIRQ_def rangeCheck_def + apply (rule corres_guard_imp) + apply (clarsimp simp: minIRQ_def unlessE_whenE not_le) + apply (rule corres_whenE) + apply (fastforce simp: ucast_nat_def)+ + done + +lemma whenE_rangeCheck_eq: + "(rangeCheck (x :: 'a :: {linorder, integral}) y z) = + (whenE (x < fromIntegral y \ fromIntegral z < x) + (throwError (RangeError (fromIntegral y) (fromIntegral z))))" + by (simp add: rangeCheck_def unlessE_whenE linorder_not_le[symmetric]) + +lemmas irq_const_defs = maxIRQ_def minIRQ_def + +crunches arch_check_irq, checkIRQ + for inv: "P" + (simp: crunch_simps) + +lemma arch_check_irq_valid: + "\\\ arch_check_irq y \\_. (\s. unat y \ unat maxIRQ)\, -" + unfolding arch_check_irq_def + apply (wpsimp simp: validE_R_def wp: whenE_throwError_wp) + by (meson le_trans unat_ucast_le word_le_not_less word_less_eq_iff_unsigned) + +lemma arch_check_irq_valid': + "\\\ arch_check_irq y \\_ _. 
unat y \ unat maxIRQ\, \\_. \\" + by (wp arch_check_irq_valid) + +lemma arch_decodeIRQControlInvocation_corres: + "list_all2 cap_relation caps caps' \ + corres (ser \ arch_irq_control_inv_relation) + (invs and (\s. \cp \ set caps. s \ cp)) + (invs' and (\s. \cp \ set caps'. s \' cp)) + (arch_decode_irq_control_invocation label args slot caps) + (AARCH64_H.decodeIRQControlInvocation label args (cte_map slot) caps')" + apply (clarsimp simp: arch_decode_irq_control_invocation_def + AARCH64_H.decodeIRQControlInvocation_def Let_def) + apply (rule conjI; clarsimp) + prefer 2 + apply (cases caps + ; fastforce split: arch_invocation_label.splits list.splits invocation_label.splits + simp: length_Suc_conv list_all2_Cons1 whenE_rangeCheck_eq liftE_bindE) + apply (cases caps, simp split: list.split) + apply (case_tac "\n. length args = Suc (Suc (Suc (Suc n)))", + clarsimp simp: length_Suc_conv list_all2_Cons1 whenE_rangeCheck_eq liftE_bindE) + prefer 2 apply (fastforce split: list.split) + \\ARMIRQIssueIRQHandler\ + apply (rule conjI, clarsimp) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF checkIRQ_corres]) + apply (rule_tac F="unat y \ unat maxIRQ" in corres_gen_asm) + apply (clarsimp simp add: minIRQ_def maxIRQ_def ucast_nat_def) + apply (rule corres_split_eqr[OF is_irq_active_corres]) + apply (rule whenE_throwError_corres, clarsimp, clarsimp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; clarsimp) + apply (rule corres_splitEE[OF ensureEmptySlot_corres], simp) + apply (rule corres_returnOkTT) + apply (clarsimp simp: arch_irq_control_inv_relation_def) + apply (wp del: arch_check_irq_inv + | wpsimp wp: isIRQActive_inv checkIRQ_inv arch_check_irq_valid' + simp: invs_valid_objs invs_psp_aligned invs_valid_objs' + invs_pspace_aligned' invs_pspace_distinct' + | strengthen invs_valid_objs invs_psp_aligned + | wp (once) hoare_drop_imps arch_check_irq_inv)+ + apply (auto split: arch_invocation_label.splits invocation_label.splits) + done + +lemma irqhandler_simp[simp]: + "gen_invocation_type label \ IRQIssueIRQHandler \ + (case gen_invocation_type label of IRQIssueIRQHandler \ b | _ \ c) = c" + by (clarsimp split: gen_invocation_labels.splits) + +lemma decodeIRQControlInvocation_corres: + "list_all2 cap_relation caps caps' \ + corres (ser \ irq_control_inv_relation) + (invs and (\s. \cp \ set caps. s \ cp)) (invs' and (\s. \cp \ set caps'. s \' cp)) + (decode_irq_control_invocation label args slot caps) + (decodeIRQControlInvocation label args (cte_map slot) caps')" + apply (clarsimp simp: decode_irq_control_invocation_def decodeIRQControlInvocation_def + arch_check_irq_def AARCH64_H.checkIRQ_def + split del: if_split cong: if_cong) + apply clarsimp + apply (rule conjI, clarsimp) + apply (rule conjI, clarsimp) + apply (cases caps, simp split: list.split) + apply (case_tac "\n. 
length args = Suc (Suc (Suc n))") + apply (clarsimp simp: list_all2_Cons1 Let_def split_def liftE_bindE + length_Suc_conv checkIRQ_def) + defer + apply (prop_tac "length args \ 2", arith) + apply (clarsimp split: list.split) + apply (simp add: minIRQ_def o_def) + apply (auto intro!: corres_guard_imp[OF arch_decodeIRQControlInvocation_corres])[1] + apply (auto intro!: corres_guard_imp[OF arch_decodeIRQControlInvocation_corres] + dest!: not_le_imp_less + simp: minIRQ_def o_def length_Suc_conv whenE_rangeCheck_eq ucast_nat_def + split: list.splits)[1] + apply (rule corres_guard_imp) + apply (simp add: whenE_rangeCheck_eq) + apply (rule whenE_throwError_corres, clarsimp, fastforce) + apply (rule_tac F="unat y \ unat maxIRQ" in corres_gen_asm) + apply (clarsimp simp add: minIRQ_def maxIRQ_def ucast_nat_def) + apply (rule corres_split_eqr[OF is_irq_active_corres]) + apply (rule whenE_throwError_corres, clarsimp, clarsimp) + apply (rule corres_splitEE) + apply (rule lookupSlotForCNodeOp_corres; clarsimp) + apply (rule corres_splitEE[OF ensureEmptySlot_corres], simp) + apply (rule corres_returnOkTT) + apply (clarsimp simp: arch_irq_control_inv_relation_def) + apply (wpsimp wp: isIRQActive_inv arch_check_irq_valid' checkIRQ_inv + simp: invs_valid_objs invs_psp_aligned invs_valid_objs' + invs_pspace_aligned' invs_pspace_distinct' + | strengthen invs_valid_objs invs_psp_aligned + | wp (once) hoare_drop_imps arch_check_irq_inv)+ + apply (auto split: arch_invocation_label.splits invocation_label.splits + simp: not_less unat_le_helper) + done + +crunch inv[wp]: "InterruptDecls_H.decodeIRQControlInvocation" "P" + (simp: crunch_simps wp: crunch_wps) + +(* Levity: added (20090201 10:50:27) *) +declare ensureEmptySlot_stronger [wp] + +lemma arch_decode_irq_control_valid'[wp]: + "\\s. invs' s \ (\cap \ set caps. s \' cap) + \ (\cap \ set caps. \r \ cte_refs' cap (irq_node' s). ex_cte_cap_to' r s) + \ cte_wp_at' (\cte. cteCap cte = IRQControlCap) slot s\ + AARCH64_H.decodeIRQControlInvocation label args slot caps + \arch_irq_control_inv_valid'\,-" + apply (clarsimp simp add: AARCH64_H.decodeIRQControlInvocation_def Let_def split_def + rangeCheck_def unlessE_whenE + split del: if_split + cong: if_cong list.case_cong prod.case_cong arch_invocation_label.case_cong) + apply (rule hoare_pre) + apply (simp add: rangeCheck_def unlessE_whenE checkIRQ_def + cong: list.case_cong prod.case_cong + | wp whenE_throwError_wp isIRQActive_wp ensureEmptySlot_stronger + | wpc + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: invs_valid_objs' irq_const_defs unat_word_ariths word_le_nat_alt + not_less unat_le_helper unat_of_nat unat_ucast_mask) + apply (meson le_trans word_and_le2 word_less_eq_iff_unsigned) + done + +lemma decode_irq_control_valid'[wp]: + "\\s. invs' s \ (\cap \ set caps. s \' cap) + \ (\cap \ set caps. \r \ cte_refs' cap (irq_node' s). ex_cte_cap_to' r s) + \ cte_wp_at' (\cte. 
cteCap cte = IRQControlCap) slot s\ + decodeIRQControlInvocation label args slot caps + \irq_control_inv_valid'\,-" + apply (simp add: decodeIRQControlInvocation_def Let_def split_def checkIRQ_def + rangeCheck_def unlessE_whenE + split del: if_split cong: if_cong list.case_cong + gen_invocation_labels.case_cong) + apply (wpsimp wp: ensureEmptySlot_stronger isIRQActive_wp whenE_throwError_wp + simp: o_def + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: invs_valid_objs' irq_const_defs unat_word_ariths word_le_nat_alt + not_less unat_le_helper unat_of_nat unat_ucast_mask) + apply (meson le_trans word_and_le2 word_less_eq_iff_unsigned) + done + +lemma valid_globals_ex_cte_cap_irq: + "\ ex_cte_cap_wp_to' isCNodeCap ptr s; valid_global_refs' s; + valid_objs' s \ + \ ptr \ intStateIRQNode (ksInterruptState s) + 2 ^ cte_level_bits * ucast (irq :: irq)" + apply (clarsimp simp: cte_wp_at_ctes_of ex_cte_cap_wp_to'_def) + apply (drule(1) ctes_of_valid'[rotated]) + apply (drule(1) valid_global_refsD') + apply (drule subsetD[rotated], erule cte_refs_capRange) + apply (clarsimp simp: isCap_simps) + apply (subgoal_tac "irq_node' s + 2 ^ cte_level_bits * ucast irq \ global_refs' s") + apply blast + apply (simp add: global_refs'_def cte_level_bits_def cteSizeBits_def shiftl_t2n mult.commute mult.left_commute) + done + +lemma no_fail_plic_complete_claim [simp, wp]: + "no_fail \ (AARCH64.plic_complete_claim irw)" + unfolding AARCH64.plic_complete_claim_def + by (rule no_fail_machine_op_lift) + +lemma arch_invokeIRQHandler_corres: + "irq_handler_inv_relation i i' \ + corres dc \ \ (arch_invoke_irq_handler i) (AARCH64_H.invokeIRQHandler i')" + apply (cases i; clarsimp simp: AARCH64_H.invokeIRQHandler_def) + apply (rule corres_machine_op, rule corres_Id; simp?) + done + + +lemma invokeIRQHandler_corres: + "irq_handler_inv_relation i i' \ + corres dc (einvs and irq_handler_inv_valid i) + (invs' and irq_handler_inv_valid' i') + (invoke_irq_handler i) + (InterruptDecls_H.invokeIRQHandler i')" + supply arch_invoke_irq_handler.simps[simp del] + apply (cases i; simp add: Interrupt_H.invokeIRQHandler_def) + apply (rule corres_guard_imp, rule arch_invokeIRQHandler_corres; simp) + apply (rename_tac word cap prod) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQSlot_corres]) + apply simp + apply (rule corres_split_nor[OF cap_delete_one_corres]) + apply (rule cteInsert_corres, simp+) + apply (rule_tac Q="\rv s. einvs s \ cte_wp_at (\c. c = cap.NullCap) irq_slot s + \ (a, b) \ irq_slot + \ cte_wp_at (is_derived (cdt s) (a, b) cap) (a, b) s" + in hoare_post_imp) + apply fastforce + apply (wp cap_delete_one_still_derived)+ + apply (strengthen invs_mdb_strengthen') + apply wp+ + apply (simp add: conj_comms eq_commute) + apply (wp get_irq_slot_different hoare_drop_imps)+ + apply (clarsimp simp: valid_state_def invs_def) + apply (erule cte_wp_at_weakenE, simp add: is_derived_use_interrupt) + apply fastforce + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQSlot_corres]) + apply simp + apply (rule cap_delete_one_corres) + apply wp+ + apply simp+ + done + +lemma ntfn_badge_derived_enough_strg: + "cte_wp_at' (\cte. isNotificationCap cap \ badge_derived' cap (cteCap cte)) ptr s + \ cte_wp_at' (is_derived' ctes ptr cap \ cteCap) ptr s" + by (clarsimp simp: cte_wp_at_ctes_of isCap_simps + badge_derived'_def is_derived'_def) + +lemma cteDeleteOne_ex_cte_cap_to'[wp]: + "\ex_cte_cap_wp_to' P p\ cteDeleteOne ptr \\rv. 
ex_cte_cap_wp_to' P p\" + apply (simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF cteDeleteOne_irq_node']) + apply (wp hoare_vcg_ex_lift cteDeleteOne_cte_wp_at_preserved) + apply (case_tac cap, simp_all add: finaliseCap_def isCap_simps) + done + +lemma cteDeleteOne_other_cap: + "\(\s. cte_wp_at' (P o cteCap) p s) and K (p \ p')\ + cteDeleteOne p' + \\rv s. cte_wp_at' (P o cteCap) p s\" + apply (rule hoare_gen_asm) + apply (simp add: tree_cte_cteCap_eq) + apply (wp cteDeleteOne_cteCaps_of) + apply simp + done + +lemma isnt_irq_handler_strg: + "(\ isIRQHandlerCap cap) \ (\irq. cap = IRQHandlerCap irq \ P irq)" + by (clarsimp simp: isCap_simps) + +lemma dmo_plic_complete_claim_invs'[wp]: + "doMachineOp (AARCH64.plic_complete_claim irq) \invs'\" + apply (wp dmo_invs') + apply (clarsimp simp: in_monad AARCH64.plic_complete_claim_def machine_op_lift_def machine_rest_lift_def select_f_def) + done + +lemma doMachineOp_maskInterrupt_False[wp]: + "\ \s. invs' s \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive \ + doMachineOp (maskInterrupt False irq) + \\_. invs'\" + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (simp add: valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + done + +lemma invoke_arch_irq_handler_invs'[wp]: + "\invs' and irq_handler_inv_valid' i\ AARCH64_H.invokeIRQHandler i \\rv. invs'\" + by (cases i; wpsimp simp: AARCH64_H.invokeIRQHandler_def) + +lemma invoke_irq_handler_invs'[wp]: + "\invs' and irq_handler_inv_valid' i\ + InterruptDecls_H.invokeIRQHandler i \\rv. invs'\" + apply (cases i; simp add: Interrupt_H.invokeIRQHandler_def) + apply wpsimp + apply (wp cteInsert_invs)+ + apply (strengthen ntfn_badge_derived_enough_strg isnt_irq_handler_strg) + apply (wp cteDeleteOne_other_cap cteDeleteOne_other_cap[unfolded o_def]) + apply (rename_tac word1 cap word2) + apply (simp add: getInterruptState_def getIRQSlot_def locateSlot_conv) + apply wp + apply (rename_tac word1 cap word2 s) + apply (clarsimp simp: ucast_nat_def) + apply (drule_tac irq=word1 in valid_globals_ex_cte_cap_irq) + apply clarsimp+ + apply (clarsimp simp: cte_wp_at_ctes_of ex_cte_cap_to'_def + isCap_simps untyped_derived_eq_def) + apply (fastforce simp: cte_level_bits_def cteSizeBits_def shiftl_t2n)+ + done + +lemma IRQHandler_valid': + "(s' \' IRQHandlerCap irq) = (irq \ maxIRQ)" + by (simp add: valid_cap'_def capAligned_def word_bits_conv) + +crunch valid_mdb'[wp]: setIRQState "valid_mdb'" + +method do_machine_op_corres + = (rule corres_machine_op, rule corres_Id, rule refl, simp, wp) + +lemma no_fail_setIRQTrigger: "no_fail \ (setIRQTrigger irq trig)" + by (simp add: setIRQTrigger_def) + +lemma setIRQTrigger_corres: + "corres dc \ \ (do_machine_op (setIRQTrigger irq t)) (doMachineOp (setIRQTrigger irq t))" + apply (rule corres_machine_op) + apply (rule corres_guard_imp) + apply (rule corres_rel_imp) + apply (wp + | rule corres_underlying_trivial + | rule no_fail_setIRQTrigger + | simp add: dc_def)+ + done + +lemma arch_performIRQControl_corres: + "arch_irq_control_inv_relation x2 ivk' \ corres (dc \ dc) + (einvs and arch_irq_control_inv_valid x2) + (invs' and arch_irq_control_inv_valid' ivk') + (arch_invoke_irq_control x2) + (Arch.performIRQControl ivk')" + apply (cases x2; simp add: AARCH64_H.performIRQControl_def invoke_irq_control.cases IRQ_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (rule 
setIRQTrigger_corres) + apply (rule corres_split_nor) + apply (rule setIRQState_corres) + apply (simp add: irq_state_relation_def) + apply (rule cteInsert_simple_corres; simp) + apply (wp | simp add: irq_state_relation_def IRQHandler_valid IRQHandler_valid')+ + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def cte_wp_at_caps_of_state + is_simple_cap_def is_cap_simps arch_irq_control_inv_valid_def + safe_parent_for_def is_simple_cap_arch_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def IRQHandler_valid + IRQHandler_valid' is_simple_cap'_def isCap_simps IRQ_def) + apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of) + apply (case_tac ctea) + apply (clarsimp simp: isCap_simps sameRegionAs_def3) + apply (auto dest: valid_irq_handlers_ctes_ofD)[1] + done + +lemma performIRQControl_corres: + "irq_control_inv_relation i i' \ + corres (dc \ dc) (einvs and irq_control_inv_valid i) + (invs' and irq_control_inv_valid' i') + (invoke_irq_control i) + (performIRQControl i')" + apply (cases i, simp_all add: performIRQControl_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF setIRQState_corres]) + apply (simp add: irq_state_relation_def) + apply (rule cteInsert_simple_corres) + apply (wp | simp add: IRQHandler_valid IRQHandler_valid')+ + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def + cte_wp_at_caps_of_state is_simple_cap_def is_simple_cap_arch_def + is_cap_simps safe_parent_for_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + IRQHandler_valid IRQHandler_valid' is_simple_cap'_def + isCap_simps) + apply (clarsimp simp: safe_parent_for'_def cte_wp_at_ctes_of) + apply (case_tac ctea) + apply (clarsimp simp: isCap_simps sameRegionAs_def3) + apply (auto dest: valid_irq_handlers_ctes_ofD)[1] + by (clarsimp simp: arch_performIRQControl_corres) + +crunch valid_cap'[wp]: setIRQState "valid_cap' cap" + +lemma setIRQState_cte_cap_to'[wp]: + "\ex_cte_cap_to' p\ setIRQState st irq \\_. ex_cte_cap_to' p\" + apply (simp add: setIRQState_def doMachineOp_def + split_def setInterruptState_def getInterruptState_def) + apply wp + apply (clarsimp simp: ex_cte_cap_to'_def) + done + +lemma setIRQState_issued[wp]: + "\K (st = IRQSignal)\ setIRQState st irq \\rv. irq_issued' irq\" + apply (simp add: setIRQState_def irq_issued'_def setInterruptState_def + getInterruptState_def) + apply wp + apply clarsimp + done + +lemma dmo_setIRQTrigger_invs'[wp]: + "\invs'\ doMachineOp (setIRQTrigger irq t) \\r. invs'\" + apply (wp dmo_invs' no_irq_setIRQTrigger no_irq) + apply clarsimp + apply (drule_tac P4="\m'. underlying_memory m' p = underlying_memory m p" + in use_valid[where P=P and Q="\_. P" for P]) + apply (wpsimp simp: setIRQTrigger_def machine_op_lift_def machine_rest_lift_def split_def)+ + done + +lemma arch_invoke_irq_control_invs'[wp]: + "\invs' and arch_irq_control_inv_valid' i\ AARCH64_H.performIRQControl i \\rv. invs'\" + apply (simp add: AARCH64_H.performIRQControl_def) + apply (rule hoare_pre) + apply (wpsimp wp: cteInsert_simple_invs simp: cte_wp_at_ctes_of isCap_simps IRQ_def) + apply (clarsimp simp: cte_wp_at_ctes_of IRQHandler_valid' is_simple_cap'_def isCap_simps + safe_parent_for'_def sameRegionAs_def3) + apply (rule conjI, clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac ctea) + apply (auto dest: valid_irq_handlers_ctes_ofD + simp: invs'_def valid_state'_def IRQ_def) + done + +lemma invoke_irq_control_invs'[wp]: + "\invs' and irq_control_inv_valid' i\ performIRQControl i \\rv. 
invs'\" + apply (cases i, simp_all add: performIRQControl_def) + apply (rule hoare_pre) + apply (wp cteInsert_simple_invs | simp add: cte_wp_at_ctes_of)+ + apply (clarsimp simp: cte_wp_at_ctes_of IRQHandler_valid' + is_simple_cap'_def isCap_simps + safe_parent_for'_def sameRegionAs_def3) + apply (case_tac ctea) + apply (auto dest: valid_irq_handlers_ctes_ofD + simp: invs'_def valid_state'_def) + done + +lemma getIRQState_corres: + "corres irq_state_relation \ \ + (get_irq_state irq) (getIRQState irq)" + apply (simp add: get_irq_state_def getIRQState_def getInterruptState_def) + apply (clarsimp simp: state_relation_def interrupt_state_relation_def) + done + +lemma getIRQState_prop: + "\\s. P (intStateIRQTable (ksInterruptState s) irq)\ + getIRQState irq + \\rv s. P rv\" + apply (simp add: getIRQState_def getInterruptState_def) + apply wp + apply simp + done + +lemma decDomainTime_corres: + "corres dc \ \ dec_domain_time decDomainTime" + apply (simp add:dec_domain_time_def corres_underlying_def decDomainTime_def simpler_modify_def) + apply (clarsimp simp:state_relation_def) + done + +lemma tcbSchedAppend_valid_objs': + "\valid_objs'\tcbSchedAppend t \\r. valid_objs'\" + apply (simp add:tcbSchedAppend_def) + apply (wpsimp wp: unless_wp threadSet_valid_objs' threadGet_wp) + apply (clarsimp simp add:obj_at'_def typ_at'_def) + done + +lemma thread_state_case_if: + "(case state of Structures_A.thread_state.Running \ f | _ \ g) = + (if state = Structures_A.thread_state.Running then f else g)" + by (case_tac state,auto) + +lemma threadState_case_if: + "(case state of Structures_H.thread_state.Running \ f | _ \ g) = + (if state = Structures_H.thread_state.Running then f else g)" + by (case_tac state,auto) + +lemma tcbSchedAppend_invs_but_ct_not_inQ': + "\invs' and st_tcb_at' runnable' t \ + tcbSchedAppend t \\_. 
all_invs_but_ct_not_inQ'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp sch_act_wf_lift valid_irq_node_lift irqs_masked_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def + | fastforce elim!: st_tcb_ex_cap'' split: thread_state.split_asm)+ + done + +lemma timerTick_corres: + "corres dc (cur_tcb and valid_sched and pspace_aligned and pspace_distinct) + invs' + timer_tick timerTick" + apply (simp add: timerTick_def timer_tick_def) + apply (simp add:thread_state_case_if threadState_case_if) + apply (rule_tac Q="\ and (cur_tcb and valid_sched and pspace_aligned and pspace_distinct)" + and Q'="\ and invs'" in corres_guard_imp) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply simp + apply (rule corres_split[OF getThreadState_corres]) + apply (rename_tac state state') + apply (rule corres_split[where r' = dc]) + apply (rule corres_if[where Q = \ and Q' = \]) + apply (case_tac state,simp_all)[1] + apply (rule_tac r'="(=)" in corres_split[OF ethreadget_corres]) + apply (simp add:etcb_relation_def) + apply (rename_tac ts ts') + apply (rule_tac R="1 < ts" in corres_cases) + apply (simp) + apply (unfold thread_set_time_slice_def) + apply (rule ethread_set_corres, simp+) + apply (clarsimp simp: etcb_relation_def) + apply simp + apply (rule corres_split[OF ethread_set_corres]) + apply (simp add: sch_act_wf_weak etcb_relation_def pred_conj_def)+ + apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule rescheduleRequired_corres) + apply (wp)[1] + apply (rule hoare_strengthen_post) + apply (rule tcbSchedAppend_invs_but_ct_not_inQ', clarsimp simp: sch_act_wf_weak) + apply (wp threadSet_timeslice_invs threadSet_valid_queues + threadSet_valid_queues' threadSet_pred_tcb_at_state)+ + apply simp + apply (rule corres_when,simp) + apply (rule corres_split[OF decDomainTime_corres]) + apply (rule corres_split[OF getDomainTime_corres]) + apply (rule corres_when,simp) + apply (rule rescheduleRequired_corres) + apply (wp hoare_drop_imp)+ + apply (simp add:dec_domain_time_def) + apply wp+ + apply (simp add:decDomainTime_def) + apply wp + apply (wpsimp wp: hoare_weak_lift_imp threadSet_timeslice_invs threadSet_valid_queues + threadSet_valid_queues' tcbSchedAppend_valid_objs' + threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf + rescheduleRequired_weak_sch_act_wf tcbSchedAppend_valid_queues)+ + apply (strengthen sch_act_wf_weak) + apply (clarsimp simp:conj_comms) + apply (wp tcbSchedAppend_valid_queues tcbSchedAppend_sch_act_wf) + apply simp + apply (wpsimp wp: threadSet_valid_queues threadSet_pred_tcb_at_state threadSet_sch_act + threadSet_tcbDomain_triv threadSet_valid_queues' threadSet_valid_objs' + threadGet_wp gts_wp gts_wp')+ + apply (clarsimp simp: cur_tcb_def tcb_at_is_etcb_at valid_sched_def valid_sched_action_def) + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak cur_tcb'_def inQ_def + ct_in_state'_def obj_at'_def) + apply (clarsimp simp:st_tcb_at'_def valid_idle'_def ct_idle_or_in_cur_domain'_def obj_at'_def) + apply simp + apply simp + done + +lemma corres_return_VGICMaintenance [corres]: + "corres ((=) o arch_fault_map) (K (a=b)) \ + (return (AARCH64_A.VGICMaintenance a)) (return (AARCH64_H.VGICMaintenance b))" + by simp + +lemmas corres_gets_numlistregs [corres] = corres_gets_gicvcpu_numlistregs (* FIXME AARCH64: de-duplicate *) + +lemmas corres_eq_trivial = corres_Id[where f = h and g = h for h, simplified] + 
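+(* Note on corres_eq_trivial above: it is corres_Id specialised to the case where both
+   sides run literally the same function, so (via corres_machine_op) it discharges
+   correspondence goals for machine operations that are shared verbatim between the
+   abstract and executable specifications. It is used below, e.g. in handleInterrupt_corres,
+   typically in a pattern like
+     apply (rule corres_machine_op, rule corres_eq_trivial; simp)
+   This is only an illustrative sketch of the usual call shape; the concrete simp side
+   conditions (such as no_fail facts) vary per call site. *)
+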
+lemma countTrailingZeros_simp[simp]: + "countTrailingZeros = word_ctz" + unfolding countTrailingZeros_def word_ctz_def + by (simp add: to_bl_upt) + +crunches doMachineOp + for sch_act_ct_rq[wp]: "\s. P (ksSchedulerAction s) (ksCurThread s) (ksReadyQueues s)" + and pred_tcb_at'_ct[wp]: "\s. pred_tcb_at' proj test (ksCurThread s) s" + and ex_nonz_cap_to'[wp]: "\s. P (ex_nonz_cap_to' (ksCurThread s) s)" + +lemma dmo_wp_no_rest: + "\K((\s f. P s = (P (machine_state_update (machine_state_rest_update f) s)))) and P\ + do_machine_op (machine_op_lift f) + \\_. P\" + apply (simp add: do_machine_op_def machine_op_lift_def bind_assoc) + apply wpsimp + apply (clarsimp simp add: machine_rest_lift_def in_monad select_f_def ignore_failure_def) + apply (clarsimp split: if_splits) + apply (drule_tac x=s in spec) + apply (drule_tac x="\_. b" in spec) + apply simp + apply (erule rsubst[OF _ arg_cong[where f=P]]) + apply clarsimp + done + +lemma dmo_gets_wp: + "\\s. P (f (machine_state s)) s\ do_machine_op (gets f) \P\" + by (wpsimp simp: submonad_do_machine_op.gets) + +crunches vgicUpdateLR + for ksCurThread[wp]: "\s. P (ksCurThread s)" + +lemma virqType_eq[simp]: + "virqType = virq_type" + unfolding virqType_def virq_type_def + by simp + +lemma virqSetEOIIRQEN_eq[simp]: + "AARCH64_H.virqSetEOIIRQEN = AARCH64_A.virqSetEOIIRQEN" + unfolding virqSetEOIIRQEN_def AARCH64_A.virqSetEOIIRQEN_def + by auto + +lemma not_pred_tcb': + "(\pred_tcb_at' proj P t s) = (\tcb_at' t s \ pred_tcb_at' proj (\a. \P a) t s)" + by (auto simp: pred_tcb_at'_def obj_at'_def) + +lemma vgic_maintenance_corres [corres]: + "corres dc einvs + (\s. invs' s \ sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p))) + vgic_maintenance vgicMaintenance" +proof - + (* hoare_lift_Pf-style rules match too often, slowing down proof unless specialised *) + note vilr = hoare_lift_Pf2[where f=cur_thread and m="vgic_update_lr v i virq" for v i virq] + note vilr' = hoare_lift_Pf2[where f=ksCurThread and m="vgicUpdateLR v i virq" for v i virq] + note wplr = vilr[where P="st_tcb_at active"] + vilr[where P="ex_nonz_cap_to"] + note wplr' = vilr'[where P="sch_act_not"] + vilr'[where P="ex_nonz_cap_to'"] + vilr'[where P="st_tcb_at' simple'"] + vilr'[where P="\t s. t \ set (ksReadyQueues s x)" for x] + show ?thesis + unfolding vgic_maintenance_def vgicMaintenance_def isRunnable_def Let_def + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_gets_current_vcpu], simp, rename_tac hsCurVCPU) + (* we only care about the one case we do something: active current vcpu *) + apply (rule_tac R="hsCurVCPU = None" in corres_cases') + apply (rule corres_trivial, simp) + apply (clarsimp, rename_tac vcpu_ptr active) + apply wpfix + apply (rule_tac R="\ active" in corres_cases') + apply (rule corres_trivial, simp) + apply clarsimp + + apply (rule corres_split_eqr[OF corres_machine_op], + (rule corres_Id; wpsimp simp: get_gic_vcpu_ctrl_misr_def + get_gic_vcpu_ctrl_eisr1_def + get_gic_vcpu_ctrl_eisr0_def))+ + apply (rename_tac eisr0 eisr1 flags) + apply (rule corres_split[OF corres_gets_numlistregs]) + apply (rule corres_split[where r'="\rv rv'. 
rv' = arch_fault_map rv"]) + apply (rule corres_if[rotated -1]) + apply (rule corres_trivial, simp) + apply clarsimp + apply (rule corres_if, simp) + apply (rule corres_trivial, simp) + supply if_split[split del] + apply (clarsimp simp: bind_assoc cong: if_cong) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply clarsimp + apply (rule corres_split_dc[OF vgicUpdateLR_corres]) + apply (rule corres_trivial, simp) + apply wpsimp+ + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split[OF getThreadState_corres]) + apply (fold dc_def) + apply (rule corres_when) + apply clarsimp + apply (rename_tac threadState threadState') + apply (case_tac threadState; simp) + apply (rule handleFault_corres) + apply clarsimp + apply clarsimp + apply (wp gts_wp) + apply (wp gts_wp') + apply (rule_tac + Q="\rv. tcb_at rv and einvs + and (\_. valid_fault (ExceptionTypes_A.fault.ArchFault rva))" + in hoare_post_imp) + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb runnable_eq pred_conj_def) + apply (strengthen st_tcb_ex_cap'[where P=active], clarsimp) + apply (fastforce simp: pred_tcb_at_def obj_at_def) + apply wp + apply clarsimp + apply (rule_tac Q="\rv x. tcb_at' rv x + \ invs' x + \ sch_act_not rv x + \ (\d p. rv \ set (ksReadyQueues x (d, p)))" + in hoare_post_imp) + apply (rename_tac rv s) + apply clarsimp + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') + apply (clarsimp simp: pred_tcb_at'_def) + apply (rule conjI, erule_tac p=rv in obj_at'_weakenE, + fastforce split: thread_state.splits) + apply (erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) + apply wp + apply (wpsimp wp: wplr wplr' hoare_vcg_all_lift + hoare_vcg_imp_lift' dmo_gets_wp dmo'_gets_wp + simp: get_gic_vcpu_ctrl_misr_def if_apply_def2 + get_gic_vcpu_ctrl_eisr1_def + get_gic_vcpu_ctrl_eisr0_def + | strengthen tcb_at_invs tcb_at_invs')+ + + apply (frule invs_arch_state) + apply (clarsimp simp: valid_arch_state_def valid_fault_def obj_at_def cur_vcpu_def in_omonad) + apply (clarsimp simp: tcb_at_invs') + apply (frule invs_arch_state') + apply (clarsimp simp: valid_arch_state'_def vcpu_at_is_vcpu') + apply (erule ko_wp_at'_weakenE, simp) + done +qed + +lemma vppiEvent_corres: + "corres dc einvs + (\s. invs' s \ sch_act_not (ksCurThread s) s \ (\p. 
ksCurThread s \ set (ksReadyQueues s p))) + (vppi_event irq) (vppiEvent irq)" + unfolding vppi_event_def vppiEvent_def isRunnable_def + supply [[simproc del: defined_all]] + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_gets_current_vcpu]) + apply (clarsimp simp del: subst_all (* avoid destroying useful name of rv *)) + (* we only care about the one case we do something: active current vcpu *) + apply (rule_tac R="hsCurVCPU = None" in corres_cases') + apply (rule corres_trivial, simp) + apply (clarsimp, rename_tac vcpu_ptr active) + apply wpfix + apply (rule_tac R="\ active" in corres_cases') + apply (rule corres_trivial, simp) + apply clarsimp + + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_dc[OF vcpuUpdate_corres]) + apply (fastforce simp: vcpu_relation_def irq_vppi_event_index_def + irqVPPIEventIndex_def IRQ_def) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split[OF getThreadState_corres], rename_tac gts gts') + apply (fold dc_def) + apply (rule corres_when) + apply (case_tac gts; fastforce) + apply (rule handleFault_corres, simp) + apply (wp gts_st_tcb_at hoare_vcg_imp_lift') + apply (wp gts_st_tcb_at' hoare_vcg_imp_lift') + (* on both sides, we check that the current thread is runnable, then have to know it + is runnable directly afterwards, which is obvious and should not propagate further; + clean up the postconditions of the thread_get and threadGet *) + apply (rule_tac + Q="\rv. tcb_at rv and einvs + and (\_. valid_fault (ExceptionTypes_A.fault.ArchFault + (AARCH64_A.VPPIEvent irq)))" + in hoare_post_imp) + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb runnable_eq pred_conj_def) + apply (strengthen st_tcb_ex_cap'[where P=active], fastforce) + apply wp + apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) + apply (rule_tac Q="\rv x. tcb_at' rv x + \ invs' x + \ sch_act_not rv x + \ (\d p. rv \ set (ksReadyQueues x (d, p)))" in hoare_post_imp) + apply (rename_tac rv s) + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') + apply (clarsimp simp: pred_tcb_at'_def) + apply (rule conjI, erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) + apply (erule_tac p=rv in obj_at'_weakenE, fastforce split: thread_state.splits) + apply wp + apply (wpsimp wp: vcpu_update_tcb_at hoare_vcg_all_lift hoare_vcg_imp_lift' + cong: vcpu.fold_congs)+ + apply (strengthen tcb_at_invs) + apply (wpsimp wp: dmo_maskInterrupt_True maskInterrupt_invs + setVCPU_VPPIMasked_invs' simp: vcpuUpdate_def + | wps)+ + apply (frule invs_arch_state) + apply (simp add: valid_arch_state_def valid_fault_def tcb_at_invs) + apply (clarsimp simp: obj_at_def cur_vcpu_def in_omonad) + apply clarsimp + apply (frule invs_arch_state') + apply (rule conjI) + apply (clarsimp simp: valid_arch_state'_def vcpu_at_is_vcpu') + apply (erule ko_wp_at'_weakenE, simp) + apply (simp add: tcb_at_invs') + done + +lemma handle_reserved_irq_corres[corres]: + "corres dc einvs + (\s. invs' s \ (irq \ non_kernel_IRQs \ + sch_act_not (ksCurThread s) s \ (\p. 
ksCurThread s \ set (ksReadyQueues s p)))) + (handle_reserved_irq irq) (handleReservedIRQ irq)" + apply (clarsimp simp: handle_reserved_irq_def handleReservedIRQ_def irqVPPIEventIndex_def + irq_vppi_event_index_def non_kernel_IRQs_def IRQ_def irqVGICMaintenance_def + irqVTimerEvent_def) + apply (rule conjI; clarsimp) + apply (rule corres_guard_imp, rule vppiEvent_corres, assumption, fastforce) + apply (rule corres_guard_imp) + apply (rule corres_when) + apply (fastforce intro: vgic_maintenance_corres simp: unat_arith_simps)+ + done + +lemma handleInterrupt_corres: + "corres dc + (einvs) (invs' and (\s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive)) + (handle_interrupt irq) (handleInterrupt irq)" + (is "corres dc (einvs) ?P' ?f ?g") + apply (simp add: handle_interrupt_def handleInterrupt_def ) + apply (rule conjI[rotated]; rule impI) + + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQState_corres, + where R="\rv. einvs" + and R'="\rv. invs' and (\s. rv \ IRQInactive)"]) + defer + apply (wp getIRQState_prop getIRQState_inv do_machine_op_bind doMachineOp_bind | simp add: do_machine_op_bind doMachineOp_bind )+ + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule corres_machine_op, rule corres_eq_trivial ; (simp add: dc_def no_fail_bind)+)+ + apply ((wp | simp)+)[4] + + apply (rule corres_gen_asm2) + apply (case_tac st, simp_all add: irq_state_relation_def split: irqstate.split_asm) + apply (simp add: getSlotCap_def bind_assoc) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIRQSlot_corres]) + apply simp + apply (rule corres_split[OF get_cap_corres, + where R="\rv. einvs and valid_cap rv" + and R'="\rv. invs' and valid_cap' (cteCap rv)"]) + apply (rule corres_underlying_split[where r'=dc]) + apply (case_tac xb, simp_all add: doMachineOp_return)[1] + apply (clarsimp simp add: when_def doMachineOp_return) + apply (rule corres_guard_imp, rule sendSignal_corres) + apply (clarsimp simp: valid_cap_def valid_cap'_def arch_mask_irq_signal_def + maskIrqSignal_def do_machine_op_bind doMachineOp_bind)+ + apply corres + apply (rule corres_machine_op, rule corres_eq_trivial; simp)+ + apply wpsimp+ + apply fastforce + apply (rule corres_guard_imp) + apply (rule corres_split) + apply simp + apply (rule corres_split[OF timerTick_corres corres_machine_op]) + apply (rule corres_eq_trivial, wpsimp+) + apply (rule corres_machine_op) + apply (rule corres_eq_trivial; simp) + apply wp+ + apply (clarsimp simp: invs_distinct invs_psp_aligned) + apply clarsimp + done + +lemma threadSet_ksDomainTime[wp]: + "\\s. P (ksDomainTime s)\ threadSet f ptr \\rv s. P (ksDomainTime s)\" + apply (simp add: threadSet_def setObject_def split_def) + apply (wp crunch_wps | simp add:updateObject_default_def)+ + done + +crunch ksDomainTime[wp]: rescheduleRequired "\s. P (ksDomainTime s)" +(simp:tcbSchedEnqueue_def wp:unless_wp) + +crunch ksDomainTime[wp]: tcbSchedAppend "\s. P (ksDomainTime s)" +(simp:tcbSchedEnqueue_def wp:unless_wp) + +lemma updateTimeSlice_valid_pspace[wp]: + "\valid_pspace'\ threadSet (tcbTimeSlice_update (\_. ts')) thread + \\r. valid_pspace'\" + apply (wp threadSet_valid_pspace'T) + apply (auto simp:tcb_cte_cases_def cteSizeBits_def) + done + +lemma updateTimeSlice_valid_queues[wp]: + "\\s. Invariants_H.valid_queues s \ + threadSet (tcbTimeSlice_update (\_. ts')) thread + \\r s. Invariants_H.valid_queues s\" + apply (wp threadSet_valid_queues,simp) + apply (clarsimp simp:obj_at'_def inQ_def) + done + +lemma dom_upd_eq: + "f t = Some y \ dom (\a. 
if a = t then Some x else f a) = dom f" + by (auto split: if_split_asm) + +lemma updateTimeSlice_hyp_refs[wp]: + "\\s. P (state_hyp_refs_of' s)\ + threadSet (tcbTimeSlice_update f) thread + \\r s. P (state_hyp_refs_of' s)\" + unfolding threadSet_def + apply (clarsimp simp: setObject_def split_def) + apply (wp getObject_tcb_wp | simp add: updateObject_default_def)+ + apply (clarsimp simp: state_hyp_refs_of'_def obj_at'_def) + apply (erule subst[where P=P, rotated]) + apply (rule ext) + apply (clarsimp simp: objBitsKO_def ps_clear_def dom_upd_eq split: option.splits) + done + +crunches tcbSchedAppend + for irq_handlers'[wp]: valid_irq_handlers' + and irqs_masked'[wp]: irqs_masked' + and ct[wp]: cur_tcb' + (simp: unless_def tcb_cte_cases_def cteSizeBits_def wp: crunch_wps cur_tcb_lift) + +lemma timerTick_invs'[wp]: + "\invs'\ timerTick \\rv. invs'\" + apply (simp add: timerTick_def) + apply (wpsimp wp: threadSet_invs_trivial threadSet_pred_tcb_no_state + rescheduleRequired_all_invs_but_ct_not_inQ + tcbSchedAppend_invs_but_ct_not_inQ' + simp: tcb_cte_cases_def) + apply (rule_tac Q="\rv. invs'" in hoare_post_imp) + apply (clarsimp simp add:invs'_def valid_state'_def) + apply (simp add: decDomainTime_def) + apply wp + apply simp + apply wpc + apply (wp add: threadGet_wp threadSet_cur threadSet_timeslice_invs + rescheduleRequired_all_invs_but_ct_not_inQ + hoare_vcg_imp_lift threadSet_ct_idle_or_in_cur_domain' + del: tcbSchedAppend_sch_act_wf)+ + apply (rule hoare_strengthen_post[OF tcbSchedAppend_invs_but_ct_not_inQ']) + apply (wpsimp simp: valid_pspace'_def sch_act_wf_weak)+ + apply (wpsimp wp: threadSet_pred_tcb_no_state threadSet_tcbDomain_triv + threadSet_valid_objs' threadSet_timeslice_invs)+ + apply (wp threadGet_wp) + apply (wp gts_wp')+ + apply (clarsimp simp: invs'_def st_tcb_at'_def obj_at'_def valid_state'_def) + done + +lemma resetTimer_invs'[wp]: + "\invs'\ doMachineOp resetTimer \\_. invs'\" + apply (wp dmo_invs' no_irq no_irq_resetTimer) + apply clarsimp + apply (drule_tac Q="%_ b. underlying_memory b p = underlying_memory m p" + in use_valid) + apply (simp add: resetTimer_def + machine_op_lift_def machine_rest_lift_def split_def) + apply wp + apply clarsimp+ + done + +lemma dmo_ackInterrupt[wp]: +"\invs'\ doMachineOp (ackInterrupt irq) \\y. invs'\" + apply (wp dmo_invs' no_irq no_irq_ackInterrupt) + apply safe + apply (drule_tac Q="\_ m'. underlying_memory m' p = underlying_memory m p" + in use_valid) + apply ((clarsimp simp: ackInterrupt_def machine_op_lift_def + machine_rest_lift_def split_def | wp)+)[3] + done + +lemma runnable'_eq: + "runnable' st = (st = Running \ st = Restart)" + by (cases st; simp) + +lemma vgicMaintenance_invs'[wp]: + "\invs' and (\s. sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p)))\ + vgicMaintenance + \\_. invs'\" + supply if_split[split del] + apply (clarsimp simp: vgicMaintenance_def get_gic_vcpu_ctrl_lr_def set_gic_vcpu_ctrl_lr_def + get_gic_vcpu_ctrl_misr_def get_gic_vcpu_ctrl_eisr1_def get_gic_vcpu_ctrl_eisr0_def + doMachineOp_bind) + apply (wpsimp simp: if_apply_def2 wp: hoare_vcg_const_imp_lift) + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) + apply (rule_tac Q="\_ s. tcb_at' (ksCurThread s) s + \ invs' s + \ sch_act_not (ksCurThread s) s + \ (\d p. 
(ksCurThread s) \ set (ksReadyQueues s (d, p)))" + in hoare_post_imp) + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') + apply (clarsimp simp: st_tcb_at'_def obj_at'_def runnable'_eq) + apply (rule conjI) + apply (fastforce elim!: st_tcb_ex_cap'' simp: valid_state'_def valid_pspace'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def idle_tcb'_def) + apply wps + apply (wpsimp simp: if_apply_def2 + wp: hoare_vcg_const_imp_lift hoare_drop_imps dmo'_gets_wp + | wps)+ + apply (clarsimp cong: conj_cong imp_cong split: if_split) + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: conj_cong imp_cong split: if_split) + apply (rule conjI) + apply (clarsimp simp: st_tcb_at'_def obj_at'_def runnable'_eq) + apply (rule conjI) + apply (fastforce elim!: st_tcb_ex_cap'' simp: valid_state'_def valid_pspace'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def idle_tcb'_def) + apply clarsimp + done + +lemma vppiEvent_invs'[wp]: + "\invs' and (\s. sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p)))\ + vppiEvent irq \\y. invs'\" + supply if_split[split del] + apply (clarsimp simp: vppiEvent_def doMachineOp_bind) + apply (wpsimp simp: if_apply_def2 wp: hoare_vcg_const_imp_lift) + apply (strengthen st_tcb_ex_cap''[where P=active']) + apply (strengthen invs_iflive') + apply (clarsimp cong: imp_cong conj_cong simp: pred_conj_def) + apply (rule_tac Q="\_ s. tcb_at' (ksCurThread s) s + \ invs' s + \ sch_act_not (ksCurThread s) s + \ (\d p. (ksCurThread s) \ set (ksReadyQueues s (d, p)))" + in hoare_post_imp) + apply (clarsimp cong: imp_cong conj_cong simp: not_pred_tcb') + apply (clarsimp simp: st_tcb_at'_def obj_at'_def runnable'_eq) + apply (rule conjI) + apply (fastforce elim!: st_tcb_ex_cap'' simp: valid_state'_def valid_pspace'_def) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def obj_at'_def idle_tcb'_def) + apply wps + apply (wpsimp simp: if_apply_def2 vcpuUpdate_def + wp: hoare_vcg_const_imp_lift hoare_drop_imps + setVCPU_VPPIMasked_invs' dmo_maskInterrupt_True + | wps)+ + done + +lemma hint_invs[wp]: + "\invs' and (\s. irq \ non_kernel_IRQs \ + sch_act_not (ksCurThread s) s \ (\p. ksCurThread s \ set (ksReadyQueues s p)))\ + handleInterrupt irq \\rv. invs'\" + apply (simp add: handleInterrupt_def getSlotCap_def cong: irqstate.case_cong) + apply (rule conjI; rule impI) + apply (wp dmo_maskInterrupt_True getCTE_wp' | wpc | simp add: doMachineOp_bind maskIrqSignal_def)+ + apply (rule_tac Q="\rv. 
invs'" in hoare_post_imp) + apply (clarsimp simp: cte_wp_at_ctes_of ex_nonz_cap_to'_def) + apply fastforce + apply (wpsimp wp: threadSet_invs_trivial getIRQState_wp + simp: inQ_def handleReservedIRQ_def if_apply_def2 irqVPPIEventIndex_def + IRQ_def irqVTimerEvent_def irqVGICMaintenance_def unat_arith_simps + split_del: if_split)+ + apply (clarsimp split: if_split_asm)+ + apply (clarsimp simp: non_kernel_IRQs_def irqVTimerEvent_def irqVGICMaintenance_def + unat_arith_simps) + done + +crunch st_tcb_at'[wp]: timerTick "st_tcb_at' P t" + (wp: threadSet_pred_tcb_no_state) + +end + +end diff --git a/proof/refine/AARCH64/InvariantUpdates_H.thy b/proof/refine/AARCH64/InvariantUpdates_H.thy new file mode 100644 index 0000000000..3c307580e2 --- /dev/null +++ b/proof/refine/AARCH64/InvariantUpdates_H.thy @@ -0,0 +1,418 @@ +(* + * Copyright 2021, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory InvariantUpdates_H +imports Invariants_H +begin + +(* FIXME: use locales to shorten this work *) + +lemma ps_clear_domE[elim?]: + "\ ps_clear x n s; dom (ksPSpace s) = dom (ksPSpace s') \ \ ps_clear x n s'" + by (simp add: ps_clear_def) + +lemma ps_clear_upd: + "ksPSpace s y = Some v \ + ps_clear x n (ksPSpace_update (\a. (ksPSpace s)(y \ v')) s') = ps_clear x n s" + by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ + +lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] + +lemma ct_not_inQ_ksMachineState_update[simp]: + "ct_not_inQ (ksMachineState_update f s) = ct_not_inQ s" + by (simp add: ct_not_inQ_def) + +lemma ct_in_current_domain_ksMachineState[simp]: + "ct_idle_or_in_cur_domain' (ksMachineState_update p s) = ct_idle_or_in_cur_domain' s" + by (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +lemma invs'_machine: + assumes mask: "irq_masks (f (ksMachineState s)) = + irq_masks (ksMachineState s)" + assumes vms: "valid_machine_state' (ksMachineState_update f s) = + valid_machine_state' s" + shows "invs' (ksMachineState_update f s) = invs' s" +proof - + show ?thesis + apply (cases "ksSchedulerAction s") + apply (simp_all add: invs'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs + vms ct_not_inQ_def + state_refs_of'_def ps_clear_def + valid_irq_node'_def mask + cong: option.case_cong) + done +qed + +lemma invs_no_cicd'_machine: + assumes mask: "irq_masks (f (ksMachineState s)) = + irq_masks (ksMachineState s)" + assumes vms: "valid_machine_state' (ksMachineState_update f s) = + valid_machine_state' s" + shows "invs_no_cicd' (ksMachineState_update f s) = invs_no_cicd' s" +proof - + show ?thesis + apply (cases "ksSchedulerAction s") + apply (simp_all add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def cur_tcb'_def ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs + vms ct_not_inQ_def + state_refs_of'_def ps_clear_def + valid_irq_node'_def mask + cong: option.case_cong) + done +qed + +lemma pspace_no_overlap_queues [simp]: + "pspace_no_overlap' w sz (ksReadyQueues_update f s) = pspace_no_overlap' w sz s" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap'_ksSchedulerAction[simp]: + "pspace_no_overlap' a b (ksSchedulerAction_update f s) = + pspace_no_overlap' a b s" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap'_ksArchState_update[simp]: + "pspace_no_overlap' p n (ksArchState_update f s) = + 
pspace_no_overlap' p n s" + by (simp add: pspace_no_overlap'_def) + +lemma ksReadyQueues_update_id[simp]: + "ksReadyQueues_update id s = s" + by simp + +lemma ct_not_inQ_ksReadyQueues_update[simp]: + "ct_not_inQ (ksReadyQueues_update f s) = ct_not_inQ s" + by (simp add: ct_not_inQ_def) + +lemma inQ_context[simp]: + "inQ d p (tcbArch_update f tcb) = inQ d p tcb" + by (cases tcb, simp add: inQ_def) + +lemma valid_tcb'_tcbQueued[simp]: + "valid_tcb' (tcbQueued_update f tcb) = valid_tcb' tcb" + by (cases tcb, rule ext, simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma valid_tcb'_tcbFault_update[simp]: + "valid_tcb' tcb s \ valid_tcb' (tcbFault_update f tcb) s" + by (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma valid_tcb'_tcbTimeSlice_update[simp]: + "valid_tcb' (tcbTimeSlice_update f tcb) s = valid_tcb' tcb s" + by (simp add:valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma valid_queues_ksSchedulerAction_update[simp]: + "valid_queues (ksSchedulerAction_update f s) = valid_queues s" + unfolding valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs + by simp + +lemma valid_queues'_ksSchedulerAction_update[simp]: + "valid_queues' (ksSchedulerAction_update f s) = valid_queues' s" + by (simp add: valid_queues'_def) + +lemma ex_cte_cap_wp_to'_gsCNodes_update[simp]: + "ex_cte_cap_wp_to' P p (gsCNodes_update f s') = ex_cte_cap_wp_to' P p s'" + by (simp add: ex_cte_cap_wp_to'_def) +lemma ex_cte_cap_wp_to'_gsUserPages_update[simp]: + "ex_cte_cap_wp_to' P p (gsUserPages_update f s') = ex_cte_cap_wp_to' P p s'" + by (simp add: ex_cte_cap_wp_to'_def) + +lemma pspace_no_overlap'_gsCNodes_update[simp]: + "pspace_no_overlap' p b (gsCNodes_update f s') = pspace_no_overlap' p b s'" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap'_gsUserPages_update[simp]: + "pspace_no_overlap' p b (gsUserPages_update f s') = pspace_no_overlap' p b s'" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap'_ksMachineState_update[simp]: + "pspace_no_overlap' p n (ksMachineState_update f s) = + pspace_no_overlap' p n s" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap_gsUntypedZeroRanges[simp]: + "pspace_no_overlap' ptr n (gsUntypedZeroRanges_update f s) + = pspace_no_overlap' ptr n s" + by (simp add: pspace_no_overlap'_def) + +lemma vms'_ct[simp]: + "valid_machine_state' (ksCurThread_update f s) = valid_machine_state' s" + by (simp add: valid_machine_state'_def) + +lemma tcb_in_cur_domain_ct[simp]: + "tcb_in_cur_domain' t (ksCurThread_update f s) = tcb_in_cur_domain' t s" + by (fastforce simp: tcb_in_cur_domain'_def) + +lemma valid_queues'_ksCurDomain[simp]: + "valid_queues' (ksCurDomain_update f s) = valid_queues' s" + by (simp add: valid_queues'_def) + +lemma valid_queues'_ksDomScheduleIdx[simp]: + "valid_queues' (ksDomScheduleIdx_update f s) = valid_queues' s" + by (simp add: valid_queues'_def) + +lemma valid_queues'_ksDomSchedule[simp]: + "valid_queues' (ksDomSchedule_update f s) = valid_queues' s" + by (simp add: valid_queues'_def) + +lemma valid_queues'_ksDomainTime[simp]: + "valid_queues' (ksDomainTime_update f s) = valid_queues' s" + by (simp add: valid_queues'_def) + +lemma valid_queues'_ksWorkUnitsCompleted[simp]: + "valid_queues' (ksWorkUnitsCompleted_update f s) = valid_queues' s" + by (simp add: valid_queues'_def) + +lemma valid_queues_ksCurDomain[simp]: + "valid_queues (ksCurDomain_update f s) = valid_queues s" + by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) + +lemma 
valid_queues_ksDomScheduleIdx[simp]: + "valid_queues (ksDomScheduleIdx_update f s) = valid_queues s" + by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) + +lemma valid_queues_ksDomSchedule[simp]: + "valid_queues (ksDomSchedule_update f s) = valid_queues s" + by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) + +lemma valid_queues_ksDomainTime[simp]: + "valid_queues (ksDomainTime_update f s) = valid_queues s" + by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) + +lemma valid_queues_ksWorkUnitsCompleted[simp]: + "valid_queues (ksWorkUnitsCompleted_update f s) = valid_queues s" + by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) + +lemma valid_irq_node'_ksCurDomain[simp]: + "valid_irq_node' w (ksCurDomain_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma valid_irq_node'_ksDomScheduleIdx[simp]: + "valid_irq_node' w (ksDomScheduleIdx_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma valid_irq_node'_ksDomSchedule[simp]: + "valid_irq_node' w (ksDomSchedule_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma valid_irq_node'_ksDomainTime[simp]: + "valid_irq_node' w (ksDomainTime_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma valid_irq_node'_ksWorkUnitsCompleted[simp]: + "valid_irq_node' w (ksWorkUnitsCompleted_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma ex_cte_cap_wp_to_work_units[simp]: + "ex_cte_cap_wp_to' P slot (ksWorkUnitsCompleted_update f s) + = ex_cte_cap_wp_to' P slot s" + by (simp add: ex_cte_cap_wp_to'_def) + +add_upd_simps "ct_in_state' P (gsUntypedZeroRanges_update f s)" +declare upd_simps[simp] + +lemma ct_not_inQ_ksArchState_update[simp]: + "ct_not_inQ (ksArchState_update f s) = ct_not_inQ s" + by (simp add: ct_not_inQ_def) + +lemma ct_in_current_domain_ArchState_update[simp]: + "ct_idle_or_in_cur_domain' (ksArchState_update f s) = ct_idle_or_in_cur_domain' s" + by (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +lemma pspace_no_overlap_queuesL1 [simp]: + "pspace_no_overlap' w sz (ksReadyQueuesL1Bitmap_update f s) = pspace_no_overlap' w sz s" + by (simp add: pspace_no_overlap'_def) + +lemma pspace_no_overlap_queuesL2 [simp]: + "pspace_no_overlap' w sz (ksReadyQueuesL2Bitmap_update f s) = pspace_no_overlap' w sz s" + by (simp add: pspace_no_overlap'_def) + +lemma tcb_in_cur_domain'_ksSchedulerAction_update[simp]: + "tcb_in_cur_domain' t (ksSchedulerAction_update f s) = tcb_in_cur_domain' t s" + by (simp add: tcb_in_cur_domain'_def) + +lemma ct_idle_or_in_cur_domain'_ksSchedulerAction_update[simp]: + "b \ ResumeCurrentThread \ + ct_idle_or_in_cur_domain' (s\ksSchedulerAction := b\)" + apply (clarsimp simp add: ct_idle_or_in_cur_domain'_def) + done + +lemma sch_act_simple_wu [simp, intro!]: + "sch_act_simple (ksWorkUnitsCompleted_update f s) = sch_act_simple s" + by (simp add: sch_act_simple_def) + +lemma sch_act_simple_ksPSpace_update[simp]: + "sch_act_simple (ksPSpace_update f s) = sch_act_simple s" + apply (simp add: sch_act_simple_def) + done + +lemma ps_clear_ksReadyQueue[simp]: + "ps_clear x n (ksReadyQueues_update f s) = ps_clear x n s" + by (simp add: ps_clear_def) + +lemma inQ_tcbIPCBuffer_update_idem[simp]: + "inQ d p (tcbIPCBuffer_update f ko) = inQ d p ko" + by (clarsimp simp: inQ_def) + +lemma valid_mdb_interrupts'[simp]: + "valid_mdb' (ksInterruptState_update f s) = valid_mdb' s" + by (simp add: 
valid_mdb'_def) + +lemma vms_ksReadyQueues_update[simp]: + "valid_machine_state' (ksReadyQueues_update f s) = valid_machine_state' s" + by (simp add: valid_machine_state'_def) + +lemma ct_in_state'_ksMachineState_update[simp]: + "ct_in_state' x (ksMachineState_update f s) = ct_in_state' x s" + by (simp add: ct_in_state'_def)+ + +lemma ex_cte_cap_wp_to'_ksMachineState_update[simp]: + "ex_cte_cap_wp_to' x y (ksMachineState_update f s) = ex_cte_cap_wp_to' x y s" + by (simp add: ex_cte_cap_wp_to'_def)+ + +lemma ps_clear_ksMachineState_update[simp]: + "ps_clear a b (ksMachineState_update f s) = ps_clear a b s" + by (simp add: ps_clear_def) + +lemma ct_in_state_ksSched[simp]: + "ct_in_state' P (ksSchedulerAction_update f s) = ct_in_state' P s" + unfolding ct_in_state'_def + apply auto + done + +lemma invs'_wu [simp]: + "invs' (ksWorkUnitsCompleted_update f s) = invs' s" + apply (simp add: invs'_def cur_tcb'_def valid_state'_def Invariants_H.valid_queues_def + valid_queues'_def valid_irq_node'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + bitmapQ_defs valid_queues_no_bitmap_def) + done + +lemma valid_arch_state'_interrupt[simp]: + "valid_arch_state' (ksInterruptState_update f s) = valid_arch_state' s" + by (simp add: valid_arch_state'_def cong: option.case_cong) + +lemma valid_bitmapQ_ksSchedulerAction_upd[simp]: + "valid_bitmapQ (ksSchedulerAction_update f s) = valid_bitmapQ s" + unfolding bitmapQ_defs by simp + +lemma bitmapQ_no_L1_orphans_ksSchedulerAction_upd[simp]: + "bitmapQ_no_L1_orphans (ksSchedulerAction_update f s) = bitmapQ_no_L1_orphans s" + unfolding bitmapQ_defs by simp + +lemma bitmapQ_no_L2_orphans_ksSchedulerAction_upd[simp]: + "bitmapQ_no_L2_orphans (ksSchedulerAction_update f s) = bitmapQ_no_L2_orphans s" + unfolding bitmapQ_defs by simp + +lemma cur_tcb'_ksReadyQueuesL1Bitmap_upd[simp]: + "cur_tcb' (ksReadyQueuesL1Bitmap_update f s) = cur_tcb' s" + unfolding cur_tcb'_def by simp + +lemma cur_tcb'_ksReadyQueuesL2Bitmap_upd[simp]: + "cur_tcb' (ksReadyQueuesL2Bitmap_update f s) = cur_tcb' s" + unfolding cur_tcb'_def by simp + +lemma ex_cte_cap_wp_to'_ksReadyQueuesL1Bitmap[simp]: + "ex_cte_cap_wp_to' P p (ksReadyQueuesL1Bitmap_update f s) = ex_cte_cap_wp_to' P p s" + unfolding ex_cte_cap_wp_to'_def by simp + +lemma ex_cte_cap_wp_to'_ksReadyQueuesL2Bitmap[simp]: + "ex_cte_cap_wp_to' P p (ksReadyQueuesL2Bitmap_update f s) = ex_cte_cap_wp_to' P p s" + unfolding ex_cte_cap_wp_to'_def by simp + +lemma sch_act_simple_readyQueue[simp]: + "sch_act_simple (ksReadyQueues_update f s) = sch_act_simple s" + apply (simp add: sch_act_simple_def) + done + +lemma sch_act_simple_ksReadyQueuesL1Bitmap[simp]: + "sch_act_simple (ksReadyQueuesL1Bitmap_update f s) = sch_act_simple s" + apply (simp add: sch_act_simple_def) + done + +lemma sch_act_simple_ksReadyQueuesL2Bitmap[simp]: + "sch_act_simple (ksReadyQueuesL2Bitmap_update f s) = sch_act_simple s" + apply (simp add: sch_act_simple_def) + done + +lemma ksDomainTime_invs[simp]: + "invs' (ksDomainTime_update f s) = invs' s" + by (simp add:invs'_def valid_state'_def + cur_tcb'_def ct_not_inQ_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_machine_state'_def) + +lemma valid_machine_state'_ksDomainTime[simp]: + "valid_machine_state' (ksDomainTime_update f s) = valid_machine_state' s" + by (simp add:valid_machine_state'_def) + +lemma cur_tcb'_ksDomainTime[simp]: + "cur_tcb' (ksDomainTime_update f s) = cur_tcb' s" + by (simp add:cur_tcb'_def) + +lemma 
ct_idle_or_in_cur_domain'_ksDomainTime[simp]: + "ct_idle_or_in_cur_domain' (ksDomainTime_update f s) = ct_idle_or_in_cur_domain' s" + by (simp add:ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +lemma sch_act_sane_ksMachineState[simp]: + "sch_act_sane (ksMachineState_update f s) = sch_act_sane s" + by (simp add: sch_act_sane_def) + +lemma ct_not_inQ_update_cnt[simp]: + "ct_not_inQ (s\ksSchedulerAction := ChooseNewThread\)" + by (simp add: ct_not_inQ_def) + +lemma ct_not_inQ_update_stt[simp]: + "ct_not_inQ (s\ksSchedulerAction := SwitchToThread t\)" + by (simp add: ct_not_inQ_def) + +lemma invs'_update_cnt[elim!]: + "invs' s \ invs' (s\ksSchedulerAction := ChooseNewThread\)" + by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues'_def + valid_irq_node'_def cur_tcb'_def ct_idle_or_in_cur_domain'_def + tcb_in_cur_domain'_def valid_queues_no_bitmap_def + bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def) + + +context begin interpretation Arch . + +lemma valid_arch_state'_vmid_update[simp]: + "valid_arch_state' (s\ksArchState := armKSVMIDTable_update f (ksArchState s)\) = + valid_arch_state' s" + by (auto simp: valid_arch_state'_def split: option.split) + +lemma valid_arch_state'_vmid_next_update[simp]: + "valid_arch_state' (s\ksArchState := armKSNextVMID_update f (ksArchState s)\) = + valid_arch_state' s" + by (auto simp: valid_arch_state'_def split: option.split) + +lemma invs'_armKSVMIDTable_update[simp]: + "invs' (s\ksArchState := armKSVMIDTable_update f s'\) = invs' (s\ksArchState := s'\)" + by (simp add: invs'_def valid_state'_def valid_global_refs'_def global_refs'_def table_refs'_def + valid_machine_state'_def valid_arch_state'_def cong: option.case_cong) + +lemma invs'_armKSNextVMID_update[simp]: + "invs' (s\ksArchState := armKSNextVMID_update f s'\) = invs' (s\ksArchState := s'\)" + by (simp add: invs'_def valid_state'_def valid_global_refs'_def global_refs'_def table_refs'_def + valid_machine_state'_def valid_arch_state'_def cong: option.case_cong) + +lemma invs_no_cicd'_armKSVMIDTable_update[simp]: + "invs_no_cicd' (s\ksArchState := armKSVMIDTable_update f s'\) = invs_no_cicd' (s\ksArchState := s'\)" + by (simp add: invs_no_cicd'_def valid_state'_def valid_global_refs'_def global_refs'_def table_refs'_def + valid_machine_state'_def valid_arch_state'_def cong: option.case_cong) + +lemma invs_no_cicd'_armKSNextVMID_update[simp]: + "invs_no_cicd' (s\ksArchState := armKSNextVMID_update f s'\) = invs_no_cicd' (s\ksArchState := s'\)" + by (simp add: invs_no_cicd'_def valid_state'_def valid_global_refs'_def global_refs'_def table_refs'_def + valid_machine_state'_def valid_arch_state'_def cong: option.case_cong) + +lemma invs'_gsTypes_update: + "ksA' = ksArchState s \ invs' (s \ksArchState := gsPTTypes_update f ksA'\) = invs' s" + by (simp add: invs'_def valid_state'_def valid_global_refs'_def global_refs'_def + valid_machine_state'_def valid_arch_state'_def + cong: option.case_cong) + +end + +end \ No newline at end of file diff --git a/proof/refine/AARCH64/Invariants_H.thy b/proof/refine/AARCH64/Invariants_H.thy new file mode 100644 index 0000000000..fc25bd186b --- /dev/null +++ b/proof/refine/AARCH64/Invariants_H.thy @@ -0,0 +1,3468 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Invariants_H +imports + LevityCatch + "AInvs.ArchDetSchedSchedule_AI" +begin + +(* global data and code of the kernel, not covered by any cap *) +axiomatization + 
kernel_data_refs :: "word64 set" + +context Arch begin + +declare lookupPTSlotFromLevel.simps[simp del] +declare lookupPTFromLevel.simps[simp del] + +lemmas haskell_crunch_def [crunch_def] = + deriveCap_def finaliseCap_def + hasCancelSendRights_def sameRegionAs_def isPhysicalCap_def + sameObjectAs_def updateCapData_def maskCapRights_def + createObject_def capUntypedPtr_def capUntypedSize_def + performInvocation_def decodeInvocation_def + +context begin global_naming global +requalify_facts + Retype_H.deriveCap_def Retype_H.finaliseCap_def + Retype_H.hasCancelSendRights_def Retype_H.sameRegionAs_def Retype_H.isPhysicalCap_def + Retype_H.sameObjectAs_def Retype_H.updateCapData_def Retype_H.maskCapRights_def + Retype_H.createObject_def Retype_H.capUntypedPtr_def Retype_H.capUntypedSize_def + Retype_H.performInvocation_def Retype_H.decodeInvocation_def +end + +end + +\ \---------------------------------------------------------------------------\ + +section "Invariants on Executable Spec" + +context begin interpretation Arch . + +definition ps_clear :: "obj_ref \ nat \ kernel_state \ bool" where + "ps_clear p n s \ (mask_range p n - {p}) \ dom (ksPSpace s) = {}" + +definition pspace_no_overlap' :: "obj_ref \ nat \ kernel_state \ bool" where + "pspace_no_overlap' ptr bits \ + \s. \x ko. ksPSpace s x = Some ko \ + (mask_range x (objBitsKO ko)) \ {ptr .. (ptr && ~~ mask bits) + mask bits} = {}" + +definition ko_wp_at' :: "(kernel_object \ bool) \ obj_ref \ kernel_state \ bool" where + "ko_wp_at' P p s \ \ko. ksPSpace s p = Some ko \ is_aligned p (objBitsKO ko) \ P ko \ + ps_clear p (objBitsKO ko) s" + +definition obj_at' :: "('a::pspace_storable \ bool) \ machine_word \ kernel_state \ bool" where + obj_at'_real_def: + "obj_at' P p s \ ko_wp_at' (\ko. \obj. projectKO_opt ko = Some obj \ P obj) p s" + +definition typ_at' :: "kernel_object_type \ machine_word \ kernel_state \ bool" where + "typ_at' T \ ko_wp_at' (\ko. koTypeOf ko = T)" + +abbreviation ep_at' :: "obj_ref \ kernel_state \ bool" where + "ep_at' \ obj_at' ((\x. True) :: endpoint \ bool)" + +abbreviation ntfn_at' :: "obj_ref \ kernel_state \ bool" where + "ntfn_at' \ obj_at' ((\x. True) :: notification \ bool)" + +abbreviation tcb_at' :: "obj_ref \ kernel_state \ bool" where + "tcb_at' \ obj_at' ((\x. True) :: tcb \ bool)" + +abbreviation real_cte_at' :: "obj_ref \ kernel_state \ bool" where + "real_cte_at' \ obj_at' ((\x. True) :: cte \ bool)" + +abbreviation ko_at' :: "'a::pspace_storable \ obj_ref \ kernel_state \ bool" where + "ko_at' v \ obj_at' (\k. 
k = v)" + +abbreviation + "vcpu_at' \ typ_at' (ArchT VCPUT)" + +abbreviation pte_at' :: "obj_ref \ kernel_state \ bool" where + "pte_at' \ typ_at' (ArchT PTET)" + +end + +record itcb' = + itcbState :: thread_state + itcbFaultHandler :: cptr + itcbIPCBuffer :: vptr + itcbBoundNotification :: "machine_word option" + itcbPriority :: priority + itcbFault :: "fault option" + itcbTimeSlice :: nat + itcbMCP :: priority + +definition tcb_to_itcb' :: "tcb \ itcb'" where + "tcb_to_itcb' tcb \ \ itcbState = tcbState tcb, + itcbFaultHandler = tcbFaultHandler tcb, + itcbIPCBuffer = tcbIPCBuffer tcb, + itcbBoundNotification = tcbBoundNotification tcb, + itcbPriority = tcbPriority tcb, + itcbFault = tcbFault tcb, + itcbTimeSlice = tcbTimeSlice tcb, + itcbMCP = tcbMCP tcb\" + +lemma itcb_simps[simp]: + "itcbState (tcb_to_itcb' tcb) = tcbState tcb" + "itcbFaultHandler (tcb_to_itcb' tcb) = tcbFaultHandler tcb" + "itcbIPCBuffer (tcb_to_itcb' tcb) = tcbIPCBuffer tcb" + "itcbBoundNotification (tcb_to_itcb' tcb) = tcbBoundNotification tcb" + "itcbPriority (tcb_to_itcb' tcb) = tcbPriority tcb" + "itcbFault (tcb_to_itcb' tcb) = tcbFault tcb" + "itcbTimeSlice (tcb_to_itcb' tcb) = tcbTimeSlice tcb" + "itcbMCP (tcb_to_itcb' tcb) = tcbMCP tcb" + by (auto simp: tcb_to_itcb'_def) + +definition pred_tcb_at' :: "(itcb' \ 'a) \ ('a \ bool) \ machine_word \ kernel_state \ bool" + where + "pred_tcb_at' proj test \ obj_at' (\ko. test (proj (tcb_to_itcb' ko)))" + +abbreviation st_tcb_at' :: "(thread_state \ bool) \ obj_ref \ kernel_state \ bool" where + "st_tcb_at' \ pred_tcb_at' itcbState" + +abbreviation bound_tcb_at' :: "(obj_ref option \ bool) \ obj_ref \ kernel_state \ bool" where + "bound_tcb_at' \ pred_tcb_at' itcbBoundNotification" + +abbreviation mcpriority_tcb_at' :: "(priority \ bool) \ obj_ref \ kernel_state \ bool" where + "mcpriority_tcb_at' \ pred_tcb_at' itcbMCP" + +lemma st_tcb_at'_def: + "st_tcb_at' test \ obj_at' (test \ tcbState)" + by (simp add: pred_tcb_at'_def o_def) + +text \ cte with property at \ +definition cte_wp_at' :: "(cte \ bool) \ obj_ref \ kernel_state \ bool" where + "cte_wp_at' P p s \ \cte::cte. 
fst (getObject p s) = {(cte,s)} \ P cte" + +abbreviation cte_at' :: "obj_ref \ kernel_state \ bool" where + "cte_at' \ cte_wp_at' \" + +definition tcb_cte_cases :: "machine_word \ ((tcb \ cte) \ ((cte \ cte) \ tcb \ tcb))" where + "tcb_cte_cases \ [ 0 << cteSizeBits \ (tcbCTable, tcbCTable_update), + 1 << cteSizeBits \ (tcbVTable, tcbVTable_update), + 2 << cteSizeBits \ (tcbReply, tcbReply_update), + 3 << cteSizeBits \ (tcbCaller, tcbCaller_update), + 4 << cteSizeBits \ (tcbIPCBufferFrame, tcbIPCBufferFrame_update) ]" + +definition max_ipc_words :: machine_word where + "max_ipc_words \ capTransferDataSize + msgMaxLength + msgMaxExtraCaps + 2" + +definition tcb_st_refs_of' :: "thread_state \ (obj_ref \ reftype) set" where + "tcb_st_refs_of' st \ case st of + (BlockedOnReceive x _) => {(x, TCBBlockedRecv)} + | (BlockedOnSend x _ _ _ _) => {(x, TCBBlockedSend)} + | (BlockedOnNotification x) => {(x, TCBSignal)} + | _ => {}" + +definition ep_q_refs_of' :: "endpoint \ (obj_ref \ reftype) set" where + "ep_q_refs_of' ep \ case ep of + IdleEP => {} + | (RecvEP q) => set q \ {EPRecv} + | (SendEP q) => set q \ {EPSend}" + +definition ntfn_q_refs_of' :: "Structures_H.ntfn \ (obj_ref \ reftype) set" where + "ntfn_q_refs_of' ntfn \ case ntfn of + IdleNtfn => {} + | (WaitingNtfn q) => set q \ {NTFNSignal} + | (ActiveNtfn b) => {}" + +definition ntfn_bound_refs' :: "obj_ref option \ (obj_ref \ reftype) set" where + "ntfn_bound_refs' t \ set_option t \ {NTFNBound}" + +definition tcb_bound_refs' :: "obj_ref option \ (obj_ref \ reftype) set" where + "tcb_bound_refs' a \ set_option a \ {TCBBound}" + +definition refs_of' :: "kernel_object \ (obj_ref \ reftype) set" where + "refs_of' ko \ case ko of + (KOTCB tcb) => tcb_st_refs_of' (tcbState tcb) \ tcb_bound_refs' (tcbBoundNotification tcb) + | (KOEndpoint ep) => ep_q_refs_of' ep + | (KONotification ntfn) => ntfn_q_refs_of' (ntfnObj ntfn) \ ntfn_bound_refs' (ntfnBoundTCB ntfn) + | _ => {}" + +definition state_refs_of' :: "kernel_state \ obj_ref \ (obj_ref \ reftype) set" where + "state_refs_of' s \ \x. + case ksPSpace s x of + None \ {} + | Some ko \ if is_aligned x (objBitsKO ko) \ ps_clear x (objBitsKO ko) s + then refs_of' ko else {}" + +(* the non-hyp, non-arch part of live' *) +primrec live0' :: "Structures_H.kernel_object \ bool" where + "live0' (KOTCB tcb) = + (bound (tcbBoundNotification tcb) \ + (tcbState tcb \ Inactive \ tcbState tcb \ IdleThreadState) \ tcbQueued tcb)" +| "live0' (KOCTE cte) = False" +| "live0' (KOEndpoint ep) = (ep \ IdleEP)" +| "live0' (KONotification ntfn) = (bound (ntfnBoundTCB ntfn) \ (\ts. 
ntfnObj ntfn = WaitingNtfn ts))" +| "live0' (KOUserData) = False" +| "live0' (KOUserDataDevice) = False" +| "live0' (KOKernelData) = False" +| "live0' (KOArch ako) = False" + +(* hyp_refs *) + +definition tcb_vcpu_refs' :: "machine_word option \ (obj_ref \ reftype) set" where + "tcb_vcpu_refs' t \ set_option t \ {TCBHypRef}" + +definition tcb_hyp_refs' :: "arch_tcb \ (obj_ref \ reftype) set" where + "tcb_hyp_refs' t \ tcb_vcpu_refs' (AARCH64_H.atcbVCPUPtr t)" + +definition vcpu_tcb_refs' :: "obj_ref option \ (obj_ref \ reftype) set" where + "vcpu_tcb_refs' t \ set_option t \ {HypTCBRef}" + +definition refs_of_a' :: "arch_kernel_object \ (obj_ref \ reftype) set" where + "refs_of_a' x \ case x of + AARCH64_H.KOASIDPool asidpool \ {} + | AARCH64_H.KOPTE pte \ {} + | AARCH64_H.KOVCPU vcpu \ vcpu_tcb_refs' (AARCH64_H.vcpuTCBPtr vcpu)" + +definition hyp_refs_of' :: "kernel_object \ (obj_ref \ reftype) set" where + "hyp_refs_of' x \ case x of + (KOTCB tcb) \ tcb_hyp_refs' (tcbArch tcb) + | (KOCTE cte) \ {} + | (KOEndpoint ep) \ {} + | (KONotification ntfn) \ {} + | (KOUserData) \ {} + | (KOUserDataDevice) \ {} + | (KOKernelData) \ {} + | (KOArch ako) \ refs_of_a' ako" + +definition state_hyp_refs_of' :: "kernel_state \ obj_ref \ (obj_ref \ reftype) set" where + "state_hyp_refs_of' s \ + (\p. case (ksPSpace s p) of + None \ {} + | Some ko \ (if is_aligned p (objBitsKO ko) \ ps_clear p (objBitsKO ko) s + then hyp_refs_of' ko + else {}))" + +definition arch_live' :: "arch_kernel_object \ bool" where + "arch_live' ao \ case ao of + AARCH64_H.KOVCPU vcpu \ bound (AARCH64_H.vcpuTCBPtr vcpu) + | _ \ False" + +definition hyp_live' :: "kernel_object \ bool" where + "hyp_live' ko \ case ko of + (KOTCB tcb) \ bound (AARCH64_H.atcbVCPUPtr (tcbArch tcb)) + | (KOArch ako) \ arch_live' ako + | _ \ False" + +definition live' :: "kernel_object \ bool" where + "live' ko \ case ko of + (KOTCB tcb) => live0' ko \ hyp_live' ko + | (KOCTE cte) => False + | (KOEndpoint ep) => live0' ko + | (KONotification ntfn) => live0' ko + | (KOUserData) => False + | (KOUserDataDevice) => False + | (KOKernelData) => False + | (KOArch ako) => hyp_live' ko" + +context begin interpretation Arch . (*FIXME: arch_split*) + +primrec azobj_refs' :: "arch_capability \ obj_ref set" where + "azobj_refs' (ASIDPoolCap _ _) = {}" +| "azobj_refs' ASIDControlCap = {}" +| "azobj_refs' (FrameCap _ _ _ _ _) = {}" +| "azobj_refs' (PageTableCap _ _ _) = {}" +| "azobj_refs' (VCPUCap v) = {v}" + +lemma azobj_refs'_only_vcpu: + "(x \ azobj_refs' acap) = (acap = VCPUCap x)" + by (cases acap) auto +end + +fun zobj_refs' :: "capability \ obj_ref set" where + "zobj_refs' NullCap = {}" +| "zobj_refs' DomainCap = {}" +| "zobj_refs' (UntypedCap d r n f) = {}" +| "zobj_refs' (EndpointCap r badge x y z t) = {r}" +| "zobj_refs' (NotificationCap r badge x y) = {r}" +| "zobj_refs' (CNodeCap r b g gsz) = {}" +| "zobj_refs' (ThreadCap r) = {r}" +| "zobj_refs' (Zombie r b n) = {}" +| "zobj_refs' (ArchObjectCap ac) = azobj_refs' ac" +| "zobj_refs' (IRQControlCap) = {}" +| "zobj_refs' (IRQHandlerCap irq) = {}" +| "zobj_refs' (ReplyCap tcb m x) = {}" + +definition ex_nonz_cap_to' :: "obj_ref \ kernel_state \ bool" where + "ex_nonz_cap_to' ref \ \s. \cref. cte_wp_at' (\c. ref \ zobj_refs' (cteCap c)) cref s" + +definition if_live_then_nonz_cap' :: "kernel_state \ bool" where + "if_live_then_nonz_cap' s \ \ptr. ko_wp_at' live' ptr s \ ex_nonz_cap_to' ptr s" + +fun cte_refs' :: "capability \ obj_ref \ obj_ref set" where + "cte_refs' (CNodeCap ref bits _ _) x = (\x. 
ref + (x << cteSizeBits)) ` {0 .. mask bits}" +| "cte_refs' (ThreadCap ref) x = (\x. ref + x) ` dom tcb_cte_cases" +| "cte_refs' (Zombie ref _ n) x = (\x. ref + (x << cteSizeBits)) ` {0 ..< of_nat n}" +| "cte_refs' (IRQHandlerCap irq) x = {x + (ucast irq << cteSizeBits)}" +| "cte_refs' _ _ = {}" + + +abbreviation irq_node' :: "kernel_state \ obj_ref" where + "irq_node' s \ intStateIRQNode (ksInterruptState s)" + +definition ex_cte_cap_wp_to' :: "(capability \ bool) \ obj_ref \ kernel_state \ bool" where + "ex_cte_cap_wp_to' P ptr \ + \s. \cref. cte_wp_at' (\c. P (cteCap c) \ ptr \ cte_refs' (cteCap c) (irq_node' s)) cref s" + +abbreviation ex_cte_cap_to' :: "obj_ref \ kernel_state \ bool" where + "ex_cte_cap_to' \ ex_cte_cap_wp_to' \" + +definition if_unsafe_then_cap' :: "kernel_state \ bool" where + "if_unsafe_then_cap' s \ + \cref. cte_wp_at' (\c. cteCap c \ NullCap) cref s \ ex_cte_cap_to' cref s" + + +section "Valid caps and objects (design spec)" + +context begin interpretation Arch . + +primrec acapBits :: "arch_capability \ nat" where + "acapBits (ASIDPoolCap _ _) = asidLowBits + word_size_bits" +| "acapBits ASIDControlCap = asidHighBits + word_size_bits" +| "acapBits (FrameCap _ _ sz _ _) = pageBitsForSize sz" +| "acapBits (PageTableCap _ pt_t _) = table_size pt_t" +| "acapBits (VCPUCap v) = vcpuBits" + +end + +primrec zBits :: "zombie_type \ nat" where + "zBits (ZombieCNode n) = objBits (undefined::cte) + n" +| "zBits (ZombieTCB) = tcbBlockSizeBits" + +primrec capBits :: "capability \ nat" where + "capBits NullCap = 0" +| "capBits DomainCap = 0" +| "capBits (UntypedCap _ _ b _) = b" +| "capBits (EndpointCap _ _ _ _ _ _) = objBits (undefined::endpoint)" +| "capBits (NotificationCap _ _ _ _) = objBits (undefined::Structures_H.notification)" +| "capBits (CNodeCap _ b _ _) = objBits (undefined::cte) + b" +| "capBits (ThreadCap _) = objBits (undefined::tcb)" +| "capBits (Zombie _ z _) = zBits z" +| "capBits (IRQControlCap) = 0" +| "capBits (IRQHandlerCap _) = 0" +| "capBits (ReplyCap _ _ _) = objBits (undefined :: tcb)" +| "capBits (ArchObjectCap x) = acapBits x" + +definition capAligned :: "capability \ bool" where + "capAligned c \ is_aligned (capUntypedPtr c) (capBits c) \ capBits c < word_bits" + +definition obj_range' :: "machine_word \ kernel_object \ machine_word set" where + "obj_range' p ko \ mask_range p (objBitsKO ko)" + +primrec (nonexhaustive) usableUntypedRange :: "capability \ machine_word set" where + "usableUntypedRange (UntypedCap _ p n f) = (if f < 2^n then {p+of_nat f .. p + mask n} else {})" + +definition valid_untyped' :: "bool \ obj_ref \ nat \ nat \ kernel_state \ bool" where + "valid_untyped' d ptr bits idx s \ + \ptr'. \ ko_wp_at' (\ko. mask_range ptr bits \ obj_range' ptr' ko + \ obj_range' ptr' ko \ + usableUntypedRange (UntypedCap d ptr bits idx) \ {}) ptr' s" + +primrec zombieCTEs :: "zombie_type \ nat" where + "zombieCTEs ZombieTCB = 5" +| "zombieCTEs (ZombieCNode n) = 2 ^ n" + +context begin interpretation Arch . + +definition page_table_at' :: "pt_type \ obj_ref \ kernel_state \ bool" where + "page_table_at' pt_t p \ \s. + is_aligned p (ptBits pt_t) \ + (\i \ mask (ptTranslationBits pt_t). 
pte_at' (p + (i << pte_bits)) s)" + +lemmas vspace_table_at'_defs = page_table_at'_def + +abbreviation asid_pool_at' :: "obj_ref \ kernel_state \ bool" where + "asid_pool_at' \ typ_at' (ArchT ASIDPoolT)" + +definition asid_wf :: "asid \ bool" where + "asid_wf asid \ asid \ mask asid_bits" + +definition wellformed_mapdata' :: "asid \ vspace_ref \ bool" where + "wellformed_mapdata' \ \(asid, vref). 0 < asid \ asid_wf asid \ vref \ user_region" + +definition wellformed_acap' :: "arch_capability \ bool" where + "wellformed_acap' ac \ + case ac of + ASIDPoolCap r asid \ is_aligned asid asid_low_bits \ asid_wf asid + | FrameCap r rghts sz dev mapdata \ + case_option True wellformed_mapdata' mapdata \ + case_option True (swp vmsz_aligned sz \ snd) mapdata + | PageTableCap pt_t r (Some mapdata) \ wellformed_mapdata' mapdata + | _ \ True" + +lemmas wellformed_acap'_simps[simp] = wellformed_acap'_def[split_simps arch_capability.split] + +definition frame_at' :: "obj_ref \ vmpage_size \ bool \ kernel_state \ bool" where + "frame_at' r sz dev s \ + \p < 2 ^ (pageBitsForSize sz - pageBits). + typ_at' (if dev then UserDataDeviceT else UserDataT) (r + (p << pageBits)) s" + +definition valid_arch_cap_ref' :: "arch_capability \ kernel_state \ bool" where + "valid_arch_cap_ref' ac s \ case ac of + ASIDPoolCap r as \ typ_at' (ArchT ASIDPoolT) r s + | ASIDControlCap \ True + | FrameCap r rghts sz dev mapdata \ frame_at' r sz dev s + | PageTableCap r pt_t mapdata \ page_table_at' pt_t r s + | VCPUCap r \ vcpu_at' r s" + +lemmas valid_arch_cap_ref'_simps[simp] = + valid_arch_cap_ref'_def[split_simps arch_capability.split] + +definition valid_arch_cap' :: "arch_capability \ kernel_state \ bool" where + "valid_arch_cap' cap \ \s. wellformed_acap' cap \ valid_arch_cap_ref' cap s" + +lemmas valid_arch_cap'_simps[simp] = + valid_arch_cap'_def[unfolded wellformed_acap'_def valid_arch_cap_ref'_def, + split_simps arch_capability.split, simplified] + +definition arch_cap'_fun_lift :: "(arch_capability \ 'a) \ 'a \ capability \ 'a" where + "arch_cap'_fun_lift P F c \ case c of ArchObjectCap ac \ P ac | _ \ F" + +lemmas arch_cap'_fun_lift_simps[simp] = arch_cap'_fun_lift_def[split_simps capability.split] + +definition valid_acap' :: "capability \ kernel_state \ bool" where + "valid_acap' \ arch_cap'_fun_lift valid_arch_cap' \" + +definition + valid_cap' :: "capability \ kernel_state \ bool" +where valid_cap'_def: + "valid_cap' c s \ capAligned c \ + (case c of + NullCap \ True + | DomainCap \ True + | UntypedCap d r n f \ + valid_untyped' d r n f s \ r \ 0 \ minUntypedSizeBits \ n \ n \ maxUntypedSizeBits + \ f \ 2^n \ is_aligned (of_nat f :: machine_word) minUntypedSizeBits + | EndpointCap r badge x y z t \ ep_at' r s + | NotificationCap r badge x y \ ntfn_at' r s + | CNodeCap r bits guard guard_sz \ + bits \ 0 \ bits + guard_sz \ word_bits \ guard && mask guard_sz = guard \ + (\addr. real_cte_at' (r + 2^cteSizeBits * (addr && mask bits)) s) + | ThreadCap r \ tcb_at' r s + | ReplyCap r m x \ tcb_at' r s + | IRQControlCap \ True + | IRQHandlerCap irq \ irq \ maxIRQ + | Zombie r b n \ n \ zombieCTEs b \ zBits b < word_bits + \ (case b of ZombieTCB \ tcb_at' r s | ZombieCNode n \ n \ 0 + \ (\addr. 
real_cte_at' (r + 2^cteSizeBits * (addr && mask n)) s)) + | ArchObjectCap ac \ valid_arch_cap' ac s)" + +(* Use abbreviation, not syntax, so that it can be input-only *) +abbreviation (input) valid_cap'_syn :: + "kernel_state \ capability \ bool" ("_ \'' _" [60, 60] 61) where + "s \' c \ valid_cap' c s" + +definition valid_cte' :: "cte \ kernel_state \ bool" where + "valid_cte' cte s \ s \' (cteCap cte)" + +definition valid_tcb_state' :: "thread_state \ kernel_state \ bool" where + "valid_tcb_state' ts s \ case ts of + BlockedOnReceive ref a \ ep_at' ref s + | BlockedOnSend ref a b d c \ ep_at' ref s + | BlockedOnNotification ref \ ntfn_at' ref s + | _ \ True" + +definition valid_ipc_buffer_ptr' :: "machine_word \ kernel_state \ bool" where + "valid_ipc_buffer_ptr' a s \ + is_aligned a msg_align_bits \ typ_at' UserDataT (a && ~~ mask pageBits) s" + +definition valid_bound_ntfn' :: "machine_word option \ kernel_state \ bool" where + "valid_bound_ntfn' ntfn_opt s \ + case ntfn_opt of None \ True | Some a \ ntfn_at' a s" + +definition is_device_frame_cap' :: "capability \ bool" where + "is_device_frame_cap' cap \ case cap of ArchObjectCap (FrameCap _ _ _ dev _) \ dev | _ \ False" + +definition valid_arch_tcb' :: "Structures_H.arch_tcb \ kernel_state \ bool" where + "valid_arch_tcb' \ \t s. \v. atcbVCPUPtr t = Some v \ vcpu_at' v s " + +definition valid_tcb' :: "tcb \ kernel_state \ bool" where + "valid_tcb' t s \ (\(getF, setF) \ ran tcb_cte_cases. s \' cteCap (getF t)) + \ valid_tcb_state' (tcbState t) s + \ is_aligned (tcbIPCBuffer t) msg_align_bits + \ valid_bound_ntfn' (tcbBoundNotification t) s + \ tcbDomain t \ maxDomain + \ tcbPriority t \ maxPriority + \ tcbMCP t \ maxPriority + \ valid_arch_tcb' (tcbArch t) s" + +definition valid_ep' :: "Structures_H.endpoint \ kernel_state \ bool" where + "valid_ep' ep s \ case ep of + IdleEP \ True + | SendEP ts \ (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts) + | RecvEP ts \ (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts)" + + +definition valid_bound_tcb' :: "machine_word option \ kernel_state \ bool" where + "valid_bound_tcb' tcb_opt s \ case tcb_opt of None \ True | Some t \ tcb_at' t s" + +definition valid_ntfn' :: "Structures_H.notification \ kernel_state \ bool" where + "valid_ntfn' ntfn s \ (case ntfnObj ntfn of + IdleNtfn \ True + | WaitingNtfn ts \ + (ts \ [] \ (\t \ set ts. tcb_at' t s) \ distinct ts + \ (case ntfnBoundTCB ntfn of Some tcb \ ts = [tcb] | _ \ True)) + | ActiveNtfn b \ True) + \ valid_bound_tcb' (ntfnBoundTCB ntfn) s" + +definition valid_mapping' :: "machine_word \ vmpage_size \ kernel_state \ bool" where + "valid_mapping' x sz s \ is_aligned x (pageBitsForSize sz) \ ptrFromPAddr x \ 0" + +(* KOArch validity can be lifted from AInvs, since all cases only consist of typ_at' predicates. *) +definition + valid_obj' :: "Structures_H.kernel_object \ kernel_state \ bool" +where + "valid_obj' ko s \ case ko of + KOEndpoint endpoint \ valid_ep' endpoint s + | KONotification notification \ valid_ntfn' notification s + | KOKernelData \ False + | KOUserData \ True + | KOUserDataDevice \ True + | KOTCB tcb \ valid_tcb' tcb s + | KOCTE cte \ valid_cte' cte s + | KOArch arch_kernel_object \ True" + +definition + pspace_aligned' :: "kernel_state \ bool" +where + "pspace_aligned' s \ + \x \ dom (ksPSpace s). is_aligned x (objBitsKO (the (ksPSpace s x)))" + +definition + pspace_distinct' :: "kernel_state \ bool" +where + "pspace_distinct' s \ + \x \ dom (ksPSpace s). 
ps_clear x (objBitsKO (the (ksPSpace s x))) s" + +definition + valid_objs' :: "kernel_state \ bool" +where + "valid_objs' s \ \obj \ ran (ksPSpace s). valid_obj' obj s" + + +type_synonym cte_heap = "machine_word \ cte option" + +definition + map_to_ctes :: "(machine_word \ kernel_object) \ cte_heap" +where + "map_to_ctes m \ \x. + let cte_bits = objBitsKO (KOCTE undefined); + tcb_bits = objBitsKO (KOTCB undefined); + y = (x && (~~ mask tcb_bits)) + in + if \cte. m x = Some (KOCTE cte) \ is_aligned x cte_bits + \ {x + 1 .. x + (1 << cte_bits) - 1} \ dom m = {} + then case m x of Some (KOCTE cte) \ Some cte + else if \tcb. m y = Some (KOTCB tcb) + \ {y + 1 .. y + (1 << tcb_bits) - 1} \ dom m = {} + then case m y of Some (KOTCB tcb) \ + option_map (\(getF, setF). getF tcb) (tcb_cte_cases (x - y)) + else None" + +abbreviation + "ctes_of s \ map_to_ctes (ksPSpace s)" + +definition + mdb_next :: "cte_heap \ machine_word \ machine_word option" +where + "mdb_next s c \ option_map (mdbNext o cteMDBNode) (s c)" + +definition + mdb_next_rel :: "cte_heap \ (machine_word \ machine_word) set" +where + "mdb_next_rel m \ {(x, y). mdb_next m x = Some y}" + +abbreviation + mdb_next_direct :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \ _" [60,0,60] 61) +where + "m \ a \ b \ (a, b) \ mdb_next_rel m" + +abbreviation + mdb_next_trans :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \\<^sup>+ _" [60,0,60] 61) +where + "m \ a \\<^sup>+ b \ (a,b) \ (mdb_next_rel m)\<^sup>+" + +abbreviation + mdb_next_rtrans :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \\<^sup>* _" [60,0,60] 61) +where + "m \ a \\<^sup>* b \ (a,b) \ (mdb_next_rel m)\<^sup>*" + +definition + "valid_badges m \ + \p p' cap node cap' node'. + m p = Some (CTE cap node) \ + m p' = Some (CTE cap' node') \ + (m \ p \ p') \ + (sameRegionAs cap cap') \ + (isEndpointCap cap \ + capEPBadge cap \ capEPBadge cap' \ + capEPBadge cap' \ 0 \ + mdbFirstBadged node') + \ + (isNotificationCap cap \ + capNtfnBadge cap \ capNtfnBadge cap' \ + capNtfnBadge cap' \ 0 \ + mdbFirstBadged node')" + +fun (sequential) + untypedRange :: "capability \ machine_word set" +where + "untypedRange (UntypedCap d p n f) = {p .. p + 2 ^ n - 1}" +| "untypedRange c = {}" + +primrec + acapClass :: "arch_capability \ capclass" +where + "acapClass (ASIDPoolCap _ _) = PhysicalClass" +| "acapClass ASIDControlCap = ASIDMasterClass" +| "acapClass (FrameCap _ _ _ _ _) = PhysicalClass" +| "acapClass (PageTableCap _ _ _) = PhysicalClass" +| "acapClass (VCPUCap _) = PhysicalClass" + +primrec + capClass :: "capability \ capclass" +where + "capClass (NullCap) = NullClass" +| "capClass (DomainCap) = DomainClass" +| "capClass (UntypedCap d p n f) = PhysicalClass" +| "capClass (EndpointCap ref badge s r g gr) = PhysicalClass" +| "capClass (NotificationCap ref badge s r) = PhysicalClass" +| "capClass (CNodeCap ref bits g gs) = PhysicalClass" +| "capClass (ThreadCap ref) = PhysicalClass" +| "capClass (Zombie r b n) = PhysicalClass" +| "capClass (IRQControlCap) = IRQClass" +| "capClass (IRQHandlerCap irq) = IRQClass" +| "capClass (ReplyCap tcb m g) = ReplyClass tcb" +| "capClass (ArchObjectCap cap) = acapClass cap" + +definition + "capRange cap \ + if capClass cap \ PhysicalClass then {} + else {capUntypedPtr cap .. capUntypedPtr cap + 2 ^ capBits cap - 1}" + +definition + "caps_contained' m \ + \p p' c n c' n'. 
+ m p = Some (CTE c n) \ + m p' = Some (CTE c' n') \ + \isUntypedCap c' \ + capRange c' \ untypedRange c \ {} \ + capRange c' \ untypedRange c" + +definition + valid_dlist :: "cte_heap \ bool" +where + "valid_dlist m \ + \p cte. m p = Some cte \ + (let prev = mdbPrev (cteMDBNode cte); + next = mdbNext (cteMDBNode cte) + in (prev \ 0 \ (\cte'. m prev = Some cte' \ mdbNext (cteMDBNode cte') = p)) \ + (next \ 0 \ (\cte'. m next = Some cte' \ mdbPrev (cteMDBNode cte') = p)))" + +definition + "no_0 m \ m 0 = None" +definition + "no_loops m \ \c. \ m \ c \\<^sup>+ c" +definition + "mdb_chain_0 m \ \x \ dom m. m \ x \\<^sup>+ 0" + +definition + "class_links m \ \p p' cte cte'. + m p = Some cte \ + m p' = Some cte' \ + m \ p \ p' \ + capClass (cteCap cte) = capClass (cteCap cte')" + +definition + "is_chunk m cap p p' \ + \p''. m \ p \\<^sup>+ p'' \ m \ p'' \\<^sup>* p' \ + (\cap'' n''. m p'' = Some (CTE cap'' n'') \ sameRegionAs cap cap'')" + +definition + "mdb_chunked m \ \p p' cap cap' n n'. + m p = Some (CTE cap n) \ + m p' = Some (CTE cap' n') \ + sameRegionAs cap cap' \ + p \ p' \ + (m \ p \\<^sup>+ p' \ m \ p' \\<^sup>+ p) \ + (m \ p \\<^sup>+ p' \ is_chunk m cap p p') \ + (m \ p' \\<^sup>+ p \ is_chunk m cap' p' p)" + +definition + parentOf :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ parentOf _" [60,0,60] 61) +where + "s \ c' parentOf c \ + \cte' cte. s c = Some cte \ s c' = Some cte' \ isMDBParentOf cte' cte" + + +context +notes [[inductive_internals =true]] +begin + +inductive + subtree :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \ _" [60,0,60] 61) + for s :: cte_heap and c :: machine_word +where + direct_parent: + "\ s \ c \ c'; c' \ 0; s \ c parentOf c'\ \ s \ c \ c'" + | + trans_parent: + "\ s \ c \ c'; s \ c' \ c''; c'' \ 0; s \ c parentOf c'' \ \ s \ c \ c''" + +end + +definition + "descendants_of' c s \ {c'. s \ c \ c'}" + + +definition + "untyped_mdb' m \ + \p p' c n c' n'. + m p = Some (CTE c n) \ isUntypedCap c \ + m p' = Some (CTE c' n') \ \ isUntypedCap c' \ + capRange c' \ untypedRange c \ {} \ + p' \ descendants_of' p m" + +definition + "untyped_inc' m \ + \p p' c c' n n'. + m p = Some (CTE c n) \ isUntypedCap c \ + m p' = Some (CTE c' n') \ isUntypedCap c' \ + (untypedRange c \ untypedRange c' \ + untypedRange c' \ untypedRange c \ + untypedRange c \ untypedRange c' = {}) \ + (untypedRange c \ untypedRange c' \ (p \ descendants_of' p' m \ untypedRange c \ usableUntypedRange c' ={})) \ + (untypedRange c' \ untypedRange c \ (p' \ descendants_of' p m \ untypedRange c' \ usableUntypedRange c ={})) \ + (untypedRange c = untypedRange c' \ (p' \ descendants_of' p m \ usableUntypedRange c={} + \ p \ descendants_of' p' m \ usableUntypedRange c' = {} \ p = p'))" + +definition + "valid_nullcaps m \ \p n. m p = Some (CTE NullCap n) \ n = nullMDBNode" + +definition + "ut_revocable' m \ \p cap n. m p = Some (CTE cap n) \ isUntypedCap cap \ mdbRevocable n" + +definition + "irq_control m \ + \p n. m p = Some (CTE IRQControlCap n) \ + mdbRevocable n \ + (\p' n'. m p' = Some (CTE IRQControlCap n') \ p' = p)" + +definition + isArchFrameCap :: "capability \ bool" +where + "isArchFrameCap cap \ case cap of ArchObjectCap (FrameCap _ _ _ _ _) \ True | _ \ False" + +definition + distinct_zombie_caps :: "(machine_word \ capability option) \ bool" +where + "distinct_zombie_caps caps \ \ptr ptr' cap cap'. 
caps ptr = Some cap + \ caps ptr' = Some cap' \ ptr \ ptr' \ isZombie cap + \ capClass cap' = PhysicalClass \ \ isUntypedCap cap' \ \ isArchFrameCap cap' + \ capBits cap = capBits cap' \ capUntypedPtr cap \ capUntypedPtr cap'" + +definition + distinct_zombies :: "cte_heap \ bool" +where + "distinct_zombies m \ distinct_zombie_caps (option_map cteCap \ m)" + +definition + reply_masters_rvk_fb :: "cte_heap \ bool" +where + "reply_masters_rvk_fb ctes \ \cte \ ran ctes. + isReplyCap (cteCap cte) \ capReplyMaster (cteCap cte) + \ mdbRevocable (cteMDBNode cte) \ mdbFirstBadged (cteMDBNode cte)" + +definition + valid_mdb_ctes :: "cte_heap \ bool" +where + "valid_mdb_ctes \ \m. valid_dlist m \ no_0 m \ mdb_chain_0 m \ + valid_badges m \ caps_contained' m \ + mdb_chunked m \ untyped_mdb' m \ + untyped_inc' m \ valid_nullcaps m \ + ut_revocable' m \ class_links m \ distinct_zombies m + \ irq_control m \ reply_masters_rvk_fb m" + +definition + valid_mdb' :: "kernel_state \ bool" +where + "valid_mdb' \ \s. valid_mdb_ctes (ctes_of s)" + +definition + "no_0_obj' \ \s. ksPSpace s 0 = None" + +definition + valid_pspace' :: "kernel_state \ bool" +where + "valid_pspace' \ valid_objs' and + pspace_aligned' and + pspace_distinct' and + no_0_obj' and + valid_mdb'" + +primrec + runnable' :: "Structures_H.thread_state \ bool" +where + "runnable' (Structures_H.Running) = True" +| "runnable' (Structures_H.Inactive) = False" +| "runnable' (Structures_H.Restart) = True" +| "runnable' (Structures_H.IdleThreadState) = False" +| "runnable' (Structures_H.BlockedOnReceive a b) = False" +| "runnable' (Structures_H.BlockedOnReply) = False" +| "runnable' (Structures_H.BlockedOnSend a b c d e) = False" +| "runnable' (Structures_H.BlockedOnNotification x) = False" + +definition + inQ :: "domain \ priority \ tcb \ bool" +where + "inQ d p tcb \ tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d" + +definition + (* for given domain and priority, the scheduler bitmap indicates a thread is in the queue *) + (* second level of the bitmap is stored in reverse for better cache locality in common case *) + bitmapQ :: "domain \ priority \ kernel_state \ bool" +where + "bitmapQ d p s \ ksReadyQueuesL1Bitmap s d !! prioToL1Index p + \ ksReadyQueuesL2Bitmap s (d, invertL1Index (prioToL1Index p)) + !! unat (p && mask wordRadix)" + +definition + valid_queues_no_bitmap :: "kernel_state \ bool" +where + "valid_queues_no_bitmap \ \s. + (\d p. (\t \ set (ksReadyQueues s (d, p)). obj_at' (inQ d p and runnable' \ tcbState) t s) + \ distinct (ksReadyQueues s (d, p)) + \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" + +definition + (* A priority is used as a two-part key into the bitmap structure. If an L2 bitmap entry + is set without an L1 entry, updating the L1 entry (shared by many priorities) may make + unexpected threads schedulable *) + bitmapQ_no_L2_orphans :: "kernel_state \ bool" +where + "bitmapQ_no_L2_orphans \ \s. + \d i j. ksReadyQueuesL2Bitmap s (d, invertL1Index i) !! j \ i < l2BitmapSize + \ (ksReadyQueuesL1Bitmap s d !! i)" + +definition + (* If the scheduler finds a set bit in L1 of the bitmap, it must find some bit set in L2 + when it looks there. This lets it omit a check. + L2 entries have wordBits bits each. That means the L1 word only indexes + a small number of L2 entries, despite being stored in a wordBits word. + We allow only bits corresponding to L2 indices to be set. + *) + bitmapQ_no_L1_orphans :: "kernel_state \ bool" +where + "bitmapQ_no_L1_orphans \ \s. + \d i. ksReadyQueuesL1Bitmap s d !! 
i \ ksReadyQueuesL2Bitmap s (d, invertL1Index i) \ 0 \ + i < l2BitmapSize" + +definition + valid_bitmapQ :: "kernel_state \ bool" +where + "valid_bitmapQ \ \s. (\d p. bitmapQ d p s \ ksReadyQueues s (d,p) \ [])" + +definition + valid_queues :: "kernel_state \ bool" +where + "valid_queues \ \s. valid_queues_no_bitmap s \ valid_bitmapQ s \ + bitmapQ_no_L2_orphans s \ bitmapQ_no_L1_orphans s" + +definition + (* when a thread gets added to / removed from a queue, but before bitmap updated *) + valid_bitmapQ_except :: "domain \ priority \ kernel_state \ bool" +where + "valid_bitmapQ_except d' p' \ \s. + (\d p. (d \ d' \ p \ p') \ (bitmapQ d p s \ ksReadyQueues s (d,p) \ []))" + +lemmas bitmapQ_defs = valid_bitmapQ_def valid_bitmapQ_except_def bitmapQ_def + bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def + +definition + valid_queues' :: "kernel_state \ bool" +where + "valid_queues' \ \s. \d p t. obj_at' (inQ d p) t s \ t \ set (ksReadyQueues s (d, p))" + +definition tcb_in_cur_domain' :: "machine_word \ kernel_state \ bool" where + "tcb_in_cur_domain' t \ \s. obj_at' (\tcb. ksCurDomain s = tcbDomain tcb) t s" + +definition + ct_idle_or_in_cur_domain' :: "kernel_state \ bool" where + "ct_idle_or_in_cur_domain' \ \s. ksSchedulerAction s = ResumeCurrentThread \ + ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s" + +definition + "ct_in_state' test \ \s. st_tcb_at' test (ksCurThread s) s" + +definition + "ct_not_inQ \ \s. ksSchedulerAction s = ResumeCurrentThread + \ obj_at' (Not \ tcbQueued) (ksCurThread s) s" + +abbreviation + "idle' \ \st. st = Structures_H.IdleThreadState" + +abbreviation + "activatable' st \ runnable' st \ idle' st" + +primrec + sch_act_wf :: "scheduler_action \ kernel_state \ bool" +where + "sch_act_wf ResumeCurrentThread = ct_in_state' activatable'" +| "sch_act_wf ChooseNewThread = \" +| "sch_act_wf (SwitchToThread t) = (\s. st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s)" + +definition sch_act_simple :: "kernel_state \ bool" where + "sch_act_simple \ \s. (ksSchedulerAction s = ResumeCurrentThread) \ + (ksSchedulerAction s = ChooseNewThread)" + +definition sch_act_sane :: "kernel_state \ bool" where + "sch_act_sane \ \s. \t. ksSchedulerAction s = SwitchToThread t \ t \ ksCurThread s" + +abbreviation + "sch_act_not t \ \s. ksSchedulerAction s \ SwitchToThread t" + +definition idle_tcb'_2 :: "Structures_H.thread_state \ machine_word option \ bool" where + "idle_tcb'_2 \ \(st, ntfn_opt). (idle' st \ ntfn_opt = None)" + +abbreviation + "idle_tcb' tcb \ idle_tcb'_2 (tcbState tcb, tcbBoundNotification tcb)" + +lemmas idle_tcb'_def = idle_tcb'_2_def + +definition valid_idle' :: "kernel_state \ bool" where + "valid_idle' \ \s. obj_at' idle_tcb' (ksIdleThread s) s \ idle_thread_ptr = ksIdleThread s" + +lemma valid_idle'_tcb_at': + "valid_idle' s \ obj_at' idle_tcb' (ksIdleThread s) s" + by (clarsimp simp: valid_idle'_def) + +definition valid_irq_node' :: "machine_word \ kernel_state \ bool" where + "valid_irq_node' x \ + \s. is_aligned x (size (0::irq) + cteSizeBits) \ + (\irq :: irq. real_cte_at' (x + (ucast irq << cteSizeBits)) s)" + +definition valid_refs' :: "machine_word set \ cte_heap \ bool" where + "valid_refs' R \ \m. \c \ ran m. R \ capRange (cteCap c) = {}" + +(* Addresses of all PTEs in a VSRoot table at p *) +definition table_refs' :: "machine_word \ machine_word set" where + "table_refs' p \ (\i. 
p + (i << pte_bits)) ` mask_range 0 (ptTranslationBits VSRootPT_T)" + +definition global_refs' :: "kernel_state \ obj_ref set" where + "global_refs' \ \s. + {ksIdleThread s} \ + table_refs' (armKSGlobalUserVSpace (ksArchState s)) \ + range (\irq :: irq. irq_node' s + (ucast irq << cteSizeBits))" + +definition valid_cap_sizes' :: "nat \ cte_heap \ bool" where + "valid_cap_sizes' n hp \ \cte \ ran hp. 2 ^ capBits (cteCap cte) \ n" + +definition valid_global_refs' :: "kernel_state \ bool" where + "valid_global_refs' \ \s. + valid_refs' kernel_data_refs (ctes_of s) + \ global_refs' s \ kernel_data_refs + \ valid_cap_sizes' (gsMaxObjectSize s) (ctes_of s)" + +definition pspace_domain_valid :: "kernel_state \ bool" where + "pspace_domain_valid \ \s. + \x ko. ksPSpace s x = Some ko \ mask_range x (objBitsKO ko) \ kernel_data_refs = {}" + +definition valid_asid_table' :: "(asid \ machine_word) \ bool" where + "valid_asid_table' table \ dom table \ mask_range 0 asid_high_bits \ 0 \ ran table" + +definition "is_vcpu' \ \ko. \vcpu. ko = (KOArch (KOVCPU vcpu))" + +definition max_armKSGICVCPUNumListRegs :: nat where + "max_armKSGICVCPUNumListRegs \ 63" + +definition valid_arch_state' :: "kernel_state \ bool" where + "valid_arch_state' \ \s. + valid_asid_table' (armKSASIDTable (ksArchState s)) \ + (case armHSCurVCPU (ksArchState s) of + Some (v, b) \ ko_wp_at' (is_vcpu' and hyp_live') v s + | _ \ True) \ + armKSGICVCPUNumListRegs (ksArchState s) \ max_armKSGICVCPUNumListRegs" + +definition irq_issued' :: "irq \ kernel_state \ bool" where + "irq_issued' irq \ \s. intStateIRQTable (ksInterruptState s) irq = IRQSignal" + +definition cteCaps_of :: "kernel_state \ machine_word \ capability option" where + "cteCaps_of s \ option_map cteCap \ ctes_of s" + +definition valid_irq_handlers' :: "kernel_state \ bool" where + "valid_irq_handlers' \ \s. \cap \ ran (cteCaps_of s). \irq. + cap = IRQHandlerCap irq \ irq_issued' irq s" + +definition + "irqs_masked' \ \s. \irq > maxIRQ. intStateIRQTable (ksInterruptState s) irq = IRQInactive" + +definition + "valid_irq_masks' table masked \ \irq. table irq = IRQInactive \ masked irq" + +abbreviation + "valid_irq_states' s \ + valid_irq_masks' (intStateIRQTable (ksInterruptState s)) (irq_masks (ksMachineState s))" + +defs pointerInUserData_def: + "pointerInUserData p \ typ_at' UserDataT (p && ~~ mask pageBits)" + +(* pointerInDeviceData is not defined in spec but is necessary for valid_machine_state' *) +definition pointerInDeviceData :: "machine_word \ kernel_state \ bool" where + "pointerInDeviceData p \ typ_at' UserDataDeviceT (p && ~~ mask pageBits)" + +definition + "valid_machine_state' \ + \s. \p. pointerInUserData p s \ pointerInDeviceData p s \ underlying_memory (ksMachineState s) p = 0" + +definition + "untyped_ranges_zero_inv cps urs \ urs = ran (untypedZeroRange \\<^sub>m cps)" + +abbreviation + "untyped_ranges_zero' s \ untyped_ranges_zero_inv (cteCaps_of s) (gsUntypedZeroRanges s)" + +(* FIXME: this really should be a definition like the above. *) +(* The schedule is invariant. *) +abbreviation + "valid_dom_schedule' \ + \s. ksDomSchedule s \ [] \ (\x\set (ksDomSchedule s). dschDomain x \ maxDomain \ 0 < dschLength x) + \ ksDomSchedule s = ksDomSchedule (newKernelState undefined) + \ ksDomScheduleIdx s < length (ksDomSchedule (newKernelState undefined))" + +definition valid_state' :: "kernel_state \ bool" where + "valid_state' \ \s. 
valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s + \ valid_queues s \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ valid_idle' s + \ valid_global_refs' s \ valid_arch_state' s + \ valid_irq_node' (irq_node' s) s + \ valid_irq_handlers' s + \ valid_irq_states' s + \ valid_machine_state' s + \ irqs_masked' s + \ valid_queues' s + \ ct_not_inQ s + \ ct_idle_or_in_cur_domain' s + \ pspace_domain_valid s + \ ksCurDomain s \ maxDomain + \ valid_dom_schedule' s + \ untyped_ranges_zero' s" + +definition + "cur_tcb' s \ tcb_at' (ksCurThread s) s" + +definition + invs' :: "kernel_state \ bool" where + "invs' \ valid_state' and cur_tcb'" + + +subsection "Derived concepts" + +abbreviation + "awaiting_reply' ts \ ts = Structures_H.BlockedOnReply" + + (* x-symbol doesn't have a reverse leadsto.. *) +definition + mdb_prev :: "cte_heap \ machine_word \ machine_word \ bool" ("_ \ _ \ _" [60,0,60] 61) +where + "m \ c \ c' \ (\z. m c' = Some z \ c = mdbPrev (cteMDBNode z))" + +definition + makeObjectT :: "kernel_object_type \ kernel_object" + where + "makeObjectT tp \ case tp of + EndpointT \ injectKO (makeObject :: endpoint) + | NotificationT \ injectKO (makeObject :: Structures_H.notification) + | CTET \ injectKO (makeObject :: cte) + | TCBT \ injectKO (makeObject :: tcb) + | UserDataT \ injectKO (makeObject :: user_data) + | UserDataDeviceT \ injectKO (makeObject :: user_data_device) + | KernelDataT \ KOKernelData + | ArchT atp \ (case atp of + PTET \ injectKO (makeObject :: pte) + | ASIDPoolT \ injectKO (makeObject :: asidpool) + | VCPUT \ injectKO (makeObject :: vcpu))" + +definition + objBitsT :: "kernel_object_type \ nat" + where + "objBitsT tp \ objBitsKO (makeObjectT tp)" + + +abbreviation + "active' st \ st = Structures_H.Running \ st = Structures_H.Restart" + +abbreviation + "simple' st \ st = Structures_H.Inactive \ + st = Structures_H.Running \ + st = Structures_H.Restart \ + idle' st \ awaiting_reply' st" + +abbreviation + "ct_active' \ ct_in_state' active'" + +abbreviation + "ct_running' \ ct_in_state' (\st. st = Structures_H.Running)" + +abbreviation(input) + "all_invs_but_sym_refs_ct_not_inQ' + \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s + \ valid_queues s \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s + \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s + \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s + \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s + \ pspace_domain_valid s + \ ksCurDomain s \ maxDomain + \ valid_dom_schedule' s \ untyped_ranges_zero' s" + +abbreviation(input) + "all_invs_but_ct_not_inQ' + \ \s. 
valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s + \ valid_queues s \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s + \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s + \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s + \ cur_tcb' s \ valid_queues' s \ ct_idle_or_in_cur_domain' s + \ pspace_domain_valid s + \ ksCurDomain s \ maxDomain + \ valid_dom_schedule' s \ untyped_ranges_zero' s" + +lemma all_invs_but_sym_refs_not_ct_inQ_check': + "(all_invs_but_sym_refs_ct_not_inQ' and sym_refs \ state_refs_of' and sym_refs \ state_hyp_refs_of' and ct_not_inQ) = invs'" + by (simp add: pred_conj_def conj_commute conj_left_commute invs'_def valid_state'_def) + +lemma all_invs_but_not_ct_inQ_check': + "(all_invs_but_ct_not_inQ' and ct_not_inQ) = invs'" + by (simp add: pred_conj_def conj_commute conj_left_commute invs'_def valid_state'_def) + +definition + "all_invs_but_ct_idle_or_in_cur_domain' + \ \s. valid_pspace' s \ sch_act_wf (ksSchedulerAction s) s + \ valid_queues s \ sym_refs (state_refs_of' s) \ sym_refs (state_hyp_refs_of' s) + \ if_live_then_nonz_cap' s \ if_unsafe_then_cap' s + \ valid_idle' s \ valid_global_refs' s \ valid_arch_state' s + \ valid_irq_node' (irq_node' s) s \ valid_irq_handlers' s + \ valid_irq_states' s \ irqs_masked' s \ valid_machine_state' s + \ cur_tcb' s \ valid_queues' s \ ct_not_inQ s + \ pspace_domain_valid s + \ ksCurDomain s \ maxDomain + \ valid_dom_schedule' s \ untyped_ranges_zero' s" + +lemmas invs_no_cicd'_def = all_invs_but_ct_idle_or_in_cur_domain'_def + +lemma all_invs_but_ct_idle_or_in_cur_domain_check': + "(all_invs_but_ct_idle_or_in_cur_domain' and ct_idle_or_in_cur_domain') = invs'" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def pred_conj_def + conj_left_commute conj_commute invs'_def valid_state'_def) + +abbreviation (input) + "invs_no_cicd' \ all_invs_but_ct_idle_or_in_cur_domain'" + +lemma invs'_to_invs_no_cicd'_def: + "invs' = (all_invs_but_ct_idle_or_in_cur_domain' and ct_idle_or_in_cur_domain')" + by (fastforce simp: invs'_def all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def ) +end + +locale mdb_next = + fixes m :: cte_heap + + fixes greater_eq + defines "greater_eq a b \ m \ a \\<^sup>* b" + + fixes greater + defines "greater a b \ m \ a \\<^sup>+ b" + +locale mdb_order = mdb_next + + assumes no_0: "no_0 m" + assumes chain: "mdb_chain_0 m" + +\ \---------------------------------------------------------------------------\ +section "Alternate split rules for preserving subgoal order" +context begin interpretation Arch . (*FIXME: arch_split*) +lemma ntfn_splits[split]: + " P (case ntfn of Structures_H.ntfn.IdleNtfn \ f1 + | Structures_H.ntfn.ActiveNtfn x \ f2 x + | Structures_H.ntfn.WaitingNtfn x \ f3 x) = + ((ntfn = Structures_H.ntfn.IdleNtfn \ P f1) \ + (\x2. ntfn = Structures_H.ntfn.ActiveNtfn x2 \ + P (f2 x2)) \ + (\x3. ntfn = Structures_H.ntfn.WaitingNtfn x3 \ + P (f3 x3)))" + "P (case ntfn of Structures_H.ntfn.IdleNtfn \ f1 + | Structures_H.ntfn.ActiveNtfn x \ f2 x + | Structures_H.ntfn.WaitingNtfn x \ f3 x) = + (\ (ntfn = Structures_H.ntfn.IdleNtfn \ \ P f1 \ + (\x2. ntfn = Structures_H.ntfn.ActiveNtfn x2 \ + \ P (f2 x2)) \ + (\x3. 
ntfn = Structures_H.ntfn.WaitingNtfn x3 \ + \ P (f3 x3))))" + by (case_tac ntfn; simp)+ +\ \---------------------------------------------------------------------------\ + +section "Lemmas" + +schematic_goal wordBits_def': "wordBits = numeral ?n" (* arch-specific consequence *) + by (simp add: wordBits_def word_size) + +lemma valid_bound_ntfn'_None[simp]: + "valid_bound_ntfn' None = \" + by (auto simp: valid_bound_ntfn'_def) + +lemma valid_bound_ntfn'_Some[simp]: + "valid_bound_ntfn' (Some x) = ntfn_at' x" + by (auto simp: valid_bound_ntfn'_def) + +lemma valid_bound_tcb'_None[simp]: + "valid_bound_tcb' None = \" + by (auto simp: valid_bound_tcb'_def) + +lemma valid_bound_tcb'_Some[simp]: + "valid_bound_tcb' (Some x) = tcb_at' x" + by (auto simp: valid_bound_tcb'_def) + +lemma objBitsKO_Data: + "objBitsKO (if dev then KOUserDataDevice else KOUserData) = pageBits" + by (simp add: objBits_def objBitsKO_def word_size_def) + +lemmas objBits_defs = tcbBlockSizeBits_def epSizeBits_def ntfnSizeBits_def cteSizeBits_def vcpuBits_def +lemmas untypedBits_defs = minUntypedSizeBits_def maxUntypedSizeBits_def +lemmas objBits_simps = objBits_def objBitsKO_def word_size_def archObjSize_def +lemmas objBits_simps' = objBits_simps objBits_defs + +lemmas wordRadix_def' = wordRadix_def[simplified] + +lemma ps_clear_def2: + "p \ p + 1 \ ps_clear p n s = ({p + 1 .. p + (1 << n) - 1} \ dom (ksPSpace s) = {})" + apply (simp add: ps_clear_def mask_def add_diff_eq) + apply safe + apply (drule_tac a=x in equals0D) + apply clarsimp + apply (drule mp, simp) + apply (erule disjE) + apply simp + apply clarsimp + apply (drule_tac a=x in equals0D) + apply clarsimp + apply (case_tac "p + 1 \ x") + apply clarsimp + apply (simp add: linorder_not_le) + apply (drule plus_one_helper, simp) + done + +lemma projectKO_stateI: + "fst (projectKO e s) = {(obj, s)} \ fst (projectKO e s') = {(obj, s')}" + unfolding projectKO_def + by (auto simp: fail_def return_def valid_def split: option.splits) + +lemma singleton_in_magnitude_check: + "(x, s) \ fst (magnitudeCheck a b c s') \ \s'. fst (magnitudeCheck a b c s') = {(x, s')}" + by (simp add: magnitudeCheck_def when_def in_monad return_def + split: if_split_asm option.split_asm) + +lemma wordSizeCase_simp [simp]: "wordSizeCase a b = b" + by (simp add: wordSizeCase_def wordBits_def word_size) + +lemma projectKO_eq: + "(fst (projectKO ko c) = {(obj, c)}) = (projectKO_opt ko = Some obj)" + by (simp add: projectKO_def fail_def return_def split: option.splits) + +lemma obj_at'_def': + "obj_at' P p s = (\ko obj. ksPSpace s p = Some ko \ is_aligned p (objBitsKO ko) + \ fst (projectKO ko s) = {(obj,s)} \ P obj + \ ps_clear p (objBitsKO ko) s)" + apply (simp add: obj_at'_real_def ko_wp_at'_def projectKO_eq + True_notin_set_replicate_conv objBits_def) + apply fastforce + done + +lemma obj_at'_def: + "obj_at' P p s \ \ko obj. ksPSpace s p = Some ko \ is_aligned p (objBitsKO ko) + \ fst (projectKO ko s) = {(obj,s)} \ P obj + \ ps_clear p (objBitsKO ko) s" + by (simp add: obj_at'_def') + +lemma obj_atE' [elim?]: + assumes objat: "obj_at' P ptr s" + and rl: "\ko obj. 
+ \ ksPSpace s ptr = Some ko; is_aligned ptr (objBitsKO ko); + fst (projectKO ko s) = {(obj,s)}; P obj; + ps_clear ptr (objBitsKO ko) s \ \ R" + shows "R" + using objat unfolding obj_at'_def by (auto intro!: rl) + +lemma obj_atI' [intro?]: + "\ ksPSpace s ptr = Some ko; is_aligned ptr (objBitsKO ko); + fst (projectKO ko s) = {(obj, s)}; P obj; + ps_clear ptr (objBitsKO ko) s \ + \ obj_at' P ptr s" + unfolding obj_at'_def by (auto) + +lemma vcpu_at_is_vcpu': + "vcpu_at' v = ko_wp_at' is_vcpu' v" + apply (rule ext) + apply (clarsimp simp: typ_at'_def is_vcpu'_def ko_wp_at'_def) + apply (rule iffI; clarsimp?) + apply (case_tac ko; simp; rename_tac ako; case_tac ako; simp) + done + +lemma cte_at'_def: + "cte_at' p s \ \cte::cte. fst (getObject p s) = {(cte,s)}" + by (simp add: cte_wp_at'_def) + + +lemma tcb_cte_cases_simps[simp]: + "tcb_cte_cases 0 = Some (tcbCTable, tcbCTable_update)" + "tcb_cte_cases 32 = Some (tcbVTable, tcbVTable_update)" + "tcb_cte_cases 64 = Some (tcbReply, tcbReply_update)" + "tcb_cte_cases 96 = Some (tcbCaller, tcbCaller_update)" + "tcb_cte_cases 128 = Some (tcbIPCBufferFrame, tcbIPCBufferFrame_update)" + by (simp add: tcb_cte_cases_def cteSizeBits_def)+ + +lemma refs_of'_simps[simp]: + "refs_of' (KOTCB tcb) = tcb_st_refs_of' (tcbState tcb) \ tcb_bound_refs' (tcbBoundNotification tcb)" + "refs_of' (KOCTE cte) = {}" + "refs_of' (KOEndpoint ep) = ep_q_refs_of' ep" + "refs_of' (KONotification ntfn) = ntfn_q_refs_of' (ntfnObj ntfn) \ ntfn_bound_refs' (ntfnBoundTCB ntfn)" + "refs_of' (KOUserData) = {}" + "refs_of' (KOUserDataDevice) = {}" + "refs_of' (KOKernelData) = {}" + "refs_of' (KOArch ako) = {}" + by (auto simp: refs_of'_def) + +lemma tcb_st_refs_of'_simps[simp]: + "tcb_st_refs_of' (Running) = {}" + "tcb_st_refs_of' (Inactive) = {}" + "tcb_st_refs_of' (Restart) = {}" + "tcb_st_refs_of' (BlockedOnReceive x'' a') = {(x'', TCBBlockedRecv)}" + "tcb_st_refs_of' (BlockedOnSend x a b c d) = {(x, TCBBlockedSend)}" + "tcb_st_refs_of' (BlockedOnNotification x') = {(x', TCBSignal)}" + "tcb_st_refs_of' (BlockedOnReply) = {}" + "tcb_st_refs_of' (IdleThreadState) = {}" + by (auto simp: tcb_st_refs_of'_def) + +lemma ep_q_refs_of'_simps[simp]: + "ep_q_refs_of' IdleEP = {}" + "ep_q_refs_of' (RecvEP q) = set q \ {EPRecv}" + "ep_q_refs_of' (SendEP q) = set q \ {EPSend}" + by (auto simp: ep_q_refs_of'_def) + +lemma ntfn_q_refs_of'_simps[simp]: + "ntfn_q_refs_of' IdleNtfn = {}" + "ntfn_q_refs_of' (WaitingNtfn q) = set q \ {NTFNSignal}" + "ntfn_q_refs_of' (ActiveNtfn b) = {}" + by (auto simp: ntfn_q_refs_of'_def) + +lemma ntfn_bound_refs'_simps[simp]: + "ntfn_bound_refs' (Some t) = {(t, NTFNBound)}" + "ntfn_bound_refs' None = {}" + by (auto simp: ntfn_bound_refs'_def) + +lemma tcb_bound_refs'_simps[simp]: + "tcb_bound_refs' (Some a) = {(a, TCBBound)}" + "tcb_bound_refs' None = {}" + by (auto simp: tcb_bound_refs'_def) + +lemma refs_of_rev': + "(x, TCBBlockedRecv) \ refs_of' ko = + (\tcb. ko = KOTCB tcb \ (\a. tcbState tcb = BlockedOnReceive x a))" + "(x, TCBBlockedSend) \ refs_of' ko = + (\tcb. ko = KOTCB tcb \ (\a b c d. tcbState tcb = BlockedOnSend x a b c d))" + "(x, TCBSignal) \ refs_of' ko = + (\tcb. ko = KOTCB tcb \ tcbState tcb = BlockedOnNotification x)" + "(x, EPRecv) \ refs_of' ko = + (\ep. ko = KOEndpoint ep \ (\q. ep = RecvEP q \ x \ set q))" + "(x, EPSend) \ refs_of' ko = + (\ep. ko = KOEndpoint ep \ (\q. ep = SendEP q \ x \ set q))" + "(x, NTFNSignal) \ refs_of' ko = + (\ntfn. ko = KONotification ntfn \ (\q. 
ntfnObj ntfn = WaitingNtfn q \ x \ set q))" + "(x, TCBBound) \ refs_of' ko = + (\tcb. ko = KOTCB tcb \ (tcbBoundNotification tcb = Some x))" + "(x, NTFNBound) \ refs_of' ko = + (\ntfn. ko = KONotification ntfn \ (ntfnBoundTCB ntfn = Some x))" + by (auto simp: refs_of'_def + tcb_st_refs_of'_def + ep_q_refs_of'_def + ntfn_q_refs_of'_def + ntfn_bound_refs'_def + tcb_bound_refs'_def + split: Structures_H.kernel_object.splits + Structures_H.thread_state.splits + Structures_H.endpoint.splits + Structures_H.notification.splits + Structures_H.ntfn.splits)+ + +lemma tcb_hyp_refs_of'_simps[simp]: + "tcb_hyp_refs' atcb = tcb_vcpu_refs' (atcbVCPUPtr atcb)" + by (auto simp: tcb_hyp_refs'_def) + +lemma tcb_vcpu_refs_of'_simps[simp]: + "tcb_vcpu_refs' (Some vc) = {(vc, TCBHypRef)}" + "tcb_vcpu_refs' None = {}" + by (auto simp: tcb_vcpu_refs'_def) + +lemma vcpu_tcb_refs_of'_simps[simp]: + "vcpu_tcb_refs' (Some tcb) = {(tcb, HypTCBRef)}" + "vcpu_tcb_refs' None = {}" + by (auto simp: vcpu_tcb_refs'_def) + +lemma refs_of_a'_simps[simp]: + "refs_of_a' (KOASIDPool p) = {}" + "refs_of_a' (KOPTE pt) = {}" + "refs_of_a' (KOVCPU v) = vcpu_tcb_refs' (vcpuTCBPtr v)" + by (auto simp: refs_of_a'_def) + +lemma hyp_refs_of'_simps[simp]: + "hyp_refs_of' (KOCTE cte) = {}" + "hyp_refs_of' (KOTCB tcb) = tcb_hyp_refs' (tcbArch tcb)" + "hyp_refs_of' (KOEndpoint ep) = {}" + "hyp_refs_of' (KONotification ntfn) = {}" + "hyp_refs_of' (KOUserData) = {}" + "hyp_refs_of' (KOUserDataDevice) = {}" + "hyp_refs_of' (KOKernelData) = {}" + "hyp_refs_of' (KOArch ao) = refs_of_a' ao" + by (auto simp: hyp_refs_of'_def) + +lemma hyp_refs_of_rev': + "(x, TCBHypRef) \ hyp_refs_of' ko = + (\tcb. ko = KOTCB tcb \ (atcbVCPUPtr (tcbArch tcb) = Some x))" + "(x, HypTCBRef) \ hyp_refs_of' ko = + (\v. ko = KOArch (KOVCPU v) \ (vcpuTCBPtr v = Some x))" + by (auto simp: hyp_refs_of'_def tcb_hyp_refs'_def tcb_vcpu_refs'_def + vcpu_tcb_refs'_def refs_of_a'_def + split: kernel_object.splits arch_kernel_object.splits option.split) + +lemma ko_wp_at'_weakenE: + "\ ko_wp_at' P p s; \ko. P ko \ Q ko \ \ ko_wp_at' Q p s" + by (clarsimp simp: ko_wp_at'_def) + +lemma projectKO_opt_tcbD: + "projectKO_opt ko = Some (tcb :: tcb) \ ko = KOTCB tcb" + by (cases ko, simp_all add: projectKO_opt_tcb) + +lemma st_tcb_at_refs_of_rev': + "ko_wp_at' (\ko. (x, TCBBlockedRecv) \ refs_of' ko) t s + = st_tcb_at' (\ts. \a. ts = BlockedOnReceive x a) t s" + "ko_wp_at' (\ko. (x, TCBBlockedSend) \ refs_of' ko) t s + = st_tcb_at' (\ts. \a b c d. ts = BlockedOnSend x a b c d) t s" + "ko_wp_at' (\ko. (x, TCBSignal) \ refs_of' ko) t s + = st_tcb_at' (\ts. ts = BlockedOnNotification x) t s" + by (fastforce simp: refs_of_rev' pred_tcb_at'_def obj_at'_real_def + projectKO_opt_tcb[where e="KOTCB y" for y] + elim!: ko_wp_at'_weakenE + dest!: projectKO_opt_tcbD)+ + +lemma state_refs_of'_elemD: + "\ ref \ state_refs_of' s x \ \ ko_wp_at' (\obj. ref \ refs_of' obj) x s" + by (clarsimp simp add: state_refs_of'_def ko_wp_at'_def + split: option.splits if_split_asm) + +lemma obj_at_state_refs_ofD': + "obj_at' P p s \ \obj. 
P obj \ state_refs_of' s p = refs_of' (injectKO obj)" + apply (clarsimp simp: obj_at'_real_def project_inject ko_wp_at'_def conj_commute) + apply (rule exI, erule conjI) + apply (clarsimp simp: state_refs_of'_def) + done + +lemma ko_at_state_refs_ofD': + "ko_at' ko p s \ state_refs_of' s p = refs_of' (injectKO ko)" + by (clarsimp dest!: obj_at_state_refs_ofD') + +definition + tcb_ntfn_is_bound' :: "machine_word option \ tcb \ bool" +where + "tcb_ntfn_is_bound' ntfn tcb \ tcbBoundNotification tcb = ntfn" + +lemma st_tcb_at_state_refs_ofD': + "st_tcb_at' P t s \ \ts ntfnptr. P ts \ obj_at' (tcb_ntfn_is_bound' ntfnptr) t s + \ state_refs_of' s t = (tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr)" + by (auto simp: pred_tcb_at'_def tcb_ntfn_is_bound'_def obj_at'_def projectKO_eq + project_inject state_refs_of'_def) + +lemma bound_tcb_at_state_refs_ofD': + "bound_tcb_at' P t s \ \ts ntfnptr. P ntfnptr \ obj_at' (tcb_ntfn_is_bound' ntfnptr) t s + \ state_refs_of' s t = (tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr)" + by (auto simp: pred_tcb_at'_def obj_at'_def tcb_ntfn_is_bound'_def projectKO_eq + project_inject state_refs_of'_def) + +lemma sym_refs_obj_atD': + "\ obj_at' P p s; sym_refs (state_refs_of' s) \ \ + \obj. P obj \ state_refs_of' s p = refs_of' (injectKO obj) + \ (\(x, tp)\refs_of' (injectKO obj). ko_wp_at' (\ko. (p, symreftype tp) \ refs_of' ko) x s)" + apply (drule obj_at_state_refs_ofD') + apply (erule exEI, clarsimp) + apply (drule sym, simp) + apply (drule(1) sym_refsD) + apply (erule state_refs_of'_elemD) + done + +lemma sym_refs_ko_atD': + "\ ko_at' ko p s; sym_refs (state_refs_of' s) \ \ + state_refs_of' s p = refs_of' (injectKO ko) \ + (\(x, tp)\refs_of' (injectKO ko). ko_wp_at' (\ko. (p, symreftype tp) \ refs_of' ko) x s)" + by (drule(1) sym_refs_obj_atD', simp) + +lemma sym_refs_st_tcb_atD': + "\ st_tcb_at' P t s; sym_refs (state_refs_of' s) \ \ + \ts ntfnptr. P ts \ obj_at' (tcb_ntfn_is_bound' ntfnptr) t s + \ state_refs_of' s t = tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr + \ (\(x, tp)\tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr. ko_wp_at' (\ko. (t, symreftype tp) \ refs_of' ko) x s)" + apply (drule st_tcb_at_state_refs_ofD') + apply (erule exE)+ + apply (rule_tac x=ts in exI) + apply (rule_tac x=ntfnptr in exI) + apply clarsimp + apply (frule obj_at_state_refs_ofD') + apply (drule (1)sym_refs_obj_atD') + apply auto + done + +lemma sym_refs_bound_tcb_atD': + "\ bound_tcb_at' P t s; sym_refs (state_refs_of' s) \ \ + \ts ntfnptr. P ntfnptr \ obj_at' (tcb_ntfn_is_bound' ntfnptr) t s + \ state_refs_of' s t = tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr + \ (\(x, tp)\tcb_st_refs_of' ts \ tcb_bound_refs' ntfnptr. ko_wp_at' (\ko. (t, symreftype tp) \ refs_of' ko) x s)" + apply (drule bound_tcb_at_state_refs_ofD') + apply (erule exE)+ + apply (rule_tac x=ts in exI) + apply (rule_tac x=ntfnptr in exI) + apply clarsimp + apply (frule obj_at_state_refs_ofD') + apply (drule (1)sym_refs_obj_atD') + apply auto + done + +lemma state_hyp_refs_of'_elemD: + "\ ref \ state_hyp_refs_of' s x \ \ ko_wp_at' (\obj. ref \ hyp_refs_of' obj) x s" + by (clarsimp simp add: state_hyp_refs_of'_def ko_wp_at'_def + split: option.splits if_split_asm) + +lemma obj_at_state_hyp_refs_ofD': + "obj_at' P p s \ \ko. 
P ko \ state_hyp_refs_of' s p = hyp_refs_of' (injectKO ko)" + apply (clarsimp simp: obj_at'_real_def project_inject ko_wp_at'_def conj_commute) + apply (rule exI, erule conjI) + apply (clarsimp simp: state_hyp_refs_of'_def) + done + +lemma ko_at_state_hyp_refs_ofD': + "ko_at' ko p s \ state_hyp_refs_of' s p = hyp_refs_of' (injectKO ko)" + by (clarsimp dest!: obj_at_state_hyp_refs_ofD') + +lemma hyp_sym_refs_obj_atD': + "\ obj_at' P p s; sym_refs (state_hyp_refs_of' s) \ \ + \ko. P ko \ state_hyp_refs_of' s p = hyp_refs_of' (injectKO ko) \ + (\(x, tp)\hyp_refs_of' (injectKO ko). ko_wp_at' (\ko. (p, symreftype tp) \ hyp_refs_of' ko) x s)" + apply (drule obj_at_state_hyp_refs_ofD') + apply (erule exEI, clarsimp) + apply (drule sym, simp) + apply (drule(1) sym_refsD) + apply (erule state_hyp_refs_of'_elemD) + done + +lemma refs_of_live': + "refs_of' ko \ {} \ live' ko" + apply (cases ko, simp_all add: live'_def) + apply clarsimp + apply (rename_tac notification) + apply (case_tac "ntfnObj notification"; simp) + apply fastforce+ + done + +lemma hyp_refs_of_hyp_live': + "hyp_refs_of' ko \ {} \ hyp_live' ko" + apply (cases ko, simp_all) + apply (rename_tac tcb_ext) + apply (simp add: tcb_hyp_refs'_def hyp_live'_def) + apply (case_tac "atcbVCPUPtr (tcbArch tcb_ext)"; clarsimp) + apply (clarsimp simp: hyp_live'_def arch_live'_def refs_of_a'_def vcpu_tcb_refs'_def + split: arch_kernel_object.splits option.splits) + done + +lemma hyp_refs_of_live': + "hyp_refs_of' ko \ {} \ live' ko" + by (cases ko, simp_all add: live'_def hyp_refs_of_hyp_live') + +lemma if_live_then_nonz_capE': + "\ if_live_then_nonz_cap' s; ko_wp_at' live' p s \ + \ ex_nonz_cap_to' p s" + by (fastforce simp: if_live_then_nonz_cap'_def) + +lemma if_live_then_nonz_capD': + assumes x: "if_live_then_nonz_cap' s" "ko_wp_at' P p s" + assumes y: "\obj. \ P obj; ksPSpace s p = Some obj; is_aligned p (objBitsKO obj) \ \ live' obj" + shows "ex_nonz_cap_to' p s" using x + by (clarsimp elim!: if_live_then_nonz_capE' y + simp: ko_wp_at'_def) + +lemma if_live_state_refsE: + "\ if_live_then_nonz_cap' s; + state_refs_of' s p \ {} \ \ ex_nonz_cap_to' p s" + by (clarsimp simp: state_refs_of'_def ko_wp_at'_def + split: option.splits if_split_asm + elim!: refs_of_live' if_live_then_nonz_capE') + +lemmas ex_cte_cap_to'_def = ex_cte_cap_wp_to'_def + +lemma if_unsafe_then_capD': + "\ cte_wp_at' P p s; if_unsafe_then_cap' s; \cte. 
P cte \ cteCap cte \ NullCap \ + \ ex_cte_cap_to' p s" + unfolding if_unsafe_then_cap'_def + apply (erule allE, erule mp) + apply (clarsimp simp: cte_wp_at'_def) + done + +lemmas valid_cap_simps' = + valid_cap'_def[split_simps capability.split arch_capability.split] + +lemma max_ipc_words: + "max_ipc_words = 0x80" + unfolding max_ipc_words_def + by (simp add: msgMaxLength_def msgLengthBits_def msgMaxExtraCaps_def msgExtraCapBits_def capTransferDataSize_def) + +lemma valid_objsE' [elim]: + "\ valid_objs' s; ksPSpace s x = Some obj; valid_obj' obj s \ R \ \ R" + unfolding valid_objs'_def by auto + +lemma pspace_distinctD': + "\ ksPSpace s x = Some v; pspace_distinct' s \ \ ps_clear x (objBitsKO v) s" + apply (simp add: pspace_distinct'_def) + apply (drule bspec, erule domI) + apply simp + done + +lemma pspace_alignedD': + "\ ksPSpace s x = Some v; pspace_aligned' s \ \ is_aligned x (objBitsKO v)" + apply (simp add: pspace_aligned'_def) + apply (drule bspec, erule domI) + apply simp + done + +lemma next_unfold: + "mdb_next s c = + (case s c of Some cte \ Some (mdbNext (cteMDBNode cte)) | None \ None)" + by (simp add: mdb_next_def split: option.split) + +lemma is_physical_cases: + "(capClass cap = PhysicalClass) = + (case cap of NullCap \ False + | DomainCap \ False + | IRQControlCap \ False + | IRQHandlerCap irq \ False + | ReplyCap r m cr \ False + | ArchObjectCap ASIDControlCap \ False + | _ \ True)" + by (simp split: capability.splits arch_capability.splits zombie_type.splits) + +lemma sch_act_sane_not: + "sch_act_sane s = sch_act_not (ksCurThread s) s" + by (auto simp: sch_act_sane_def) + +lemma objBits_cte_conv: "objBits (cte :: cte) = cteSizeBits" + by (simp add: objBits_simps word_size) + +lemmas valid_irq_states'_def = valid_irq_masks'_def + +lemma valid_pspaceE' [elim]: + "\valid_pspace' s; + \ valid_objs' s; pspace_aligned' s; pspace_distinct' s; no_0_obj' s; + valid_mdb' s \ \ R \ \ R" + unfolding valid_pspace'_def by simp + +lemma idle'_no_refs: + "valid_idle' s \ state_refs_of' s (ksIdleThread s) = {}" + by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def tcb_ntfn_is_bound'_def + projectKO_eq project_inject state_refs_of'_def idle_tcb'_def) + +lemma idle'_not_queued': + "\valid_idle' s; sym_refs (state_refs_of' s); + state_refs_of' s ptr = insert t queue \ {rt}\\ + ksIdleThread s \ queue" + by (frule idle'_no_refs, fastforce simp: valid_idle'_def sym_refs_def) + +lemma idle'_not_queued: + "\valid_idle' s; sym_refs (state_refs_of' s); + state_refs_of' s ptr = queue \ {rt}\ \ + ksIdleThread s \ queue" + by (frule idle'_no_refs, fastforce simp: valid_idle'_def sym_refs_def) + + +lemma obj_at_conj': + "\ obj_at' P p s; obj_at' Q p s \ \ obj_at' (\k. P k \ Q k) p s" + by (auto simp: obj_at'_def) + +lemma pred_tcb_at_conj': + "\ pred_tcb_at' proj P t s; pred_tcb_at' proj Q t s \ \ pred_tcb_at' proj (\a. P a \ Q a) t s" + apply (simp add: pred_tcb_at'_def) + apply (erule (1) obj_at_conj') + done + +lemma obj_at_False' [simp]: + "obj_at' (\k. False) t s = False" + by (simp add: obj_at'_def) + +lemma pred_tcb_at_False' [simp]: + "pred_tcb_at' proj (\st. 
False) t s = False" + by (simp add: pred_tcb_at'_def obj_at'_def) + +lemma obj_at'_pspaceI: + "obj_at' t ref s \ ksPSpace s = ksPSpace s' \ obj_at' t ref s'" + by (auto intro!: projectKO_stateI simp: obj_at'_def ps_clear_def) + +lemma cte_wp_at'_pspaceI: + "\cte_wp_at' P p s; ksPSpace s = ksPSpace s'\ \ cte_wp_at' P p s'" + apply (clarsimp simp add: cte_wp_at'_def getObject_def) + apply (drule equalityD2) + apply (clarsimp simp: in_monad loadObject_cte gets_def + get_def bind_def return_def split_def) + apply (case_tac b) + apply (simp_all add: in_monad typeError_def) + prefer 2 + apply (simp add: in_monad return_def alignError_def assert_opt_def + alignCheck_def magnitudeCheck_def when_def bind_def + split: if_split_asm option.splits) + apply (clarsimp simp: in_monad return_def alignError_def fail_def assert_opt_def + alignCheck_def bind_def when_def + objBits_cte_conv tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def objBits_defs + split: if_split_asm cong: image_cong + dest!: singleton_in_magnitude_check) + done + +lemma valid_untyped'_pspaceI: + "\ksPSpace s = ksPSpace s'; valid_untyped' d p n idx s\ + \ valid_untyped' d p n idx s'" + by (simp add: valid_untyped'_def ko_wp_at'_def ps_clear_def) + +lemma typ_at'_pspaceI: + "typ_at' T p s \ ksPSpace s = ksPSpace s' \ typ_at' T p s'" + by (simp add: typ_at'_def ko_wp_at'_def ps_clear_def) + +lemma frame_at'_pspaceI: + "frame_at' p sz d s \ ksPSpace s = ksPSpace s' \ frame_at' p sz d s'" + by (simp add: frame_at'_def typ_at'_def ko_wp_at'_def ps_clear_def) + +lemma valid_cap'_pspaceI: + "s \' cap \ ksPSpace s = ksPSpace s' \ s' \' cap" + unfolding valid_cap'_def + by (cases cap) + (auto intro: obj_at'_pspaceI[rotated] + cte_wp_at'_pspaceI valid_untyped'_pspaceI + typ_at'_pspaceI[rotated] frame_at'_pspaceI[rotated] + simp: vspace_table_at'_defs valid_arch_cap'_def valid_arch_cap_ref'_def + split: arch_capability.split zombie_type.split option.splits) + +lemma valid_obj'_pspaceI: + "valid_obj' obj s \ ksPSpace s = ksPSpace s' \ valid_obj' obj s'" + unfolding valid_obj'_def + by (cases obj) + (auto simp: valid_ep'_def valid_ntfn'_def valid_tcb'_def valid_cte'_def + valid_tcb_state'_def valid_bound_tcb'_def + valid_bound_ntfn'_def valid_arch_tcb'_def + split: Structures_H.endpoint.splits Structures_H.notification.splits + Structures_H.thread_state.splits ntfn.splits option.splits + intro: obj_at'_pspaceI valid_cap'_pspaceI typ_at'_pspaceI) + +lemma pred_tcb_at'_pspaceI: + "pred_tcb_at' proj P t s \ ksPSpace s = ksPSpace s' \ pred_tcb_at' proj P t s'" + unfolding pred_tcb_at'_def by (fast intro: obj_at'_pspaceI) + +lemma valid_mdb'_pspaceI: + "valid_mdb' s \ ksPSpace s = ksPSpace s' \ valid_mdb' s'" + unfolding valid_mdb'_def by simp + +lemma state_refs_of'_pspaceI: + "P (state_refs_of' s) \ ksPSpace s = ksPSpace s' \ P (state_refs_of' s')" + unfolding state_refs_of'_def ps_clear_def by (simp cong: option.case_cong) + +lemma state_hyp_refs_of'_pspaceI: + "P (state_hyp_refs_of' s) \ ksPSpace s = ksPSpace s' \ P (state_hyp_refs_of' s')" + unfolding state_hyp_refs_of'_def ps_clear_def by (simp cong: option.case_cong) + +lemma valid_pspace': + "valid_pspace' s \ ksPSpace s = ksPSpace s' \ valid_pspace' s'" + by (auto simp add: valid_pspace'_def valid_objs'_def pspace_aligned'_def + pspace_distinct'_def ps_clear_def no_0_obj'_def ko_wp_at'_def + typ_at'_def + intro: valid_obj'_pspaceI valid_mdb'_pspaceI) + +lemma ex_cte_cap_to_pspaceI'[elim]: + "ex_cte_cap_to' p s \ ksPSpace s = ksPSpace s' \ + intStateIRQNode (ksInterruptState s) = 
intStateIRQNode (ksInterruptState s') + \ ex_cte_cap_to' p s'" + by (fastforce simp: ex_cte_cap_to'_def elim: cte_wp_at'_pspaceI) + +lemma valid_idle'_pspace_itI[elim]: + "\ valid_idle' s; ksPSpace s = ksPSpace s'; ksIdleThread s = ksIdleThread s' \ + \ valid_idle' s'" + apply (clarsimp simp: valid_idle'_def ex_nonz_cap_to'_def) + apply (erule obj_at'_pspaceI, assumption) + done + +lemma obj_at'_weaken: + assumes x: "obj_at' P t s" + assumes y: "\obj. P obj \ P' obj" + shows "obj_at' P' t s" + by (insert x, clarsimp simp: obj_at'_def y) + +lemma cte_wp_at_weakenE': + "\cte_wp_at' P t s; \c. P c \ P' c\ \ cte_wp_at' P' t s" + by (fastforce simp: cte_wp_at'_def) + +lemma obj_at'_weakenE: + "\ obj_at' P p s; \k. P k \ P' k \ \ obj_at' P' p s" + by (clarsimp simp: obj_at'_def) + +lemma pred_tcb'_weakenE: + "\ pred_tcb_at' proj P t s; \st. P st \ P' st \ \ pred_tcb_at' proj P' t s" + apply (simp add: pred_tcb_at'_def) + apply (erule obj_at'_weakenE) + apply clarsimp + done + +lemma lookupAround2_char1: + "(fst (lookupAround2 x s) = Some (y, v)) = + (y \ x \ s y = Some v \ (\z. y < z \ z \ x \ s z = None))" + apply (simp add: lookupAround2_def Let_def split_def lookupAround_def + split del: if_split + split: option.split) + apply (intro conjI impI iffI) + apply (clarsimp split: if_split_asm) + apply (rule Max_prop) + apply (simp add: order_less_imp_le) + apply fastforce + apply (clarsimp split: if_split_asm) + apply (rule Max_prop) + apply clarsimp + apply fastforce + apply (clarsimp split: if_split_asm) + apply (subst(asm) Max_less_iff) + apply simp + apply fastforce + apply (fastforce intro: order_neq_le_trans) + apply (clarsimp cong: conj_cong) + apply (rule conjI) + apply fastforce + apply (rule order_antisym) + apply (subst Max_le_iff) + apply simp + apply fastforce + apply clarsimp + apply (rule ccontr) + apply (fastforce simp add: linorder_not_le) + apply (rule Max_ge) + apply simp + apply fastforce + apply (intro allI impI iffI) + apply clarsimp + apply simp + apply clarsimp + apply (drule spec[where x=x]) + apply simp + done + +lemma lookupAround2_None1: + "(fst (lookupAround2 x s) = None) = (\y \ x. s y = None)" + apply (simp add: lookupAround2_def Let_def split_def lookupAround_def + split del: if_split + split: option.split) + apply safe + apply (fastforce split: if_split_asm) + apply (clarsimp simp: order_less_imp_le) + apply fastforce + done + +lemma lookupAround2_None2: + "(snd (lookupAround2 x s) = None) = (\y. x < y \ s y = None)" + apply (simp add: lookupAround2_def Let_def split_def del: maybe_def + split: option.splits) + apply (simp add: o_def map_option_is_None [where f=fst, unfolded map_option_case]) + apply (simp add: lookupAround_def Let_def) + apply fastforce + done + +lemma lookupAround2_char2: + "(snd (lookupAround2 x s) = Some y) = (x < y \ s y \ None \ (\z. x < z \ z < y \ s z = None))" + apply (simp add: lookupAround2_def Let_def split_def o_def + del: maybe_def + split: option.splits) + apply (simp add: o_def map_option_is_None [where f=fst, unfolded map_option_case]) + apply (simp add: lookupAround_def Let_def) + apply (rule conjI) + apply fastforce + apply clarsimp + apply (rule iffI) + apply (frule subst[where P="\x. 
x \ y2" for y2, OF _ Min_in]) + apply simp + apply fastforce + apply clarsimp + apply (subst(asm) Min_gr_iff, simp, fastforce, simp(no_asm_use), fastforce) + apply clarsimp + apply (rule order_antisym) + apply (fastforce intro: Min_le) + apply (subst Min_ge_iff) + apply simp + apply fastforce + apply clarsimp + apply (rule ccontr, simp add: linorder_not_le) + done + +lemma ps_clearI: + "\ is_aligned p n; (1 :: machine_word) < 2 ^ n; + \x. \ x > p; x \ p + 2 ^ n - 1 \ \ ksPSpace s x = None \ + \ ps_clear p n s" + apply (subgoal_tac "p \ p + 1") + apply (simp add: ps_clear_def2) + apply (rule ccontr, erule nonemptyE, clarsimp) + apply (drule word_leq_le_minus_one[where x="z + 1" for z]) + apply clarsimp + apply simp + apply (erule is_aligned_get_word_bits) + apply (erule(1) is_aligned_no_wrap') + apply simp + done + +lemma ps_clear_lookupAround2: + "\ ps_clear p' n s; ksPSpace s p' = Some x; + p' \ p; p \ p' + 2 ^ n - 1; + \ fst (lookupAround2 p (ksPSpace s)) = Some (p', x); + case_option True (\x. x - p' >= 2 ^ n) (snd (lookupAround2 p (ksPSpace s))) + \ \ P (lookupAround2 p (ksPSpace s)) \ \ P (lookupAround2 p (ksPSpace s))" + apply (drule meta_mp) + apply (cases "fst (lookupAround2 p (ksPSpace s))") + apply (simp add: lookupAround2_None1) + apply clarsimp + apply (clarsimp simp: lookupAround2_char1) + apply (frule spec[where x=p']) + apply (simp add: linorder_not_less ps_clear_def mask_def add_diff_eq) + apply (drule_tac f="\S. a \ S" in arg_cong) + apply (simp add: domI) + apply (frule(1) order_trans, simp) + apply (erule meta_mp) + apply (clarsimp split: option.split) + apply (clarsimp simp: lookupAround2_char2 ps_clear_def mask_def add_diff_eq) + apply (drule_tac a=x2 in equals0D) + apply (simp add: domI) + apply (subst(asm) order_less_imp_le[OF order_le_less_trans[where y=p]], + assumption, assumption) + apply simp + apply (erule impCE, simp_all) + apply (simp add: linorder_not_le) + apply (subst(asm) add_diff_eq[symmetric], + subst(asm) add.commute, + drule word_l_diffs(2), + fastforce simp only: field_simps) + apply (rule ccontr, simp add: linorder_not_le) + apply (drule word_le_minus_one_leq, fastforce) + done + +lemma in_magnitude_check: + "\ is_aligned x n; (1 :: machine_word) < 2 ^ n; ksPSpace s x = Some y \ \ + ((v, s') \ fst (magnitudeCheck x (snd (lookupAround2 x (ksPSpace s))) n s)) + = (s' = s \ ps_clear x n s)" + apply (rule iffI) + apply (clarsimp simp: magnitudeCheck_def in_monad lookupAround2_None2 + lookupAround2_char2 + split: option.split_asm) + apply (erule(1) ps_clearI) + apply simp + apply (erule(1) ps_clearI) + apply (simp add: linorder_not_less) + apply (drule word_leq_le_minus_one[where x="2 ^ n"]) + apply (clarsimp simp: power_overflow) + apply (drule word_l_diffs) + apply simp + apply (simp add: field_simps) + apply clarsimp + apply (erule is_aligned_get_word_bits) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (simp add: is_aligned_no_overflow) + apply (clarsimp simp add: magnitudeCheck_def in_monad + split: option.split_asm) + apply simp + apply (simp add: power_overflow) + done + +lemma in_magnitude_check3: + "\ \z. 
x < z \ z \ y \ ksPSpace s z = None; is_aligned x n; + (1 :: machine_word) < 2 ^ n; ksPSpace s x = Some v; x \ y; y - x < 2 ^ n \ \ + fst (magnitudeCheck x (snd (lookupAround2 y (ksPSpace s))) n s) + = (if ps_clear x n s then {((), s)} else {})" + apply (rule set_eqI, rule iffI) + apply (clarsimp simp: magnitudeCheck_def lookupAround2_char2 + lookupAround2_None2 in_monad + split: option.split_asm) + apply (drule(1) range_convergence1) + apply (erule(1) ps_clearI) + apply simp + apply (erule is_aligned_get_word_bits) + apply (drule(1) range_convergence2) + apply (erule(1) ps_clearI) + apply (simp add: linorder_not_less) + apply (drule word_leq_le_minus_one[where x="2 ^ n" for n], simp) + apply (drule word_l_diffs, simp) + apply (simp add: field_simps) + apply (simp add: power_overflow) + apply (clarsimp split: if_split_asm) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (drule word_le_minus_one_leq[where x="y - x"]) + apply (drule word_plus_mono_right[where x=x and y="y - x"]) + apply (erule is_aligned_get_word_bits) + apply (simp add: field_simps is_aligned_no_overflow) + apply simp + apply (simp add: field_simps) + apply (simp add: magnitudeCheck_def return_def + iffD2[OF linorder_not_less] when_def + split: option.split_asm) + done + +lemma in_alignCheck[simp]: + "((v, s') \ fst (alignCheck x n s)) = (s' = s \ is_aligned x n)" + by (simp add: alignCheck_def in_monad is_aligned_mask[symmetric] + alignError_def conj_comms + cong: conj_cong) + +lemma tcb_space_clear: + "\ tcb_cte_cases (y - x) = Some (getF, setF); + is_aligned x tcbBlockSizeBits; ps_clear x tcbBlockSizeBits s; + ksPSpace s x = Some (KOTCB tcb); ksPSpace s y = Some v; + \ x = y; getF = tcbCTable; setF = tcbCTable_update \ \ P + \ \ P" + apply (cases "x = y") + apply simp + apply (clarsimp simp: ps_clear_def mask_def add_diff_eq) + apply (drule_tac a=y in equals0D) + apply (simp add: domI) + apply (subgoal_tac "\z. y = x + z \ z < 2 ^ tcbBlockSizeBits") + apply (elim exE conjE) + apply (frule(1) is_aligned_no_wrap'[rotated, rotated]) + apply (simp add: word_bits_conv objBits_defs) + apply (erule notE, subst field_simps, rule word_plus_mono_right) + apply (drule word_le_minus_one_leq,simp,erule is_aligned_no_wrap') + apply (simp add: word_bits_conv) + apply (simp add: objBits_defs) + apply (rule_tac x="y - x" in exI) + apply (simp add: tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + done + +lemma tcb_ctes_clear: + "\ tcb_cte_cases (y - x) = Some (getF, setF); + is_aligned x tcbBlockSizeBits; ps_clear x tcbBlockSizeBits s; + ksPSpace s x = Some (KOTCB tcb) \ + \ \ ksPSpace s y = Some (KOCTE cte)" + apply clarsimp + apply (erule(4) tcb_space_clear) + apply simp + done + +lemma cte_wp_at_cases': + shows "cte_wp_at' P p s = + ((\cte. ksPSpace s p = Some (KOCTE cte) \ is_aligned p cte_level_bits + \ P cte \ ps_clear p cteSizeBits s) \ + (\n tcb getF setF. 
ksPSpace s (p - n) = Some (KOTCB tcb) \ is_aligned (p - n) tcbBlockSizeBits + \ tcb_cte_cases n = Some (getF, setF) \ P (getF tcb) \ ps_clear (p - n) tcbBlockSizeBits s))" + (is "?LHS = ?RHS") + apply (rule iffI) + apply (clarsimp simp: cte_wp_at'_def split_def + getObject_def bind_def simpler_gets_def + assert_opt_def return_def fail_def + split: option.splits + del: disjCI) + apply (clarsimp simp: loadObject_cte typeError_def alignError_def + fail_def return_def objBits_simps' + is_aligned_mask[symmetric] alignCheck_def + tcbVTableSlot_def field_simps tcbCTableSlot_def + tcbReplySlot_def tcbCallerSlot_def + tcbIPCBufferSlot_def + lookupAround2_char1 + cte_level_bits_def Ball_def + unless_def when_def bind_def + split: kernel_object.splits if_split_asm option.splits + del: disjCI) + apply (subst(asm) in_magnitude_check3, simp+, + simp split: if_split_asm, (rule disjI2)?, intro exI, rule conjI, + erule rsubst[where P="\x. ksPSpace s x = v" for s v], + fastforce simp add: field_simps, simp)+ + apply (subst(asm) in_magnitude_check3, simp+) + apply (simp split: if_split_asm + add: ) + apply (simp add: cte_wp_at'_def getObject_def split_def + bind_def simpler_gets_def return_def + assert_opt_def fail_def objBits_defs + split: option.splits) + apply (elim disjE conjE exE) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (simp add: field_simps) + apply (erule is_aligned_no_wrap') + apply (simp add: cte_level_bits_def word_bits_conv) + apply (simp add: cte_level_bits_def) + apply (simp add: loadObject_cte unless_def alignCheck_def + is_aligned_mask[symmetric] objBits_simps' + cte_level_bits_def magnitudeCheck_def + return_def fail_def) + apply (clarsimp simp: bind_def return_def when_def fail_def + split: option.splits) + apply simp + apply (erule(1) ps_clear_lookupAround2) + prefer 3 + apply (simp add: loadObject_cte unless_def alignCheck_def + is_aligned_mask[symmetric] objBits_simps' + cte_level_bits_def magnitudeCheck_def + return_def fail_def tcbCTableSlot_def tcbVTableSlot_def + tcbIPCBufferSlot_def tcbReplySlot_def tcbCallerSlot_def + split: option.split_asm) + apply (clarsimp simp: bind_def tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + apply (clarsimp simp: bind_def tcb_cte_cases_def iffD2[OF linorder_not_less] + return_def cteSizeBits_def + split: if_split_asm) + apply (subgoal_tac "p - n \ (p - n) + n", simp) + apply (erule is_aligned_no_wrap') + apply (simp add: word_bits_conv) + apply (simp add: tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + apply (subgoal_tac "(p - n) + n \ (p - n) + 0x7FF") + apply (simp add: field_simps) + apply (rule word_plus_mono_right) + apply (simp add: tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + apply (erule is_aligned_no_wrap') + apply simp + done + +lemma tcb_at_cte_at': + "tcb_at' t s \ cte_at' t s" + apply (clarsimp simp add: cte_wp_at_cases' obj_at'_def projectKO_def + del: disjCI) + apply (case_tac ko) + apply (simp_all add: projectKO_opt_tcb fail_def) + apply (rule exI[where x=0]) + apply (clarsimp simp add: return_def objBits_simps) + done + +lemma cte_wp_atE' [consumes 1, case_names CTE TCB]: + assumes cte: "cte_wp_at' P ptr s" + and r1: "\cte. + \ ksPSpace s ptr = Some (KOCTE cte); ps_clear ptr cte_level_bits s; + is_aligned ptr cte_level_bits; P cte \ \ R" + and r2: "\ tcb ptr' getF setF. 
+ \ ksPSpace s ptr' = Some (KOTCB tcb); ps_clear ptr' tcbBlockSizeBits s; is_aligned ptr' tcbBlockSizeBits; + tcb_cte_cases (ptr - ptr') = Some (getF, setF); P (getF tcb) \ \ R" + shows "R" + by (rule disjE [OF iffD1 [OF cte_wp_at_cases' cte]]) (auto intro: r1 r2 simp: cte_level_bits_def objBits_defs) + +lemma cte_wp_at_cteI': + assumes "ksPSpace s ptr = Some (KOCTE cte)" + assumes "is_aligned ptr cte_level_bits" + assumes "ps_clear ptr cte_level_bits s" + assumes "P cte" + shows "cte_wp_at' P ptr s" + using assms by (simp add: cte_wp_at_cases' cte_level_bits_def objBits_defs) + +lemma cte_wp_at_tcbI': + assumes "ksPSpace s ptr' = Some (KOTCB tcb)" + assumes "is_aligned ptr' tcbBlockSizeBits" + assumes "ps_clear ptr' tcbBlockSizeBits s" + and "tcb_cte_cases (ptr - ptr') = Some (getF, setF)" + and "P (getF tcb)" + shows "cte_wp_at' P ptr s" + using assms + apply (simp add: cte_wp_at_cases') + apply (rule disjI2, rule exI[where x="ptr - ptr'"]) + apply simp + done + +lemma obj_at_ko_at': + "obj_at' P p s \ \ko. ko_at' ko p s \ P ko" + by (auto simp add: obj_at'_def) + +lemma obj_at_aligned': + fixes P :: "('a :: pspace_storable) \ bool" + assumes oat: "obj_at' P p s" + and oab: "\(v :: 'a) (v' :: 'a). objBits v = objBits v'" + shows "is_aligned p (objBits (obj :: 'a))" + using oat + apply (clarsimp simp add: obj_at'_def) + apply (clarsimp simp add: projectKO_def fail_def return_def + project_inject objBits_def[symmetric] + split: option.splits) + apply (erule subst[OF oab]) + done + +(* locateSlot *) +lemma locateSlot_conv: + "locateSlotBasic A B = return (A + 2 ^ cte_level_bits * B)" + "locateSlotTCB = locateSlotBasic" + "locateSlotCNode A bits B = (do + x \ stateAssert (\s. case (gsCNodes s A) of None \ False | Some n \ n = bits \ B < 2 ^ n) []; + locateSlotBasic A B od)" + "locateSlotCap c B = (do + x \ stateAssert (\s. 
((isCNodeCap c \ (isZombie c \ capZombieType c \ ZombieTCB)) + \ (case gsCNodes s (capUntypedPtr c) of None \ False + | Some n \ (isCNodeCap c \ n = capCNodeBits c + \ isZombie c \ n = zombieCTEBits (capZombieType c)) \ B < 2 ^ n)) + \ isThreadCap c \ (isZombie c \ capZombieType c = ZombieTCB)) []; + locateSlotBasic (capUntypedPtr c) B od)" + apply (simp_all add: locateSlotCap_def locateSlotTCB_def fun_eq_iff) + apply (simp add: locateSlotBasic_def objBits_simps cte_level_bits_def objBits_defs) + apply (simp add: locateSlotCNode_def stateAssert_def) + apply (cases c, simp_all add: locateSlotCNode_def isZombie_def isThreadCap_def + isCNodeCap_def capUntypedPtr_def stateAssert_def + bind_assoc exec_get locateSlotTCB_def + objBits_simps + split: zombie_type.split cong: option.case_cong) + done + +lemma typ_at_tcb': + "typ_at' TCBT = tcb_at'" + apply (rule ext)+ + apply (simp add: obj_at'_real_def typ_at'_def) + apply (simp add: ko_wp_at'_def) + apply (rule iffI) + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_tcb)[9] + apply (case_tac ko) + apply (auto simp: projectKO_opt_tcb) + done + +lemma typ_at_ep: + "typ_at' EndpointT = ep_at'" + apply (rule ext)+ + apply (simp add: obj_at'_real_def typ_at'_def) + apply (simp add: ko_wp_at'_def) + apply (rule iffI) + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_ep)[9] + apply (case_tac ko) + apply (auto simp: projectKO_opt_ep) + done + +lemma typ_at_ntfn: + "typ_at' NotificationT = ntfn_at'" + apply (rule ext)+ + apply (simp add: obj_at'_real_def typ_at'_def) + apply (simp add: ko_wp_at'_def) + apply (rule iffI) + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_ntfn)[8] + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_ntfn) + done + +lemma typ_at_cte: + "typ_at' CTET = real_cte_at'" + apply (rule ext)+ + apply (simp add: obj_at'_real_def typ_at'_def) + apply (simp add: ko_wp_at'_def) + apply (rule iffI) + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_cte)[8] + apply clarsimp + apply (case_tac ko) + apply (auto simp: projectKO_opt_cte) + done + +lemma cte_at_typ': + "cte_at' c = (\s. typ_at' CTET c s \ (\n. typ_at' TCBT (c - n) s \ n \ dom tcb_cte_cases))" +proof - + have P: "\ko. (koTypeOf ko = CTET) = (\cte. ko = KOCTE cte)" + "\ko. (koTypeOf ko = TCBT) = (\tcb. ko = KOTCB tcb)" + by (case_tac ko, simp_all)+ + have Q: "\P f. (\x. (\y. x = f y) \ P x) = (\y. P (f y))" + by fastforce + show ?thesis + by (fastforce simp: cte_wp_at_cases' obj_at'_real_def typ_at'_def + ko_wp_at'_def objBits_simps' P Q conj_comms cte_level_bits_def) +qed + +lemma typ_at_lift_tcb': + "\typ_at' TCBT p\ f \\_. typ_at' TCBT p\ \ \tcb_at' p\ f \\_. tcb_at' p\" + by (simp add: typ_at_tcb') + +lemma typ_at_lift_ep': + "\typ_at' EndpointT p\ f \\_. typ_at' EndpointT p\ \ \ep_at' p\ f \\_. ep_at' p\" + by (simp add: typ_at_ep) + +lemma typ_at_lift_ntfn': + "\typ_at' NotificationT p\ f \\_. typ_at' NotificationT p\ \ \ntfn_at' p\ f \\_. ntfn_at' p\" + by (simp add: typ_at_ntfn) + +lemma typ_at_lift_cte': + "\typ_at' CTET p\ f \\_. typ_at' CTET p\ \ \real_cte_at' p\ f \\_. real_cte_at' p\" + by (simp add: typ_at_cte) + +lemma typ_at_lift_cte_at': + assumes x: "\T p. \typ_at' T p\ f \\rv. typ_at' T p\" + shows "\cte_at' c\ f \\rv. cte_at' c\" + apply (simp only: cte_at_typ') + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift x) + done + +lemma typ_at_lift_page_table_at': + assumes x: "\T p. 
f \typ_at' T p\" + shows "f \page_table_at' pt_t p\" + unfolding page_table_at'_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift' x) + +lemma ko_wp_typ_at': + "ko_wp_at' P p s \ \T. typ_at' T p s" + by (clarsimp simp: typ_at'_def ko_wp_at'_def) + +lemma koType_obj_range': + "koTypeOf k = koTypeOf k' \ obj_range' p k = obj_range' p k'" + apply (rule ccontr) + apply (simp add: obj_range'_def objBitsKO_def archObjSize_def + split: kernel_object.splits arch_kernel_object.splits) + done + +lemma typ_at_lift_valid_untyped': + assumes P: "\T p. \\s. \typ_at' T p s\ f \\rv s. \typ_at' T p s\" + shows "\\s. valid_untyped' d p n idx s\ f \\rv s. valid_untyped' d p n idx s\" + apply (clarsimp simp: valid_untyped'_def split del:if_split) + apply (rule hoare_vcg_all_lift) + apply (clarsimp simp: valid_def split del:if_split) + apply (frule ko_wp_typ_at') + apply clarsimp + apply (cut_tac T=T and p=ptr' in P) + apply (simp add: valid_def) + apply (erule_tac x=s in allE) + apply (erule impE) + prefer 2 + apply (drule (1) bspec) + apply simp + apply (clarsimp simp: typ_at'_def ko_wp_at'_def simp del:atLeastAtMost_iff) + apply (elim disjE) + apply (clarsimp simp:psubset_eq simp del:atLeastAtMost_iff) + apply (drule_tac p=ptr' in koType_obj_range') + apply (erule impE) + apply simp + apply simp + apply (drule_tac p = ptr' in koType_obj_range') + apply (clarsimp split:if_splits) + done + +lemma typ_at_lift_asid_at': + "(\T p. \typ_at' T p\ f \\_. typ_at' T p\) \ \asid_pool_at' p\ f \\_. asid_pool_at' p\" + by assumption + +lemma typ_at_lift_vcpu_at': + "(\T p. \typ_at' T p\ f \\_. typ_at' T p\) \ \vcpu_at' p\ f \\_. vcpu_at' p\" + by assumption + +lemma typ_at_lift_frame_at': + assumes "\T p. f \typ_at' T p\" + shows "f \frame_at' p sz d\" + unfolding frame_at'_def + by (wpsimp wp: hoare_vcg_all_lift hoare_vcg_const_imp_lift assms split_del: if_split) + +lemma typ_at_lift_valid_cap': + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + shows "\\s. valid_cap' cap s\ f \\rv s. valid_cap' cap s\" + including no_pre + apply (simp add: valid_cap'_def) + apply wp + apply (case_tac cap; + simp add: valid_cap'_def P[of id, simplified] typ_at_lift_tcb' + hoare_vcg_prop typ_at_lift_ep' + typ_at_lift_ntfn' typ_at_lift_cte_at' + hoare_vcg_conj_lift [OF typ_at_lift_cte_at']) + apply (rename_tac zombie_type nat) + apply (case_tac zombie_type; simp) + apply (wp typ_at_lift_tcb' P hoare_vcg_all_lift typ_at_lift_cte')+ + apply (rename_tac arch_capability) + apply (case_tac arch_capability, + simp_all add: P[of id, simplified] vspace_table_at'_defs + hoare_vcg_prop All_less_Ball + split del: if_split) + apply (wp hoare_vcg_const_Ball_lift P typ_at_lift_valid_untyped' + hoare_vcg_all_lift typ_at_lift_cte' typ_at_lift_frame_at')+ + done + + +lemma typ_at_lift_valid_irq_node': + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + shows "\valid_irq_node' p\ f \\_. valid_irq_node' p\" + apply (simp add: valid_irq_node'_def) + apply (wp hoare_vcg_all_lift P typ_at_lift_cte') + done + +lemma valid_bound_tcb_lift: + "(\T p. \typ_at' T p\ f \\_. typ_at' T p\) \ + \valid_bound_tcb' tcb\ f \\_. valid_bound_tcb' tcb\" + by (auto simp: valid_bound_tcb'_def valid_def typ_at_tcb'[symmetric] split: option.splits) + +lemma valid_arch_tcb_lift': + assumes x: "\T p. \typ_at' T p\ f \\rv. typ_at' T p\" + shows "\\s. valid_arch_tcb' tcb s\ f \\rv s. 
valid_arch_tcb' tcb s\" + apply (clarsimp simp add: valid_arch_tcb'_def) + apply (cases "atcbVCPUPtr tcb"; simp) + apply (wp x)+ + done + +lemmas typ_at_lifts = typ_at_lift_tcb' typ_at_lift_ep' + typ_at_lift_ntfn' typ_at_lift_cte' + typ_at_lift_cte_at' + typ_at_lift_page_table_at' + typ_at_lift_asid_at' + typ_at_lift_vcpu_at' + typ_at_lift_valid_untyped' + typ_at_lift_valid_cap' + valid_bound_tcb_lift + valid_arch_tcb_lift' + +lemma mdb_next_unfold: + "s \ c \ c' = (\z. s c = Some z \ c' = mdbNext (cteMDBNode z))" + by (auto simp add: mdb_next_rel_def mdb_next_def) + +lemma valid_dlist_prevD: + "\ valid_dlist m; c \ 0; c' \ 0 \ \ m \ c \ c' = m \ c \ c'" + by (fastforce simp add: valid_dlist_def Let_def mdb_next_unfold mdb_prev_def) + + +lemma no_0_simps [simp]: + assumes "no_0 m" + shows "((m 0 = Some cte) = False) \ ((Some cte = m 0) = False)" + using assms by (simp add: no_0_def) + +lemma valid_dlist_def2: + "no_0 m \ valid_dlist m = (\c c'. c \ 0 \ c' \ 0 \ m \ c \ c' = m \ c \ c')" + apply (rule iffI) + apply (simp add: valid_dlist_prevD) + apply (clarsimp simp: valid_dlist_def Let_def mdb_next_unfold mdb_prev_def) + apply (subgoal_tac "p\0") + prefer 2 + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (erule_tac x="mdbPrev (cteMDBNode cte)" in allE) + apply simp + apply (erule_tac x=p in allE) + apply clarsimp + apply clarsimp + apply (erule_tac x=p in allE) + apply simp + apply (erule_tac x="mdbNext (cteMDBNode cte)" in allE) + apply clarsimp + done + +lemma valid_dlist_def3: + "valid_dlist m = ((\c c'. m \ c \ c' \ c' \ 0 \ m \ c \ c') \ + (\c c'. m \ c \ c' \ c \ 0 \ m \ c \ c'))" + apply (rule iffI) + apply (simp add: valid_dlist_def Let_def mdb_next_unfold mdb_prev_def) + apply fastforce + apply (clarsimp simp add: valid_dlist_def Let_def mdb_next_unfold mdb_prev_def) + apply fastforce + done + +lemma vdlist_prevD: + "\ m \ c \ c'; m c = Some cte; valid_dlist m; no_0 m \ \ m \ c \ c'" + by (fastforce simp: valid_dlist_def3) + +lemma vdlist_nextD: + "\ m \ c \ c'; m c' = Some cte; valid_dlist m; no_0 m \ \ m \ c \ c'" + by (fastforce simp: valid_dlist_def3) + +lemma vdlist_prevD0: + "\ m \ c \ c'; c \ 0; valid_dlist m \ \ m \ c \ c'" + by (fastforce simp: valid_dlist_def3) + +lemma vdlist_nextD0: + "\ m \ c \ c'; c' \ 0; valid_dlist m \ \ m \ c \ c'" + by (fastforce simp: valid_dlist_def3) + +lemma vdlist_prev_src_unique: + "\ m \ p \ x; m \ p \ y; p \ 0; valid_dlist m \ \ x = y" + by (drule (2) vdlist_prevD0)+ (clarsimp simp: mdb_next_unfold) + +lemma vdlist_next_src_unique: + "\ m \ x \ p; m \ y \ p; p \ 0; valid_dlist m \ \ x = y" + by (drule (2) vdlist_nextD0)+ (clarsimp simp: mdb_prev_def) + +lemma cte_at_cte_wp_atD: + "cte_at' p s \ \cte. 
cte_wp_at' ((=) cte) p s" + by (clarsimp simp add: cte_wp_at'_def) + +lemma valid_pspace_no_0 [elim]: + "valid_pspace' s \ no_0 (ctes_of s)" + by (auto simp: valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + +lemma valid_pspace_dlist [elim]: + "valid_pspace' s \ valid_dlist (ctes_of s)" + by (auto simp: valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + +lemma next_rtrancl_tranclE [consumes 1, case_names eq trancl]: + assumes major: "m \ x \\<^sup>* y" + and r1: "x = y \ P" + and r2: "\ x \ y; m \ x \\<^sup>+ y \ \ P" + shows "P" + using major + by (auto dest: rtranclD intro: r1 r2) + +lemmas trancl_induct' [induct set] = trancl_induct [consumes 1, case_names base step] + +lemma next_single_value: + "\ m \ x \ y; m \ x \ z \ \ y = z" + unfolding mdb_next_rel_def by simp + +lemma loop_split: + assumes loop: "m \ c \\<^sup>+ c" + and split: "m \ c \\<^sup>+ c'" + shows "m \ c' \\<^sup>+ c" + using split loop +proof induct + case base + thus ?case + by (auto dest: next_single_value elim: tranclE2) +next + case (step y z) + hence "m \ y \\<^sup>+ c" by simp + hence "m \ z \\<^sup>* c" using step.hyps + by (metis next_single_value tranclD) + + thus ?case using step.prems + by (cases rule: next_rtrancl_tranclE, simp_all) +qed + +lemma no_0_lhs: + "\ m \ c \ y; no_0 m \ \ c \ 0" + unfolding no_0_def + by (erule contrapos_pn, simp add: mdb_next_unfold) + +lemma no_0_lhs_trancl: + "\ m \ c \\<^sup>+ y; no_0 m \ \ c \ 0" + by (erule tranclE2, (rule no_0_lhs, simp_all)+) + +lemma mdb_chain_0_no_loops: + assumes asm: "mdb_chain_0 m" + and no0: "no_0 m" + shows "no_loops m" +proof - + { + fix c + assume mc: "m \ c \\<^sup>+ c" + + with asm have "m \ c \\<^sup>+ 0" + unfolding mdb_chain_0_def + apply - + apply (erule bspec, erule tranclE2) + apply (auto intro: domI simp: mdb_next_unfold) + done + + with mc have "m \ 0 \\<^sup>+ c" by (rule loop_split) + hence False using no0 + by (clarsimp dest!: no_0_lhs_trancl) + } + thus "no_loops m" unfolding no_loops_def by auto +qed + +lemma valid_mdb_ctesE [elim]: + "\valid_mdb_ctes m; + \ valid_dlist m; no_0 m; mdb_chain_0 m; valid_badges m; + caps_contained' m; mdb_chunked m; untyped_mdb' m; + untyped_inc' m; valid_nullcaps m; ut_revocable' m; + class_links m; distinct_zombies m; irq_control m; + reply_masters_rvk_fb m \ + \ P\ \ P" + unfolding valid_mdb_ctes_def by auto + +lemma valid_mdb_ctesI [intro]: + "\valid_dlist m; no_0 m; mdb_chain_0 m; valid_badges m; + caps_contained' m; mdb_chunked m; untyped_mdb' m; + untyped_inc' m; valid_nullcaps m; ut_revocable' m; + class_links m; distinct_zombies m; irq_control m; + reply_masters_rvk_fb m \ + \ valid_mdb_ctes m" + unfolding valid_mdb_ctes_def by auto + +end +locale PSpace_update_eq = + fixes f :: "kernel_state \ kernel_state" + assumes pspace: "ksPSpace (f s) = ksPSpace s" +begin + +lemma state_refs_of'_eq[iff]: + "state_refs_of' (f s) = state_refs_of' s" + by (rule state_refs_of'_pspaceI [OF _ pspace], rule refl) + +lemma state_hyp_refs_of'_eq[iff]: + "state_hyp_refs_of' (f s) = state_hyp_refs_of' s" + by (rule state_hyp_refs_of'_pspaceI [OF _ pspace], rule refl) + +lemma valid_space_update [iff]: + "valid_pspace' (f s) = valid_pspace' s" + by (fastforce simp: valid_pspace' pspace) + +lemma obj_at_update [iff]: + "obj_at' P p (f s) = obj_at' P p s" + by (fastforce intro: obj_at'_pspaceI simp: pspace) + +lemma ko_wp_at_update [iff]: + "ko_wp_at' P p (f s) = ko_wp_at' P p s" + by (simp add: pspace ko_wp_at'_def ps_clear_def) + +lemma cte_wp_at_update [iff]: + "cte_wp_at' P p (f s) = cte_wp_at' P p s" + 
by (fastforce intro: cte_wp_at'_pspaceI simp: pspace) + +lemma ex_nonz_cap_to_eq'[iff]: + "ex_nonz_cap_to' p (f s) = ex_nonz_cap_to' p s" + by (simp add: ex_nonz_cap_to'_def) + +lemma iflive_update [iff]: + "if_live_then_nonz_cap' (f s) = if_live_then_nonz_cap' s" + by (simp add: if_live_then_nonz_cap'_def ex_nonz_cap_to'_def) + +lemma valid_objs_update [iff]: + "valid_objs' (f s) = valid_objs' s" + apply (simp add: valid_objs'_def pspace) + apply (fastforce intro: valid_obj'_pspaceI simp: pspace) + done + +lemma pspace_aligned_update [iff]: + "pspace_aligned' (f s) = pspace_aligned' s" + by (simp add: pspace pspace_aligned'_def) + +lemma pspace_distinct_update [iff]: + "pspace_distinct' (f s) = pspace_distinct' s" + by (simp add: pspace pspace_distinct'_def ps_clear_def) + +lemma pred_tcb_at_update [iff]: + "pred_tcb_at' proj P p (f s) = pred_tcb_at' proj P p s" + by (simp add: pred_tcb_at'_def) + +lemma valid_cap_update [iff]: + "(f s) \' c = s \' c" + by (auto intro: valid_cap'_pspaceI simp: pspace) + +lemma typ_at_update' [iff]: + "typ_at' T p (f s) = typ_at' T p s" + by (simp add: typ_at'_def) + +lemma page_table_at_update' [iff]: + "page_table_at' pt_t p (f s) = page_table_at' pt_t p s" + by (simp add: page_table_at'_def) + +lemma frame_at_update' [iff]: + "frame_at' p sz d (f s) = frame_at' p sz d s" + by (simp add: frame_at'_def) + +lemma no_0_obj'_update [iff]: + "no_0_obj' (f s) = no_0_obj' s" + by (simp add: no_0_obj'_def pspace) + +lemma pointerInUserData_update[iff]: + "pointerInUserData p (f s) = pointerInUserData p s" + by (simp add: pointerInUserData_def) + +lemma pointerInDeviceData_update[iff]: + "pointerInDeviceData p (f s) = pointerInDeviceData p s" + by (simp add: pointerInDeviceData_def) + +lemma pspace_domain_valid_update [iff]: + "pspace_domain_valid (f s) = pspace_domain_valid s" + by (simp add: pspace_domain_valid_def pspace) + +end + +locale Arch_Idle_update_eq = + fixes f :: "kernel_state \ kernel_state" + assumes arch: "ksArchState (f s) = ksArchState s" + assumes idle: "ksIdleThread (f s) = ksIdleThread s" + assumes int_nd: "intStateIRQNode (ksInterruptState (f s)) + = intStateIRQNode (ksInterruptState s)" + assumes maxObj: "gsMaxObjectSize (f s) = gsMaxObjectSize s" +begin + +lemma global_refs_update' [iff]: + "global_refs' (f s) = global_refs' s" + by (simp add: global_refs'_def arch idle int_nd) + +end + +locale P_Arch_Idle_update_eq = PSpace_update_eq + Arch_Idle_update_eq +begin + +lemma valid_global_refs_update' [iff]: + "valid_global_refs' (f s) = valid_global_refs' s" + by (simp add: valid_global_refs'_def pspace arch idle maxObj) + +lemma valid_arch_state_update' [iff]: + "valid_arch_state' (f s) = valid_arch_state' s" + by (simp add: valid_arch_state'_def arch cong: option.case_cong) + +lemma valid_idle_update' [iff]: + "valid_idle' (f s) = valid_idle' s" + by (auto simp: pspace idle) + +lemma ifunsafe_update [iff]: + "if_unsafe_then_cap' (f s) = if_unsafe_then_cap' s" + by (simp add: if_unsafe_then_cap'_def ex_cte_cap_to'_def int_nd) + +end + +locale Int_update_eq = + fixes f :: "kernel_state \ kernel_state" + assumes int: "ksInterruptState (f s) = ksInterruptState s" +begin + +lemma irqs_masked_update [iff]: + "irqs_masked' (f s) = irqs_masked' s" + by (simp add: irqs_masked'_def int) + +lemma irq_issued_update'[iff]: + "irq_issued' irq (f s) = irq_issued' irq s" + by (simp add: irq_issued'_def int) + +end + +locale P_Cur_update_eq = PSpace_update_eq + + assumes curt: "ksCurThread (f s) = ksCurThread s" + assumes curd: "ksCurDomain (f s) = 
ksCurDomain s" +begin + +lemma sch_act_wf[iff]: + "sch_act_wf ks (f s) = sch_act_wf ks s" +apply (cases ks) +apply (simp_all add: ct_in_state'_def st_tcb_at'_def tcb_in_cur_domain'_def curt curd) +done + +end + +locale P_Int_update_eq = PSpace_update_eq + Int_update_eq +begin + +lemma valid_irq_handlers_update'[iff]: + "valid_irq_handlers' (f s) = valid_irq_handlers' s" + by (simp add: valid_irq_handlers'_def cteCaps_of_def pspace) + +end + +locale P_Int_Cur_update_eq = + P_Int_update_eq + P_Cur_update_eq + +locale P_Arch_Idle_Int_update_eq = P_Arch_Idle_update_eq + P_Int_update_eq + +locale P_Arch_Idle_Int_Cur_update_eq = + P_Arch_Idle_Int_update_eq + P_Cur_update_eq + +interpretation sa_update: + P_Arch_Idle_Int_Cur_update_eq "ksSchedulerAction_update f" + by unfold_locales auto + +interpretation ready_queue_update: + P_Arch_Idle_Int_Cur_update_eq "ksReadyQueues_update f" + by unfold_locales auto + +interpretation ready_queue_bitmap1_update: + P_Arch_Idle_Int_Cur_update_eq "ksReadyQueuesL1Bitmap_update f" + by unfold_locales auto + +interpretation ready_queue_bitmap2_update: + P_Arch_Idle_Int_Cur_update_eq "ksReadyQueuesL2Bitmap_update f" + by unfold_locales auto + +interpretation cur_thread_update': + P_Arch_Idle_Int_update_eq "ksCurThread_update f" + by unfold_locales auto + +interpretation machine_state_update': + P_Arch_Idle_Int_Cur_update_eq "ksMachineState_update f" + by unfold_locales auto + +interpretation interrupt_state_update': + P_Cur_update_eq "ksInterruptState_update f" + by unfold_locales auto + +interpretation idle_update': + P_Int_Cur_update_eq "ksIdleThread_update f" + by unfold_locales auto + +interpretation arch_state_update': + P_Int_Cur_update_eq "ksArchState_update f" + by unfold_locales auto + +interpretation wu_update': + P_Arch_Idle_Int_Cur_update_eq "ksWorkUnitsCompleted_update f" + by unfold_locales auto + +interpretation gsCNodes_update: P_Arch_Idle_update_eq "gsCNodes_update f" + by unfold_locales simp_all + +interpretation gsUserPages_update: P_Arch_Idle_update_eq "gsUserPages_update f" + by unfold_locales simp_all +lemma ko_wp_at_aligned: + "ko_wp_at' ((=) ko) p s \ is_aligned p (objBitsKO ko)" + by (simp add: ko_wp_at'_def) + +interpretation ksCurDomain: + P_Arch_Idle_Int_update_eq "ksCurDomain_update f" + by unfold_locales auto + +interpretation ksDomScheduleIdx: + P_Arch_Idle_Int_Cur_update_eq "ksDomScheduleIdx_update f" + by unfold_locales auto + +interpretation ksDomSchedule: + P_Arch_Idle_Int_Cur_update_eq "ksDomSchedule_update f" + by unfold_locales auto + +interpretation ksDomainTime: + P_Arch_Idle_Int_Cur_update_eq "ksDomainTime_update f" + by unfold_locales auto + +interpretation gsUntypedZeroRanges: + P_Arch_Idle_Int_Cur_update_eq "gsUntypedZeroRanges_update f" + by unfold_locales auto + +lemma ko_wp_at_norm: + "ko_wp_at' P p s \ \ko. P ko \ ko_wp_at' ((=) ko) p s" + by (auto simp add: ko_wp_at'_def) + +lemma valid_mdb_machine_state [iff]: + "valid_mdb' (ksMachineState_update f s) = valid_mdb' s" + by (simp add: valid_mdb'_def) + +lemma cte_wp_at_norm': + "cte_wp_at' P p s \ \cte. cte_wp_at' ((=) cte) p s \ P cte" + by (simp add: cte_wp_at'_def) + +lemma pred_tcb_at' [elim!]: + "pred_tcb_at' proj P t s \ tcb_at' t s" + by (auto simp add: pred_tcb_at'_def obj_at'_def) + +lemma valid_pspace_mdb' [elim!]: + "valid_pspace' s \ valid_mdb' s" + by (simp add: valid_pspace'_def) + +lemmas hoare_use_eq_irq_node' = hoare_use_eq[where f=irq_node'] + +lemma ex_cte_cap_to'_pres: + "\ \P p. \cte_wp_at' P p\ f \\rv. cte_wp_at' P p\; + \P. \\s. 
P (irq_node' s)\ f \\rv s. P (irq_node' s)\ \ + \ \ex_cte_cap_wp_to' P p\ f \\rv. ex_cte_cap_wp_to' P p\" + apply (simp add: ex_cte_cap_wp_to'_def) + apply (rule hoare_pre) + apply (erule hoare_use_eq_irq_node') + apply (rule hoare_vcg_ex_lift) + apply assumption + apply simp + done + +section "Relationship of Executable Spec to Kernel Configuration" + +text \ + Some values are set per kernel configuration (e.g. number of domains), but other related + values (e.g. maximum domain) are derived from storage constraints (e.g. bytes used). + To relate the two, we must look at the values of kernel configuration constants. + To allow the proofs to work for all permitted values of these constants, their definitions + should only be unfolded in this section, and the derived properties kept to a minimum.\ + +lemma le_maxDomain_eq_less_numDomains: + shows "x \ unat maxDomain \ x < Kernel_Config.numDomains" + "y \ maxDomain \ unat y < Kernel_Config.numDomains" + by (auto simp: Kernel_Config.numDomains_def maxDomain_def word_le_nat_alt) + + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma page_table_pte_atI': + "\ page_table_at' pt_t p s; i \ mask (ptTranslationBits pt_t) \ \ + pte_at' (p + (i << pte_bits)) s" + by (simp add: page_table_at'_def) + +lemma valid_global_refsD': + "\ ctes_of s p = Some cte; valid_global_refs' s \ \ + kernel_data_refs \ capRange (cteCap cte) = {} \ global_refs' s \ kernel_data_refs" + by (clarsimp simp: valid_global_refs'_def valid_refs'_def ran_def) blast + +lemma no_0_prev: + "no_0 m \ \ m \ p \ 0" + by (simp add: mdb_prev_def) + +lemma ut_revocableD': + "\m p = Some (CTE cap n); isUntypedCap cap; ut_revocable' m \ \ mdbRevocable n" + unfolding ut_revocable'_def by blast + +lemma nullcapsD': + "\m p = Some (CTE NullCap n); valid_nullcaps m \ \ n = nullMDBNode" + unfolding valid_nullcaps_def by blast + +lemma untyped_mdbD': + "\m p = Some (CTE c n); isUntypedCap c; + m p' = Some (CTE c' n'); \isUntypedCap c'; + capRange c' \ untypedRange c \ {}; untyped_mdb' m \ \ + p' \ descendants_of' p m" + unfolding untyped_mdb'_def by blast + +lemma untyped_incD': + "\ m p = Some (CTE c n); isUntypedCap c; + m p' = Some (CTE c' n'); isUntypedCap c'; untyped_inc' m \ \ + (untypedRange c \ untypedRange c' \ untypedRange c' \ untypedRange c \ untypedRange c \ untypedRange c' = {}) \ + (untypedRange c \ untypedRange c' \ (p \ descendants_of' p' m \ untypedRange c \ usableUntypedRange c' = {})) \ + (untypedRange c' \ untypedRange c \ (p' \ descendants_of' p m \ untypedRange c' \ usableUntypedRange c = {})) \ + (untypedRange c = untypedRange c' \ (p' \ descendants_of' p m \ usableUntypedRange c = {} + \ p \ descendants_of' p' m \ usableUntypedRange c' = {} \ p = p'))" + unfolding untyped_inc'_def + apply (drule_tac x = p in spec) + apply (drule_tac x = p' in spec) + apply (elim allE impE) + apply simp+ + done + +lemma caps_containedD': + "\ m p = Some (CTE c n); m p' = Some (CTE c' n'); + \ isUntypedCap c'; capRange c' \ untypedRange c \ {}; + caps_contained' m\ + \ capRange c' \ untypedRange c" + unfolding caps_contained'_def by blast + +lemma class_linksD: + "\ m p = Some cte; m p' = Some cte'; m \ p \ p'; class_links m \ \ + capClass (cteCap cte) = capClass (cteCap cte')" + using class_links_def by blast + +lemma mdb_chunkedD: + "\ m p = Some (CTE cap n); m p' = Some (CTE cap' n'); + sameRegionAs cap cap'; p \ p'; mdb_chunked m \ + \ (m \ p \\<^sup>+ p' \ m \ p' \\<^sup>+ p) \ + (m \ p \\<^sup>+ p' \ is_chunk m cap p p') \ + (m \ p' \\<^sup>+ p \ is_chunk m cap' p' p)" 
+ using mdb_chunked_def by blast + +lemma irq_controlD: + "\ m p = Some (CTE IRQControlCap n); m p' = Some (CTE IRQControlCap n'); + irq_control m \ \ p' = p" + unfolding irq_control_def by blast + +lemma irq_revocable: + "\ m p = Some (CTE IRQControlCap n); irq_control m \ \ mdbRevocable n" + unfolding irq_control_def by blast + +lemma sch_act_wf_arch [simp]: + "sch_act_wf sa (ksArchState_update f s) = sch_act_wf sa s" + by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) + +lemma valid_queues_arch [simp]: + "valid_queues (ksArchState_update f s) = valid_queues s" + by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) + +lemma if_unsafe_then_cap_arch' [simp]: + "if_unsafe_then_cap' (ksArchState_update f s) = if_unsafe_then_cap' s" + by (simp add: if_unsafe_then_cap'_def ex_cte_cap_to'_def) + +lemma valid_idle_arch' [simp]: + "valid_idle' (ksArchState_update f s) = valid_idle' s" + by (simp add: valid_idle'_def) + +lemma valid_irq_node_arch' [simp]: + "valid_irq_node' w (ksArchState_update f s) = valid_irq_node' w s" + by (simp add: valid_irq_node'_def) + +lemma sch_act_wf_machine_state [simp]: + "sch_act_wf sa (ksMachineState_update f s) = sch_act_wf sa s" + by (cases sa) (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def) + +lemma valid_queues_machine_state [simp]: + "valid_queues (ksMachineState_update f s) = valid_queues s" + by (simp add: valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs) + +lemma valid_queues_arch' [simp]: + "valid_queues' (ksArchState_update f s) = valid_queues' s" + by (simp add: valid_queues'_def) + +lemma valid_queues_machine_state' [simp]: + "valid_queues' (ksMachineState_update f s) = valid_queues' s" + by (simp add: valid_queues'_def) + +lemma valid_irq_node'_machine_state [simp]: + "valid_irq_node' x (ksMachineState_update f s) = valid_irq_node' x s" + by (simp add: valid_irq_node'_def) + +(* these should be reasonable safe for automation because of the 0 pattern *) +lemma no_0_ko_wp' [elim!]: + "\ ko_wp_at' Q 0 s; no_0_obj' s \ \ P" + by (simp add: ko_wp_at'_def no_0_obj'_def) + +lemma no_0_obj_at' [elim!]: + "\ obj_at' Q 0 s; no_0_obj' s \ \ P" + by (simp add: obj_at'_def no_0_obj'_def) + +lemma no_0_typ_at' [elim!]: + "\ typ_at' T 0 s; no_0_obj' s \ \ P" + by (clarsimp simp: typ_at'_def) + +lemma no_0_ko_wp'_eq [simp]: + "no_0_obj' s \ ko_wp_at' P 0 s = False" + by (simp add: ko_wp_at'_def no_0_obj'_def) + +lemma no_0_obj_at'_eq [simp]: + "no_0_obj' s \ obj_at' P 0 s = False" + by (simp add: obj_at'_def no_0_obj'_def) + +lemma no_0_typ_at'_eq [simp]: + "no_0_obj' s \ typ_at' P 0 s = False" + by (simp add: typ_at'_def) + +lemma valid_pspace_valid_objs'[elim!]: + "valid_pspace' s \ valid_objs' s" + by (simp add: valid_pspace'_def) + +declare badgeBits_def [simp] + +lemma simple_sane_strg: + "sch_act_simple s \ sch_act_sane s" + by (simp add: sch_act_sane_def sch_act_simple_def) + +lemma sch_act_wf_cases: + "sch_act_wf action = (case action of + ResumeCurrentThread \ ct_in_state' activatable' + | ChooseNewThread \ \ + | SwitchToThread t \ \s. st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s)" +by (cases action) auto +end + +lemma (in PSpace_update_eq) cteCaps_of_update[iff]: "cteCaps_of (f s) = cteCaps_of s" + by (simp add: cteCaps_of_def pspace) + +lemma vms_sch_act_update'[iff]: + "valid_machine_state' (ksSchedulerAction_update f s) = + valid_machine_state' s" + by (simp add: valid_machine_state'_def ) + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemmas bit_simps' = pteBits_def asidHighBits_def asidPoolBits_def asid_low_bits_def + asid_high_bits_def bit_simps + +lemma objBitsT_simps: + "objBitsT EndpointT = epSizeBits" + "objBitsT NotificationT = ntfnSizeBits" + "objBitsT CTET = cteSizeBits" + "objBitsT TCBT = tcbBlockSizeBits" + "objBitsT UserDataT = pageBits" + "objBitsT UserDataDeviceT = pageBits" + "objBitsT KernelDataT = pageBits" + "objBitsT (ArchT PTET) = word_size_bits" + "objBitsT (ArchT ASIDPoolT) = pageBits" + "objBitsT (ArchT VCPUT) = vcpuBits" + unfolding objBitsT_def makeObjectT_def + by (simp add: makeObject_simps objBits_simps bit_simps')+ + + +lemma objBitsT_koTypeOf : + "(objBitsT (koTypeOf ko)) = objBitsKO ko" + apply (cases ko; simp add: objBits_simps objBitsT_simps) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp add: archObjSize_def objBitsT_simps bit_simps') + done + +lemma typ_at_aligned': + "\ typ_at' tp p s \ \ is_aligned p (objBitsT tp)" + by (clarsimp simp add: typ_at'_def ko_wp_at'_def objBitsT_koTypeOf) + +lemma valid_queues_obj_at'D: + "\ t \ set (ksReadyQueues s (d, p)); valid_queues s \ + \ obj_at' (inQ d p) t s" + apply (unfold valid_queues_def valid_queues_no_bitmap_def) + apply (elim conjE) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp) + apply (drule(1) bspec) + apply (erule obj_at'_weakenE) + apply (clarsimp) + done + +lemma obj_at'_and: + "obj_at' (P and P') t s = (obj_at' P t s \ obj_at' P' t s)" + by (rule iffI, (clarsimp simp: obj_at'_def)+) + +lemma obj_at'_activatable_st_tcb_at': + "obj_at' (activatable' \ tcbState) t = st_tcb_at' activatable' t" + by (rule ext, clarsimp simp: st_tcb_at'_def) + +lemma st_tcb_at'_runnable_is_activatable: + "st_tcb_at' runnable' t s \ st_tcb_at' activatable' t s" + by (simp add: st_tcb_at'_def) + (fastforce elim: obj_at'_weakenE) + +lemma tcb_at'_has_tcbPriority: + "tcb_at' t s \ \p. obj_at' (\tcb. tcbPriority tcb = p) t s" + by (clarsimp simp add: obj_at'_def) + +lemma pred_tcb_at'_Not: + "pred_tcb_at' f (Not o P) t s = (tcb_at' t s \ \ pred_tcb_at' f P t s)" + by (auto simp: pred_tcb_at'_def obj_at'_def) + +lemma obj_at'_conj_distrib: + "obj_at' (\ko. P ko \ Q ko) p s \ obj_at' P p s \ obj_at' Q p s" + by (auto simp: obj_at'_def) + +lemma obj_at'_conj: + "obj_at' (\ko. P ko \ Q ko) p s = (obj_at' P p s \ obj_at' Q p s)" + using obj_at'_conj_distrib obj_at_conj' by blast + +lemma not_obj_at'_strengthen: + "obj_at' (Not \ P) p s \ \ obj_at' P p s" + by (clarsimp simp: obj_at'_def) + +lemma not_pred_tcb_at'_strengthen: + "pred_tcb_at' f (Not \ P) p s \ \ pred_tcb_at' f P p s" + by (clarsimp simp: pred_tcb_at'_def obj_at'_def) + +lemma obj_at'_ko_at'_prop: + "ko_at' ko t s \ obj_at' P t s = P ko" + by (drule obj_at_ko_at', clarsimp simp: obj_at'_def) + +lemma valid_queues_no_bitmap_def': + "valid_queues_no_bitmap = + (\s. \d p. (\t\set (ksReadyQueues s (d, p)). 
+ obj_at' (inQ d p) t s \ st_tcb_at' runnable' t s) \ + distinct (ksReadyQueues s (d, p)) \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" + apply (rule ext, rule iffI) + apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_and pred_tcb_at'_def o_def + elim!: obj_at'_weakenE)+ + done + +lemma valid_queues_running: + assumes Q: "t \ set(ksReadyQueues s (d, p))" "valid_queues s" + shows "st_tcb_at' runnable' t s" + using assms by (clarsimp simp add: valid_queues_def valid_queues_no_bitmap_def') + +lemma valid_refs'_cteCaps: + "valid_refs' S (ctes_of s) = (\c \ ran (cteCaps_of s). S \ capRange c = {})" + by (fastforce simp: valid_refs'_def cteCaps_of_def elim!: ranE) + +lemma valid_cap_sizes_cteCaps: + "valid_cap_sizes' n (ctes_of s) = (\c \ ran (cteCaps_of s). 2 ^ capBits c \ n)" + apply (simp add: valid_cap_sizes'_def cteCaps_of_def) + apply (fastforce elim!: ranE) + done + +lemma cte_at_valid_cap_sizes_0: + "valid_cap_sizes' n ctes \ ctes p = Some cte \ 0 < n" + apply (clarsimp simp: valid_cap_sizes'_def) + apply (drule bspec, erule ranI) + apply (rule Suc_le_lessD, erule order_trans[rotated]) + apply simp + done + +lemma invs_valid_stateI' [elim!]: + "invs' s \ valid_state' s" + by (simp add: invs'_def) + +lemma tcb_at_invs' [elim!]: + "invs' s \ tcb_at' (ksCurThread s) s" + by (simp add: invs'_def cur_tcb'_def) + +lemma invs_valid_objs' [elim!]: + "invs' s \ valid_objs' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma invs_pspace_aligned' [elim!]: + "invs' s \ pspace_aligned' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma invs_pspace_distinct' [elim!]: + "invs' s \ pspace_distinct' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma invs_valid_pspace' [elim!]: + "invs' s \ valid_pspace' s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_arch_state' [elim!]: + "invs' s \ valid_arch_state' s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_cur' [elim!]: + "invs' s \ cur_tcb' s" + by (simp add: invs'_def) + +lemma invs_mdb' [elim!]: + "invs' s \ valid_mdb' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma valid_mdb_no_loops [elim!]: + "valid_mdb_ctes m \ no_loops m" + by (auto intro: mdb_chain_0_no_loops) + +lemma invs_no_loops [elim!]: + "invs' s \ no_loops (ctes_of s)" + apply (rule valid_mdb_no_loops) + apply (simp add: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def) + done + +lemma invs_iflive'[elim!]: + "invs' s \ if_live_then_nonz_cap' s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_unsafe_then_cap' [elim!]: + "invs' s \ if_unsafe_then_cap' s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym' [elim!]: + "invs' s \ sym_refs (state_refs_of' s)" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sym_hyp' [elim!]: + "invs' s \ sym_refs (state_hyp_refs_of' s)" + by (simp add: invs'_def valid_state'_def) + +lemma invs_sch_act_wf' [elim!]: + "invs' s \ sch_act_wf (ksSchedulerAction s) s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_queues [elim!]: + "invs' s \ valid_queues s" + by (simp add: invs'_def valid_state'_def) + +lemma invs_valid_idle'[elim!]: + "invs' s \ valid_idle' s" + by (fastforce simp: invs'_def valid_state'_def) + +lemma invs_valid_global'[elim!]: + "invs' s \ valid_global_refs' s" + by (fastforce simp: invs'_def valid_state'_def) + +lemma invs'_invs_no_cicd: + "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" + by (simp add: invs'_to_invs_no_cicd'_def) + 
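+(* A minimal, self-contained sketch of the projection-lemma pattern used by the
+   invs_* lemmas above. The toy_* names are hypothetical and not part of l4v: each
+   conjunct of an invariant bundle gets its own [elim!] projection, so later proofs
+   can extract that conjunct from an invs-style assumption without unfolding the
+   bundle definition again. *)
+definition toy_conj :: "nat \<Rightarrow> bool" where
+  "toy_conj s = (s < 100)"
+
+definition toy_invs :: "nat \<Rightarrow> bool" where
+  "toy_invs s = (toy_conj s \<and> s < 10)"
+
+lemma toy_invs_conj[elim!]:
+  "toy_invs s \<Longrightarrow> toy_conj s"
+  by (simp add: toy_invs_def)
+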
+lemma invs_valid_queues'_strg: + "invs' s \ valid_queues' s" + by (clarsimp simp: invs'_def valid_state'_def) + +lemmas invs_valid_queues'[elim!] = invs_valid_queues'_strg[rule_format] + +lemma einvs_valid_etcbs: "einvs s \ valid_etcbs s" + by (clarsimp simp: valid_sched_def) + +lemma invs'_bitmapQ_no_L1_orphans: + "invs' s \ bitmapQ_no_L1_orphans s" + by (drule invs_queues, simp add: valid_queues_def) + +lemma invs_ksCurDomain_maxDomain' [elim!]: + "invs' s \ ksCurDomain s \ maxDomain" + by (simp add: invs'_def valid_state'_def) + +lemma simple_st_tcb_at_state_refs_ofD': + "st_tcb_at' simple' t s \ bound_tcb_at' (\x. tcb_bound_refs' x = state_refs_of' s t) t s" + by (fastforce simp: pred_tcb_at'_def obj_at'_def state_refs_of'_def + projectKO_eq project_inject) + +lemma cur_tcb_arch' [iff]: + "cur_tcb' (ksArchState_update f s) = cur_tcb' s" + by (simp add: cur_tcb'_def) + +lemma cur_tcb'_machine_state [simp]: + "cur_tcb' (ksMachineState_update f s) = cur_tcb' s" + by (simp add: cur_tcb'_def) + +lemma invs_no_0_obj'[elim!]: + "invs' s \ no_0_obj' s" + by (simp add: invs'_def valid_state'_def valid_pspace'_def) + +lemma invs'_gsCNodes_update[simp]: + "invs' (gsCNodes_update f s') = invs' s'" + apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def + bitmapQ_defs + valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def + cur_tcb'_def) + apply (cases "ksSchedulerAction s'") + apply (simp_all add: ct_in_state'_def tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def ct_not_inQ_def) + done + +lemma invs'_gsUserPages_update[simp]: + "invs' (gsUserPages_update f s') = invs' s'" + apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def + bitmapQ_defs + valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def + cur_tcb'_def) + apply (cases "ksSchedulerAction s'") + apply (simp_all add: ct_in_state'_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def ct_not_inQ_def) + done + +lemma invs_queues_tcb_in_cur_domain': + "\ ksReadyQueues s (d, p) = x # xs; invs' s; d = ksCurDomain s\ + \ tcb_in_cur_domain' x s" + apply (subgoal_tac "x \ set (ksReadyQueues s (d, p))") + apply (drule (1) valid_queues_obj_at'D[OF _ invs_queues]) + apply (auto simp: inQ_def tcb_in_cur_domain'_def elim: obj_at'_weakenE) + done + +lemma pred_tcb'_neq_contra: + "\ pred_tcb_at' proj P p s; pred_tcb_at' proj Q p s; \st. 
P st \ Q st \ \ False" + by (clarsimp simp: pred_tcb_at'_def obj_at'_def) + +lemma invs'_ksDomSchedule: + "invs' s \ KernelStateData_H.ksDomSchedule s = KernelStateData_H.ksDomSchedule (newKernelState undefined)" +unfolding invs'_def valid_state'_def by clarsimp + +lemma invs'_ksDomScheduleIdx: + "invs' s \ KernelStateData_H.ksDomScheduleIdx s < length (KernelStateData_H.ksDomSchedule (newKernelState undefined))" +unfolding invs'_def valid_state'_def by clarsimp + +lemma valid_bitmap_valid_bitmapQ_exceptE: + "\ valid_bitmapQ_except d p s ; (bitmapQ d p s \ ksReadyQueues s (d,p) \ []) ; + bitmapQ_no_L2_orphans s \ + \ valid_bitmapQ s" + unfolding valid_bitmapQ_def valid_bitmapQ_except_def + by force + +lemma valid_bitmap_valid_bitmapQ_exceptI[intro]: + "valid_bitmapQ s \ valid_bitmapQ_except d p s" + unfolding valid_bitmapQ_except_def valid_bitmapQ_def + by simp + +lemma mask_wordRadix_less_wordBits: + assumes sz: "wordRadix \ size w" + shows "unat ((w::'a::len word) && mask wordRadix) < wordBits" + using word_unat_mask_lt[where m=wordRadix and w=w] assms + by (simp add: wordRadix_def wordBits_def') + +lemma priority_mask_wordRadix_size: + "unat ((w::priority) && mask wordRadix) < wordBits" + by (rule mask_wordRadix_less_wordBits, simp add: wordRadix_def word_size) + +end +(* The normalise_obj_at' tactic was designed to simplify situations similar to: + ko_at' ko p s \ + obj_at' (complicated_P (obj_at' (complicated_Q (obj_at' ...)) p s)) p s + + It seems to also offer assistance in cases where there is lots of st_tcb_at', ko_at', obj_at' + confusion. If your goal looks like that kind of mess, try it out. It can help to not unfold + obj_at'_def which speeds up proofs. + *) +context begin + +private definition + "ko_at'_defn v \ ko_at' v" + +private lemma ko_at_defn_rewr: + "ko_at'_defn ko p s \ (obj_at' P p s = P ko)" + unfolding ko_at'_defn_def + by (auto simp: obj_at'_def) + +private lemma ko_at_defn_uniqueD: + "ko_at'_defn ko p s \ ko_at'_defn ko' p s \ ko' = ko" + unfolding ko_at'_defn_def + by (auto simp: obj_at'_def) + +private lemma ko_at_defn_pred_tcb_at': + "ko_at'_defn ko p s \ (pred_tcb_at' proj P p s = P (proj (tcb_to_itcb' ko)))" + by (auto simp: pred_tcb_at'_def ko_at_defn_rewr) + +private lemma ko_at_defn_ko_wp_at': + "ko_at'_defn ko p s \ (ko_wp_at' P p s = P (injectKO ko))" + by (clarsimp simp: ko_at'_defn_def obj_at'_real_def + ko_wp_at'_def project_inject) + +method normalise_obj_at' = + (clarsimp?, elim obj_at_ko_at'[folded ko_at'_defn_def, elim_format], + clarsimp simp: ko_at_defn_rewr ko_at_defn_pred_tcb_at' ko_at_defn_ko_wp_at', + ((drule(1) ko_at_defn_uniqueD)+)?, + clarsimp simp: ko_at'_defn_def) + +end + +add_upd_simps "invs' (gsUntypedZeroRanges_update f s)" + (obj_at'_real_def) +declare upd_simps[simp] + +lemma neq_out_intv: + "\ a \ b; b \ {a..a + c - 1} - {a} \ \ b \ {a..a + c - 1}" + by simp + +lemma rule_out_intv: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; a \ b \ + \ b \ mask_range a (objBitsKO obj)" + apply (drule(1) pspace_distinctD') + apply (subst (asm) ps_clear_def) + apply (drule_tac x = b in orthD2) + apply fastforce + apply (drule neq_out_intv) + apply (simp add: mask_def add_diff_eq) + apply (simp add: mask_def add_diff_eq) + done + +lemma ptr_range_mask_range: + "{ptr..ptr + 2 ^ bits - 1} = mask_range ptr bits" + unfolding mask_def + by simp + +lemma distinct_obj_range'_not_subset: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ \ obj_range' b obj' \ 
obj_range' a obj" + unfolding obj_range'_def + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule (3) rule_out_intv) + using is_aligned_no_overflow_mask + by fastforce + +lemma obj_range'_disjoint: + "\ ksPSpace s a = Some obj; ksPSpace s b = Some obj'; pspace_distinct' s; + pspace_aligned' s; a \ b \ + \ obj_range' a obj \ obj_range' b obj' = {}" + apply (frule_tac x=a in pspace_alignedD') + apply assumption + apply (frule_tac x=b in pspace_alignedD') + apply assumption + apply (frule_tac p=a and p'=b in aligned_mask_range_cases) + apply assumption + apply (fastforce dest: distinct_obj_range'_not_subset + simp: obj_range'_def) + done + +qualify AARCH64_H (in Arch) + +(* + Then idea with this class is to be able to generically constrain + predicates over pspace_storable values to are not of type VCPU, + this is useful for invariants such as obj_at' that are trivially + true (sort of) if the predicate and the function (in the Hoare triple) + manipulate different types of objects +*) + +class no_vcpu = pspace_storable + + assumes not_vcpu: "koType TYPE('a) \ ArchT AARCH64_H.VCPUT" + +instance tcb :: no_vcpu by intro_classes auto +instance endpoint :: no_vcpu by intro_classes auto +instance notification :: no_vcpu by intro_classes auto +instance cte :: no_vcpu by intro_classes auto +instance user_data :: no_vcpu by intro_classes auto +instance user_data_device :: no_vcpu by intro_classes auto + +end_qualify + +instantiation AARCH64_H.asidpool :: no_vcpu +begin +interpretation Arch . +instance by intro_classes auto +end + +instantiation AARCH64_H.pte :: no_vcpu +begin +interpretation Arch . +instance by intro_classes auto +end + +end diff --git a/proof/refine/AARCH64/Invocations_R.thy b/proof/refine/AARCH64/Invocations_R.thy new file mode 100644 index 0000000000..0bc901c2ef --- /dev/null +++ b/proof/refine/AARCH64/Invocations_R.thy @@ -0,0 +1,26 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Invocations_R +imports Bits_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma invocationType_eq[simp]: + "invocationType = invocation_type" + unfolding invocationType_def invocation_type_def Let_def + by (rule ext, simp) (metis from_to_enum maxBound_is_bound') + +lemma genInvocationType_eq[simp]: + "genInvocationType = gen_invocation_type" + by (rule ext) (simp add: genInvocationType_def gen_invocation_type_def) + +end + +declare resolveAddressBits.simps[simp del] + +end diff --git a/proof/refine/AARCH64/IpcCancel_R.thy b/proof/refine/AARCH64/IpcCancel_R.thy new file mode 100644 index 0000000000..64225e6774 --- /dev/null +++ b/proof/refine/AARCH64/IpcCancel_R.thy @@ -0,0 +1,2844 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory IpcCancel_R +imports + Schedule_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +crunch aligned'[wp]: cancelAllIPC pspace_aligned' + (wp: crunch_wps mapM_x_wp' simp: unless_def) +crunch distinct'[wp]: cancelAllIPC pspace_distinct' + (wp: crunch_wps mapM_x_wp' simp: unless_def) + +crunch aligned'[wp]: cancelAllSignals pspace_aligned' + (wp: crunch_wps mapM_x_wp') +crunch distinct'[wp]: cancelAllSignals pspace_distinct' + (wp: crunch_wps mapM_x_wp') + +lemma cancelSignal_simple[wp]: + "\\\ cancelSignal t ntfn \\rv. 
st_tcb_at' simple' t\" + apply (simp add: cancelSignal_def Let_def) + apply (wp setThreadState_st_tcb | simp)+ + done + +lemma cancelSignal_pred_tcb_at': + "\pred_tcb_at' proj P t' and K (t \ t')\ + cancelSignal t ntfnptr + \\rv. pred_tcb_at' proj P t'\" + apply (simp add: cancelSignal_def) + apply (wp sts_pred_tcb_neq' getNotification_wp | wpc | clarsimp)+ + done + +crunch pred_tcb_at'[wp]: emptySlot "pred_tcb_at' proj P t" + (wp: setCTE_pred_tcb_at') + +(* valid_queues is too strong *) +definition valid_inQ_queues :: "KernelStateData_H.kernel_state \ bool" where + "valid_inQ_queues \ + \s. \d p. (\t\set (ksReadyQueues s (d, p)). obj_at' (inQ d p) t s) \ distinct (ksReadyQueues s (d, p))" + +lemma valid_inQ_queues_ksSchedulerAction_update[simp]: + "valid_inQ_queues (ksSchedulerAction_update f s) = valid_inQ_queues s" + by (simp add: valid_inQ_queues_def) + +lemma valid_inQ_queues_ksReadyQueuesL1Bitmap_upd[simp]: + "valid_inQ_queues (ksReadyQueuesL1Bitmap_update f s) = valid_inQ_queues s" + unfolding valid_inQ_queues_def + by simp + +lemma valid_inQ_queues_ksReadyQueuesL2Bitmap_upd[simp]: + "valid_inQ_queues (ksReadyQueuesL2Bitmap_update f s) = valid_inQ_queues s" + unfolding valid_inQ_queues_def + by simp + +defs capHasProperty_def: + "capHasProperty ptr P \ cte_wp_at' (\c. P (cteCap c)) ptr" + +end + +(* Assume various facts about cteDeleteOne, proved in Finalise_R *) +locale delete_one_conc_pre = + assumes delete_one_st_tcb_at: + "\P. (\st. simple' st \ P st) \ + \st_tcb_at' P t\ cteDeleteOne slot \\rv. st_tcb_at' P t\" + assumes delete_one_typ_at: + "\P. \\s. P (typ_at' T p s)\ cteDeleteOne slot \\rv s. P (typ_at' T p s)\" + assumes delete_one_aligned: + "\pspace_aligned'\ cteDeleteOne slot \\rv. pspace_aligned'\" + assumes delete_one_distinct: + "\pspace_distinct'\ cteDeleteOne slot \\rv. pspace_distinct'\" + assumes delete_one_it: + "\P. \\s. P (ksIdleThread s)\ cteDeleteOne cap \\rv s. P (ksIdleThread s)\" + assumes delete_one_queues: + "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ + cteDeleteOne sl \\rv. Invariants_H.valid_queues\" + assumes delete_one_inQ_queues: + "\valid_inQ_queues\ cteDeleteOne sl \\rv. valid_inQ_queues\" + assumes delete_one_sch_act_simple: + "\sch_act_simple\ cteDeleteOne sl \\rv. sch_act_simple\" + assumes delete_one_sch_act_not: + "\t. \sch_act_not t\ cteDeleteOne sl \\rv. sch_act_not t\" + assumes delete_one_reply_st_tcb_at: + "\P t. \\s. st_tcb_at' P t s \ (\t' r. cte_wp_at' (\cte. cteCap cte = ReplyCap t' False r) slot s)\ + cteDeleteOne slot + \\rv. st_tcb_at' P t\" + assumes delete_one_ksCurDomain: + "\P. \\s. P (ksCurDomain s)\ cteDeleteOne sl \\_ s. P (ksCurDomain s)\" + assumes delete_one_tcbDomain_obj_at': + "\P. \obj_at' (\tcb. P (tcbDomain tcb)) t'\ cteDeleteOne slot \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + +lemma (in delete_one_conc_pre) cancelIPC_simple[wp]: + "\\\ cancelIPC t \\rv. 
st_tcb_at' simple' t\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def + cong: Structures_H.thread_state.case_cong list.case_cong) + apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule hoare_pre) + apply (wpc + | wp sts_st_tcb_at'_cases hoare_vcg_conj_lift + hoare_vcg_const_imp_lift delete_one_st_tcb_at + threadSet_pred_tcb_no_state + hoare_strengthen_post [OF cancelSignal_simple] + | simp add: o_def if_fun_split + | rule hoare_drop_imps + | clarsimp elim!: pred_tcb'_weakenE)+ + apply (auto simp: pred_tcb_at' + elim!: pred_tcb'_weakenE) + done + +lemma (in delete_one_conc_pre) cancelIPC_st_tcb_at': + "\st_tcb_at' P t' and K (t \ t')\ + cancelIPC t + \\rv. st_tcb_at' P t'\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def locateSlot_conv + capHasProperty_def isCap_simps) + apply (wp sts_pred_tcb_neq' hoare_drop_imps delete_one_reply_st_tcb_at + | wpc | clarsimp)+ + apply (wp getCTE_wp | clarsimp)+ + apply (wp hoare_vcg_ex_lift threadSet_cte_wp_at' hoare_vcg_imp_lift + cancelSignal_pred_tcb_at' sts_pred_tcb_neq' getEndpoint_wp gts_wp' + threadSet_pred_tcb_no_state + | wpc | clarsimp)+ + apply (auto simp: cte_wp_at_ctes_of isCap_simps) + done + +context begin interpretation Arch . +crunch typ_at'[wp]: emptySlot "\s. P (typ_at' T p s)" +end + +crunch tcb_at'[wp]: cancelSignal "tcb_at' t" + (wp: crunch_wps simp: crunch_simps) + +context delete_one_conc_pre +begin + +lemmas delete_one_typ_ats[wp] = typ_at_lifts [OF delete_one_typ_at] + +lemma cancelIPC_tcb_at'[wp]: + "\tcb_at' t\ cancelIPC t' \\_. tcb_at' t\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) + apply (wp delete_one_typ_ats hoare_drop_imps + | simp add: o_def if_apply_def2 | wpc | assumption)+ + done + +end + +declare delete_remove1 [simp] +declare delete.simps [simp del] + +lemma invs_weak_sch_act_wf[elim!]: + "invs' s \ weak_sch_act_wf (ksSchedulerAction s) s" + apply (drule invs_sch_act_wf') + apply (clarsimp simp: weak_sch_act_wf_def) + done + +lemma blocked_cancelIPC_corres: + "\ st = Structures_A.BlockedOnReceive epPtr p' \ + st = Structures_A.BlockedOnSend epPtr p; thread_state_relation st st' \ \ + corres dc (invs and st_tcb_at ((=) st) t) (invs' and st_tcb_at' ((=) st') t) + (blocked_cancel_ipc st t) + (do ep \ getEndpoint epPtr; + y \ assert (\ (case ep of IdleEP \ True | _ \ False)); + ep' \ + if remove1 t (epQueue ep) = [] then return IdleEP + else + return $ epQueue_update (%_. 
(remove1 t (epQueue ep))) ep; + y \ setEndpoint epPtr ep'; + setThreadState Structures_H.thread_state.Inactive t + od)" + apply (simp add: blocked_cancel_ipc_def gbep_ret) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres]) + apply (rule_tac F="ep \ IdleEP" in corres_gen_asm2) + apply (rule corres_assert_assume[rotated]) + apply (clarsimp split: endpoint.splits) + apply (rule_tac P="invs and st_tcb_at ((=) st) t" and + P'="invs' and st_tcb_at' ((=) st') t" in corres_inst) + apply (case_tac rv) + apply (simp add: ep_relation_def) + apply (simp add: get_ep_queue_def ep_relation_def split del: if_split) + apply (rename_tac list) + apply (case_tac "remove1 t list") + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (simp add: valid_tcb_state_def pred_conj_def) + apply (wp weak_sch_act_wf_lift)+ + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply (clarsimp simp: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (wp)+ + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply (clarsimp simp: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply (simp add: get_ep_queue_def ep_relation_def split del: if_split) + apply (rename_tac list) + apply (case_tac "remove1 t list") + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (simp add: valid_tcb_state_def pred_conj_def) + apply (wp weak_sch_act_wf_lift)+ + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply (clarsimp simp: pred_tcb_at') + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (wp)+ + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply (clarsimp simp: pred_tcb_at') + 
apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply (wp getEndpoint_wp)+ + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (erule pspace_valid_objsE) + apply fastforce + apply (auto simp: valid_tcb_state_def valid_tcb_def + valid_obj_def obj_at_def)[1] + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarify + apply (drule ko_at_valid_objs') + apply fastforce + apply simp + apply (auto simp add: valid_obj'_def valid_tcb'_def + valid_tcb_state'_def)[1] + apply (fastforce simp: ko_wp_at'_def obj_at'_def dest: sym_refs_st_tcb_atD') + done + +lemma cancelSignal_corres: + "corres dc + (invs and st_tcb_at ((=) (Structures_A.BlockedOnNotification ntfn)) t) + (invs' and st_tcb_at' ((=) (BlockedOnNotification ntfn)) t) + (cancel_signal t ntfn) + (cancelSignal t ntfn)" + apply (simp add: cancel_signal_def cancelSignal_def Let_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getNotification_corres]) + apply (rule_tac F="isWaitingNtfn (ntfnObj ntfnaa)" in corres_gen_asm2) + apply (case_tac "ntfn_obj ntfna") + apply (simp add: ntfn_relation_def isWaitingNtfn_def) + apply (simp add: isWaitingNtfn_def ntfn_relation_def split del: if_split) + apply (rename_tac list) + apply (rule_tac R="remove1 t list = []" in corres_cases) + apply (simp del: dc_simp) + apply (rule corres_split[OF setNotification_corres]) + apply (simp add: ntfn_relation_def) + apply (rule setThreadState_corres) + apply simp + apply (wp)+ + apply (simp add: list_case_If del: dc_simp) + apply (rule corres_split[OF setNotification_corres]) + apply (clarsimp simp add: ntfn_relation_def neq_Nil_conv) + apply (rule setThreadState_corres) + apply simp + apply (wp)+ + apply (simp add: isWaitingNtfn_def ntfn_relation_def) + apply (wp getNotification_wp)+ + apply (clarsimp simp: conj_comms st_tcb_at_tcb_at) + apply (clarsimp simp: st_tcb_at_def obj_at_def) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (erule pspace_valid_objsE) + apply fastforce + apply (clarsimp simp: valid_obj_def valid_tcb_def valid_tcb_state_def) + apply (drule sym, simp add: obj_at_def) + apply (clarsimp simp: conj_comms pred_tcb_at' cong: conj_cong) + apply (rule conjI) + apply (simp add: pred_tcb_at'_def) + apply (drule obj_at_ko_at') + apply clarsimp + apply (frule ko_at_valid_objs') + apply fastforce + apply simp + apply (clarsimp simp: valid_obj'_def valid_tcb'_def valid_tcb_state'_def) + apply (drule sym, simp) + apply (clarsimp simp: invs_weak_sch_act_wf) + apply (drule sym_refs_st_tcb_atD', fastforce) + apply (fastforce simp: isWaitingNtfn_def ko_wp_at'_def obj_at'_def + ntfn_bound_refs'_def + split: Structures_H.notification.splits ntfn.splits option.splits) + done + +lemma cte_map_tcb_2: + "cte_map (t, tcb_cnode_index 2) = t + 2*2^cte_level_bits" + by (simp add: cte_map_def tcb_cnode_index_def to_bl_1 shiftl_t2n) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cte_wp_at_master_reply_cap_to_ex_rights: + "cte_wp_at (is_master_reply_cap_to t) ptr + = (\s. \rights. cte_wp_at ((=) (cap.ReplyCap t True rights)) ptr s)" + by (rule ext, rule iffI; clarsimp simp: cte_wp_at_def is_master_reply_cap_to_def) + +lemma cte_wp_at_reply_cap_to_ex_rights: + "cte_wp_at (is_reply_cap_to t) ptr + = (\s. \rights. 
cte_wp_at ((=) (cap.ReplyCap t False rights)) ptr s)" + by (rule ext, rule iffI; clarsimp simp: cte_wp_at_def is_reply_cap_to_def) + +lemma reply_no_descendants_mdbNext_null: + assumes descs: "descendants_of (t, tcb_cnode_index 2) (cdt s) = {}" + and sr: "(s, s') \ state_relation" + and invs: "valid_reply_caps s" "valid_reply_masters s" + "valid_objs s" "valid_mdb s" "valid_mdb' s'" "pspace_aligned' s'" + "pspace_distinct' s'" + and tcb: "st_tcb_at (Not \ halted) t s" + and cte: "ctes_of s' (t + 2*2^cte_level_bits) = Some cte" + shows "mdbNext (cteMDBNode cte) = nullPointer" +proof - + from invs st_tcb_at_reply_cap_valid[OF tcb] + have "cte_wp_at (is_master_reply_cap_to t) (t, tcb_cnode_index 2) s" + by (fastforce simp: cte_wp_at_caps_of_state is_cap_simps is_master_reply_cap_to_def) + + hence "\r. cteCap cte = capability.ReplyCap t True r" + using invs sr + by (fastforce simp: cte_wp_at_master_reply_cap_to_ex_rights shiftl_t2n cte_index_repair + cte_wp_at_ctes_of cte cte_map_def tcb_cnode_index_def + dest: pspace_relation_cte_wp_at state_relation_pspace_relation) + + hence class_link: + "\cte'. ctes_of s' (mdbNext (cteMDBNode cte)) = Some cte' \ + capClass (cteCap cte') = ReplyClass t" + using invs + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) + apply (drule class_linksD[where m="ctes_of s'", OF cte]) + apply (simp add: mdb_next_unfold cte) + apply assumption + apply simp + done + + from invs tcb descs have "\ptr m g. + cte_wp_at ((=) (cap.ReplyCap t m g)) ptr s \ ptr = (t, tcb_cnode_index 2)" + apply (intro allI impI) + apply (case_tac m) + apply (fastforce simp: invs_def valid_state_def valid_reply_masters_def + cte_wp_at_master_reply_cap_to_ex_rights) + apply (fastforce simp: has_reply_cap_def cte_wp_at_reply_cap_to_ex_rights + dest: reply_master_no_descendants_no_reply elim: st_tcb_at_tcb_at) + done + hence "\ptr m mdb r. + ctes_of s' ptr = Some (CTE (capability.ReplyCap t m r) mdb) \ ptr = t + 2*2^cte_level_bits" + using sr invs + apply (intro allI impI) + apply (drule(2) pspace_relation_cte_wp_atI + [OF state_relation_pspace_relation]) + apply (elim exE, case_tac c, simp_all del: split_paired_All) + apply (elim allE, erule impE, fastforce) + apply (clarsimp simp: cte_map_def tcb_cnode_index_def shiftl_t2n) + done + hence class_unique: + "\ptr cte'. ctes_of s' ptr = Some cte' \ + capClass (cteCap cte') = ReplyClass t \ + ptr = t + 2*2^cte_level_bits" + apply (intro allI impI) + apply (case_tac cte', rename_tac cap node, case_tac cap, simp_all) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, simp_all) + done + + from invs have no_null: "ctes_of s' nullPointer = None" + by (clarsimp simp: no_0_def nullPointer_def valid_mdb'_def valid_mdb_ctes_def) + + from invs cte have no_loop: "mdbNext (cteMDBNode cte) \ t + 2*2^cte_level_bits" + by (fastforce simp: mdb_next_rel_def mdb_next_def + valid_mdb'_def + dest: valid_mdb_no_loops no_loops_direct_simp) + + from invs cte have + "mdbNext (cteMDBNode cte) \ nullPointer \ + (\cte'. 
ctes_of s' (mdbNext (cteMDBNode cte)) = Some cte')" + by (fastforce simp: valid_mdb'_def valid_mdb_ctes_def nullPointer_def + elim: valid_dlistEn) + hence + "mdbNext (cteMDBNode cte) \ nullPointer \ + mdbNext (cteMDBNode cte) = t + 2*2^cte_level_bits" + using class_link class_unique + by clarsimp + thus ?thesis + by (simp add: no_loop) +qed + +lemma reply_descendants_mdbNext_nonnull: + assumes descs: "descendants_of (t, tcb_cnode_index 2) (cdt s) \ {}" + and sr: "(s, s') \ state_relation" + and tcb: "st_tcb_at (Not \ halted) t s" + and cte: "ctes_of s' (t + 2*2^cte_level_bits) = Some cte" + shows "mdbNext (cteMDBNode cte) \ nullPointer" +proof - + from tcb have "cte_at (t, tcb_cnode_index 2) s" + by (simp add: st_tcb_at_tcb_at tcb_at_cte_at dom_tcb_cap_cases) + hence "descendants_of' (t + 2*2^cte_level_bits) (ctes_of s') \ {}" + using sr descs + by (fastforce simp: state_relation_def cdt_relation_def cte_map_def tcb_cnode_index_def shiftl_t2n mult_ac) + thus ?thesis + using cte unfolding nullPointer_def + by (fastforce simp: descendants_of'_def dest: subtree_next_0) +qed + +lemma reply_descendants_of_mdbNext: + "\ (s, s') \ state_relation; valid_reply_caps s; valid_reply_masters s; + valid_objs s; valid_mdb s; valid_mdb' s'; pspace_aligned' s'; + pspace_distinct' s'; st_tcb_at (Not \ halted) t s; + ctes_of s' (t + 2*2^cte_level_bits) = Some cte \ \ + (descendants_of (t, tcb_cnode_index 2) (cdt s) = {}) = + (mdbNext (cteMDBNode cte) = nullPointer)" + apply (case_tac "descendants_of (t, tcb_cnode_index 2) (cdt s) = {}") + apply (simp add: reply_no_descendants_mdbNext_null) + apply (simp add: reply_descendants_mdbNext_nonnull) + done + +lemma reply_mdbNext_is_descendantD: + assumes sr: "(s, s') \ state_relation" + and invs: "invs' s'" + and tcb: "tcb_at t s" + and cte: "ctes_of s' (t + 2*2^cte_level_bits) = Some cte" + and desc: "descendants_of (t, tcb_cnode_index 2) (cdt s) = {sl}" + shows "mdbNext (cteMDBNode cte) = cte_map sl" +proof - + from tcb have "cte_at (t, tcb_cnode_index 2) s" + by (simp add: tcb_at_cte_at dom_tcb_cap_cases) + hence "descendants_of' (t + 2*2^cte_level_bits) (ctes_of s') = {cte_map sl}" + using sr desc + by (fastforce simp: state_relation_def cdt_relation_def cte_map_def tcb_cnode_index_def shiftl_t2n mult_ac) + thus ?thesis + using cte invs + apply (clarsimp simp: descendants_of'_def) + apply (frule singleton_eqD, drule CollectD) + apply (erule subtree.cases) + apply (clarsimp simp: mdb_next_rel_def mdb_next_def) + apply (subgoal_tac "c' = cte_map sl") + apply (fastforce dest: invs_no_loops no_loops_direct_simp) + apply fastforce + done +qed +end + +locale delete_one_conc = delete_one_conc_pre + + assumes delete_one_invs: + "\p. \invs'\ cteDeleteOne p \\rv. invs'\" + +locale delete_one = delete_one_conc + delete_one_abs + + assumes delete_one_corres: + "corres dc (einvs and cte_wp_at can_fast_finalise ptr) + (invs' and cte_at' (cte_map ptr)) + (cap_delete_one ptr) (cteDeleteOne (cte_map ptr))" + +lemma (in delete_one) cancelIPC_ReplyCap_corres: + "corres dc (einvs and st_tcb_at awaiting_reply t) + (invs' and st_tcb_at' awaiting_reply' t) + (reply_cancel_ipc t) + (do y \ threadSet (\tcb. tcb \ tcbFault := None \) t; + slot \ getThreadReplySlot t; + callerCap \ liftM (mdbNext \ cteMDBNode) (getCTE slot); + when (callerCap \ nullPointer) (do + y \ stateAssert (capHasProperty callerCap (\cap. isReplyCap cap + \ \ capReplyMaster cap)) + []; + cteDeleteOne callerCap + od) + od)" + proof - + interpret Arch . 
(*FIXME: arch_split*) + show ?thesis + apply (simp add: reply_cancel_ipc_def getThreadReplySlot_def + locateSlot_conv liftM_def tcbReplySlot_def + del: split_paired_Ex) + apply (rule_tac Q="\_. invs and valid_list and valid_sched and st_tcb_at awaiting_reply t" + and Q'="\_. invs' and st_tcb_at' awaiting_reply' t" + in corres_underlying_split) + apply (rule corres_guard_imp) + apply (rule threadset_corresT) + apply (simp add: tcb_relation_def fault_rel_optionation_def) + apply (simp add: tcb_cap_cases_def) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: exst_same_def) + apply (fastforce simp: st_tcb_at_tcb_at) + apply clarsimp + defer + apply (wp thread_set_invs_trivial thread_set_no_change_tcb_state + threadSet_invs_trivial threadSet_pred_tcb_no_state thread_set_not_state_valid_sched + | fastforce simp: tcb_cap_cases_def inQ_def + | wp (once) sch_act_simple_lift)+ + apply (rule corres_underlying_split) + apply (rule corres_guard_imp) + apply (rule get_cap_corres [where cslot_ptr="(t, tcb_cnode_index 2)", + simplified cte_map_tcb_2 cte_index_repair_sym]) + apply (clarsimp dest!: st_tcb_at_tcb_at + tcb_at_cte_at [where ref="tcb_cnode_index 2"]) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + defer + apply (rule hoare_vcg_conj_lift [OF get_cap_inv get_cap_cte_wp_at, simplified]) + apply (rule hoare_vcg_conj_lift [OF getCTE_inv getCTE_cte_wp_at, simplified]) + apply (rename_tac cte) + apply (rule corres_symb_exec_l [OF _ _ gets_sp]) + apply (rule_tac F="\r. cap = cap.ReplyCap t True r \ + cteCap cte = capability.ReplyCap t True (AllowGrant \ r)" in corres_req) + apply (fastforce simp: cte_wp_at_caps_of_state is_cap_simps + dest!: st_tcb_at_reply_cap_valid) + apply (rule_tac F="(descs = {}) = (mdbNext (cteMDBNode cte) = nullPointer)" + in corres_req) + apply (fastforce simp: st_tcb_at_tcb_at cte_wp_at_ctes_of st_tcb_def2 cte_index_repair + dest: reply_descendants_of_mdbNext) + apply (elim exE) + apply (case_tac "descs = {}", simp add: when_def) + apply (rule_tac F="\sl. descs = {sl}" in corres_req) + apply (fastforce intro: st_tcb_at_tcb_at dest: reply_master_one_descendant) + apply (erule exE, frule singleton_eqD) + apply (rule_tac F="mdbNext (cteMDBNode cte) = cte_map sl" in corres_req) + apply (clarsimp dest!: st_tcb_at_tcb_at) + apply (fastforce simp: cte_wp_at_ctes_of cte_level_bits_def + elim!: reply_mdbNext_is_descendantD) + apply (simp add: when_def getSlotCap_def capHasProperty_def + del: split_paired_Ex) + apply (rule corres_guard_imp) + apply (rule_tac P'="\s. \r'. 
cte_wp_at ((=) (cap.ReplyCap t False r')) sl s" + in corres_stateAssert_implied [OF delete_one_corres]) + apply (fastforce dest: pspace_relation_cte_wp_at + state_relation_pspace_relation + simp: cte_wp_at_ctes_of isCap_simps) + apply (fastforce simp: invs_def valid_state_def valid_mdb_def reply_mdb_def + reply_masters_mdb_def cte_wp_at_caps_of_state + can_fast_finalise_def) + apply (fastforce simp: valid_mdb'_def valid_mdb_ctes_def + cte_wp_at_ctes_of nullPointer_def + elim: valid_dlistEn dest: invs_mdb') + apply (simp add: exs_valid_def gets_def get_def return_def bind_def + del: split_paired_Ex split_paired_All) + apply (wp) + done +qed + +lemma (in delete_one) cancel_ipc_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + (cancel_ipc t) (cancelIPC t)" + apply (simp add: cancel_ipc_def cancelIPC_def Let_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac P="einvs and st_tcb_at ((=) state) t" and + P'="invs' and st_tcb_at' ((=) statea) t" in corres_inst) + apply (case_tac state, simp_all add: isTS_defs list_case_If)[1] + apply (rule corres_guard_imp) + apply (rule blocked_cancelIPC_corres) + apply fastforce + apply fastforce + apply simp + apply simp + apply (clarsimp simp add: isTS_defs list_case_If) + apply (rule corres_guard_imp) + apply (rule blocked_cancelIPC_corres) + apply fastforce + apply fastforce + apply simp + apply simp + apply (rule corres_guard_imp) + apply (rule cancelIPC_ReplyCap_corres) + apply (clarsimp elim!: st_tcb_weakenE) + apply (clarsimp elim!: pred_tcb'_weakenE) + apply (rule corres_guard_imp [OF cancelSignal_corres], simp+) + apply (wp gts_sp[where P="\",simplified])+ + apply (rule hoare_strengthen_post) + apply (rule gts_sp'[where P="\"]) + apply (clarsimp elim!: pred_tcb'_weakenE) + apply fastforce + apply simp + done + +lemma setNotification_utr[wp]: + "\untyped_ranges_zero'\ setNotification ntfn nobj \\rv. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + done + +crunch gsUntypedZeroRanges[wp]: setEndpoint "\s. P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma setEndpoint_utr[wp]: + "\untyped_ranges_zero'\ setEndpoint p ep \\rv. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + done + +declare cart_singleton_empty [simp] +declare cart_singleton_empty2[simp] + +crunch ksQ[wp]: setNotification "\s. P (ksReadyQueues s p)" + (wp: setObject_queues_unchanged_tcb updateObject_default_inv) + +lemma sch_act_simple_not_t[simp]: "sch_act_simple s \ sch_act_not t s" + by (clarsimp simp: sch_act_simple_def) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma cancelSignal_invs': + "\invs' and st_tcb_at' (\st. st = BlockedOnNotification ntfn) t and sch_act_not t\ + cancelSignal t ntfn \\rv. invs'\" + proof - + have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ + \ \x. t \ set (ksReadyQueues s x)" + apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def + valid_queues_no_bitmap_def) + apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ + done + have NTFNSN: "\ntfn ntfn'. + \\s. sch_act_not (ksCurThread s) s \ setNotification ntfn ntfn' + \\_ s. 
sch_act_not (ksCurThread s) s\" + apply (rule hoare_weaken_pre) + apply (wps setNotification_ksCurThread) + apply (wp, simp) + done + show ?thesis + apply (simp add: cancelSignal_def invs'_def valid_state'_def Let_def) + apply (wp valid_irq_node_lift sts_sch_act' irqs_masked_lift + hoare_vcg_all_lift [OF setNotification_ksQ] sts_valid_queues + setThreadState_ct_not_inQ NTFNSN + hoare_vcg_all_lift setNotification_ksQ + | simp add: valid_tcb_state'_def list_case_If split del: if_split)+ + prefer 2 + apply assumption + apply (rule hoare_strengthen_post) + apply (rule get_ntfn_sp') + apply (rename_tac rv s) + apply (clarsimp simp: pred_tcb_at') + apply (frule NIQ) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (rule conjI) + apply (clarsimp simp: valid_ntfn'_def) + apply (case_tac "ntfnObj rv", simp_all add: isWaitingNtfn_def) + apply (frule ko_at_valid_objs') + apply (simp add: valid_pspace_valid_objs') + apply (clarsimp simp: projectKO_opt_ntfn split: kernel_object.splits) + apply (simp add: valid_obj'_def valid_ntfn'_def) + apply (frule st_tcb_at_state_refs_ofD') + apply (frule ko_at_state_refs_ofD') + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp simp: ntfn_bound_refs'_def split: if_split_asm) + apply (clarsimp split: if_split_asm) + subgoal + by (safe; simp add: ntfn_bound_refs'_def tcb_bound_refs'_def + obj_at'_def tcb_ntfn_is_bound'_def + split: option.splits) + subgoal + by (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def + tcb_bound_refs'_def) + subgoal + by (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def + tcb_bound_refs'_def ntfn_q_refs_of'_def remove1_empty + split: ntfn.splits) + apply (rule conjI, clarsimp elim!: if_live_state_refsE) + apply (fastforce simp: sym_refs_def dest!: idle'_no_refs) + apply (case_tac "ntfnObj rv", simp_all) + apply (frule obj_at_valid_objs', clarsimp) + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + apply (rule conjI, clarsimp split: option.splits) + apply (frule st_tcb_at_state_refs_ofD') + apply (frule ko_at_state_refs_ofD') + apply (rule conjI) + apply (erule delta_sym_refs) + apply (fastforce simp: ntfn_bound_refs'_def split: if_split_asm) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def + set_eq_subset) + apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def + set_eq_subset) + apply (rule conjI, clarsimp elim!: if_live_state_refsE) + apply (rule conjI) + apply (case_tac "ntfnBoundTCB rv") + apply (clarsimp elim!: if_live_state_refsE)+ + apply (clarsimp dest!: idle'_no_refs) + done + qed + +lemmas setEndpoint_valid_arch[wp] + = valid_arch_state_lift' [OF setEndpoint_typ_at' set_ep_arch'] + +lemma ep_redux_simps3: + "ep_q_refs_of' (case xs of [] \ IdleEP | y # ys \ RecvEP (y # ys)) + = (set xs \ {EPRecv})" + "ep_q_refs_of' (case xs of [] \ IdleEP | y # ys \ SendEP (y # ys)) + = (set xs \ {EPSend})" + by (fastforce split: list.splits simp: valid_ep_def valid_ntfn_def)+ + +declare setEndpoint_ksMachine [wp] +declare setEndpoint_valid_irq_states' [wp] + +lemma setEndpoint_vms[wp]: + "\valid_machine_state'\ setEndpoint epptr ep' \\_. valid_machine_state'\" + by (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + +crunch ksQ[wp]: setEndpoint "\s. P (ksReadyQueues s p)" + (wp: setObject_queues_unchanged_tcb updateObject_default_inv) + +crunch ksCurDomain[wp]: setEndpoint "\s. 
P (ksCurDomain s)" + (wp: setObject_ep_cur_domain) + +lemma setEndpoint_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ setEndpoint ptr ep \\_ s. P (ksDomSchedule s)\" + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ setEndpoint ptr ep \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift hoare_vcg_imp_lift setObject_ep_ct + | rule obj_at_setObject2 + | clarsimp simp: updateObject_default_def in_monad setEndpoint_def)+ + done + +lemma setEndpoint_ct_not_inQ[wp]: + "\ct_not_inQ\ setEndpoint eeptr ep' \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF setEndpoint_nosch]) + apply (simp add: setEndpoint_def) + apply (rule hoare_weaken_pre) + apply (wps setObject_ep_ct) + apply (wp obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad)+ + done + +lemma setEndpoint_ksDomScheduleIdx[wp]: + "\\s. P (ksDomScheduleIdx s)\ setEndpoint ptr ep \\_ s. P (ksDomScheduleIdx s)\" + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done +end + +lemma (in delete_one_conc) cancelIPC_invs[wp]: + shows "\tcb_at' t and invs'\ cancelIPC t \\rv. invs'\" +proof - + have P: "\xs v f. (case xs of [] \ return v | y # ys \ return (f (y # ys))) + = return (case xs of [] \ v | y # ys \ f xs)" + by (clarsimp split: list.split) + have NIQ: "\s. \ Invariants_H.valid_queues s; st_tcb_at' (Not \ runnable') t s \ + \ \x. t \ set (ksReadyQueues s x)" + apply (clarsimp simp add: pred_tcb_at'_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def) + apply (drule spec | drule(1) bspec | clarsimp simp: obj_at'_def inQ_def)+ + done + have EPSCHN: "\eeptr ep'. \\s. sch_act_not (ksCurThread s) s\ + setEndpoint eeptr ep' + \\_ s. sch_act_not (ksCurThread s) s\" + apply (rule hoare_weaken_pre) + apply (wps setEndpoint_ct') + apply (wp, simp) + done + have Q: + "\epptr. \st_tcb_at' (\st. \a. (st = BlockedOnReceive epptr a) + \ (\a b c d. st = BlockedOnSend epptr a b c d)) t + and invs'\ + do ep \ getEndpoint epptr; + y \ assert (\ (case ep of IdleEP \ True | _ \ False)); + ep' \ case remove1 t (epQueue ep) + of [] \ return Structures_H.endpoint.IdleEP + | x # xs \ return (epQueue_update (%_. x # xs) ep); + y \ setEndpoint epptr ep'; + setThreadState Inactive t + od \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (subst P) + apply (wp valid_irq_node_lift valid_global_refs_lift' + irqs_masked_lift sts_sch_act' + hoare_vcg_all_lift [OF setEndpoint_ksQ] + sts_valid_queues setThreadState_ct_not_inQ EPSCHN + hoare_vcg_all_lift setNotification_ksQ getEndpoint_wp + | simp add: valid_tcb_state'_def split del: if_split + | wpc)+ + apply (clarsimp simp: pred_tcb_at' fun_upd_def[symmetric] conj_comms + split del: if_split cong: if_cong) + apply (rule conjI, clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + apply (frule obj_at_valid_objs', clarsimp) + apply (clarsimp simp: valid_obj'_def) + apply (rule conjI) + apply (clarsimp simp: obj_at'_def valid_ep'_def + dest!: pred_tcb_at') + apply (frule NIQ) + apply (erule pred_tcb'_weakenE, fastforce) + apply (clarsimp, rule conjI) + apply (auto simp: pred_tcb_at'_def obj_at'_def)[1] + apply (rule conjI) + apply (clarsimp split: Structures_H.endpoint.split_asm list.split + simp: valid_ep'_def) + apply (rename_tac list x xs) + apply (frule distinct_remove1[where x=t]) + apply (cut_tac xs=list in set_remove1_subset[where x=t]) + apply auto[1] + apply (rename_tac list x xs) + apply (frule distinct_remove1[where x=t]) + apply (cut_tac xs=list in set_remove1_subset[where x=t]) + apply auto[1] + apply (thin_tac "sym_refs (state_hyp_refs_of' s)" for s) + apply (frule(1) sym_refs_ko_atD') + apply (rule conjI) + apply (clarsimp elim!: if_live_state_refsE split: Structures_H.endpoint.split_asm) + apply (drule st_tcb_at_state_refs_ofD') + apply (clarsimp simp: ep_redux_simps3 valid_ep'_def + split: Structures_H.endpoint.split_asm + cong: list.case_cong) + apply (frule_tac x=t in distinct_remove1) + apply (frule_tac x=t in set_remove1_eq) + by (auto elim!: delta_sym_refs + simp: symreftype_inverse' tcb_st_refs_of'_def tcb_bound_refs'_def + split: thread_state.splits if_split_asm) + have R: + "\invs' and tcb_at' t\ + do y \ threadSet (\tcb. tcb \ tcbFault := None \) t; + slot \ getThreadReplySlot t; + callerCap \ liftM (mdbNext \ cteMDBNode) (getCTE slot); + when (callerCap \ nullPointer) (do + y \ stateAssert (capHasProperty callerCap (\cap. isReplyCap cap + \ \ capReplyMaster cap)) + []; + cteDeleteOne callerCap + od) + od + \\rv. invs'\" + unfolding getThreadReplySlot_def + by (wp valid_irq_node_lift delete_one_invs hoare_drop_imps + threadSet_invs_trivial irqs_masked_lift + | simp add: o_def if_apply_def2 + | fastforce simp: inQ_def)+ + show ?thesis + apply (simp add: cancelIPC_def crunch_simps + cong: if_cong list.case_cong) + apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (case_tac state, + simp_all add: isTS_defs) + apply (safe intro!: hoare_weaken_pre[OF Q] + hoare_weaken_pre[OF R] + hoare_weaken_pre[OF return_wp] + hoare_weaken_pre[OF cancelSignal_invs'] + elim!: pred_tcb'_weakenE) + apply (auto simp: pred_tcb_at'_def obj_at'_def + dest: invs_sch_act_wf') + done +qed + +lemma (in delete_one_conc_pre) cancelIPC_sch_act_simple[wp]: + "\sch_act_simple\ + cancelIPC t + \\rv. sch_act_simple\" + apply (simp add: cancelIPC_def cancelSignal_def Let_def + cong: if_cong Structures_H.thread_state.case_cong) + apply (wp hoare_drop_imps delete_one_sch_act_simple + | simp add: getThreadReplySlot_def | wpcw + | rule sch_act_simple_lift + | (rule_tac Q="\rv. sch_act_simple" in hoare_post_imp, simp))+ + done + +lemma cancelSignal_st_tcb_at: + assumes x[simp]: "P Inactive" shows + "\st_tcb_at' P t\ + cancelSignal t' ntfn + \\rv. 
st_tcb_at' P t\" + apply (simp add: cancelSignal_def Let_def list_case_If) + apply (wp sts_st_tcb_at'_cases hoare_vcg_const_imp_lift + hoare_drop_imp[where R="%rv s. P' rv" for P']) + apply clarsimp+ + done + +lemma (in delete_one_conc_pre) cancelIPC_st_tcb_at: + assumes x[simp]: "\st. simple' st \ P st" shows + "\st_tcb_at' P t\ + cancelIPC t' + \\rv. st_tcb_at' P t\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def + cong: if_cong Structures_H.thread_state.case_cong) + apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (case_tac x, simp_all add: isTS_defs list_case_If) + apply (wp sts_st_tcb_at'_cases delete_one_st_tcb_at + threadSet_pred_tcb_no_state + cancelSignal_st_tcb_at hoare_drop_imps + | clarsimp simp: o_def if_fun_split)+ + done + +lemma weak_sch_act_wf_lift_linear: + "\ \t. \\s. sa s \ SwitchToThread t\ f \\rv s. sa s \ SwitchToThread t\; + \t. \st_tcb_at' runnable' t\ f \\rv. st_tcb_at' runnable' t\; + \t. \tcb_in_cur_domain' t\ f \\rv. tcb_in_cur_domain' t\ \ + \ \\s. weak_sch_act_wf (sa s) s\ f \\rv s. weak_sch_act_wf (sa s) s\" + apply (simp only: weak_sch_act_wf_def imp_conv_disj) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_conj_lift) + apply simp_all + done + +lemma sts_sch_act_not[wp]: + "\sch_act_not t\ setThreadState st t' \\rv. sch_act_not t\" + apply (simp add: setThreadState_def rescheduleRequired_def) + apply (wp hoare_drop_imps | simp | wpcw)+ + done + +crunches cancelSignal, setBoundNotification + for sch_act_not[wp]: "sch_act_not t" + (wp: crunch_wps) + +lemma cancelSignal_tcb_at_runnable': + "t \ t' \ + \st_tcb_at' runnable' t'\ cancelSignal t ntfnptr \\_. st_tcb_at' runnable' t'\" + unfolding cancelSignal_def + by (wpsimp wp: sts_pred_tcb_neq' hoare_drop_imp) + +lemma cancelAllIPC_tcb_at_runnable': + "\st_tcb_at' runnable' t\ cancelAllIPC epptr \\_. st_tcb_at' runnable' t\" + unfolding cancelAllIPC_def + by (wpsimp wp: mapM_x_wp' sts_st_tcb' hoare_drop_imp) + +lemma cancelAllSignals_tcb_at_runnable': + "\st_tcb_at' runnable' t\ cancelAllSignals ntfnptr \\_. st_tcb_at' runnable' t\" + unfolding cancelAllSignals_def + by (wpsimp wp: mapM_x_wp' sts_st_tcb' hoare_drop_imp) + +crunches unbindNotification, bindNotification, unbindMaybeNotification + for st_tcb_at'[wp]: "st_tcb_at' P p" + (wp: threadSet_pred_tcb_no_state ignore: threadSet) + +lemma (in delete_one_conc_pre) finaliseCap_tcb_at_runnable': + "\st_tcb_at' runnable' t\ finaliseCap cap final True \\_. st_tcb_at' runnable' t\" + apply (clarsimp simp add: finaliseCap_def Let_def) + apply (rule conjI | clarsimp | wp cancelAllIPC_tcb_at_runnable' getObject_ntfn_inv + cancelAllSignals_tcb_at_runnable' + | wpc)+ + done + +crunch pred_tcb_at'[wp]: isFinalCapability "pred_tcb_at' proj st t" + (simp: crunch_simps) + +lemma (in delete_one_conc_pre) cteDeleteOne_tcb_at_runnable': + "\st_tcb_at' runnable' t\ cteDeleteOne callerCap \\_. st_tcb_at' runnable' t\" + apply (simp add: cteDeleteOne_def unless_def) + apply (wp finaliseCap_tcb_at_runnable' hoare_drop_imps | clarsimp)+ + done + +crunches getThreadReplySlot, getEndpoint + for pred_tcb_at'[wp]: "pred_tcb_at' proj st t" + +lemma (in delete_one_conc_pre) cancelIPC_tcb_at_runnable': + "\st_tcb_at' runnable' t'\ cancelIPC t \\_. st_tcb_at' runnable' t'\" + (is "\?PRE\ _ \_\") + apply (clarsimp simp: cancelIPC_def Let_def) + apply (case_tac "t'=t") + apply (rule_tac B="\st. 
st_tcb_at' runnable' t and K (runnable' st)" + in hoare_seq_ext) + apply (case_tac x; simp) + apply (wp sts_pred_tcb_neq' | simp | wpc)+ + apply (clarsimp) + apply (rule_tac Q="\rv. ?PRE" in hoare_post_imp, fastforce) + apply (wp cteDeleteOne_tcb_at_runnable' + threadSet_pred_tcb_no_state + cancelSignal_tcb_at_runnable' + sts_pred_tcb_neq' hoare_drop_imps + | wpc | simp add: o_def if_fun_split)+ + done + +crunch ksCurDomain[wp]: cancelSignal "\s. P (ksCurDomain s)" + (wp: crunch_wps) + +lemma (in delete_one_conc_pre) cancelIPC_ksCurDomain[wp]: + "\\s. P (ksCurDomain s)\ cancelIPC t \\_ s. P (ksCurDomain s)\" + unfolding cancelIPC_def Let_def + by (wpsimp wp: hoare_vcg_conj_lift delete_one_ksCurDomain hoare_drop_imps + simp: getThreadReplySlot_def o_def if_fun_split) + +(* FIXME move *) +lemma setBoundNotification_not_ntfn: + "(\tcb ntfn. P (tcb\tcbBoundNotification := ntfn\) \ P tcb) + \ \obj_at' P t'\ setBoundNotification ntfn t \\_. obj_at' P t'\" + apply (simp add: setBoundNotification_def) + apply (wp hoare_vcg_conj_lift + | wpc + | rule hoare_drop_imps + | simp)+ + done + +lemma setBoundNotification_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ setBoundNotification st t \\_. tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply wps + apply (wp setBoundNotification_not_ntfn | simp)+ + done + +lemma cancelSignal_tcb_obj_at': + "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) + \ \obj_at' P t'\ cancelSignal t word \\_. obj_at' P t'\" + apply (simp add: cancelSignal_def setNotification_def) + apply (wp setThreadState_not_st getNotification_wp | wpc | simp)+ + done + +lemma (in delete_one_conc_pre) cancelIPC_tcbDomain_obj_at': + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ cancelIPC t \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: cancelIPC_def Let_def) + apply (wp hoare_vcg_conj_lift + setThreadState_not_st delete_one_tcbDomain_obj_at' cancelSignal_tcb_obj_at' + | wpc + | rule hoare_drop_imps + | simp add: getThreadReplySlot_def o_def if_fun_split)+ + done + +lemma (in delete_one_conc_pre) cancelIPC_tcb_in_cur_domain': + "\tcb_in_cur_domain' t'\ cancelIPC t \\_. tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply wps + apply (wp cancelIPC_tcbDomain_obj_at' | simp)+ + done + +lemma (in delete_one_conc_pre) cancelIPC_sch_act_not: + "\sch_act_not t'\ cancelIPC t \\_. sch_act_not t'\" + apply (simp add: cancelIPC_def Let_def) + apply (wp hoare_vcg_conj_lift + delete_one_sch_act_not + | wpc + | simp add: getThreadReplySlot_def o_def if_apply_def2 + split del: if_split + | rule hoare_drop_imps)+ + done + +lemma (in delete_one_conc_pre) cancelIPC_weak_sch_act_wf: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + cancelIPC t + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (rule weak_sch_act_wf_lift_linear) + apply (wp cancelIPC_sch_act_not cancelIPC_tcb_in_cur_domain' cancelIPC_tcb_at_runnable')+ + done + +text \The suspend operation, significant as called from delete\ + +lemma rescheduleRequired_weak_sch_act_wf: + "\\\ rescheduleRequired \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: rescheduleRequired_def setSchedulerAction_def) + apply (wp hoare_post_taut | simp add: weak_sch_act_wf_def)+ + done + +lemma sts_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s + \ (ksSchedulerAction s = SwitchToThread t \ runnable' st)\ + setThreadState st t + \\_ s. 
weak_sch_act_wf (ksSchedulerAction s) s\" + including no_pre + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_weak_sch_act_wf) + apply (rule_tac Q="\_ s. weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp, simp) + apply (simp add: weak_sch_act_wf_def) + apply (wp hoare_vcg_all_lift) + apply (wps threadSet_nosch) + apply (wp hoare_vcg_const_imp_lift threadSet_pred_tcb_at_state threadSet_tcbDomain_triv | simp)+ + done + +lemma sbn_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ setBoundNotification ntfn t \\rv s. P (ksSchedulerAction s)\" + by (simp add: setBoundNotification_def, wp threadSet_nosch) + + +lemma sbn_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setBoundNotification ntfn t + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + by (wp weak_sch_act_wf_lift sbn_st_tcb') + + +lemma set_ep_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setEndpoint epptr ep + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (wp weak_sch_act_wf_lift) + done + +lemma setObject_ntfn_sa_unchanged[wp]: + "\\s. P (ksSchedulerAction s)\ + setObject ptr (ntfn::Structures_H.notification) + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setObject_def split_def) + apply (wp | simp add: updateObject_default_def)+ + done + +lemma setObject_oa_unchanged[wp]: + "\\s. obj_at' (\tcb::tcb. P tcb) t s\ + setObject ptr (ntfn::Structures_H.notification) + \\rv s. obj_at' P t s\" + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_type + updateObject_default_def + in_monad) + done + +lemma setNotification_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setNotification ntfnptr ntfn + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (wp hoare_vcg_all_lift hoare_convert_imp hoare_vcg_conj_lift + | simp add: setNotification_def weak_sch_act_wf_def st_tcb_at'_def tcb_in_cur_domain'_def)+ + apply (rule hoare_pre) + apply (wps setObject_ntfn_cur_domain) + apply (wp setObject_ntfn_obj_at'_tcb | simp add: o_def)+ + done + +lemmas ipccancel_weak_sch_act_wfs + = weak_sch_act_wf_lift[OF _ setCTE_pred_tcb_at'] + +lemma tcbSchedDequeue_corres': + "corres dc (is_etcb_at t and tcb_at t and pspace_aligned and pspace_distinct) + (valid_inQ_queues) + (tcb_sched_action (tcb_sched_dequeue) t) (tcbSchedDequeue t)" + apply (rule corres_cross_over_guard[where P'=P' and Q="tcb_at' t and P'" for P']) + apply (fastforce simp: tcb_at_cross dest: state_relation_pspace_relation) + apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) + apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and valid_inQ_queues and obj_at' (\obj. 
tcbQueued obj = rv) t"]) + defer + apply (wp threadGet_obj_at', simp, simp) + apply (wp, simp) + apply (case_tac queued) + defer + apply (simp add: unless_def when_def) + apply (rule corres_no_failI) + apply (wp) + apply (clarsimp simp: in_monad ethread_get_def get_etcb_def set_tcb_queue_def is_etcb_at_def state_relation_def gets_the_def gets_def get_def return_def bind_def assert_opt_def get_tcb_queue_def modify_def put_def) + apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") + prefer 2 + apply (force simp: tcb_sched_dequeue_def valid_inQ_queues_def + ready_queues_relation_def obj_at'_def inQ_def project_inject) + apply (simp add: ready_queues_relation_def) + apply (simp add: unless_def when_def) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="(=)"]) + apply (rule ethreadget_corres, simp add: etcb_relation_def) + apply (simp split del: if_split) + apply (rule corres_split_eqr) + apply (rule ethreadget_corres, simp add: etcb_relation_def) + apply (rule corres_split_eqr[OF getQueue_corres]) + apply (simp split del: if_split) + apply (subst bind_return_unit, rule corres_split[where r'=dc]) + apply (simp add: tcb_sched_dequeue_def) + apply (rule setQueue_corres) + apply (rule corres_split_noop_rhs) + apply (clarsimp, rule removeFromBitmap_corres_noop) + apply (simp add: dc_def[symmetric]) + apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] + apply (wp | simp)+ + done + +lemma setQueue_valid_inQ_queues: + "\valid_inQ_queues + and (\s. \t \ set ts. obj_at' (inQ d p) t s) + and K (distinct ts)\ + setQueue d p ts + \\_. valid_inQ_queues\" + apply (simp add: setQueue_def valid_inQ_queues_def) + apply wp + apply clarsimp + done + +lemma threadSet_valid_inQ_queues: + "\valid_inQ_queues and (\s. \d p. (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) + \ obj_at' (\tcb. (inQ d p tcb) \ \(inQ d p (f tcb))) t s + \ t \ set (ksReadyQueues s (d, p)))\ + threadSet f t + \\rv. valid_inQ_queues\" + apply (simp add: threadSet_def) + apply wp + apply (simp add: valid_inQ_queues_def pred_tcb_at'_def) + apply (wp setObject_queues_unchanged_tcb + hoare_Ball_helper + hoare_vcg_all_lift + setObject_tcb_strongest)[1] + apply (wp getObject_tcb_wp) + apply (clarsimp simp: valid_inQ_queues_def pred_tcb_at'_def) + apply (clarsimp simp: obj_at'_def) + apply (fastforce) + done + +(* reorder the threadSet before the setQueue, useful for lemmas that don't refer to bitmap *) +lemma setQueue_after_addToBitmap: + "(setQueue d p q >>= (\rv. (when P (addToBitmap d p)) >>= (\rv. threadSet f t))) = + (when P (addToBitmap d p) >>= (\rv. (threadSet f t) >>= (\rv. setQueue d p q)))" + apply (case_tac P, simp_all) + prefer 2 + apply (simp add: setQueue_after) + apply (simp add: setQueue_def when_def) + apply (subst oblivious_modify_swap) + apply (simp add: threadSet_def getObject_def setObject_def + loadObject_default_def bitmap_fun_defs + split_def projectKO_def2 alignCheck_assert + magnitudeCheck_assert updateObject_default_def) + apply (intro oblivious_bind, simp_all) + apply (clarsimp simp: bind_assoc) + done + +lemma tcbSchedEnqueue_valid_inQ_queues[wp]: + "\valid_inQ_queues\ tcbSchedEnqueue t \\_. valid_inQ_queues\" + apply (simp add: tcbSchedEnqueue_def setQueue_after_addToBitmap) + apply (rule hoare_pre) + apply (rule_tac B="\rv. valid_inQ_queues and obj_at' (\obj. 
tcbQueued obj = rv) t" + in hoare_seq_ext) + apply (rename_tac queued) + apply (case_tac queued, simp_all add: unless_def)[1] + apply (wp setQueue_valid_inQ_queues threadSet_valid_inQ_queues threadGet_wp + hoare_vcg_const_Ball_lift + | simp add: inQ_def bitmap_fun_defs + | fastforce simp: valid_inQ_queues_def inQ_def obj_at'_def)+ + done + + (* prevents wp from splitting on the when; stronger technique than hoare_when_weak_wp + FIXME: possible to replace with hoare_when_weak_wp? + *) +definition + "removeFromBitmap_conceal d p q t \ when (null [x\q . x \ t]) (removeFromBitmap d p)" + +lemma removeFromBitmap_conceal_valid_inQ_queues[wp]: + "\ valid_inQ_queues \ removeFromBitmap_conceal d p q t \ \_. valid_inQ_queues \" + unfolding valid_inQ_queues_def removeFromBitmap_conceal_def + by (wp|clarsimp simp: bitmap_fun_defs)+ + +lemma rescheduleRequired_valid_inQ_queues[wp]: + "\valid_inQ_queues\ rescheduleRequired \\_. valid_inQ_queues\" + apply (simp add: rescheduleRequired_def) + apply wpsimp + done + +lemma sts_valid_inQ_queues[wp]: + "\valid_inQ_queues\ setThreadState st t \\rv. valid_inQ_queues\" + apply (simp add: setThreadState_def) + apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + done + +lemma updateObject_ep_inv: + "\P\ updateObject (obj::endpoint) ko p q n \\rv. P\" + by simp (rule updateObject_default_inv) + +lemma sbn_valid_inQ_queues[wp]: + "\valid_inQ_queues\ setBoundNotification ntfn t \\rv. valid_inQ_queues\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_valid_inQ_queues [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + done + +lemma setEndpoint_valid_inQ_queues[wp]: + "\valid_inQ_queues\ setEndpoint ptr ep \\rv. valid_inQ_queues\" + apply (unfold setEndpoint_def) + apply (rule setObject_ep_pre) + apply (simp add: valid_inQ_queues_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift setObject_queues_unchanged[OF updateObject_ep_inv]) + apply simp + done + +lemma set_ntfn_valid_inQ_queues[wp]: + "\valid_inQ_queues\ setNotification ptr ntfn \\rv. valid_inQ_queues\" + apply (unfold setNotification_def) + apply (rule setObject_ntfn_pre) + apply (simp add: valid_inQ_queues_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift) + apply (clarsimp simp: updateObject_default_def in_monad) + apply (wp updateObject_default_inv | simp)+ + done + +crunch valid_inQ_queues[wp]: cancelSignal valid_inQ_queues + (simp: updateObject_tcb_inv crunch_simps wp: crunch_wps) + +lemma (in delete_one_conc_pre) cancelIPC_valid_inQ_queues[wp]: + "\valid_inQ_queues\ cancelIPC t \\_. valid_inQ_queues\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) + apply (wp hoare_drop_imps delete_one_inQ_queues threadSet_valid_inQ_queues | wpc | simp add:if_apply_def2 Fun.comp_def)+ + apply (clarsimp simp: valid_inQ_queues_def inQ_def)+ + done + +lemma valid_queues_inQ_queues: + "Invariants_H.valid_queues s \ valid_inQ_queues s" + by (force simp: Invariants_H.valid_queues_def valid_inQ_queues_def obj_at'_def + valid_queues_no_bitmap_def) + +lemma asUser_tcbQueued_inv[wp]: + "\obj_at' (\tcb. P (tcbQueued tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbQueued tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ + done + +lemma asUser_valid_inQ_queues[wp]: + "\valid_inQ_queues\ asUser t f \\rv. 
valid_inQ_queues\" + unfolding valid_inQ_queues_def Ball_def + apply (wpsimp wp: hoare_vcg_all_lift) + defer + apply (wp asUser_ksQ) + apply assumption + apply (simp add: inQ_def[abs_def] obj_at'_conj) + apply (rule hoare_convert_imp) + apply (wp asUser_ksQ) + apply wp + done + +context begin +interpretation Arch . + +crunches cancel_ipc + for pspace_aligned[wp]: "pspace_aligned :: det_state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_state \ _" + (simp: crunch_simps wp: crunch_wps) + +end + +lemma (in delete_one) suspend_corres: + "corres dc (einvs and tcb_at t) invs' + (IpcCancel_A.suspend t) (ThreadDecls_H.suspend t)" + apply (rule corres_cross_over_guard[where P'=P' and Q="tcb_at' t and P'" for P']) + apply (fastforce dest!: tcb_at_cross state_relation_pspace_relation) + apply (simp add: IpcCancel_A.suspend_def Thread_H.suspend_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF cancel_ipc_corres]) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule corres_split_nor) + apply (rule corres_if) + apply (case_tac state; simp) + apply (simp add: update_restart_pc_def updateRestartPC_def) + apply (rule asUser_corres') + apply (simp add: AARCH64.nextInstructionRegister_def AARCH64.faultRegister_def + AARCH64_H.nextInstructionRegister_def AARCH64_H.faultRegister_def) + apply (simp add: AARCH64_H.Register_def) + apply (subst unit_dc_is_eq) + apply (rule corres_underlying_trivial) + apply (wpsimp simp: AARCH64.setRegister_def AARCH64.getRegister_def) + apply (rule corres_return_trivial) + apply (rule corres_split_nor[OF setThreadState_corres]) + apply wpsimp + apply (rule tcbSchedDequeue_corres') + apply wp + apply wpsimp + apply (wpsimp simp: update_restart_pc_def updateRestartPC_def)+ + apply (rule hoare_post_imp[where Q = "\rv s. tcb_at t s \ is_etcb_at t s \ pspace_aligned s \ pspace_distinct s"]) + apply simp + apply (wp | simp)+ + apply (fastforce simp: valid_sched_def tcb_at_is_etcb_at) + apply (clarsimp simp add: invs'_def valid_state'_def valid_queues_inQ_queues) + done + +context begin interpretation Arch . + +lemma archThreadGet_corres: + "(\a a'. arch_tcb_relation a a' \ f a = f' a') \ + corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_get f t) (archThreadGet f' t)" + unfolding arch_thread_get_def archThreadGet_def + apply (corresKsimp corres: getObject_TCB_corres) + apply (clarsimp simp: tcb_relation_def) + done + +lemma tcb_vcpu_relation: + "arch_tcb_relation a a' \ tcb_vcpu a = atcbVCPUPtr a'" + unfolding arch_tcb_relation_def by auto + +lemma archThreadGet_VCPU_corres[corres]: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_get tcb_vcpu t) (archThreadGet atcbVCPUPtr t)" + by (rule archThreadGet_corres) (erule tcb_vcpu_relation) + +lemma when_fail_assert: + "when P fail = assert (\P)" + by (simp add: when_def assert_def) + +lemma opt_case_when: + "(case x of None \ return () | Some (c, _) \ when (c = v) f) = + when (\a. 
x = Some (v, a)) f" + by (cases x) (auto simp add: split_def) + +lemma corres_gets_current_vcpu[corres]: + "corres (=) \ \ (gets (arm_current_vcpu \ arch_state)) + (gets (armHSCurVCPU \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma vcpuInvalidateActive_corres[corres]: + "corres dc \ no_0_obj' vcpu_invalidate_active vcpuInvalidateActive" + unfolding vcpuInvalidateActive_def vcpu_invalidate_active_def + apply (corresKsimp corres: vcpuDisable_corres + corresK: corresK_modifyT + simp: modifyArchState_def) + apply (clarsimp simp: state_relation_def arch_state_relation_def) + done + +lemma tcb_ko_at': + "tcb_at' t s \ \ta::tcb. ko_at' ta t s" + by (clarsimp simp: obj_at'_def) + +lemma archThreadSet_corres: + assumes "\a a'. arch_tcb_relation a a' \ arch_tcb_relation (f a) (f' a')" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set f t) (archThreadSet f' t)" +proof - + from assms + have tcb_rel: + "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation (tcb\tcb_arch := f (tcb_arch tcb)\) + (tcbArch_update (\_. f' (tcbArch tcb')) tcb')" + by (simp add: tcb_relation_def) + show ?thesis + unfolding arch_thread_set_def archThreadSet_def + apply (corres' \rotate_tac, erule tcb_rel | rule ball_tcb_cte_casesI; simp\ + corres: getObject_TCB_corres setObject_update_TCB_corres' + wp: getObject_tcb_wp + simp: exst_same_def tcb_cap_cases_def tcb_ko_at') + done +qed + +lemma archThreadSet_VCPU_None_corres[corres]: + "t = t' \ corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_thread_set (tcb_vcpu_update Map.empty) t) (archThreadSet (atcbVCPUPtr_update Map.empty) t')" + apply simp + apply (rule archThreadSet_corres) + apply (simp add: arch_tcb_relation_def) + done + +lemmas corresK_as_user' = + asUser_corres'[atomized, THEN corresK_lift_rule, THEN mp] + +crunch typ_at'[wp]: vcpuInvalidateActive "\s. P (typ_at' T p s)" + +lemma getVCPU_wp: + "\\s. \ko. ko_at' (ko::vcpu) p s \ Q ko s\ getObject p \Q\" + by (clarsimp simp: getObject_def split_def loadObject_default_def + in_magnitude_check pageBits_def vcpuBits_def + in_monad valid_def obj_at'_def objBits_simps) + +lemma imp_drop_strg: + "Q \ P \ Q" + by simp + +lemma dissociateVCPUTCB_corres[corres]: + "\ v' = v; t' = t \ \ + corres dc (obj_at (\ko. \tcb. ko = TCB tcb \ tcb_vcpu (tcb_arch tcb) = Some v) t and + obj_at (\ko. \vcpu. ko = ArchObj (VCPU vcpu) \ vcpu_tcb vcpu = Some t) v and + pspace_aligned and pspace_distinct) + (no_0_obj') + (dissociate_vcpu_tcb v t) (dissociateVCPUTCB v' t')" + unfolding dissociate_vcpu_tcb_def dissociateVCPUTCB_def sanitiseRegister_def sanitise_register_def + apply (clarsimp simp: bind_assoc when_fail_assert opt_case_when) + apply (corres corres: getObject_vcpu_corres setObject_VCPU_corres asUser_corres' + simp: vcpu_relation_def archThreadSet_def tcb_ko_at' tcb_at_typ_at') + apply (wpsimp simp: tcb_at_typ_at' archThreadGet_def + wp: get_vcpu_wp getVCPU_wp arch_thread_get_wp getObject_tcb_wp)+ + apply (clarsimp simp: obj_at_def is_tcb in_omonad) + apply normalise_obj_at' + apply (rule context_conjI) + apply (rule vcpu_at_cross; assumption?) 
+ apply (clarsimp simp: obj_at_def) + apply (clarsimp simp: obj_at_def) + apply (rename_tac tcb vcpu) + apply (prop_tac "ko_at (TCB tcb) t s", clarsimp simp: obj_at_def) + apply (drule (3) ko_tcb_cross) + apply (prop_tac "ako_at (VCPU vcpu) v s", clarsimp simp: obj_at_def) + apply (drule (3) ko_vcpu_cross) + apply normalise_obj_at' + apply (clarsimp simp: tcb_relation_def arch_tcb_relation_def vcpu_relation_def) + done + +lemma sym_refs_tcb_vcpu: + "\ ko_at (TCB tcb) t s; tcb_vcpu (tcb_arch tcb) = Some v; sym_refs (state_hyp_refs_of s) \ \ + \vcpu. ko_at (ArchObj (VCPU vcpu)) v s \ vcpu_tcb vcpu = Some t" + apply (drule (1) hyp_sym_refs_obj_atD) + apply (clarsimp simp: obj_at_def hyp_refs_of_def) + apply (rename_tac ko) + apply (case_tac ko; simp add: tcb_vcpu_refs_def split: option.splits) + apply (rename_tac koa) + apply (case_tac koa; simp add: vcpu_tcb_refs_def split: option.splits) + done + +lemma fpuThreadDelete_corres[corres]: + "t' = t \ corres dc \ \ (fpu_thread_delete t) (fpuThreadDelete t')" + by (corres simp: fpu_thread_delete_def fpuThreadDelete_def) + +crunches fpu_thread_delete + for aligned[wp]: pspace_aligned + and distinct[wp]: pspace_distinct + and obj_at[wp]: "\s. P (obj_at Q p s)" + +crunches fpuThreadDelete + for obj_at'[wp]: "\s. P (obj_at' Q p s)" + and no_0_obj'[wp]: no_0_obj' + +lemma prepareThreadDelete_corres[corres]: + "t' = t \ + corres dc (invs and tcb_at t) no_0_obj' + (prepare_thread_delete t) (prepareThreadDelete t')" + apply (simp add: prepare_thread_delete_def prepareThreadDelete_def) + apply (corres corres: archThreadGet_corres + wp: arch_thread_get_wp getObject_tcb_wp hoare_vcg_op_lift + simp: archThreadGet_def + | corres_cases_both)+ + apply (fastforce dest: sym_refs_tcb_vcpu simp: obj_at_def) + apply (clarsimp simp: tcb_ko_at') + done + +end + +lemma no_refs_simple_strg': + "st_tcb_at' simple' t s' \ P {} \ st_tcb_at' (\st. P (tcb_st_refs_of' st)) t s'" + by (fastforce elim!: pred_tcb'_weakenE)+ + +crunch it[wp]: cancelSignal "\s. P (ksIdleThread s)" + (wp: crunch_wps simp: crunch_simps) + +lemma (in delete_one_conc_pre) cancelIPC_it[wp]: + "\\s. P (ksIdleThread s)\ + cancelIPC t + \\_ s. P (ksIdleThread s)\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def) + apply (wp hoare_drop_imps delete_one_it | wpc | simp add:if_apply_def2 Fun.comp_def)+ + done + +lemma tcbSchedDequeue_notksQ: + "\\s. t' \ set(ksReadyQueues s p)\ + tcbSchedDequeue t + \\_ s. t' \ set(ksReadyQueues s p)\" + apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) + apply wp + apply (rule hoare_pre_post, assumption) + apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) + apply wp+ + apply clarsimp + apply (rule_tac Q="\_ s. t' \ set(ksReadyQueues s p)" in hoare_post_imp) + apply (wp | clarsimp)+ + done + +lemma rescheduleRequired_oa_queued: + "\ (\s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)) and sch_act_simple\ + rescheduleRequired + \\_ s. P (obj_at' (\tcb. Q (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s)\" + (is "\?OAQ t' p and sch_act_simple\ _ \_\") + apply (simp add: rescheduleRequired_def sch_act_simple_def) + apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) + \ ?OAQ t' p s" in hoare_seq_ext) + including no_pre + apply (wp | clarsimp)+ + apply (case_tac x) + apply (wp | clarsimp)+ + done + +lemma setThreadState_oa_queued: + "\\s. P' (obj_at' (\tcb. 
P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \ + setThreadState st t + \\_ s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \" + (is "\\s. P' (?Q P s)\ _ \\_ s. P' (?Q P s)\") + proof (rule P_bool_lift [where P=P']) + show pos: + "\R. \ ?Q R \ setThreadState st t \\_. ?Q R \" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_oa_queued) + apply (simp add: sch_act_simple_def) + apply (rule_tac Q="\_. ?Q R" in hoare_post_imp, clarsimp) + apply (wp threadSet_obj_at'_strongish) + apply (clarsimp) + done + show "\\s. \ ?Q P s\ setThreadState st t \\_ s. \ ?Q P s\" + by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) + qed + +lemma setBoundNotification_oa_queued: + "\\s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \ + setBoundNotification ntfn t + \\_ s. P' (obj_at' (\tcb. P (tcbQueued tcb) (tcbDomain tcb) (tcbPriority tcb)) t' s) \" + (is "\\s. P' (?Q P s)\ _ \\_ s. P' (?Q P s)\") + proof (rule P_bool_lift [where P=P']) + show pos: + "\R. \ ?Q R \ setBoundNotification ntfn t \\_. ?Q R \" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_obj_at'_strongish) + apply (clarsimp) + done + show "\\s. \ ?Q P s\ setBoundNotification ntfn t \\_ s. \ ?Q P s\" + by (simp add: not_obj_at' comp_def, wp hoare_convert_imp pos) + qed + +lemma tcbSchedDequeue_ksQ_distinct[wp]: + "\\s. distinct (ksReadyQueues s p)\ + tcbSchedDequeue t + \\_ s. distinct (ksReadyQueues s p)\" + apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) + apply wp + apply (rule hoare_pre_post, assumption) + apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) + apply wp+ + apply (rule_tac Q="\_ s. distinct (ksReadyQueues s p)" in hoare_post_imp) + apply (clarsimp | wp)+ + done + +lemma sts_valid_queues_partial: + "\Invariants_H.valid_queues and sch_act_simple\ + setThreadState st t + \\_ s. \t' d p. + (t' \ set(ksReadyQueues s (d, p)) \ + (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s + \ (t' \ t \ st_tcb_at' runnable' t' s))) + \ distinct (ksReadyQueues s (d, p))\" + (is "\_\ _ \\_ s. \t' d p. ?OA t' d p s \ ?DISTINCT d p s \") + apply (rule_tac Q="\_ s. (\t' d p. ?OA t' d p s) \ (\d p. ?DISTINCT d p s)" + in hoare_post_imp) + apply (clarsimp) + apply (rule hoare_conjI) + apply (rule_tac Q="\s. \t' d p. + ((t'\set(ksReadyQueues s (d, p)) + \ \ (sch_act_simple s)) + \ (obj_at'(\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s + \ st_tcb_at' runnable' t' s))" in hoare_pre_imp) + apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def + pred_tcb_at'_def obj_at'_def inQ_def) + apply (rule hoare_vcg_all_lift)+ + apply (rule hoare_convert_imp) + including no_pre + apply (wp sts_ksQ setThreadState_oa_queued hoare_impI sts_pred_tcb_neq' + | clarsimp)+ + apply (rule_tac Q="\s. \d p. ?DISTINCT d p s \ sch_act_simple s" in hoare_pre_imp) + apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) + apply (wp hoare_vcg_all_lift sts_ksQ) + apply (clarsimp) + done + +lemma tcbSchedDequeue_t_notksQ: + "\\s. t \ set (ksReadyQueues s (d, p)) \ + obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\ + tcbSchedDequeue t + \\_ s. t \ set (ksReadyQueues s (d, p))\" + apply (rule_tac Q="(\s. t \ set (ksReadyQueues s (d, p))) + or obj_at'(\tcb. 
tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t" + in hoare_pre_imp, clarsimp) + apply (rule hoare_pre_disj) + apply (wp tcbSchedDequeue_notksQ)[1] + apply (simp add: tcbSchedDequeue_def removeFromBitmap_conceal_def[symmetric]) + apply wp + apply (rule hoare_pre_post, assumption) + apply (clarsimp simp: bitmap_fun_defs removeFromBitmap_conceal_def, wp, clarsimp) + apply (wp threadGet_wp)+ + apply (auto simp: obj_at'_real_def ko_wp_at'_def) + done + +lemma sts_invs_minor'_no_valid_queues: + "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st + \ (st \ Inactive \ \ idle' st \ + st' \ Inactive \ \ idle' st')) t + and (\s. t = ksIdleThread s \ idle' st) + and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) + and sch_act_simple + and invs'\ + setThreadState st t + \\_ s. (\t' d p. + (t' \ set(ksReadyQueues s (d, p)) \ + (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s + \ (t' \ t \ st_tcb_at' runnable' t' s))) + \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ + valid_bitmapQ s \ + bitmapQ_no_L2_orphans s \ + bitmapQ_no_L1_orphans s \ + valid_pspace' s \ + sch_act_wf (ksSchedulerAction s) s \ + sym_refs (state_refs_of' s) \ + sym_refs (state_hyp_refs_of' s) \ + if_live_then_nonz_cap' s \ + if_unsafe_then_cap' s \ + valid_idle' s \ + valid_global_refs' s \ + valid_arch_state' s \ + valid_irq_node' (irq_node' s) s \ + valid_irq_handlers' s \ + valid_irq_states' s \ + valid_machine_state' s \ + irqs_masked' s \ + valid_queues' s \ + ct_not_inQ s \ + ct_idle_or_in_cur_domain' s \ + pspace_domain_valid s \ + ksCurDomain s \ maxDomain \ + valid_dom_schedule' s \ + untyped_ranges_zero' s \ + cur_tcb' s \ + tcb_at' t s\" + apply (simp add: invs'_def valid_state'_def valid_queues_def) + apply (wp sts_valid_queues_partial sts_ksQ + setThreadState_oa_queued sts_st_tcb_at'_cases + irqs_masked_lift + valid_irq_node_lift + setThreadState_ct_not_inQ + sts_valid_bitmapQ_sch_act_simple + sts_valid_bitmapQ_no_L2_orphans_sch_act_simple + sts_valid_bitmapQ_no_L1_orphans_sch_act_simple + hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_all_lift)+ + apply (clarsimp simp: disj_imp) + apply (intro conjI) + apply (clarsimp simp: valid_queues_def) + apply (rule conjI, clarsimp) + apply (drule valid_queues_no_bitmap_objD, assumption) + apply (clarsimp simp: inQ_def comp_def) + apply (rule conjI) + apply (erule obj_at'_weaken) + apply (simp add: inQ_def) + apply (clarsimp simp: st_tcb_at'_def) + apply (erule obj_at'_weaken) + apply (simp add: inQ_def) + apply (simp add: valid_queues_no_bitmap_def) + apply clarsimp + apply (clarsimp simp: st_tcb_at'_def) + apply (drule obj_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply (clarsimp simp: valid_obj'_def valid_tcb'_def) + subgoal + by (fastforce simp: valid_tcb_state'_def + split: Structures_H.thread_state.splits) + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (fastforce simp: valid_queues_def inQ_def pred_tcb_at' pred_tcb_at'_def + elim!: st_tcb_ex_cap'' obj_at'_weakenE)+ + done + +crunch ct_idle_or_in_cur_domain'[wp]: tcbSchedDequeue ct_idle_or_in_cur_domain' + +lemma tcbSchedDequeue_invs'_no_valid_queues: + "\\s. (\t' d p. + (t' \ set(ksReadyQueues s (d, p)) \ + (obj_at' (\tcb. 
tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t' s + \ (t' \ t \ st_tcb_at' runnable' t' s))) + \ distinct (ksReadyQueues s (d, p)) \ (maxDomain < d \ maxPriority < p \ ksReadyQueues s (d, p) = [])) \ + valid_bitmapQ s \ + bitmapQ_no_L2_orphans s \ + bitmapQ_no_L1_orphans s \ + valid_pspace' s \ + sch_act_wf (ksSchedulerAction s) s \ + sym_refs (state_refs_of' s) \ + sym_refs (state_hyp_refs_of' s) \ + if_live_then_nonz_cap' s \ + if_unsafe_then_cap' s \ + valid_idle' s \ + valid_global_refs' s \ + valid_arch_state' s \ + valid_irq_node' (irq_node' s) s \ + valid_irq_handlers' s \ + valid_irq_states' s \ + valid_machine_state' s \ + irqs_masked' s \ + valid_queues' s \ + ct_not_inQ s \ + ct_idle_or_in_cur_domain' s \ + pspace_domain_valid s \ + ksCurDomain s \ maxDomain \ + valid_dom_schedule' s \ + untyped_ranges_zero' s \ + cur_tcb' s \ + tcb_at' t s\ + tcbSchedDequeue t + \\_. invs' \" + apply (simp add: invs'_def valid_state'_def) + apply (wp tcbSchedDequeue_valid_queues_weak valid_irq_handlers_lift + valid_irq_node_lift valid_irq_handlers_lift' + tcbSchedDequeue_irq_states irqs_masked_lift cur_tcb_lift + untyped_ranges_zero_lift + | clarsimp simp add: cteCaps_of_def valid_queues_def o_def)+ + apply (rule conjI) + apply (fastforce simp: obj_at'_def inQ_def st_tcb_at'_def valid_queues_no_bitmap_except_def) + apply (rule conjI, clarsimp simp: correct_queue_def) + apply (fastforce simp: valid_pspace'_def intro: obj_at'_conjI + elim: valid_objs'_maxDomain valid_objs'_maxPriority) + done + +lemmas sts_tcbSchedDequeue_invs' = + sts_invs_minor'_no_valid_queues + tcbSchedDequeue_invs'_no_valid_queues + +lemma asUser_sch_act_simple[wp]: + "\sch_act_simple\ asUser s t \\_. sch_act_simple\" + unfolding sch_act_simple_def + apply (rule asUser_nosch) + done + +lemma (in delete_one_conc) suspend_invs'[wp]: + "\invs' and sch_act_simple and tcb_at' t and (\s. t \ ksIdleThread s)\ + ThreadDecls_H.suspend t \\rv. invs'\" + apply (simp add: suspend_def) + apply (wp sts_tcbSchedDequeue_invs') + apply (simp add: updateRestartPC_def | strengthen no_refs_simple_strg')+ + prefer 2 + apply (wpsimp wp: hoare_drop_imps hoare_vcg_imp_lift' + | strengthen no_refs_simple_strg')+ + done + +lemma (in delete_one_conc_pre) suspend_tcb'[wp]: + "\tcb_at' t'\ ThreadDecls_H.suspend t \\rv. tcb_at' t'\" + apply (simp add: suspend_def) + apply (wpsimp simp: updateRestartPC_def) + done + +lemma (in delete_one_conc_pre) suspend_sch_act_simple[wp]: + "\sch_act_simple\ + ThreadDecls_H.suspend t \\rv. sch_act_simple\" + apply (simp add: suspend_def when_def updateRestartPC_def) + apply (wp cancelIPC_sch_act_simple | simp add: unless_def + | rule sch_act_simple_lift)+ + apply (simp add: updateRestartPC_def) + apply (rule asUser_nosch) + apply wpsimp+ + done + +lemma (in delete_one_conc) suspend_objs': + "\invs' and sch_act_simple and tcb_at' t and (\s. t \ ksIdleThread s)\ + suspend t \\rv. valid_objs'\" + apply (rule_tac Q="\_. invs'" in hoare_strengthen_post) + apply (wp suspend_invs') + apply fastforce + done + +lemma (in delete_one_conc_pre) suspend_st_tcb_at': + assumes x[simp]: "\st. simple' st \ P st" shows + "\st_tcb_at' P t\ + suspend thread + \\rv. 
st_tcb_at' P t\" + apply (simp add: suspend_def) + unfolding updateRestartPC_def + apply (wp sts_st_tcb_at'_cases threadSet_pred_tcb_no_state + cancelIPC_st_tcb_at hoare_drop_imps + | simp)+ + apply clarsimp + done + +lemmas (in delete_one_conc_pre) suspend_makes_simple' = + suspend_st_tcb_at' [where P=simple', simplified] + +lemma valid_queues_not_runnable'_not_ksQ: + assumes "Invariants_H.valid_queues s" and "st_tcb_at' (Not \ runnable') t s" + shows "\d p. t \ set (ksReadyQueues s (d, p))" + using assms + apply - + apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def pred_tcb_at'_def) + apply (erule_tac x=d in allE) + apply (erule_tac x=p in allE) + apply (clarsimp) + apply (drule(1) bspec) + apply (clarsimp simp: obj_at'_def) + done + +declare valid_queues_not_runnable'_not_ksQ[OF ByAssum, simp] + +lemma cancelSignal_queues[wp]: + "\Invariants_H.valid_queues and st_tcb_at' (Not \ runnable') t\ + cancelSignal t ae \\_. Invariants_H.valid_queues \" + apply (simp add: cancelSignal_def) + apply (wp sts_valid_queues) + apply (rule_tac Q="\_ s. \p. t \ set (ksReadyQueues s p)" in hoare_post_imp, simp) + apply (wp hoare_vcg_all_lift) + apply (wpc) + apply (wp)+ + apply (rule_tac Q="\_ s. Invariants_H.valid_queues s \ (\p. t \ set (ksReadyQueues s p))" in hoare_post_imp) + apply (clarsimp) + apply (wp) + apply (clarsimp) + done + +lemma (in delete_one_conc_pre) cancelIPC_queues[wp]: + "\Invariants_H.valid_queues and valid_objs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)\ + cancelIPC t \\rv. Invariants_H.valid_queues\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def + cong: Structures_H.thread_state.case_cong list.case_cong) + apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule hoare_pre) + apply (wpc + | wp hoare_vcg_conj_lift delete_one_queues threadSet_valid_queues + threadSet_valid_objs' sts_valid_queues setEndpoint_ksQ + hoare_vcg_all_lift threadSet_sch_act threadSet_weak_sch_act_wf + | simp add: o_def if_apply_def2 inQ_def + | rule hoare_drop_imps + | clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def + elim!: pred_tcb'_weakenE)+ + apply (fastforce dest: valid_queues_not_runnable'_not_ksQ elim: pred_tcb'_weakenE) + done + +(* FIXME: move to Schedule_R *) +lemma tcbSchedDequeue_nonq[wp]: + "\Invariants_H.valid_queues and tcb_at' t and K (t = t')\ + tcbSchedDequeue t \\_ s. \d p. t' \ set (ksReadyQueues s (d, p))\" + apply (rule hoare_gen_asm) + apply (simp add: tcbSchedDequeue_def) + apply (wp threadGet_wp|simp)+ + apply (fastforce simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def) + done + +lemma sts_ksQ_oaQ: + "\Invariants_H.valid_queues\ + setThreadState st t + \\_ s. t \ set (ksReadyQueues s (d, p)) \ + obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s\" + (is "\_\ _ \\_. ?POST\") + proof - + have RR: "\sch_act_simple and ?POST\ rescheduleRequired \\_. ?POST\" + apply (simp add: rescheduleRequired_def) + apply (wp) + apply (clarsimp) + apply (rule_tac + Q="(\s. action = ResumeCurrentThread \ action = ChooseNewThread) and ?POST" + in hoare_pre_imp, assumption) + apply (case_tac action) + apply (clarsimp)+ + apply (wp) + apply (clarsimp simp: sch_act_simple_def) + done + show ?thesis + apply (simp add: setThreadState_def) + apply (wp RR) + apply (rule_tac Q="\_. 
?POST" in hoare_post_imp) + apply (clarsimp simp add: sch_act_simple_def) + apply (wp hoare_convert_imp) + apply (clarsimp simp: Invariants_H.valid_queues_def valid_queues_no_bitmap_def) + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (fastforce dest: bspec elim!: obj_at'_weakenE simp: inQ_def) + done + qed + +lemma (in delete_one_conc_pre) suspend_nonq: + "\Invariants_H.valid_queues and valid_objs' and tcb_at' t + and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and (\s. t \ ksIdleThread s) and K (t = t')\ + suspend t + \\rv s. \d p. t' \ set (ksReadyQueues s (d, p))\" + apply (rule hoare_gen_asm) + apply (simp add: suspend_def) + unfolding updateRestartPC_def + apply (wp hoare_allI tcbSchedDequeue_t_notksQ sts_ksQ_oaQ) + apply wpsimp+ + done + +lemma suspend_makes_inactive: + "\K (t = t')\ suspend t \\rv. st_tcb_at' ((=) Inactive) t'\" + apply (cases "t = t'", simp_all) + apply (simp add: suspend_def unless_def) + apply (wp threadSet_pred_tcb_no_state setThreadState_st_tcb | simp)+ + done + +declare threadSet_sch_act_sane [wp] +declare sts_sch_act_sane [wp] + +lemma tcbSchedEnqueue_ksQset_weak: + "\\s. t' \ set (ksReadyQueues s p)\ + tcbSchedEnqueue t + \\_ s. t' \ set (ksReadyQueues s p)\" (is "\?PRE\ _ \_\") + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift hoare_vcg_if_lift) + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, ((wp | clarsimp)+))+ + done + +lemma tcbSchedEnqueue_sch_act_not_ct[wp]: + "\\s. sch_act_not (ksCurThread s) s\ tcbSchedEnqueue t \\_ s. sch_act_not (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + +lemma sts_sch_act_not_ct[wp]: + "\\s. sch_act_not (ksCurThread s) s\ + setThreadState st t \\_ s. sch_act_not (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps tcbSchedEnqueue_ct', wp, simp) + +text \Cancelling all IPC in an endpoint or notification object\ + +lemma ep_cancel_corres_helper: + "corres dc ((\s. \t \ set list. tcb_at t s) and valid_etcbs and pspace_aligned and pspace_distinct) + (Invariants_H.valid_queues and valid_queues' and valid_objs') + (mapM_x (\t. do + y \ set_thread_state t Structures_A.Restart; + tcb_sched_action tcb_sched_enqueue t + od) list) + (mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) list)" + apply (rule_tac S="{t. (fst t = snd t) \ fst t \ set list}" + in corres_mapM_x) + apply clarsimp + apply (rule corres_guard_imp) + apply (subst bind_return_unit, rule corres_split[OF _ tcbSchedEnqueue_corres]) + apply simp + apply (rule corres_guard_imp [OF setThreadState_corres]) + apply simp + apply (simp add: valid_tcb_state_def) + apply simp + apply (wp sts_valid_queues)+ + apply (force simp: tcb_at_is_etcb_at) + apply (fastforce elim: obj_at'_weakenE) + apply ((wp hoare_vcg_const_Ball_lift | simp)+)[1] + apply (rule hoare_pre) + apply (wp hoare_vcg_const_Ball_lift + weak_sch_act_wf_lift_linear sts_st_tcb' setThreadState_not_st + sts_valid_queues tcbSchedEnqueue_not_st + | simp)+ + apply (auto elim: obj_at'_weakenE simp: valid_tcb_state'_def) + done + +lemma ep_cancel_corres: + "corres dc (invs and valid_sched and ep_at ep) (invs' and ep_at' ep) + (cancel_all_ipc ep) (cancelAllIPC ep)" +proof - + have P: + "\list. + corres dc (\s. (\t \ set list. tcb_at t s) \ valid_pspace s \ ep_at ep s + \ valid_etcbs s \ weak_valid_sched_action s) + (\s. (\t \ set list. 
tcb_at' t s) \ valid_pspace' s + \ ep_at' ep s \ weak_sch_act_wf (ksSchedulerAction s) s + \ Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s) + (do x \ set_endpoint ep Structures_A.IdleEP; + x \ mapM_x (\t. do + y \ set_thread_state t Structures_A.Restart; + tcb_sched_action tcb_sched_enqueue t + od) list; + reschedule_required + od) + (do x \ setEndpoint ep IdleEP; + x \ mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) list; + rescheduleRequired + od)" + apply (rule corres_underlying_split) + apply (rule corres_guard_imp [OF setEndpoint_corres]) + apply (simp add: ep_relation_def)+ + apply (rule corres_split[OF _ rescheduleRequired_corres]) + apply (rule ep_cancel_corres_helper) + apply (rule mapM_x_wp') + apply (wp weak_sch_act_wf_lift_linear set_thread_state_runnable_weak_valid_sched_action | simp)+ + apply (rule_tac R="\_ s. \x\set list. tcb_at' x s \ valid_objs' s" + in hoare_post_add) + apply (rule mapM_x_wp') + apply (rule hoare_name_pre_state) + apply ((wp hoare_vcg_const_Ball_lift mapM_x_wp' + sts_valid_queues setThreadState_not_st sts_st_tcb' tcbSchedEnqueue_not_st + | clarsimp + | fastforce elim: obj_at'_weakenE simp: valid_tcb_state'_def)+)[2] + apply (rule hoare_name_pre_state) + apply (wp hoare_vcg_const_Ball_lift set_ep_valid_objs' + | (clarsimp simp: valid_ep'_def) + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def valid_ep'_def elim!: valid_objs_valid_tcbE))+ + done + + show ?thesis + apply (simp add: cancel_all_ipc_def cancelAllIPC_def) + apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ep_sp']) + apply (rule corres_guard_imp [OF getEndpoint_corres], simp+) + apply (case_tac epa, simp_all add: ep_relation_def + get_ep_queue_def) + apply (rule corres_guard_imp [OF P] + | clarsimp simp: valid_obj_def valid_ep_def + valid_obj'_def valid_ep'_def + invs_valid_pspace + valid_sched_def valid_sched_action_def + | erule obj_at_valid_objsE + | drule ko_at_valid_objs' + | rule conjI | clarsimp simp: invs'_def valid_state'_def)+ + done +qed + +(* FIXME move *) +lemma set_ntfn_tcb_obj_at' [wp]: + "\obj_at' (P::tcb \ bool) t\ + setNotification ntfn v + \\_. obj_at' P t\" + apply (clarsimp simp: setNotification_def, wp) + done + +lemma cancelAllSignals_corres: + "corres dc (invs and valid_sched and ntfn_at ntfn) (invs' and ntfn_at' ntfn) + (cancel_all_signals ntfn) (cancelAllSignals ntfn)" + apply (simp add: cancel_all_signals_def cancelAllSignals_def) + apply (rule corres_underlying_split [OF _ _ get_simple_ko_sp get_ntfn_sp']) + apply (rule corres_guard_imp [OF getNotification_corres]) + apply simp+ + apply (case_tac "ntfn_obj ntfna", simp_all add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setNotification_corres]) + apply (simp add: ntfn_relation_def) + apply (rule corres_split[OF _ rescheduleRequired_corres]) + apply (rule ep_cancel_corres_helper) + apply (wp mapM_x_wp'[where 'b="det_ext state"] + weak_sch_act_wf_lift_linear setThreadState_not_st + set_thread_state_runnable_weak_valid_sched_action + | simp)+ + apply (rename_tac list) + apply (rule_tac R="\_ s. (\x\set list. 
tcb_at' x s) \ valid_objs' s" + in hoare_post_add) + apply (rule mapM_x_wp') + apply (rule hoare_name_pre_state) + apply (wpsimp wp: hoare_vcg_const_Ball_lift + sts_st_tcb' sts_valid_queues setThreadState_not_st + simp: valid_tcb_state'_def) + apply (wp hoare_vcg_const_Ball_lift set_ntfn_aligned' set_ntfn_valid_objs' + weak_sch_act_wf_lift_linear + | simp)+ + apply (clarsimp simp: invs'_def valid_state'_def invs_valid_pspace valid_obj_def valid_ntfn_def + invs_weak_sch_act_wf valid_ntfn'_def valid_pspace'_def valid_sched_def + valid_sched_action_def valid_obj'_def + | erule obj_at_valid_objsE | drule ko_at_valid_objs' + | fastforce)+ + done + +lemma ep'_Idle_case_helper: + "(case ep of IdleEP \ a | _ \ b) = (if (ep = IdleEP) then a else b)" + by (cases ep, simp_all) + +lemma rescheduleRequired_notresume: + "\\s. ksSchedulerAction s \ ResumeCurrentThread\ + rescheduleRequired \\_ s. ksSchedulerAction s = ChooseNewThread\" + proof - + have ssa: "\\\ setSchedulerAction ChooseNewThread + \\_ s. ksSchedulerAction s = ChooseNewThread\" + by (simp add: setSchedulerAction_def | wp)+ + show ?thesis + by (simp add: rescheduleRequired_def, wp ssa) + qed + +lemma setThreadState_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + setThreadState st t + \\_ s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + (is "\?PRE\ _ \_\") +proof - + have nrct: + "\\s. ksSchedulerAction s \ ResumeCurrentThread\ + rescheduleRequired + \\_ s. ksSchedulerAction s \ ResumeCurrentThread\" + by (rule hoare_strengthen_post [OF rescheduleRequired_notresume], simp) + show ?thesis + apply (simp add: setThreadState_def) + apply (wpsimp wp: hoare_vcg_imp_lift [OF nrct]) + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp) + apply (clarsimp) + apply (rule hoare_convert_imp [OF threadSet_nosch threadSet_ct]) + apply assumption + done +qed + +lemma cancel_all_invs'_helper: + "\all_invs_but_sym_refs_ct_not_inQ' and (\s. \x \ set q. tcb_at' x s) + and (\s. sym_refs (\x. if x \ set q then {r \ state_refs_of' s x. snd r = TCBBound} + else state_refs_of' s x) + \ sym_refs (\x. state_hyp_refs_of' s x) + \ (\x \ set q. ex_nonz_cap_to' x s))\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) q + \\rv. all_invs_but_ct_not_inQ'\" + apply (rule mapM_x_inv_wp2) + apply clarsimp + apply (rule hoare_pre) + apply (wp valid_irq_node_lift valid_irq_handlers_lift'' irqs_masked_lift + hoare_vcg_const_Ball_lift untyped_ranges_zero_lift + sts_valid_queues sts_st_tcb' setThreadState_not_st + | simp add: cteCaps_of_def o_def)+ + apply (unfold fun_upd_apply Invariants_H.tcb_st_refs_of'_simps) + apply clarsimp + apply (intro conjI) + apply (clarsimp simp: valid_tcb_state'_def global'_no_ex_cap + elim!: rsubst[where P=sym_refs] + dest!: set_mono_suffix + intro!: ext + | (drule (1) bspec, clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE))+ + done + +lemma ep_q_refs_max: + "\ ko_at' r p s; sym_refs (state_refs_of' s); r \ IdleEP \ + \ (state_refs_of' s p \ (set (epQueue r) \ {EPSend, EPRecv})) + \ (\x\set (epQueue r). \ntfnptr. 
state_refs_of' s x \ + {(p, TCBBlockedSend), (p, TCBBlockedRecv), (ntfnptr, TCBBound)})" + apply (frule(1) sym_refs_ko_atD') + apply (drule ko_at_state_refs_ofD') + apply (case_tac r) + apply (clarsimp simp: st_tcb_at_refs_of_rev' tcb_bound_refs'_def + | rule conjI | drule(1) bspec | drule st_tcb_at_state_refs_ofD' + | case_tac ntfnptr)+ + done + +lemma rescheduleRequired_invs'[wp]: + "\invs'\ rescheduleRequired \\rv. invs'\" + apply (simp add: rescheduleRequired_def) + apply (wpsimp wp: ssa_invs') + apply (clarsimp simp: invs'_def valid_state'_def) + done + +lemma invs_rct_ct_activatable': + "\ invs' s; ksSchedulerAction s = ResumeCurrentThread \ + \ st_tcb_at' activatable' (ksCurThread s) s" + by (simp add: invs'_def valid_state'_def ct_in_state'_def) + +lemma not_in_epQueue: + assumes ko_at: "ko_at' r ep_ptr s" and + srefs: "sym_refs (state_refs_of' s)" and + nidle: "r \ IdleEP" and + st_act: "st_tcb_at' simple' t s" + shows "t \ set (epQueue r)" +proof + assume t_epQ: "t \ set (epQueue r)" + + with ko_at nidle + have "(t, EPRecv) \ state_refs_of' s ep_ptr + \ (t, EPSend) \ state_refs_of' s ep_ptr" + by - (drule ko_at_state_refs_ofD', case_tac r, (clarsimp)+) + + with ko_at srefs + have "(ep_ptr, TCBBlockedRecv) \ state_refs_of' s t + \ (ep_ptr, TCBBlockedSend) \ state_refs_of' s t" + apply - + apply (frule(1) sym_refs_ko_atD') + apply (drule ko_at_state_refs_ofD') + apply (case_tac r) + apply (clarsimp simp: st_tcb_at_refs_of_rev' + | drule(1) bspec | drule st_tcb_at_state_refs_ofD')+ + done + + with ko_at have "st_tcb_at' (Not \ simple') t s" + apply - + apply (erule disjE) + apply (drule state_refs_of'_elemD) + apply (simp add: st_tcb_at_refs_of_rev') + apply (erule pred_tcb'_weakenE) + apply (clarsimp) + apply (drule state_refs_of'_elemD) + apply (simp add: st_tcb_at_refs_of_rev') + apply (erule pred_tcb'_weakenE) + apply (clarsimp) + done + + with st_act show False + by (rule pred_tcb'_neq_contra) simp +qed + +lemma ct_not_in_epQueue: + assumes "ko_at' r ep_ptr s" and + "sym_refs (state_refs_of' s)" and + "r \ IdleEP" and + "ct_in_state' simple' s" + shows "ksCurThread s \ set (epQueue r)" + using assms unfolding ct_in_state'_def + by (rule not_in_epQueue) + +lemma not_in_ntfnQueue: + assumes ko_at: "ko_at' r ntfn_ptr s" and + srefs: "sym_refs (state_refs_of' s)" and + nidle: "ntfnObj r \ IdleNtfn \ (\b m. ntfnObj r \ ActiveNtfn b)" and + st_act: "st_tcb_at' simple' t s" + shows "t \ set (ntfnQueue (ntfnObj r))" +proof + assume t_epQ: "t \ set (ntfnQueue (ntfnObj r))" + + with ko_at nidle + have "(t, NTFNSignal) \ state_refs_of' s ntfn_ptr" + by - (drule ko_at_state_refs_ofD', case_tac "ntfnObj r", (clarsimp)+) + with ko_at srefs + have "(ntfn_ptr, TCBSignal) \ state_refs_of' s t" + apply - + apply (frule(1) sym_refs_ko_atD') + apply (drule ko_at_state_refs_ofD') + apply (case_tac "ntfnObj r") + apply (clarsimp simp: st_tcb_at_refs_of_rev' ntfn_bound_refs'_def + | drule st_tcb_at_state_refs_ofD')+ + apply (drule_tac x="(t, NTFNSignal)" in bspec, clarsimp) + apply (clarsimp simp: st_tcb_at_refs_of_rev' dest!: st_tcb_at_state_refs_ofD') + done + + with ko_at have "st_tcb_at' (Not \ simple') t s" + apply - + apply (drule state_refs_of'_elemD) + apply (simp add: st_tcb_at_refs_of_rev') + apply (erule pred_tcb'_weakenE) + apply (clarsimp) + done + + with st_act show False + by (rule pred_tcb'_neq_contra) simp +qed + +lemma ct_not_in_ntfnQueue: + assumes ko_at: "ko_at' r ntfn_ptr s" and + srefs: "sym_refs (state_refs_of' s)" and + nidle: "ntfnObj r \ IdleNtfn \ (\b m. 
ntfnObj r \ ActiveNtfn b)" and + st_act: "ct_in_state' simple' s" + shows "ksCurThread s \ set (ntfnQueue (ntfnObj r))" + using assms unfolding ct_in_state'_def + by (rule not_in_ntfnQueue) + +crunch valid_pspace'[wp]: rescheduleRequired "valid_pspace'" +crunch valid_global_refs'[wp]: rescheduleRequired "valid_global_refs'" +crunch valid_machine_state'[wp]: rescheduleRequired "valid_machine_state'" + +lemma sch_act_wf_weak[elim!]: + "sch_act_wf sa s \ weak_sch_act_wf sa s" + by (case_tac sa, (simp add: weak_sch_act_wf_def)+) + +lemma rescheduleRequired_all_invs_but_ct_not_inQ: + "\all_invs_but_ct_not_inQ'\ rescheduleRequired \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp rescheduleRequired_ct_not_inQ + valid_irq_node_lift valid_irq_handlers_lift'' + irqs_masked_lift cur_tcb_lift + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def)+ + apply (auto simp: sch_act_wf_weak) + done + +lemma cancelAllIPC_invs'[wp]: + "\invs'\ cancelAllIPC ep_ptr \\rv. invs'\" + apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) + apply (wp rescheduleRequired_all_invs_but_ct_not_inQ + cancel_all_invs'_helper hoare_vcg_const_Ball_lift + valid_global_refs_lift' valid_arch_state_lift' + valid_irq_node_lift ssa_invs' sts_sch_act' + irqs_masked_lift + | simp only: sch_act_wf.simps forM_x_def | simp)+ + prefer 2 + apply assumption + apply (rule hoare_strengthen_post [OF get_ep_sp']) + apply (rename_tac rv s) + apply (clarsimp simp: invs'_def valid_state'_def valid_ep'_def) + apply (frule obj_at_valid_objs', fastforce) + apply (clarsimp simp: valid_obj'_def) + apply (rule conjI) + apply (case_tac rv, simp_all add: valid_ep'_def)[1] + apply (rule conjI[rotated]) + apply (drule(1) sym_refs_ko_atD') + apply (case_tac rv, simp_all add: st_tcb_at_refs_of_rev')[1] + apply (clarsimp elim!: if_live_state_refsE + | drule(1) bspec | drule st_tcb_at_state_refs_ofD')+ + apply (drule(2) ep_q_refs_max) + apply (erule delta_sym_refs) + apply (clarsimp dest!: symreftype_inverse' split: if_split_asm | drule(1) bspec subsetD)+ + done + +lemma cancelAllSignals_invs'[wp]: + "\invs'\ cancelAllSignals ntfn \\rv. 
invs'\" + apply (simp add: cancelAllSignals_def) + apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj ntfna", simp_all) + apply (wp, simp) + apply (wp, simp) + apply (rule hoare_pre) + apply (wp rescheduleRequired_all_invs_but_ct_not_inQ + cancel_all_invs'_helper hoare_vcg_const_Ball_lift + valid_irq_node_lift ssa_invs' irqs_masked_lift + | simp only: sch_act_wf.simps)+ + apply (clarsimp simp: invs'_def valid_state'_def valid_ntfn'_def) + apply (frule obj_at_valid_objs', clarsimp) + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + apply (drule(1) sym_refs_ko_atD') + apply (rule conjI, clarsimp elim!: if_live_state_refsE) + apply (rule conjI[rotated]) + apply (clarsimp elim!: if_live_state_refsE) + apply (drule_tac x="(x, NTFNSignal)" in bspec) + apply (clarsimp simp: st_tcb_at_refs_of_rev')+ + apply (drule st_tcb_at_state_refs_ofD') + apply clarsimp + apply (erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def) + apply (drule_tac x="(x, NTFNSignal)" in bspec) + apply (clarsimp simp: st_tcb_at_refs_of_rev')+ + apply (drule st_tcb_at_state_refs_ofD') + apply (fastforce simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def) + done + +lemma cancelAllIPC_valid_objs'[wp]: + "\valid_objs'\ cancelAllIPC ep \\rv. valid_objs'\" + apply (simp add: cancelAllIPC_def ep'_Idle_case_helper cong del: if_cong) + apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule hoare_pre) + apply (wp set_ep_valid_objs' setSchedulerAction_valid_objs') + apply (rule_tac Q="\rv s. valid_objs' s \ (\x\set (epQueue ep). tcb_at' x s)" + in hoare_post_imp) + apply simp + apply (simp add: Ball_def) + apply (wp mapM_x_wp' sts_valid_objs' + hoare_vcg_all_lift hoare_vcg_const_imp_lift)+ + apply simp + apply (simp add: valid_tcb_state'_def) + apply (wp set_ep_valid_objs' hoare_vcg_all_lift hoare_vcg_const_imp_lift) + apply (clarsimp) + apply (frule(1) ko_at_valid_objs') + apply simp + apply (clarsimp simp: valid_obj'_def valid_ep'_def) + apply (case_tac epa, simp_all) + done + +lemma cancelAllSignals_valid_objs'[wp]: + "\valid_objs'\ cancelAllSignals ntfn \\rv. valid_objs'\" + apply (simp add: cancelAllSignals_def) + apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj ntfna", simp_all) + apply (wp, simp) + apply (wp, simp) + apply (rename_tac list) + apply (rule_tac Q="\rv s. valid_objs' s \ (\x\set list. tcb_at' x s)" + in hoare_post_imp) + apply (simp add: valid_ntfn'_def) + apply (simp add: Ball_def) + apply (wp setSchedulerAction_valid_objs' mapM_x_wp' + sts_valid_objs' hoare_vcg_all_lift hoare_vcg_const_imp_lift + | simp)+ + apply (simp add: valid_tcb_state'_def) + apply (wp set_ntfn_valid_objs' hoare_vcg_all_lift hoare_vcg_const_imp_lift) + apply clarsimp + apply (frule(1) ko_at_valid_objs') + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + done + +lemma cancelAllIPC_st_tcb_at: + assumes x[simp]: "P Restart" shows + "\st_tcb_at' P t\ cancelAllIPC epptr \\rv. st_tcb_at' P t\" + unfolding cancelAllIPC_def + by (wp ep'_cases_weak_wp mapM_x_wp' sts_st_tcb_at'_cases | clarsimp)+ + +lemmas cancelAllIPC_makes_simple[wp] = + cancelAllIPC_st_tcb_at [where P=simple', simplified] + +lemma cancelAllSignals_st_tcb_at: + assumes x[simp]: "P Restart" shows + "\st_tcb_at' P t\ cancelAllSignals epptr \\rv. 
st_tcb_at' P t\" + unfolding cancelAllSignals_def + by (wp ntfn'_cases_weak_wp mapM_x_wp' sts_st_tcb_at'_cases | clarsimp)+ + +lemmas cancelAllSignals_makes_simple[wp] = + cancelAllSignals_st_tcb_at [where P=simple', simplified] + +lemma threadSet_not_tcb[wp]: + "\ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\ + threadSet f t + \\rv. ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\" + by (clarsimp simp: threadSet_def valid_def getObject_def + setObject_def in_monad loadObject_default_def + ko_wp_at'_def split_def in_magnitude_check + objBits_simps' updateObject_default_def + ps_clear_upd projectKO_opt_tcb) + +lemma setThreadState_not_tcb[wp]: + "\ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\ + setThreadState st t + \\rv. ko_wp_at' (\x. P x \ (projectKO_opt x = (None :: tcb option))) p\" + apply (simp add: setThreadState_def setQueue_def + rescheduleRequired_def tcbSchedEnqueue_def + unless_def bitmap_fun_defs + cong: scheduler_action.case_cong cong del: if_cong + | wp | wpcw)+ + done + +lemma tcbSchedEnqueue_unlive: + "\ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p + and tcb_at' t\ + tcbSchedEnqueue t + \\_. ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wp | simp add: setQueue_def bitmap_fun_defs)+ + done + +lemma cancelAll_unlive_helper: + "\\s. (\x\set xs. tcb_at' x s) \ + ko_wp_at' (\x. \ live' x \ (projectKO_opt x = (None :: tcb option))) p s\ + mapM_x (\t. do + y \ setThreadState Structures_H.thread_state.Restart t; + tcbSchedEnqueue t + od) xs + \\rv. ko_wp_at' (Not \ live') p\" + apply (rule hoare_strengthen_post) + apply (rule mapM_x_wp') + apply (rule hoare_pre) + apply (wp tcbSchedEnqueue_unlive hoare_vcg_const_Ball_lift) + apply clarsimp + apply (clarsimp elim!: ko_wp_at'_weakenE) + done + +context begin interpretation Arch . (*FIXME: arch_split*) +lemma setObject_ko_wp_at': + fixes v :: "'a :: pspace_storable" + assumes x: "\v :: 'a. updateObject v = updateObject_default v" + assumes n: "\v :: 'a. objBits v = n" + assumes v: "(1 :: machine_word) < 2 ^ n" + shows + "\\s. P (injectKO v)\ setObject p v \\rv. ko_wp_at' P p\" + by (clarsimp simp: setObject_def valid_def in_monad + ko_wp_at'_def x split_def n + updateObject_default_def + objBits_def[symmetric] ps_clear_upd + in_magnitude_check v) + +lemma rescheduleRequired_unlive: + "\\s. ko_wp_at' (Not \ live') p s \ ksSchedulerAction s \ SwitchToThread p\ + rescheduleRequired + \\rv. ko_wp_at' (Not \ live') p\" + apply (simp add: rescheduleRequired_def) + apply (wp | simp | wpc)+ + apply (simp add: tcbSchedEnqueue_def unless_def + threadSet_def setQueue_def threadGet_def) + apply (wp setObject_ko_wp_at getObject_tcb_wp + | simp add: objBits_simps' bitmap_fun_defs split del: if_split)+ + apply (clarsimp simp: o_def) + apply (drule obj_at_ko_at') + apply clarsimp + done + +lemmas setEndpoint_ko_wp_at' + = setObject_ko_wp_at'[where 'a=endpoint, folded setEndpoint_def, simplified] + +lemma cancelAllIPC_unlive: + "\valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s)\ + cancelAllIPC ep \\rv. 
ko_wp_at' (Not \ live') ep\" + apply (simp add: cancelAllIPC_def ep'_Idle_case_helper) + apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule hoare_pre) + apply (wp cancelAll_unlive_helper setEndpoint_ko_wp_at' + hoare_vcg_const_Ball_lift rescheduleRequired_unlive + mapM_x_wp' + | simp add: objBits_simps')+ + apply (clarsimp simp: projectKO_opt_tcb) + apply (frule(1) obj_at_valid_objs') + apply (intro conjI impI) + apply (clarsimp simp: valid_obj'_def valid_ep'_def obj_at'_def pred_tcb_at'_def ko_wp_at'_def + live'_def + split: endpoint.split_asm)+ + done + +lemma cancelAllSignals_unlive: + "\\s. valid_objs' s \ sch_act_wf (ksSchedulerAction s) s + \ obj_at' (\ko. ntfnBoundTCB ko = None) ntfnptr s\ + cancelAllSignals ntfnptr \\rv. ko_wp_at' (Not \ live') ntfnptr\" + apply (simp add: cancelAllSignals_def) + apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj ntfn", simp_all add: setNotification_def) + apply wp + apply (fastforce simp: obj_at'_real_def live'_def + dest: obj_at_conj' + elim: ko_wp_at'_weakenE) + apply wp + apply (fastforce simp: obj_at'_real_def live'_def + dest: obj_at_conj' + elim: ko_wp_at'_weakenE) + apply (wp rescheduleRequired_unlive) + apply (wp cancelAll_unlive_helper) + apply ((wp mapM_x_wp' setObject_ko_wp_at' hoare_vcg_const_Ball_lift)+, + simp_all add: objBits_simps', simp_all) + apply (fold setNotification_def, wp) + apply (intro conjI[rotated]) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (fastforce simp: projectKO_opt_tcb ko_wp_at'_def valid_obj'_def valid_ntfn'_def + obj_at'_def live'_def)+ + done + +crunch ep_at'[wp]: tcbSchedEnqueue "ep_at' epptr" + (simp: unless_def) + +declare if_cong[cong] + +lemma insert_eqD: + "A = insert a B \ a \ A" + by blast + +lemma cancelBadgedSends_filterM_helper': + notes if_cong[cong del] + shows + "\ys. + \\s. all_invs_but_sym_refs_ct_not_inQ' s + \ ex_nonz_cap_to' epptr s \ ep_at' epptr s + \ sym_refs ((state_refs_of' s) (epptr := set (xs @ ys) \ {EPSend})) + \ (\y \ set (xs @ ys). state_refs_of' s y = {(epptr, TCBBlockedSend)} + \ {r \ state_refs_of' s y. snd r = TCBBound}) + \ sym_refs (state_hyp_refs_of' s) + \ distinct (xs @ ys)\ + filterM (\t. do st \ getThreadState t; + if blockingIPCBadge st = badge then + do y \ setThreadState Structures_H.thread_state.Restart t; + y \ tcbSchedEnqueue t; + return False + od + else return True + od) xs + \\rv s. all_invs_but_sym_refs_ct_not_inQ' s + \ ex_nonz_cap_to' epptr s \ ep_at' epptr s + \ sym_refs ((state_refs_of' s) (epptr := (set rv \ set ys) \ {EPSend})) + \ sym_refs (state_hyp_refs_of' s) + \ (\y \ set ys. state_refs_of' s y = {(epptr, TCBBlockedSend)} + \ {r \ state_refs_of' s y. snd r = TCBBound}) + \ distinct rv \ distinct (xs @ ys) \ set rv \ set xs \ (\x \ set xs. 
tcb_at' x s)\" + apply (rule_tac xs=xs in rev_induct) + apply clarsimp + apply wp + apply clarsimp + apply (clarsimp simp: filterM_append bind_assoc simp del: set_append distinct_append) + apply (drule spec, erule hoare_seq_ext[rotated]) + apply (rule hoare_seq_ext [OF _ gts_inv']) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift hoare_vcg_const_Ball_lift sts_sch_act' + sch_act_wf_lift valid_irq_handlers_lift'' cur_tcb_lift irqs_masked_lift + sts_st_tcb' sts_valid_queues setThreadState_not_st + tcbSchedEnqueue_not_st + untyped_ranges_zero_lift + | clarsimp simp: cteCaps_of_def o_def)+ + apply (frule insert_eqD, frule state_refs_of'_elemD) + apply (clarsimp simp: valid_tcb_state'_def st_tcb_at_refs_of_rev') + apply (frule pred_tcb_at') + apply (rule conjI[rotated], blast) + apply clarsimp + apply (intro conjI) + apply (clarsimp simp: valid_pspace'_def valid_tcb'_def elim!: valid_objs_valid_tcbE dest!: st_tcb_ex_cap'') + apply (fastforce dest!: st_tcb_ex_cap'') + apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + apply (erule delta_sym_refs) + apply (fastforce elim!: obj_atE' + simp: state_refs_of'_def tcb_bound_refs'_def + subsetD symreftype_inverse' + split: if_split_asm)+ + done + +lemmas cancelBadgedSends_filterM_helper + = spec [where x=Nil, OF cancelBadgedSends_filterM_helper', simplified] + +lemma cancelBadgedSends_invs[wp]: + notes if_cong[cong del] + shows + "\invs'\ cancelBadgedSends epptr badge \\rv. invs'\" + apply (simp add: cancelBadgedSends_def) + apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (case_tac ep, simp_all) + apply ((wp | simp)+)[2] + apply (subst bind_assoc [where g="\_. rescheduleRequired", + symmetric])+ + apply (rule hoare_seq_ext + [OF rescheduleRequired_all_invs_but_ct_not_inQ]) + apply (simp add: list_case_return cong: list.case_cong) + apply (rule hoare_pre, wp valid_irq_node_lift irqs_masked_lift) + apply simp + apply (rule hoare_strengthen_post, + rule cancelBadgedSends_filterM_helper[where epptr=epptr]) + apply (clarsimp simp: ep_redux_simps3 fun_upd_def[symmetric]) + apply (clarsimp simp add: valid_ep'_def split: list.split) + apply blast + apply (wp valid_irq_node_lift irqs_masked_lift | wp (once) sch_act_sane_lift)+ + apply (clarsimp simp: invs'_def valid_state'_def state_hyp_refs_of'_ep + valid_ep'_def fun_upd_def[symmetric] + obj_at'_weakenE[OF _ TrueI]) + apply (frule obj_at_valid_objs', clarsimp) + apply (clarsimp simp: valid_obj'_def valid_ep'_def) + apply (frule if_live_then_nonz_capD', simp add: obj_at'_real_def) + apply (clarsimp simp: live'_def) + apply (frule(1) sym_refs_ko_atD') + apply (clarsimp simp add: fun_upd_idem + st_tcb_at_refs_of_rev') + apply (drule (1) bspec, drule st_tcb_at_state_refs_ofD', clarsimp) + apply (fastforce simp: set_eq_subset tcb_bound_refs'_def) + done + +crunch state_refs_of[wp]: tcb_sched_action "\s. 
P (state_refs_of s)" + +lemma cancelBadgedSends_corres: + "corres dc (invs and valid_sched and ep_at epptr) (invs' and ep_at' epptr) + (cancel_badged_sends epptr bdg) (cancelBadgedSends epptr bdg)" + apply (simp add: cancel_badged_sends_def cancelBadgedSends_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres _ get_simple_ko_sp get_ep_sp', + where Q="invs and valid_sched" and Q'=invs']) + apply simp_all + apply (case_tac ep, simp_all add: ep_relation_def) + apply (simp add: filterM_mapM list_case_return cong: list.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF setEndpoint_corres]) + apply (simp add: ep_relation_def) + apply (rule corres_split_eqr[OF _ _ _ hoare_post_add[where R="\_. valid_objs'"]]) + apply (rule_tac S="(=)" + and Q="\xs s. (\x \ set xs. (epptr, TCBBlockedSend) \ state_refs_of s x) \ + distinct xs \ valid_etcbs s \ pspace_aligned s \ pspace_distinct s" + and Q'="\xs s. Invariants_H.valid_queues s \ valid_queues' s \ valid_objs' s" + in corres_mapM_list_all2[where r'="(=)"], + simp_all add: list_all2_refl)[1] + apply (clarsimp simp: liftM_def[symmetric] o_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac F="\pl. st = Structures_A.BlockedOnSend epptr pl" + in corres_gen_asm) + apply (clarsimp simp: o_def dc_def[symmetric] liftM_def) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (rule corres_trivial) + apply simp + apply wp+ + apply simp + apply (wp sts_valid_queues gts_st_tcb_at)+ + apply (clarsimp simp: valid_tcb_state_def tcb_at_def st_tcb_def2 + st_tcb_at_refs_of_rev + dest!: state_refs_of_elemD elim!: tcb_at_is_etcb_at[rotated]) + apply (simp add: is_tcb_def) + apply simp + apply (wp hoare_vcg_const_Ball_lift gts_wp | clarsimp)+ + apply (wp hoare_vcg_imp_lift sts_st_tcb' sts_valid_queues + | clarsimp simp: valid_tcb_state'_def)+ + apply (rule corres_split[OF _ rescheduleRequired_corres]) + apply (rule setEndpoint_corres) + apply (simp split: list.split add: ep_relation_def) + apply (wp weak_sch_act_wf_lift_linear)+ + apply (wp gts_st_tcb_at hoare_vcg_imp_lift mapM_wp' + sts_st_tcb' sts_valid_queues + set_thread_state_runnable_weak_valid_sched_action + | clarsimp simp: valid_tcb_state'_def)+ + apply (wp hoare_vcg_const_Ball_lift weak_sch_act_wf_lift_linear set_ep_valid_objs' + | simp)+ + apply (clarsimp simp: conj_comms) + apply (frule sym_refs_ko_atD, clarsimp+) + apply (rule obj_at_valid_objsE, assumption+, clarsimp+) + apply (clarsimp simp: valid_obj_def valid_ep_def valid_sched_def valid_sched_action_def) + apply (rule conjI, fastforce) + apply (rule conjI, fastforce) + apply (rule conjI, erule obj_at_weakenE, clarsimp simp: is_ep) + apply (clarsimp simp: st_tcb_at_refs_of_rev) + apply (drule(1) bspec, drule st_tcb_at_state_refs_ofD, clarsimp) + apply (simp add: set_eq_subset) + apply (clarsimp simp: obj_at'_weakenE[OF _ TrueI]) + apply (drule ko_at_valid_objs', clarsimp) + apply simp + apply (clarsimp simp: valid_obj'_def valid_ep'_def invs_weak_sch_act_wf + invs'_def valid_state'_def) + done + +lemma suspend_unqueued: + "\\\ suspend t \\rv. 
obj_at' (Not \ tcbQueued) t\" + apply (simp add: suspend_def unless_def tcbSchedDequeue_def) + apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift) + apply (simp add: threadGet_def| wp getObject_tcb_wp)+ + apply (rule hoare_strengthen_post, rule hoare_post_taut) + apply (fastforce simp: obj_at'_def) + apply (rule hoare_post_taut) + apply wp+ + done + +crunch no_vcpu[wp]: vcpuInvalidateActive "obj_at' (P::'a:: no_vcpu \ bool) t" + +lemma asUser_tcbQueued[wp]: + "asUser t' f \obj_at' (P \ tcbQueued) t\" + unfolding asUser_def threadGet_stateAssert_gets_asUser + by (wpsimp simp: asUser_fetch_def obj_at'_def) + +lemma archThreadSet_tcbQueued[wp]: + "archThreadSet f tcb \obj_at' (P \ tcbQueued) t\" + unfolding archThreadSet_def + by (wp setObject_tcb_strongest getObject_tcb_wp) (fastforce simp: obj_at'_def) + +lemma dissociateVCPUTCB_unqueued[wp]: + "dissociateVCPUTCB vcpu tcb \obj_at' (Not \ tcbQueued) t\" + unfolding dissociateVCPUTCB_def archThreadGet_def by wpsimp + +lemmas asUser_st_tcb_at'[wp] = asUser_obj_at [folded st_tcb_at'_def] +lemmas setObject_vcpu_st_tcb_at'[wp] = + setObject_vcpu_obj_at'_no_vcpu [where P'="P o tcbState" for P, folded st_tcb_at'_def] +lemmas vcpuInvalidateActive_st_tcb_at'[wp] = + vcpuInvalidateActive_no_vcpu [where P="P o tcbState" for P, folded st_tcb_at'_def] + +lemma archThreadSet_st_tcb_at'[wp]: + "archThreadSet f tcb \st_tcb_at' P t\" + unfolding archThreadSet_def st_tcb_at'_def + by (wp setObject_tcb_strongest getObject_tcb_wp) (fastforce simp: obj_at'_def) + +lemma dissociateVCPUTCB_st_tcb_at'[wp]: + "dissociateVCPUTCB vcpu tcb \st_tcb_at' P t'\" + unfolding dissociateVCPUTCB_def archThreadGet_def by wpsimp + +crunch ksQ[wp]: dissociateVCPUTCB "\s. P (ksReadyQueues s)" + (wp: crunch_wps setObject_queues_unchanged_tcb simp: crunch_simps) + +(* FIXME AARCH64: move to TcbAcc_R *) +lemma archThreadGet_wp: + "\\s. \tcb. ko_at' tcb t s \ Q (f (tcbArch tcb)) s\ archThreadGet f t \Q\" + unfolding archThreadGet_def + by (wpsimp wp: getObject_tcb_wp simp: obj_at'_def) + +crunch unqueued: prepareThreadDelete "obj_at' (Not \ tcbQueued) t" + (simp: o_def wp: dissociateVCPUTCB_unqueued[simplified o_def] archThreadGet_wp) +crunch inactive: prepareThreadDelete "st_tcb_at' ((=) Inactive) t'" +crunch nonq: prepareThreadDelete " \s. \d p. t' \ set (ksReadyQueues s (d, p))" + +end +end diff --git a/proof/refine/AARCH64/Ipc_R.thy b/proof/refine/AARCH64/Ipc_R.thy new file mode 100644 index 0000000000..3bc8c3bac1 --- /dev/null +++ b/proof/refine/AARCH64/Ipc_R.thy @@ -0,0 +1,4376 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Ipc_R +imports Finalise_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemmas lookup_slot_wrapper_defs'[simp] = + lookupSourceSlot_def lookupTargetSlot_def lookupPivotSlot_def + +lemma getMessageInfo_corres: "corres ((=) \ message_info_map) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_message_info t) (getMessageInfo t)" + apply (rule corres_guard_imp) + apply (unfold get_message_info_def getMessageInfo_def fun_app_def) + apply (simp add: AARCH64_H.msgInfoRegister_def + AARCH64.msgInfoRegister_def AARCH64_A.msg_info_register_def) + apply (rule corres_split_eqr[OF asUser_getRegister_corres]) + apply (rule corres_trivial, simp add: message_info_from_data_eqv) + apply (wp | simp)+ + done + + +lemma get_mi_inv'[wp]: "\I\ getMessageInfo a \\x. 
I\" + by (simp add: getMessageInfo_def, wp) + +definition + "get_send_cap_relation rv rv' \ + (case rv of Some (c, cptr) \ (\c' cptr'. rv' = Some (c', cptr') \ + cte_map cptr = cptr' \ + cap_relation c c') + | None \ rv' = None)" + +lemma cap_relation_mask: + "\ cap_relation c c'; msk' = rights_mask_map msk \ \ + cap_relation (mask_cap msk c) (maskCapRights msk' c')" + by simp + +lemma lsfco_cte_at': + "\valid_objs' and valid_cap' cap\ + lookupSlotForCNodeOp f cap idx depth + \\rv. cte_at' rv\, -" + apply (simp add: lookupSlotForCNodeOp_def) + apply (rule conjI) + prefer 2 + apply clarsimp + apply (wp) + apply (clarsimp simp: split_def unlessE_def + split del: if_split) + apply (wp hoare_drop_imps throwE_R) + done + +declare unifyFailure_wp [wp] + +(* FIXME: move *) +lemma unifyFailure_wp_E [wp]: + "\P\ f -, \\_. E\ \ \P\ unifyFailure f -, \\_. E\" + unfolding validE_E_def + by (erule unifyFailure_wp)+ + +(* FIXME: move *) +lemma unifyFailure_wp2 [wp]: + assumes x: "\P\ f \\_. Q\" + shows "\P\ unifyFailure f \\_. Q\" + by (wp x, simp) + +definition + ct_relation :: "captransfer \ cap_transfer \ bool" +where + "ct_relation ct ct' \ + ct_receive_root ct = to_bl (ctReceiveRoot ct') + \ ct_receive_index ct = to_bl (ctReceiveIndex ct') + \ ctReceiveDepth ct' = unat (ct_receive_depth ct)" + +(* MOVE *) +lemma valid_ipc_buffer_ptr_aligned_word_size_bits: + "\valid_ipc_buffer_ptr' a s; is_aligned y word_size_bits \ \ is_aligned (a + y) word_size_bits" + unfolding valid_ipc_buffer_ptr'_def + apply clarsimp + apply (erule (1) aligned_add_aligned) + apply (simp add: msg_align_bits word_size_bits_def) + done + +(* MOVE *) +lemma valid_ipc_buffer_ptr'D2: + "\valid_ipc_buffer_ptr' a s; y < max_ipc_words * word_size; is_aligned y word_size_bits\ \ typ_at' UserDataT (a + y && ~~ mask pageBits) s" + unfolding valid_ipc_buffer_ptr'_def + apply clarsimp + apply (subgoal_tac "(a + y) && ~~ mask pageBits = a && ~~ mask pageBits") + apply simp + apply (rule mask_out_first_mask_some [where n = msg_align_bits]) + apply (erule is_aligned_add_helper [THEN conjunct2]) + apply (erule order_less_le_trans) + apply (simp add: msg_align_bits max_ipc_words word_size_def) + apply simp + done + +lemma loadCapTransfer_corres: + notes msg_max_words_simps = max_ipc_words_def msgMaxLength_def msgMaxExtraCaps_def msgLengthBits_def + capTransferDataSize_def msgExtraCapBits_def + shows + "corres ct_relation \ (valid_ipc_buffer_ptr' buffer) (load_cap_transfer buffer) (loadCapTransfer buffer)" + apply (simp add: load_cap_transfer_def loadCapTransfer_def + captransfer_from_words_def + capTransferDataSize_def capTransferFromWords_def + msgExtraCapBits_def word_size add.commute add.left_commute + msg_max_length_def msg_max_extra_caps_def word_size_def + msgMaxLength_def msgMaxExtraCaps_def msgLengthBits_def wordSize_def wordBits_def + del: upt.simps) + apply (rule corres_guard_imp) + apply (rule corres_split[OF load_word_corres]) + apply (rule corres_split[OF load_word_corres]) + apply (rule corres_split[OF load_word_corres]) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply (clarsimp simp: ct_relation_def) + apply (wp no_irq_loadWord)+ + apply simp + apply (simp add: conj_comms) + apply safe + apply (erule valid_ipc_buffer_ptr_aligned_word_size_bits, simp add: is_aligned_def word_size_bits_def)+ + apply (erule valid_ipc_buffer_ptr'D2, + simp add: msg_max_words_simps word_size_def word_size_bits_def, + simp add: word_size_bits_def is_aligned_def)+ + done + +lemma getReceiveSlots_corres: + "corres (\xs ys. 
ys = map cte_map xs) + (tcb_at receiver and valid_objs and pspace_aligned) + (tcb_at' receiver and valid_objs' and pspace_aligned' and pspace_distinct' and + case_option \ valid_ipc_buffer_ptr' recv_buf) + (get_receive_slots receiver recv_buf) + (getReceiveSlots receiver recv_buf)" + apply (cases recv_buf) + apply (simp add: getReceiveSlots_def) + apply (simp add: getReceiveSlots_def split_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF loadCapTransfer_corres]) + apply (rule corres_empty_on_failure) + apply (rule corres_splitEE) + apply (rule corres_unify_failure) + apply (rule lookup_cap_corres) + apply (simp add: ct_relation_def) + apply simp + apply (rule corres_splitEE) + apply (rule corres_unify_failure) + apply (simp add: ct_relation_def) + apply (erule lookupSlotForCNodeOp_corres [OF _ refl]) + apply simp + apply (simp add: split_def liftE_bindE unlessE_whenE) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply (case_tac cap, auto)[1] + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_trivial, simp add: returnOk_def) + apply (wp lookup_cap_valid lookup_cap_valid' lsfco_cte_at | simp)+ + done + +lemma get_recv_slot_inv'[wp]: + "\ P \ getReceiveSlots receiver buf \\rv'. P \" + apply (case_tac buf) + apply (simp add: getReceiveSlots_def) + apply (simp add: getReceiveSlots_def + split_def unlessE_def) + apply (wp | simp)+ + done + +lemma get_rs_cte_at'[wp]: + "\\\ + getReceiveSlots receiver recv_buf + \\rv s. \x \ set rv. cte_wp_at' (\c. cteCap c = capability.NullCap) x s\" + apply (cases recv_buf) + apply (simp add: getReceiveSlots_def) + apply (wp,simp) + apply (clarsimp simp add: getReceiveSlots_def + split_def whenE_def unlessE_whenE) + apply wp + apply simp + apply (rule getCTE_wp) + apply (simp add: cte_wp_at_ctes_of cong: conj_cong) + apply wp+ + apply simp + done + +lemma get_rs_real_cte_at'[wp]: + "\valid_objs'\ + getReceiveSlots receiver recv_buf + \\rv s. \x \ set rv. real_cte_at' x s\" + apply (cases recv_buf) + apply (simp add: getReceiveSlots_def) + apply (wp,simp) + apply (clarsimp simp add: getReceiveSlots_def + split_def whenE_def unlessE_whenE) + apply wp + apply simp + apply (wp hoare_drop_imps)[1] + apply simp + apply (wp lookup_cap_valid')+ + apply simp + done + +declare word_div_1 [simp] +declare word_minus_one_le [simp] +declare word64_minus_one_le [simp] + +lemma loadWordUser_corres': + "\ y < unat max_ipc_words; y' = of_nat y * 8 \ \ + corres (=) \ (valid_ipc_buffer_ptr' a) (load_word_offs a y) (loadWordUser (a + y'))" + apply simp + apply (erule loadWordUser_corres) + done + +declare loadWordUser_inv [wp] + +lemma getExtraCptrs_inv[wp]: + "\P\ getExtraCPtrs buf mi \\rv. P\" + apply (cases mi, cases buf, simp_all add: getExtraCPtrs_def) + apply (wp dmo_inv' mapM_wp' loadWord_inv) + done + +lemma getSlotCap_cte_wp_at_rv: + "\cte_wp_at' (\cte. P (cteCap cte) cte) p\ + getSlotCap p + \\rv. 
cte_wp_at' (P rv) p\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_ctes_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma badge_derived_mask [simp]: + "badge_derived' (maskCapRights R c) c' = badge_derived' c c'" + by (simp add: badge_derived'_def) + +declare derived'_not_Null [simp] + +lemma maskCapRights_vs_cap_ref'[simp]: + "vs_cap_ref' (maskCapRights msk cap) = vs_cap_ref' cap" + unfolding vs_cap_ref'_def + apply (cases cap, simp_all add: maskCapRights_def isCap_simps Let_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; + simp add: maskCapRights_def AARCH64_H.maskCapRights_def isCap_simps Let_def) + done + +lemma corres_set_extra_badge: + "b' = b \ + corres dc (in_user_frame buffer) + (valid_ipc_buffer_ptr' buffer and + (\_. msg_max_length + 2 + n < unat max_ipc_words)) + (set_extra_badge buffer b n) (setExtraBadge buffer b' n)" + apply (rule corres_gen_asm2) + apply (drule storeWordUser_corres [where a=buffer and w=b]) + apply (simp add: set_extra_badge_def setExtraBadge_def buffer_cptr_index_def + bufferCPtrOffset_def Let_def) + apply (simp add: word_size word_size_def wordSize_def wordBits_def + bufferCPtrOffset_def buffer_cptr_index_def msgMaxLength_def + msg_max_length_def msgLengthBits_def store_word_offs_def + add.commute add.left_commute) + done + +crunch typ_at': setExtraBadge "\s. P (typ_at' T p s)" +lemmas setExtraBadge_typ_ats' [wp] = typ_at_lifts [OF setExtraBadge_typ_at'] +crunch valid_pspace' [wp]: setExtraBadge valid_pspace' +crunch cte_wp_at' [wp]: setExtraBadge "cte_wp_at' P p" +crunch ipc_buffer' [wp]: setExtraBadge "valid_ipc_buffer_ptr' buffer" + +crunch inv'[wp]: getExtraCPtr P (wp: dmo_inv' loadWord_inv) + +lemmas unifyFailure_discard2 + = corres_injection[OF id_injection unifyFailure_injection, simplified] + +lemma deriveCap_not_null: + "\\\ deriveCap slot cap \\rv. K (rv \ NullCap \ cap \ NullCap)\,-" + apply (simp add: deriveCap_def split del: if_split) + apply (case_tac cap) + apply (simp_all add: Let_def isCap_simps) + apply wp + apply simp + done + +lemma deriveCap_derived_foo: + "\\s. \cap'. (cte_wp_at' (\cte. badge_derived' cap (cteCap cte) + \ capASID cap = capASID (cteCap cte) \ cap_asid_base' cap = cap_asid_base' (cteCap cte) + \ cap_vptr' cap = cap_vptr' (cteCap cte)) slot s + \ valid_objs' s \ cap' \ NullCap \ cte_wp_at' (is_derived' (ctes_of s) slot cap' \ cteCap) slot s) + \ (cte_wp_at' (untyped_derived_eq cap \ cteCap) slot s + \ cte_wp_at' (untyped_derived_eq cap' \ cteCap) slot s) + \ (s \' cap \ s \' cap') \ (cap' \ NullCap \ cap \ NullCap) \ Q cap' s\ + deriveCap slot cap \Q\,-" + using deriveCap_derived[where slot=slot and c'=cap] deriveCap_valid[where slot=slot and c=cap] + deriveCap_untyped_derived[where slot=slot and c'=cap] deriveCap_not_null[where slot=slot and cap=cap] + apply (clarsimp simp: validE_R_def validE_def valid_def split: sum.split) + apply (frule in_inv_by_hoareD[OF deriveCap_inv]) + apply (clarsimp simp: o_def) + apply (drule spec, erule mp) + apply safe + apply fastforce + apply (drule spec, drule(1) mp) + apply fastforce + apply (drule spec, drule(1) mp) + apply fastforce + apply (drule spec, drule(1) bspec, simp) + done + +lemma valid_mdb_untyped_incD': + "valid_mdb' s \ untyped_inc' (ctes_of s)" + by (simp add: valid_mdb'_def valid_mdb_ctes_def) + +lemma cteInsert_cte_wp_at: + "\\s. cte_wp_at' (\c. is_derived' (ctes_of s) src cap (cteCap c)) src s + \ valid_mdb' s \ valid_objs' s + \ (if p = dest then P cap + else cte_wp_at' (\c. 
P (maskedAsFull (cteCap c) cap)) p s)\ + cteInsert cap src dest + \\uu. cte_wp_at' (\c. P (cteCap c)) p\" + apply (simp add: cteInsert_def) + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp + | clarsimp simp: comp_def + | unfold setUntypedCapAsFull_def)+ + apply (drule cte_at_cte_wp_atD) + apply (elim exE) + apply (rule_tac x=cte in exI) + apply clarsimp + apply (drule cte_at_cte_wp_atD) + apply (elim exE) + apply (rule_tac x=ctea in exI) + apply clarsimp + apply (cases "p=dest") + apply (clarsimp simp: cte_wp_at'_def) + apply (cases "p=src") + apply clarsimp + apply (intro conjI impI) + apply ((clarsimp simp: cte_wp_at'_def maskedAsFull_def split: if_split_asm)+)[2] + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: maskedAsFull_def cte_wp_at_ctes_of split:if_split_asm) + apply (erule disjE) prefer 2 apply simp + apply (clarsimp simp: is_derived'_def isCap_simps) + apply (drule valid_mdb_untyped_incD') + apply (case_tac cte, case_tac cteb, clarsimp) + apply (drule untyped_incD', (simp add: isCap_simps)+) + apply (frule(1) ctes_of_valid'[where p = p]) + apply (clarsimp simp:valid_cap'_def capAligned_def split:if_splits) + apply (drule_tac y ="of_nat fb" in word_plus_mono_right[OF _ is_aligned_no_overflow',rotated]) + apply simp+ + apply (rule word_of_nat_less) + apply simp + apply (simp add:p_assoc_help mask_def) + apply (simp add: max_free_index_def) + apply (clarsimp simp: maskedAsFull_def is_derived'_def badge_derived'_def + isCap_simps capMasterCap_def cte_wp_at_ctes_of + split: if_split_asm capability.splits) + done + +lemma cteInsert_weak_cte_wp_at3: + assumes imp:"\c. P c \ \ isUntypedCap c" + shows " \\s. if p = dest then P cap + else cte_wp_at' (\c. P (cteCap c)) p s\ + cteInsert cap src dest + \\uu. cte_wp_at' (\c. P (cteCap c)) p\" + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp + | clarsimp simp: comp_def cteInsert_def + | unfold setUntypedCapAsFull_def + | auto simp: cte_wp_at'_def dest!: imp)+ + +lemma maskedAsFull_null_cap[simp]: + "(maskedAsFull x y = capability.NullCap) = (x = capability.NullCap)" + "(capability.NullCap = maskedAsFull x y) = (x = capability.NullCap)" + by (case_tac x, auto simp:maskedAsFull_def isCap_simps ) + +lemma maskCapRights_eq_null: + "(RetypeDecls_H.maskCapRights r xa = capability.NullCap) = + (xa = capability.NullCap)" + apply (cases xa; simp add: maskCapRights_def isCap_simps) + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + apply (simp_all add: AARCH64_H.maskCapRights_def isCap_simps) + done + +lemma cte_refs'_maskedAsFull[simp]: + "cte_refs' (maskedAsFull a b) = cte_refs' a" + apply (rule ext)+ + apply (case_tac a) + apply (clarsimp simp:maskedAsFull_def isCap_simps)+ + done + + +lemma transferCapsToSlots_corres: + "\ list_all2 (\(cap, slot) (cap', slot'). cap_relation cap cap' + \ slot' = cte_map slot) caps caps'; + mi' = message_info_map mi \ \ + corres ((=) \ message_info_map) + (\s. valid_objs s \ pspace_aligned s \ pspace_distinct s \ valid_mdb s + \ valid_list s + \ (case ep of Some x \ ep_at x s | _ \ True) + \ (\x \ set slots. cte_wp_at (\cap. cap = cap.NullCap) x s \ + real_cte_at x s) + \ (\(cap, slot) \ set caps. valid_cap cap s \ + cte_wp_at (\cp'. (cap \ cap.NullCap \ cp'\cap \ cp' = masked_as_full cap cap )) slot s ) + \ distinct slots + \ in_user_frame buffer s) + (\s. valid_pspace' s + \ (case ep of Some x \ ep_at' x s | _ \ True) + \ (\x \ set (map cte_map slots). + cte_wp_at' (\cte. 
cteCap cte = NullCap) x s + \ real_cte_at' x s) + \ distinct (map cte_map slots) + \ valid_ipc_buffer_ptr' buffer s + \ (\(cap, slot) \ set caps'. valid_cap' cap s \ + cte_wp_at' (\cte. cap \ NullCap \ cteCap cte \ cap \ cteCap cte = maskedAsFull cap cap) slot s) + \ 2 + msg_max_length + n + length caps' < unat max_ipc_words) + (transfer_caps_loop ep buffer n caps slots mi) + (transferCapsToSlots ep buffer n caps' + (map cte_map slots) mi')" + (is "\ list_all2 ?P caps caps'; ?v \ \ ?corres") +proof (induct caps caps' arbitrary: slots n mi mi' rule: list_all2_induct) + case Nil + show ?case using Nil.prems by (case_tac mi, simp) +next + case (Cons x xs y ys slots n mi mi') + note if_weak_cong[cong] if_cong [cong del] + assume P: "?P x y" + show ?case using Cons.prems P + apply (clarsimp split del: if_split) + apply (simp add: Let_def split_def word_size liftE_bindE + word_bits_conv[symmetric] split del: if_split) + apply (rule corres_const_on_failure) + apply (simp add: dc_def[symmetric] split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_if2) + apply (case_tac "fst x", auto simp add: isCap_simps)[1] + apply (rule corres_split[OF corres_set_extra_badge]) + apply (clarsimp simp: is_cap_simps) + apply (drule conjunct1) + apply simp + apply (rule corres_rel_imp, rule Cons.hyps, simp_all)[1] + apply (case_tac mi, simp) + apply (simp add: split_def) + apply (wp hoare_vcg_const_Ball_lift) + apply (subgoal_tac "obj_ref_of (fst x) = capEPPtr (fst y)") + prefer 2 + apply (clarsimp simp: is_cap_simps) + apply (simp add: split_def) + apply (wp hoare_vcg_const_Ball_lift) + apply (rule_tac P="slots = []" and Q="slots \ []" in corres_disj_division) + apply simp + apply (rule corres_trivial, simp add: returnOk_def) + apply (case_tac mi, simp) + apply (simp add: list_case_If2 split del: if_split) + apply (rule corres_splitEE) + apply (rule unifyFailure_discard2) + apply (case_tac mi, clarsimp) + apply (rule deriveCap_corres) + apply (simp add: remove_rights_def) + apply clarsimp + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply (case_tac cap', auto)[1] + apply (rule corres_trivial, simp) + apply (case_tac mi, simp) + apply simp + apply (simp add: liftE_bindE) + apply (rule corres_split_nor) + apply (rule cteInsert_corres, simp_all add: hd_map)[1] + apply (simp add: tl_map) + apply (rule corres_rel_imp, rule Cons.hyps, simp_all)[1] + apply (wp valid_case_option_post_wp hoare_vcg_const_Ball_lift + hoare_vcg_const_Ball_lift cap_insert_weak_cte_wp_at) + apply (wp hoare_vcg_const_Ball_lift | simp add:split_def del: imp_disj1)+ + apply (wp cap_insert_cte_wp_at) + apply (wp valid_case_option_post_wp hoare_vcg_const_Ball_lift + cteInsert_valid_pspace + | simp add: split_def)+ + apply (wp cteInsert_weak_cte_wp_at hoare_valid_ipc_buffer_ptr_typ_at')+ + apply (wpsimp wp: hoare_vcg_const_Ball_lift cteInsert_cte_wp_at valid_case_option_post_wp + simp: split_def) + apply (unfold whenE_def) + apply wp+ + apply (clarsimp simp: conj_comms ball_conj_distrib split del: if_split) + apply (rule_tac Q' ="\cap' s. (cap'\ cap.NullCap \ + cte_wp_at (is_derived (cdt s) (a, b) cap') (a, b) s + \ QM s cap')" for QM + in hoare_post_imp_R) + prefer 2 + apply clarsimp + apply assumption + apply (subst imp_conjR) + apply (rule hoare_vcg_conj_liftE_R) + apply (rule derive_cap_is_derived) + apply (wp derive_cap_is_derived_foo)+ + apply (simp split del: if_split) + apply (rule_tac Q' ="\cap' s. (cap'\ capability.NullCap \ + cte_wp_at' (\c. 
is_derived' (ctes_of s) (cte_map (a, b)) cap' (cteCap c)) (cte_map (a, b)) s + \ QM s cap')" for QM + in hoare_post_imp_R) + prefer 2 + apply clarsimp + apply assumption + apply (subst imp_conjR) + apply (rule hoare_vcg_conj_liftE_R) + apply (rule hoare_post_imp_R[OF deriveCap_derived]) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (wp deriveCap_derived_foo) + apply (clarsimp simp: cte_wp_at_caps_of_state remove_rights_def + real_cte_tcb_valid if_apply_def2 + split del: if_split) + apply (rule conjI, (clarsimp split del: if_split)+) + apply (clarsimp simp:conj_comms split del:if_split) + apply (intro conjI allI) + apply (clarsimp split:if_splits) + apply (case_tac "cap = fst x",simp+) + apply (clarsimp simp:masked_as_full_def is_cap_simps cap_master_cap_simps) + apply (clarsimp split del: if_split) + apply (intro conjI) + apply (clarsimp simp:neq_Nil_conv) + apply (drule hd_in_set) + apply (drule(1) bspec) + apply (clarsimp split:if_split_asm) + apply (fastforce simp:neq_Nil_conv) + apply (intro ballI conjI) + apply (clarsimp simp:neq_Nil_conv) + apply (intro impI) + apply (drule(1) bspec[OF _ subsetD[rotated]]) + apply (clarsimp simp:neq_Nil_conv) + apply (clarsimp split:if_splits) + apply clarsimp + apply (intro conjI) + apply (drule(1) bspec,clarsimp)+ + subgoal for \ aa _ _ capa + by (case_tac "capa = aa"; clarsimp split:if_splits simp:masked_as_full_def is_cap_simps) + apply (case_tac "isEndpointCap (fst y) \ capEPPtr (fst y) = the ep \ (\y. ep = Some y)") + apply (clarsimp simp:conj_comms split del:if_split) + apply (subst if_not_P) + apply clarsimp + apply (clarsimp simp:valid_pspace'_def cte_wp_at_ctes_of split del:if_split) + apply (intro conjI) + apply (case_tac "cteCap cte = fst y",clarsimp simp: badge_derived'_def) + apply (clarsimp simp: maskCapRights_eq_null maskedAsFull_def badge_derived'_def isCap_simps + split: if_split_asm) + apply (clarsimp split del: if_split) + apply (case_tac "fst y = capability.NullCap") + apply (clarsimp simp: neq_Nil_conv split del: if_split)+ + apply (intro allI impI conjI) + apply (clarsimp split:if_splits) + apply (clarsimp simp:image_def)+ + apply (thin_tac "\x\set ys. Q x" for Q) + apply (drule(1) bspec)+ + apply clarsimp+ + apply (drule(1) bspec) + apply (rule conjI) + apply clarsimp+ + apply (case_tac "cteCap cteb = ab") + by (clarsimp simp: isCap_simps maskedAsFull_def split:if_splits)+ +qed + +declare constOnFailure_wp [wp] + +lemma transferCapsToSlots_pres1[crunch_rules]: + assumes x: "\cap src dest. \P\ cteInsert cap src dest \\rv. P\" + assumes eb: "\b n. \P\ setExtraBadge buffer b n \\_. P\" + shows "\P\ transferCapsToSlots ep buffer n caps slots mi \\rv. P\" + apply (induct caps arbitrary: slots n mi) + apply simp + apply (simp add: Let_def split_def whenE_def + cong: if_cong list.case_cong + split del: if_split) + apply (rule hoare_pre) + apply (wp x eb | assumption | simp split del: if_split | wpc + | wp (once) hoare_drop_imps)+ + done + +lemma cteInsert_cte_cap_to': + "\ex_cte_cap_to' p and cte_wp_at' (\cte. cteCap cte = NullCap) dest\ + cteInsert cap src dest + \\rv. 
ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) + apply (clarsimp simp:cteInsert_def) + apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (rule_tac x = "cref" in exI) + apply (rule conjI) + apply clarsimp+ + done + +declare maskCapRights_eq_null[simp] + +crunch ex_cte_cap_wp_to' [wp]: setExtraBadge "ex_cte_cap_wp_to' P p" + (rule: ex_cte_cap_to'_pres) + +crunch valid_objs' [wp]: setExtraBadge valid_objs' +crunch aligned' [wp]: setExtraBadge pspace_aligned' +crunch distinct' [wp]: setExtraBadge pspace_distinct' + +lemma cteInsert_assume_Null: + "\P\ cteInsert cap src dest \Q\ \ + \\s. cte_wp_at' (\cte. cteCap cte = NullCap) dest s \ P s\ + cteInsert cap src dest + \Q\" + apply (rule hoare_name_pre_state) + apply (erule impCE) + apply (simp add: cteInsert_def) + apply (rule hoare_seq_ext[OF _ getCTE_sp])+ + apply (rule hoare_name_pre_state) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule hoare_pre(1)) + apply simp + done + +crunch mdb'[wp]: setExtraBadge valid_mdb' + +lemma cteInsert_weak_cte_wp_at2: + assumes weak:"\c cap. P (maskedAsFull c cap) = P c" + shows + "\\s. if p = dest then P cap else cte_wp_at' (\c. P (cteCap c)) p s\ + cteInsert cap src dest + \\uu. cte_wp_at' (\c. P (cteCap c)) p\" + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) + apply (clarsimp simp:cteInsert_def) + apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) + apply (clarsimp simp:cte_wp_at_ctes_of weak) + apply auto + done + +lemma transferCapsToSlots_presM: + assumes x: "\cap src dest. \\s. P s \ (emx \ cte_wp_at' (\cte. cteCap cte = NullCap) dest s \ ex_cte_cap_to' dest s) + \ (vo \ valid_objs' s \ valid_cap' cap s \ real_cte_at' dest s) + \ (drv \ cte_wp_at' (is_derived' (ctes_of s) src cap \ cteCap) src s + \ cte_wp_at' (untyped_derived_eq cap o cteCap) src s + \ valid_mdb' s) + \ (pad \ pspace_aligned' s \ pspace_distinct' s)\ + cteInsert cap src dest \\rv. P\" + assumes eb: "\b n. \P\ setExtraBadge buffer b n \\_. P\" + shows "\\s. P s + \ (emx \ (\x \ set slots. ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = NullCap) x s) \ distinct slots) + \ (vo \ valid_objs' s \ (\x \ set slots. real_cte_at' x s \ cte_wp_at' (\cte. cteCap cte = NullCap) x s) + \ (\x \ set caps. s \' fst x ) \ distinct slots) + \ (pad \ pspace_aligned' s \ pspace_distinct' s) + \ (drv \ vo \ pspace_aligned' s \ pspace_distinct' s \ valid_mdb' s + \ length slots \ 1 + \ (\x \ set caps. s \' fst x \ (slots \ [] + \ cte_wp_at' (\cte. fst x \ NullCap \ cteCap cte = fst x) (snd x) s)))\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. 
P\" + apply (induct caps arbitrary: slots n mi) + apply (simp, wp, simp) + apply (simp add: Let_def split_def whenE_def + cong: if_cong list.case_cong split del: if_split) + apply (rule hoare_pre) + apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift + | assumption | wpc)+ + apply (rule cteInsert_assume_Null) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) + apply (rule cteInsert_weak_cte_wp_at2,clarsimp) + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ + apply (rule cteInsert_weak_cte_wp_at2,clarsimp) + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp + deriveCap_derived_foo)+ + apply (thin_tac "\slots. PROP P slots" for P) + apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def + real_cte_tcb_valid if_apply_def2 + split del: if_split) + apply (rule conjI) + apply (clarsimp simp:cte_wp_at_ctes_of untyped_derived_eq_def) + apply (intro conjI allI) + apply (clarsimp simp:Fun.comp_def cte_wp_at_ctes_of)+ + apply (clarsimp simp:valid_capAligned) + done + +lemmas transferCapsToSlots_pres2 + = transferCapsToSlots_presM[where vo=False and emx=True + and drv=False and pad=False, simplified] + +crunch pspace_aligned'[wp]: transferCapsToSlots pspace_aligned' +crunch pspace_distinct'[wp]: transferCapsToSlots pspace_distinct' + +lemma transferCapsToSlots_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ + transferCapsToSlots ep buffer n caps slots mi + \\rv s. P (typ_at' T p s)\" + by (wp transferCapsToSlots_pres1 setExtraBadge_typ_at') + +lemma transferCapsToSlots_valid_objs[wp]: + "\valid_objs' and valid_mdb' and (\s. \x \ set slots. real_cte_at' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + and (\s. \x \ set caps. s \' fst x) and K(distinct slots)\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_objs'\" + apply (rule hoare_pre) + apply (rule transferCapsToSlots_presM[where vo=True and emx=False and drv=False and pad=False]) + apply (wp | simp)+ + done + +abbreviation(input) + "transferCaps_srcs caps s \ \x\set caps. cte_wp_at' (\cte. fst x \ NullCap \ cteCap cte = fst x) (snd x) s" + +lemma transferCapsToSlots_mdb[wp]: + "\\s. valid_pspace' s \ distinct slots + \ length slots \ 1 + \ (\x \ set slots. ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + \ (\x \ set slots. real_cte_at' x s) + \ transferCaps_srcs caps s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_mdb'\" + apply (wp transferCapsToSlots_presM[where drv=True and vo=True and emx=True and pad=True]) + apply clarsimp + apply (frule valid_capAligned) + apply (clarsimp simp: cte_wp_at_ctes_of is_derived'_def badge_derived'_def) + apply wp + apply (clarsimp simp: valid_pspace'_def) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule(1) bspec,clarify) + apply (case_tac cte) + apply (clarsimp dest!:ctes_of_valid_cap' split:if_splits) + apply (fastforce simp:valid_cap'_def) + done + +crunch no_0' [wp]: setExtraBadge no_0_obj' + +lemma transferCapsToSlots_no_0_obj' [wp]: + "\no_0_obj'\ transferCapsToSlots ep buffer n caps slots mi \\rv. no_0_obj'\" + by (wp transferCapsToSlots_pres1) + +lemma transferCapsToSlots_vp[wp]: + "\\s. valid_pspace' s \ distinct slots + \ length slots \ 1 + \ (\x \ set slots. ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + \ (\x \ set slots. real_cte_at' x s) + \ transferCaps_srcs caps s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. 
valid_pspace'\" + apply (rule hoare_pre) + apply (simp add: valid_pspace'_def | wp)+ + apply (fastforce simp: cte_wp_at_ctes_of dest: ctes_of_valid') + done + +crunches setExtraBadge, doIPCTransfer + for sch_act [wp]: "\s. P (ksSchedulerAction s)" + (wp: crunch_wps mapME_wp' simp: zipWithM_x_mapM) +crunches setExtraBadge + for pred_tcb_at' [wp]: "\s. pred_tcb_at' proj P p s" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and obj_at' [wp]: "\s. P' (obj_at' P p s)" + and queues [wp]: "\s. P (ksReadyQueues s)" + and queuesL1 [wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and queuesL2 [wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: storeWordUser_def) + + +lemma tcts_sch_act[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + by (wp sch_act_wf_lift tcb_in_cur_domain'_lift transferCapsToSlots_pres1) + +lemma tcts_vq[wp]: + "\Invariants_H.valid_queues\ transferCapsToSlots ep buffer n caps slots mi \\rv. Invariants_H.valid_queues\" + by (wp valid_queues_lift transferCapsToSlots_pres1) + +lemma tcts_vq'[wp]: + "\valid_queues'\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_queues'\" + by (wp valid_queues_lift' transferCapsToSlots_pres1) + +crunches setExtraBadge + for state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + +lemma tcts_state_refs_of'[wp]: + "\\s. P (state_refs_of' s)\ + transferCapsToSlots ep buffer n caps slots mi + \\rv s. P (state_refs_of' s)\" + by (wp transferCapsToSlots_pres1) + +lemma tcts_state_hyp_refs_of'[wp]: + "transferCapsToSlots ep buffer n caps slots mi \\s. P (state_hyp_refs_of' s)\" + by (wp transferCapsToSlots_pres1) + +crunch if_live' [wp]: setExtraBadge if_live_then_nonz_cap' + +lemma tcts_iflive[wp]: + "\\s. if_live_then_nonz_cap' s \ distinct slots \ + (\x\set slots. + ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s)\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. if_live_then_nonz_cap'\" + by (wp transferCapsToSlots_pres2 | simp)+ + +crunch if_unsafe' [wp]: setExtraBadge if_unsafe_then_cap' + +lemma tcts_ifunsafe[wp]: + "\\s. if_unsafe_then_cap' s \ distinct slots \ + (\x\set slots. cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s \ + ex_cte_cap_to' x s)\ transferCapsToSlots ep buffer n caps slots mi + \\rv. if_unsafe_then_cap'\" + by (wp transferCapsToSlots_pres2 | simp)+ + +crunch valid_idle' [wp]: setExtraBadge valid_idle' + +lemma tcts_idle'[wp]: + "\\s. valid_idle' s\ transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_idle'\" + apply (rule hoare_pre) + apply (wp transferCapsToSlots_pres1) + apply simp + done + +lemma tcts_ct[wp]: + "\cur_tcb'\ transferCapsToSlots ep buffer n caps slots mi \\rv. cur_tcb'\" + by (wp transferCapsToSlots_pres1 cur_tcb_lift) + +crunch valid_arch_state' [wp]: setExtraBadge valid_arch_state' + +lemma transferCapsToSlots_valid_arch [wp]: + "\valid_arch_state'\ transferCapsToSlots ep buffer n caps slots mi \\rv. valid_arch_state'\" + by (rule transferCapsToSlots_pres1; wp) + +crunch valid_global_refs' [wp]: setExtraBadge valid_global_refs' + +lemma transferCapsToSlots_valid_globals [wp]: + "\valid_global_refs' and valid_objs' and valid_mdb' and pspace_distinct' and pspace_aligned' and K (distinct slots) + and K (length slots \ 1) + and (\s. \x \ set slots. real_cte_at' x s \ cte_wp_at' (\cte. 
cteCap cte = capability.NullCap) x s) + and transferCaps_srcs caps\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_global_refs'\" + apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=True] | clarsimp)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (drule(1) bspec,clarsimp) + apply (case_tac cte,clarsimp) + apply (frule(1) CSpace_I.ctes_of_valid_cap') + apply (fastforce simp:valid_cap'_def) + done + +crunch irq_node' [wp]: setExtraBadge "\s. P (irq_node' s)" + +lemma transferCapsToSlots_irq_node'[wp]: + "\\s. P (irq_node' s)\ transferCapsToSlots ep buffer n caps slots mi \\rv s. P (irq_node' s)\" + by (wp transferCapsToSlots_pres1) + +lemma valid_irq_handlers_ctes_ofD: + "\ ctes_of s p = Some cte; cteCap cte = IRQHandlerCap irq; valid_irq_handlers' s \ + \ irq_issued' irq s" + by (auto simp: valid_irq_handlers'_def cteCaps_of_def ran_def) + +crunch valid_irq_handlers' [wp]: setExtraBadge valid_irq_handlers' + +lemma transferCapsToSlots_irq_handlers[wp]: + "\valid_irq_handlers' and valid_objs' and valid_mdb' and pspace_distinct' and pspace_aligned' + and K(distinct slots \ length slots \ 1) + and (\s. \x \ set slots. real_cte_at' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + and transferCaps_srcs caps\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. valid_irq_handlers'\" + apply (wp transferCapsToSlots_presM[where vo=True and emx=False and drv=True and pad=False]) + apply (clarsimp simp: is_derived'_def cte_wp_at_ctes_of badge_derived'_def) + apply (erule(2) valid_irq_handlers_ctes_ofD) + apply wp + apply (clarsimp simp:cte_wp_at_ctes_of | intro ballI conjI)+ + apply (drule(1) bspec,clarsimp) + apply (case_tac cte,clarsimp) + apply (frule(1) CSpace_I.ctes_of_valid_cap') + apply (fastforce simp:valid_cap'_def) + done + +crunch irq_state' [wp]: setExtraBadge "\s. P (ksInterruptState s)" + +lemma setExtraBadge_irq_states'[wp]: + "\valid_irq_states'\ setExtraBadge buffer b n \\_. valid_irq_states'\" + apply (wp valid_irq_states_lift') + apply (simp add: setExtraBadge_def storeWordUser_def) + apply (wpsimp wp: no_irq dmo_lift' no_irq_storeWord) + apply assumption + done + +lemma transferCapsToSlots_irq_states' [wp]: + "\valid_irq_states'\ transferCapsToSlots ep buffer n caps slots mi \\_. valid_irq_states'\" + by (wp transferCapsToSlots_pres1) + +lemma transferCapsToSlots_irqs_masked'[wp]: + "\irqs_masked'\ transferCapsToSlots ep buffer n caps slots mi \\rv. irqs_masked'\" + by (wp transferCapsToSlots_pres1 irqs_masked_lift) + +lemma storeWordUser_vms'[wp]: + "\valid_machine_state'\ storeWordUser a w \\_. valid_machine_state'\" +proof - + have aligned_offset_ignore: + "\(l::machine_word) (p::machine_word) sz. 
l<8 \ p && mask 3 = 0 \ + p+l && ~~ mask pageBits = p && ~~ mask pageBits" + proof - + fix l p sz + assume al: "(p::machine_word) && mask 3 = 0" + assume "(l::machine_word) < 8" hence less: "l<2^3" by simp + have le: "3 \ pageBits" by (simp add: pageBits_def) + show "?thesis l p sz" + by (rule is_aligned_add_helper[simplified is_aligned_mask, + THEN conjunct2, THEN mask_out_first_mask_some, + where n=3, OF al less le]) + qed + + show ?thesis + apply (simp add: valid_machine_state'_def storeWordUser_def + doMachineOp_def split_def) + apply wp + apply clarsimp + apply (drule use_valid) + apply (rule_tac x=p in storeWord_um_inv, simp+) + apply (drule_tac x=p in spec) + apply (erule disjE, simp_all) + apply (erule conjE) + apply (erule disjE, simp) + apply (simp add: pointerInUserData_def word_size) + apply (subgoal_tac "a && ~~ mask pageBits = p && ~~ mask pageBits", simp) + apply (simp only: is_aligned_mask[of _ 3]) + apply (elim disjE, simp_all) + apply (rule aligned_offset_ignore[symmetric], simp+)+ + done +qed + +lemma setExtraBadge_vms'[wp]: + "\valid_machine_state'\ setExtraBadge buffer b n \\_. valid_machine_state'\" +by (simp add: setExtraBadge_def) wp + +lemma transferCapsToSlots_vms[wp]: + "\\s. valid_machine_state' s\ + transferCapsToSlots ep buffer n caps slots mi + \\_ s. valid_machine_state' s\" + by (wp transferCapsToSlots_pres1) + +crunches setExtraBadge, transferCapsToSlots + for pspace_domain_valid[wp]: "pspace_domain_valid" + +crunch ct_not_inQ[wp]: setExtraBadge "ct_not_inQ" + +lemma tcts_ct_not_inQ[wp]: + "\ct_not_inQ\ + transferCapsToSlots ep buffer n caps slots mi + \\_. ct_not_inQ\" + by (wp transferCapsToSlots_pres1) + +crunch gsUntypedZeroRanges[wp]: setExtraBadge "\s. P (gsUntypedZeroRanges s)" +crunch ctes_of[wp]: setExtraBadge "\s. P (ctes_of s)" + +lemma tcts_zero_ranges[wp]: + "\\s. untyped_ranges_zero' s \ valid_pspace' s \ distinct slots + \ (\x \ set slots. ex_cte_cap_to' x s \ cte_wp_at' (\cte. cteCap cte = capability.NullCap) x s) + \ (\x \ set slots. real_cte_at' x s) + \ length slots \ 1 + \ transferCaps_srcs caps s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. untyped_ranges_zero'\" + apply (wp transferCapsToSlots_presM[where emx=True and vo=True + and drv=True and pad=True]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + apply (clarsimp simp: valid_pspace'_def ball_conj_distrib[symmetric]) + apply (drule(1) bspec) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte, clarsimp) + apply (frule(1) ctes_of_valid_cap') + apply auto[1] + done + +crunch ct_idle_or_in_cur_domain'[wp]: setExtraBadge ct_idle_or_in_cur_domain' +crunch ct_idle_or_in_cur_domain'[wp]: transferCapsToSlots ct_idle_or_in_cur_domain' +crunch ksCurDomain[wp]: transferCapsToSlots "\s. P (ksCurDomain s)" +crunch ksDomSchedule[wp]: setExtraBadge "\s. P (ksDomSchedule s)" +crunch ksDomScheduleIdx[wp]: setExtraBadge "\s. P (ksDomScheduleIdx s)" +crunch ksDomSchedule[wp]: transferCapsToSlots "\s. P (ksDomSchedule s)" +crunch ksDomScheduleIdx[wp]: transferCapsToSlots "\s. P (ksDomScheduleIdx s)" + + +lemma transferCapsToSlots_invs[wp]: + "\\s. invs' s \ distinct slots + \ (\x \ set slots. cte_wp_at' (\cte. cteCap cte = NullCap) x s) + \ (\x \ set slots. ex_cte_cap_to' x s) + \ (\x \ set slots. real_cte_at' x s) + \ length slots \ 1 + \ transferCaps_srcs caps s\ + transferCapsToSlots ep buffer n caps slots mi + \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (wp valid_irq_node_lift) + apply fastforce + done + +lemma grs_distinct'[wp]: + "\\\ getReceiveSlots t buf \\rv s. distinct rv\" + apply (cases buf, simp_all add: getReceiveSlots_def + split_def unlessE_def) + apply (wp, simp) + apply (wp | simp only: distinct.simps list.simps empty_iff)+ + apply simp + done + +lemma transferCaps_corres: + "\ info' = message_info_map info; + list_all2 (\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x)) + caps caps' \ + \ + corres ((=) \ message_info_map) + (tcb_at receiver and valid_objs and + pspace_aligned and pspace_distinct and valid_mdb + and valid_list + and (\s. case ep of Some x \ ep_at x s | _ \ True) + and case_option \ in_user_frame recv_buf + and (\s. valid_message_info info) + and transfer_caps_srcs caps) + (tcb_at' receiver and valid_objs' and + pspace_aligned' and pspace_distinct' + and no_0_obj' and valid_mdb' + and (\s. case ep of Some x \ ep_at' x s | _ \ True) + and case_option \ valid_ipc_buffer_ptr' recv_buf + and transferCaps_srcs caps' + and (\s. length caps' \ msgMaxExtraCaps)) + (transfer_caps info caps ep receiver recv_buf) + (transferCaps info' caps' ep receiver recv_buf)" + apply (simp add: transfer_caps_def transferCaps_def + getThreadCSpaceRoot) + apply (rule corres_assume_pre) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getReceiveSlots_corres]) + apply (rule_tac x=recv_buf in option_corres) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply (case_tac info, simp) + apply simp + apply (rule corres_rel_imp, rule transferCapsToSlots_corres, + simp_all add: split_def)[1] + apply (case_tac info, simp) + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp + | simp only: ball_conj_distrib)+ + apply (simp add: cte_map_def tcb_cnode_index_def split_def) + apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 + split_def + cong: option.case_cong) + apply (drule(1) bspec) + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (frule(1) Invariants_AI.caps_of_state_valid) + apply (fastforce simp:valid_cap_def) + apply (cases info) + apply (clarsimp simp: msg_max_extra_caps_def valid_message_info_def + max_ipc_words msg_max_length_def + msgMaxExtraCaps_def msgExtraCapBits_def + shiftL_nat valid_pspace'_def) + apply (drule(1) bspec) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (case_tac cte,clarsimp) + apply (frule(1) ctes_of_valid_cap') + apply (fastforce simp:valid_cap'_def) + done + +crunch typ_at'[wp]: transferCaps "\s. 
P (typ_at' T p s)" + +lemmas transferCaps_typ_ats[wp] = typ_at_lifts [OF transferCaps_typ_at'] + +lemma isIRQControlCap_mask [simp]: + "isIRQControlCap (maskCapRights R c) = isIRQControlCap c" + apply (case_tac c) + apply (clarsimp simp: isCap_simps maskCapRights_def Let_def)+ + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + apply (clarsimp simp: isCap_simps AARCH64_H.maskCapRights_def + maskCapRights_def Let_def)+ + done + +lemma isFrameCap_maskCapRights[simp]: +" isArchCap isFrameCap (RetypeDecls_H.maskCapRights R c) = isArchCap isFrameCap c" + apply (case_tac c; simp add: isCap_simps isArchCap_def maskCapRights_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; simp add: isCap_simps AARCH64_H.maskCapRights_def) + done + +lemma capReplyMaster_mask[simp]: + "isReplyCap c \ capReplyMaster (maskCapRights R c) = capReplyMaster c" + by (clarsimp simp: isCap_simps maskCapRights_def) + +lemma is_derived_mask' [simp]: + "is_derived' m p (maskCapRights R c) = is_derived' m p c" + apply (rule ext) + apply (simp add: is_derived'_def badge_derived'_def) + done + +lemma updateCapData_ordering: + "\ (x, capBadge cap) \ capBadge_ordering P; updateCapData p d cap \ NullCap \ + \ (x, capBadge (updateCapData p d cap)) \ capBadge_ordering P" + apply (cases cap, simp_all add: updateCapData_def isCap_simps Let_def + capBadge_def AARCH64_H.updateCapData_def + split: if_split_asm) + apply fastforce+ + done + +lemma updateCapData_capReplyMaster: + "isReplyCap cap \ capReplyMaster (updateCapData p d cap) = capReplyMaster cap" + by (clarsimp simp: isCap_simps updateCapData_def split del: if_split) + +lemma updateCapData_is_Reply[simp]: + "(updateCapData p d cap = ReplyCap x y z) = (cap = ReplyCap x y z)" + by (rule ccontr, + clarsimp simp: isCap_simps updateCapData_def Let_def + AARCH64_H.updateCapData_def + split del: if_split + split: if_split_asm) + +lemma updateCapDataIRQ: + "updateCapData p d cap \ NullCap \ + isIRQControlCap (updateCapData p d cap) = isIRQControlCap cap" + apply (cases cap, simp_all add: updateCapData_def isCap_simps Let_def + AARCH64_H.updateCapData_def + split: if_split_asm) + done + +lemma updateCapData_vs_cap_ref'[simp]: + "vs_cap_ref' (updateCapData pr D c) = vs_cap_ref' c" + by (rule ccontr, + clarsimp simp: isCap_simps updateCapData_def Let_def + AARCH64_H.updateCapData_def + vs_cap_ref'_def + split del: if_split + split: if_split_asm) + +lemma isFrameCap_updateCapData[simp]: + "isArchCap isFrameCap (updateCapData pr D c) = isArchCap isFrameCap c" + apply (case_tac c; simp add:updateCapData_def isCap_simps isArchCap_def) + apply (rename_tac arch_capability) + apply (case_tac arch_capability; simp add: AARCH64_H.updateCapData_def isCap_simps isArchCap_def) + apply (clarsimp split:capability.splits simp:Let_def) + done + +lemma lookup_cap_to'[wp]: + "\\\ lookupCap t cref \\rv s. \r\cte_refs' rv (irq_node' s). ex_cte_cap_to' r s\,-" + by (simp add: lookupCap_def lookupCapAndSlot_def | wp)+ + +lemma grs_cap_to'[wp]: + "\\\ getReceiveSlots t buf \\rv s. \x \ set rv. ex_cte_cap_to' x s\" + apply (cases buf; simp add: getReceiveSlots_def split_def unlessE_def) + apply (wp, simp) + apply (wp | simp | rule hoare_drop_imps)+ + done + +lemma grs_length'[wp]: + "\\s. 1 \ n\ getReceiveSlots receiver recv_buf \\rv s. 
length rv \ n\" + apply (simp add: getReceiveSlots_def split_def unlessE_def) + apply (rule hoare_pre) + apply (wp | wpc | simp)+ + done + +lemma transferCaps_invs' [wp]: + "\invs' and transferCaps_srcs caps\ + transferCaps mi caps ep receiver recv_buf + \\rv. invs'\" + apply (simp add: transferCaps_def Let_def split_def) + apply (wp get_rs_cte_at' hoare_vcg_const_Ball_lift + | wpcw | clarsimp)+ + done + +lemma get_mrs_inv'[wp]: + "\P\ getMRs t buf info \\rv. P\" + by (simp add: getMRs_def load_word_offs_def getRegister_def + | wp dmo_inv' loadWord_inv mapM_wp' + asUser_inv det_mapM[where S=UNIV] | wpc)+ + + +lemma copyMRs_typ_at': + "\\s. P (typ_at' T p s)\ copyMRs s sb r rb n \\rv s. P (typ_at' T p s)\" + by (simp add: copyMRs_def | wp mapM_wp [where S=UNIV, simplified] | wpc)+ + +lemmas copyMRs_typ_at_lifts[wp] = typ_at_lifts [OF copyMRs_typ_at'] + +lemma copy_mrs_invs'[wp]: + "\ invs' and tcb_at' s and tcb_at' r \ copyMRs s sb r rb n \\rv. invs' \" + including no_pre + apply (simp add: copyMRs_def) + apply (wp dmo_invs' no_irq_mapM no_irq_storeWord| + simp add: split_def) + apply (case_tac sb, simp_all)[1] + apply wp+ + apply (case_tac rb, simp_all)[1] + apply (wp mapM_wp dmo_invs' no_irq_mapM no_irq_storeWord no_irq_loadWord) + apply blast + apply (rule hoare_strengthen_post) + apply (rule mapM_wp) + apply (wp | simp | blast)+ + done + +crunch aligned'[wp]: transferCaps pspace_aligned' + (wp: crunch_wps simp: zipWithM_x_mapM) +crunch distinct'[wp]: transferCaps pspace_distinct' + (wp: crunch_wps simp: zipWithM_x_mapM) + +crunch aligned'[wp]: setMRs pspace_aligned' + (wp: crunch_wps simp: crunch_simps) +crunch distinct'[wp]: setMRs pspace_distinct' + (wp: crunch_wps simp: crunch_simps) +crunch aligned'[wp]: copyMRs pspace_aligned' + (wp: crunch_wps simp: crunch_simps) +crunch distinct'[wp]: copyMRs pspace_distinct' + (wp: crunch_wps simp: crunch_simps) +crunch aligned'[wp]: setMessageInfo pspace_aligned' + (wp: crunch_wps simp: crunch_simps) +crunch distinct'[wp]: setMessageInfo pspace_distinct' + (wp: crunch_wps simp: crunch_simps) + +lemma set_mrs_valid_objs' [wp]: + "\valid_objs'\ setMRs t a msgs \\rv. valid_objs'\" + apply (simp add: setMRs_def zipWithM_x_mapM split_def) + apply (wp asUser_valid_objs crunch_wps) + done + +crunch valid_objs'[wp]: copyMRs valid_objs' + (wp: crunch_wps simp: crunch_simps) + +lemma setMRs_invs_bits[wp]: + "\valid_pspace'\ setMRs t buf mrs \\rv. valid_pspace'\" + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setMRs t buf mrs \\rv s. sch_act_wf (ksSchedulerAction s) s\" + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setMRs t buf mrs \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + "\Invariants_H.valid_queues\ setMRs t buf mrs \\rv. Invariants_H.valid_queues\" + "\valid_queues'\ setMRs t buf mrs \\rv. valid_queues'\" + "\P. setMRs t buf mrs \\s. P (state_refs_of' s)\" + "\P. setMRs t buf mrs \\s. P (state_hyp_refs_of' s)\" + "\if_live_then_nonz_cap'\ setMRs t buf mrs \\rv. if_live_then_nonz_cap'\" + "\ex_nonz_cap_to' p\ setMRs t buf mrs \\rv. ex_nonz_cap_to' p\" + "\cur_tcb'\ setMRs t buf mrs \\rv. cur_tcb'\" + "\if_unsafe_then_cap'\ setMRs t buf mrs \\rv. if_unsafe_then_cap'\" + by (simp add: setMRs_def zipWithM_x_mapM split_def storeWordUser_def | wp crunch_wps)+ + +crunch no_0_obj'[wp]: setMRs no_0_obj' + (wp: crunch_wps simp: crunch_simps) + +lemma copyMRs_invs_bits[wp]: + "\valid_pspace'\ copyMRs s sb r rb n \\rv. valid_pspace'\" + "\\s. sch_act_wf (ksSchedulerAction s) s\ copyMRs s sb r rb n + \\rv s. 
sch_act_wf (ksSchedulerAction s) s\" + "\Invariants_H.valid_queues\ copyMRs s sb r rb n \\rv. Invariants_H.valid_queues\" + "\valid_queues'\ copyMRs s sb r rb n \\rv. valid_queues'\" + "\P. copyMRs s sb r rb n \\s. P (state_refs_of' s)\" + "\P. copyMRs s sb r rb n \\s. P (state_hyp_refs_of' s)\" + "\if_live_then_nonz_cap'\ copyMRs s sb r rb n \\rv. if_live_then_nonz_cap'\" + "\ex_nonz_cap_to' p\ copyMRs s sb r rb n \\rv. ex_nonz_cap_to' p\" + "\cur_tcb'\ copyMRs s sb r rb n \\rv. cur_tcb'\" + "\if_unsafe_then_cap'\ copyMRs s sb r rb n \\rv. if_unsafe_then_cap'\" + by (simp add: copyMRs_def storeWordUser_def | wp mapM_wp' | wpc)+ + +crunch no_0_obj'[wp]: copyMRs no_0_obj' + (wp: crunch_wps simp: crunch_simps) + +lemma mi_map_length[simp]: "msgLength (message_info_map mi) = mi_length mi" + by (cases mi, simp) + +crunch cte_wp_at'[wp]: copyMRs "cte_wp_at' P p" + (wp: crunch_wps) + +lemma lookupExtraCaps_srcs[wp]: + "\\\ lookupExtraCaps thread buf info \transferCaps_srcs\,-" + apply (simp add: lookupExtraCaps_def lookupCapAndSlot_def + split_def lookupSlotForThread_def + getSlotCap_def) + apply (wp mapME_set[where R=\] getCTE_wp') + apply (rule_tac P=\ in hoare_trivE_R) + apply (simp add: cte_wp_at_ctes_of) + apply (wp | simp)+ + done + +crunch inv[wp]: lookupExtraCaps "P" + (wp: crunch_wps mapME_wp' simp: crunch_simps) + +lemma invs_mdb_strengthen': + "invs' s \ valid_mdb' s" by auto + +lemma lookupExtraCaps_length: + "\\s. unat (msgExtraCaps mi) \ n\ lookupExtraCaps thread send_buf mi \\rv s. length rv \ n\,-" + apply (simp add: lookupExtraCaps_def getExtraCPtrs_def) + apply (rule hoare_pre) + apply (wp mapME_length | wpc)+ + apply (clarsimp simp: upto_enum_step_def Suc_unat_diff_1 word_le_sub1) + done + +lemma getMessageInfo_msgExtraCaps[wp]: + "\\\ getMessageInfo t \\rv s. unat (msgExtraCaps rv) \ msgMaxExtraCaps\" + apply (simp add: getMessageInfo_def) + apply wp + apply (simp add: messageInfoFromWord_def Let_def msgMaxExtraCaps_def + shiftL_nat) + apply (subst nat_le_Suc_less_imp) + apply (rule unat_less_power) + apply (simp add: word_bits_def msgExtraCapBits_def) + apply (rule and_mask_less'[unfolded mask_2pm1]) + apply (simp add: msgExtraCapBits_def) + apply wpsimp+ + done + +lemma lookupCapAndSlot_corres: + "cptr = to_bl cptr' \ + corres (lfr \ (\a b. cap_relation (fst a) (fst b) \ snd b = cte_map (snd a))) + (valid_objs and pspace_aligned and tcb_at thread) + (valid_objs' and pspace_distinct' and pspace_aligned' and tcb_at' thread) + (lookup_cap_and_slot thread cptr) (lookupCapAndSlot thread cptr')" + unfolding lookup_cap_and_slot_def lookupCapAndSlot_def + apply (simp add: liftE_bindE split_def) + apply (rule corres_guard_imp) + apply (rule_tac r'="\rv rv'. rv' = cte_map (fst rv)" + in corres_splitEE) + apply (rule corres_rel_imp, rule lookupSlotForThread_corres) + apply (simp add: split_def) + apply (rule corres_split[OF getSlotCap_corres]) + apply simp + apply (rule corres_returnOkTT, simp) + apply wp+ + apply (wp | simp add: liftE_bindE[symmetric])+ + done + +lemma lookupExtraCaps_corres: + "\ info' = message_info_map info; buffer = buffer'\ \ + corres (fr \ list_all2 (\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x))) + (valid_objs and pspace_aligned and tcb_at thread and (\_. 
valid_message_info info)) + (valid_objs' and pspace_distinct' and pspace_aligned' and tcb_at' thread + and case_option \ valid_ipc_buffer_ptr' buffer') + (lookup_extra_caps thread buffer info) (lookupExtraCaps thread buffer' info')" + unfolding lookupExtraCaps_def lookup_extra_caps_def + apply (rule corres_gen_asm) + apply (cases "mi_extra_caps info = 0") + apply (cases info) + apply (simp add: Let_def returnOk_def getExtraCPtrs_def + liftE_bindE upto_enum_step_def mapM_def + sequence_def doMachineOp_return mapME_Nil + split: option.split) + apply (cases info) + apply (rename_tac w1 w2 w3 w4) + apply (simp add: Let_def liftE_bindE) + apply (cases buffer') + apply (simp add: getExtraCPtrs_def mapME_Nil) + apply (rule corres_returnOk) + apply simp + apply (simp add: msgLengthBits_def msgMaxLength_def word_size field_simps + getExtraCPtrs_def upto_enum_step_def upto_enum_word + word_size_def msg_max_length_def liftM_def + Suc_unat_diff_1 word_le_sub1 mapM_map_simp + upt_lhs_sub_map[where x=buffer_cptr_index] + wordSize_def wordBits_def + del: upt.simps) + apply (rule corres_guard_imp) + apply (rule corres_underlying_split) + + apply (rule_tac S = "\x y. x = y \ x < unat w2" + in corres_mapM_list_all2 + [where Q = "\_. valid_objs and pspace_aligned and tcb_at thread" and r = "(=)" + and Q' = "\_. valid_objs' and pspace_aligned' and pspace_distinct' and tcb_at' thread + and case_option \ valid_ipc_buffer_ptr' buffer'" and r'="(=)" ]) + apply simp + apply simp + apply simp + apply (rule corres_guard_imp) + apply (rule loadWordUser_corres') + apply (clarsimp simp: buffer_cptr_index_def msg_max_length_def + max_ipc_words valid_message_info_def + msg_max_extra_caps_def word_le_nat_alt) + apply (simp add: buffer_cptr_index_def msg_max_length_def) + apply simp + apply simp + apply (simp add: load_word_offs_word_def) + apply (wp | simp)+ + apply (subst list_all2_same) + apply (clarsimp simp: max_ipc_words field_simps) + apply (simp add: mapME_def, fold mapME_def)[1] + apply (rule corres_mapME [where S = Id and r'="(\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x))"]) + apply simp + apply simp + apply simp + apply (rule corres_cap_fault [OF lookupCapAndSlot_corres]) + apply simp + apply simp + apply (wp | simp)+ + apply (simp add: set_zip_same Int_lower1) + apply (wp mapM_wp [OF _ subset_refl] | simp)+ + done + +crunch ctes_of[wp]: copyMRs "\s. P (ctes_of s)" + (ignore: threadSet + wp: threadSet_ctes_of crunch_wps) + +lemma copyMRs_valid_mdb[wp]: + "\valid_mdb'\ copyMRs t buf t' buf' n \\rv. valid_mdb'\" + by (simp add: valid_mdb'_def copyMRs_ctes_of) + +lemma doNormalTransfer_corres: + "corres dc + (tcb_at sender and tcb_at receiver and (pspace_aligned:: det_state \ bool) + and valid_objs and cur_tcb and valid_mdb and valid_list and pspace_distinct + and (\s. case ep of Some x \ ep_at x s | _ \ True) + and case_option \ in_user_frame send_buf + and case_option \ in_user_frame recv_buf) + (tcb_at' sender and tcb_at' receiver and valid_objs' + and pspace_aligned' and pspace_distinct' and cur_tcb' + and valid_mdb' and no_0_obj' + and (\s. 
case ep of Some x \ ep_at' x s | _ \ True) + and case_option \ valid_ipc_buffer_ptr' send_buf + and case_option \ valid_ipc_buffer_ptr' recv_buf) + (do_normal_transfer sender send_buf ep badge can_grant receiver recv_buf) + (doNormalTransfer sender send_buf ep badge can_grant receiver recv_buf)" + apply (simp add: do_normal_transfer_def doNormalTransfer_def) + apply (rule corres_guard_imp) + apply (rule corres_split_mapr[OF getMessageInfo_corres]) + apply (rule_tac F="valid_message_info mi" in corres_gen_asm) + apply (rule_tac r'="list_all2 (\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x))" + in corres_split) + apply (rule corres_if[OF refl]) + apply (rule corres_split_catch) + apply (rule lookupExtraCaps_corres; simp) + apply (rule corres_trivial, simp) + apply wp+ + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_split_eqr[OF copyMRs_corres]) + apply (rule corres_split) + apply (rule transferCaps_corres; simp) + apply (rename_tac mi' mi'') + apply (rule_tac F="mi_label mi' = mi_label mi" + in corres_gen_asm) + apply (rule corres_split_nor[OF setMessageInfo_corres]) + apply (case_tac mi', clarsimp) + apply (simp add: badge_register_def badgeRegister_def) + apply (fold dc_def) + apply (rule asUser_setRegister_corres) + apply wp + apply simp+ + apply ((wp valid_case_option_post_wp hoare_vcg_const_Ball_lift + hoare_case_option_wp + hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' + hoare_vcg_const_Ball_lift lookupExtraCaps_length + | simp add: if_apply_def2)+) + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ + apply clarsimp + apply auto + done + +lemma corres_liftE_lift: + "corres r1 P P' m m' \ + corres (f1 \ r1) P P' (liftE m) (withoutFailure m')" + by simp + +lemmas corres_ipc_thread_helper = + corres_split_eqrE[OF corres_liftE_lift [OF getCurThread_corres]] + +lemmas corres_ipc_info_helper = + corres_split_maprE [where f = message_info_map, OF _ + corres_liftE_lift [OF getMessageInfo_corres]] + +crunch typ_at'[wp]: doNormalTransfer "\s. P (typ_at' T p s)" + +lemmas doNormal_lifts[wp] = typ_at_lifts [OF doNormalTransfer_typ_at'] + +lemma doNormal_invs'[wp]: + "\tcb_at' sender and tcb_at' receiver and invs'\ + doNormalTransfer sender send_buf ep badge + can_grant receiver recv_buf \\r. invs'\" + apply (simp add: doNormalTransfer_def) + apply (wp hoare_vcg_const_Ball_lift | simp)+ + done + +crunch aligned'[wp]: doNormalTransfer pspace_aligned' + (wp: crunch_wps) +crunch distinct'[wp]: doNormalTransfer pspace_distinct' + (wp: crunch_wps) + +lemma transferCaps_urz[wp]: + "\untyped_ranges_zero' and valid_pspace' + and (\s. (\x\set caps. cte_wp_at' (\cte. fst x \ capability.NullCap \ cteCap cte = fst x) (snd x) s))\ + transferCaps tag caps ep receiver recv_buf + \\r. untyped_ranges_zero'\" + apply (simp add: transferCaps_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift hoare_vcg_const_imp_lift + | wpc + | simp add: ball_conj_distrib)+ + apply clarsimp + done + +crunch gsUntypedZeroRanges[wp]: doNormalTransfer "\s. 
P (gsUntypedZeroRanges s)" + (wp: crunch_wps transferCapsToSlots_pres1 ignore: constOnFailure) + +lemmas asUser_urz = untyped_ranges_zero_lift[OF asUser_gsUntypedZeroRanges] + +crunch urz[wp]: doNormalTransfer "untyped_ranges_zero'" + (ignore: asUser wp: crunch_wps asUser_urz hoare_vcg_const_Ball_lift) + +lemma msgFromLookupFailure_map[simp]: + "msgFromLookupFailure (lookup_failure_map lf) + = msg_from_lookup_failure lf" + by (cases lf, simp_all add: lookup_failure_map_def msgFromLookupFailure_def) + +lemma asUser_getRestartPC_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t getRestartPC) (asUser t getRestartPC)" + apply (rule asUser_corres') + apply (rule corres_Id, simp, simp) + apply (rule no_fail_getRestartPC) + done + +lemma asUser_mapM_getRegister_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (mapM getRegister regs)) + (asUser t (mapM getRegister regs))" + apply (rule asUser_corres') + apply (rule corres_Id [OF refl refl]) + apply (rule no_fail_mapM) + apply (simp add: getRegister_def) + done + +lemma makeArchFaultMessage_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (make_arch_fault_msg f t) + (makeArchFaultMessage (arch_fault_map f) t)" + apply (cases f; clarsimp simp: makeArchFaultMessage_def ucast_nat_def split: arch_fault.split) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) + apply (rule corres_trivial, simp) + apply (wp+, auto) + done + +lemma makeFaultMessage_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (make_fault_msg ft t) + (makeFaultMessage (fault_map ft) t)" + apply (cases ft, simp_all add: makeFaultMessage_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) + apply (rule corres_trivial, simp add: fromEnum_def enum_bool) + apply (wp | simp)+ + apply (simp add: AARCH64_H.syscallMessage_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_mapM_getRegister_corres]) + apply (rule corres_trivial, simp) + apply (wp | simp)+ + apply (simp add: AARCH64_H.exceptionMessage_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_mapM_getRegister_corres]) + apply (rule corres_trivial, simp) + apply (wp | simp)+ + apply (rule makeArchFaultMessage_corres) + done + +lemma makeFaultMessage_inv[wp]: + "\P\ makeFaultMessage ft t \\rv. P\" + apply (cases ft, simp_all add: makeFaultMessage_def) + apply (wp asUser_inv mapM_wp' det_mapM[where S=UNIV] + det_getRestartPC getRestartPC_inv + | clarsimp simp: getRegister_def makeArchFaultMessage_def + split: arch_fault.split)+ + done + +lemmas threadget_fault_corres = + threadGet_corres [where r = fault_rel_optionation + and f = tcb_fault and f' = tcbFault, + simplified tcb_relation_def, simplified] + +lemma make_fault_msg_in_user_frame[wp]: + "make_fault_msg f t \in_user_frame p\" + supply if_split[split del] + apply (cases f; wpsimp) + apply (rename_tac af; case_tac af; wpsimp) + done + +lemma doFaultTransfer_corres: + "corres dc + (obj_at (\ko. \tcb ft. 
ko = TCB tcb \ tcb_fault tcb = Some ft) sender + and tcb_at receiver and case_option \ in_user_frame recv_buf + and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' recv_buf) + (do_fault_transfer badge sender receiver recv_buf) + (doFaultTransfer badge sender receiver recv_buf)" + apply (clarsimp simp: do_fault_transfer_def doFaultTransfer_def split_def + AARCH64_H.badgeRegister_def badge_register_def) + apply (rule_tac Q="\fault. K (\f. fault = Some f) and + tcb_at sender and tcb_at receiver and + case_option \ in_user_frame recv_buf and + pspace_aligned and pspace_distinct" + and Q'="\fault'. case_option \ valid_ipc_buffer_ptr' recv_buf" + in corres_underlying_split) + apply (rule corres_guard_imp) + apply (rule threadget_fault_corres) + apply (clarsimp simp: obj_at_def is_tcb)+ + apply (rule corres_assume_pre) + apply (fold assert_opt_def | unfold haskell_fail_def)+ + apply (rule corres_assert_opt_assume) + apply (clarsimp split: option.splits + simp: fault_rel_optionation_def assert_opt_def + map_option_case) + defer + defer + apply (clarsimp simp: fault_rel_optionation_def) + apply (wp thread_get_wp) + apply (clarsimp simp: obj_at_def is_tcb) + apply wp + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF makeFaultMessage_corres]) + apply (rule corres_split_eqr[OF setMRs_corres [OF refl]]) + apply (rule corres_split_nor[OF setMessageInfo_corres]) + apply simp + apply (rule asUser_setRegister_corres) + apply (wp | simp)+ + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF makeFaultMessage_corres]) + apply (rule corres_split_eqr[OF setMRs_corres [OF refl]]) + apply (rule corres_split_nor[OF setMessageInfo_corres]) + apply simp + apply (rule asUser_setRegister_corres) + apply (wp | simp)+ + done + +lemma doFaultTransfer_invs[wp]: + "\invs' and tcb_at' receiver\ + doFaultTransfer badge sender receiver recv_buf + \\rv. 
invs'\" + by (simp add: doFaultTransfer_def split_def | wp + | clarsimp split: option.split)+ + +lemma lookupIPCBuffer_valid_ipc_buffer [wp]: + "\valid_objs'\ VSpace_H.lookupIPCBuffer b s \case_option \ valid_ipc_buffer_ptr'\" + unfolding lookupIPCBuffer_def AARCH64_H.lookupIPCBuffer_def + apply (simp add: Let_def getSlotCap_def getThreadBufferSlot_def + locateSlot_conv threadGet_def comp_def) + apply (wp getCTE_wp getObject_tcb_wp | wpc)+ + apply (clarsimp simp del: imp_disjL) + apply (drule obj_at_ko_at') + apply (clarsimp simp del: imp_disjL) + apply (rule_tac x = ko in exI) + apply (frule ko_at_cte_ipcbuffer) + apply (clarsimp simp: cte_wp_at_ctes_of simp del: imp_disjL) + apply (rename_tac ref rg sz d m) + apply (clarsimp simp: valid_ipc_buffer_ptr'_def) + apply (frule (1) ko_at_valid_objs') + apply (clarsimp simp: projectKO_opts_defs split: kernel_object.split_asm) + apply (clarsimp simp add: valid_obj'_def valid_tcb'_def + isCap_simps cte_level_bits_def field_simps) + apply (drule bspec [OF _ ranI [where a = "4 << cteSizeBits"]]) + apply (simp add: cteSizeBits_def) + apply (clarsimp simp add: valid_cap'_def frame_at'_def) + apply (rule conjI) + apply (rule aligned_add_aligned) + apply (clarsimp simp add: capAligned_def) + apply assumption + apply (erule is_aligned_andI1) + apply (rule order_trans[rotated]) + apply (rule pbfs_atleast_pageBits) + apply (simp add: bit_simps msg_align_bits) + apply (clarsimp simp: capAligned_def) + apply (drule_tac x = "(tcbIPCBuffer ko && mask (pageBitsForSize sz)) >> pageBits" in spec) + apply (simp add: shiftr_shiftl1 ) + apply (subst (asm) mask_out_add_aligned) + apply (erule is_aligned_weaken [OF _ pbfs_atleast_pageBits]) + apply (erule mp) + apply (rule shiftr_less_t2n) + apply (clarsimp simp: pbfs_atleast_pageBits) + apply (rule and_mask_less') + apply (simp add: word_bits_conv pbfs_less_wb'[unfolded word_bits_conv]) + done + +lemma doIPCTransfer_corres: + "corres dc + (tcb_at s and tcb_at r and valid_objs and pspace_aligned + and valid_list + and pspace_distinct and valid_mdb and cur_tcb + and (\s. case ep of Some x \ ep_at x s | _ \ True)) + (tcb_at' s and tcb_at' r and valid_pspace' and cur_tcb' + and (\s. case ep of Some x \ ep_at' x s | _ \ True)) + (do_ipc_transfer s ep bg grt r) + (doIPCTransfer s ep bg grt r)" + apply (simp add: do_ipc_transfer_def doIPCTransfer_def) + apply (rule_tac Q="%receiveBuffer sa. tcb_at s sa \ valid_objs sa \ + pspace_aligned sa \ tcb_at r sa \ + cur_tcb sa \ valid_mdb sa \ valid_list sa \ pspace_distinct sa \ + (case ep of None \ True | Some x \ ep_at x sa) \ + case_option (\_. True) in_user_frame receiveBuffer sa \ + obj_at (\ko. \tcb. ko = TCB tcb + \ \\ft. 
tcb_fault tcb = Some ft\) s sa" + in corres_underlying_split) + apply (rule corres_guard_imp) + apply (rule lookupIPCBuffer_corres') + apply auto[2] + apply (rule corres_underlying_split [OF _ _ thread_get_sp threadGet_inv]) + apply (rule corres_guard_imp) + apply (rule threadget_fault_corres) + apply simp + defer + apply (rule corres_guard_imp) + apply (subst case_option_If)+ + apply (rule corres_if2) + apply (simp add: fault_rel_optionation_def) + apply (rule corres_split_eqr[OF lookupIPCBuffer_corres']) + apply (simp add: dc_def[symmetric]) + apply (rule doNormalTransfer_corres) + apply (wp | simp add: valid_pspace'_def)+ + apply (simp add: dc_def[symmetric]) + apply (rule doFaultTransfer_corres) + apply (clarsimp simp: obj_at_def) + apply (erule ignore_if) + apply (wp|simp add: obj_at_def is_tcb valid_pspace'_def)+ + done + + +crunch ifunsafe[wp]: doIPCTransfer "if_unsafe_then_cap'" + (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' ignore: transferCapsToSlots + simp: zipWithM_x_mapM ball_conj_distrib ) +crunch iflive[wp]: doIPCTransfer "if_live_then_nonz_cap'" + (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' ignore: transferCapsToSlots + simp: zipWithM_x_mapM ball_conj_distrib ) +crunch vp[wp]: doIPCTransfer "valid_pspace'" + (wp: crunch_wps hoare_vcg_const_Ball_lift get_rs_cte_at' wp: transferCapsToSlots_vp simp:ball_conj_distrib ) +crunch sch_act_wf[wp]: doIPCTransfer "\s. sch_act_wf (ksSchedulerAction s) s" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch vq[wp]: doIPCTransfer "Invariants_H.valid_queues" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch vq'[wp]: doIPCTransfer "valid_queues'" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch state_refs_of[wp]: doIPCTransfer "\s. P (state_refs_of' s)" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch state_hyp_refs_of[wp]: doIPCTransfer "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch ct[wp]: doIPCTransfer "cur_tcb'" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) +crunch idle'[wp]: doIPCTransfer "valid_idle'" + (wp: crunch_wps get_rs_cte_at' ignore: transferCapsToSlots simp: zipWithM_x_mapM) + +crunch typ_at'[wp]: doIPCTransfer "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: zipWithM_x_mapM) +lemmas dit'_typ_ats[wp] = typ_at_lifts [OF doIPCTransfer_typ_at'] + +crunch irq_node'[wp]: doIPCTransfer "\s. P (irq_node' s)" + (wp: crunch_wps simp: crunch_simps) + +lemmas dit_irq_node'[wp] + = valid_irq_node_lift [OF doIPCTransfer_irq_node' doIPCTransfer_typ_at'] + +crunch valid_arch_state'[wp]: doIPCTransfer "valid_arch_state'" + (wp: crunch_wps simp: crunch_simps) + +(* Levity: added (20090126 19:32:26) *) +declare asUser_global_refs' [wp] + +lemma lec_valid_cap' [wp]: + "\valid_objs'\ lookupExtraCaps thread xa mi \\rv s. (\x\set rv. s \' fst x)\, -" + apply (rule hoare_pre, rule hoare_post_imp_R) + apply (rule hoare_vcg_conj_lift_R[where R=valid_objs' and S="\_. 
valid_objs'"]) + apply (rule lookupExtraCaps_srcs) + apply wp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply fastforce + apply simp + done + +crunch objs'[wp]: doIPCTransfer "valid_objs'" + ( wp: crunch_wps hoare_vcg_const_Ball_lift + transferCapsToSlots_valid_objs + simp: zipWithM_x_mapM ball_conj_distrib ) + +crunch global_refs'[wp]: doIPCTransfer "valid_global_refs'" + (wp: crunch_wps hoare_vcg_const_Ball_lift threadSet_global_refsT + transferCapsToSlots_valid_globals + simp: zipWithM_x_mapM ball_conj_distrib) + +declare asUser_irq_handlers' [wp] + +crunch irq_handlers'[wp]: doIPCTransfer "valid_irq_handlers'" + (wp: crunch_wps hoare_vcg_const_Ball_lift threadSet_irq_handlers' + transferCapsToSlots_irq_handlers + simp: zipWithM_x_mapM ball_conj_distrib ) + +crunch irq_states'[wp]: doIPCTransfer "valid_irq_states'" + (wp: crunch_wps no_irq no_irq_mapM no_irq_storeWord no_irq_loadWord + no_irq_case_option simp: crunch_simps zipWithM_x_mapM) + +crunch irqs_masked'[wp]: doIPCTransfer "irqs_masked'" + (wp: crunch_wps simp: crunch_simps rule: irqs_masked_lift) + +lemma doIPCTransfer_invs[wp]: + "\invs' and tcb_at' s and tcb_at' r\ + doIPCTransfer s ep bg grt r + \\rv. invs'\" + apply (simp add: doIPCTransfer_def) + apply (wpsimp wp: hoare_drop_imp) + done + + +lemma arch_getSanitiseRegisterInfo_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (arch_get_sanitise_register_info t) + (getSanitiseRegisterInfo t)" + unfolding arch_get_sanitise_register_info_def getSanitiseRegisterInfo_def + apply (fold archThreadGet_def) + apply corres + done + +crunch tcb_at'[wp]: getSanitiseRegisterInfo "tcb_at' t" + +crunches arch_get_sanitise_register_info + for pspace_distinct[wp]: pspace_distinct + and pspace_aligned[wp]: pspace_aligned + +lemma handle_fault_reply_registers_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (do t' \ arch_get_sanitise_register_info t; + y \ as_user t + (zipWithM_x + (\r v. setRegister r + (sanitise_register t' r v)) + msg_template msg); + return (label = 0) + od) + (do t' \ getSanitiseRegisterInfo t; + y \ asUser t + (zipWithM_x + (\r v. setRegister r (sanitiseRegister t' r v)) + msg_template msg); + return (label = 0) + od)" + apply (rule corres_guard_imp) + apply (rule corres_split[OF arch_getSanitiseRegisterInfo_corres]) + apply (rule corres_split) + apply (rule asUser_corres') + apply(simp add: setRegister_def sanitise_register_def + sanitiseRegister_def syscallMessage_def Let_def cong: register.case_cong) + apply(subst zipWithM_x_modify)+ + apply(rule corres_modify') + apply (simp|wp)+ + done + +lemma handleFaultReply_corres: + "ft' = fault_map ft \ + corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (handle_fault_reply ft t label msg) + (handleFaultReply ft' t label msg)" + apply (cases ft) + apply(simp_all add: handleFaultReply_def + handle_arch_fault_reply_def handleArchFaultReply_def + syscallMessage_def exceptionMessage_def + split: arch_fault.split) + by (rule handle_fault_reply_registers_corres)+ + +crunch typ_at'[wp]: handleFaultReply "\s. P (typ_at' T p s)" + +lemmas hfr_typ_ats[wp] = typ_at_lifts [OF handleFaultReply_typ_at'] + +crunch ct'[wp]: handleFaultReply "\s. P (ksCurThread s)" + +lemma doIPCTransfer_sch_act_simple [wp]: + "\sch_act_simple\ doIPCTransfer sender endpoint badge grant receiver \\_. sch_act_simple\" + by (simp add: sch_act_simple_def, wp) + +lemma possibleSwitchTo_invs'[wp]: + "\invs' and st_tcb_at' runnable' t + and (\s. 
ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + possibleSwitchTo t \\_. invs'\" + apply (simp add: possibleSwitchTo_def curDomain_def) + apply (wp tcbSchedEnqueue_invs' ssa_invs') + apply (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt]) + apply (wpsimp wp: ssa_invs' threadGet_wp)+ + apply (clarsimp dest!: obj_at_ko_at' simp: tcb_in_cur_domain'_def obj_at'_def) + done + +crunch cur' [wp]: isFinalCapability "\s. P (cur_tcb' s)" + (simp: crunch_simps unless_when + wp: crunch_wps getObject_inv loadObject_default_inv) + +crunch ct' [wp]: deleteCallerCap "\s. P (ksCurThread s)" + (simp: crunch_simps unless_when + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma getThreadCallerSlot_inv: + "\P\ getThreadCallerSlot t \\_. P\" + by (simp add: getThreadCallerSlot_def, wp) + +lemma deleteCallerCap_ct_not_ksQ: + "\invs' and ct_in_state' simple' and sch_act_sane + and (\s. ksCurThread s \ set (ksReadyQueues s p))\ + deleteCallerCap t + \\rv s. ksCurThread s \ set (ksReadyQueues s p)\" + apply (simp add: deleteCallerCap_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) + apply (wp getThreadCallerSlot_inv cteDeleteOne_ct_not_ksQ getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma finaliseCapTrue_standin_tcb_at' [wp]: + "\tcb_at' x\ finaliseCapTrue_standin cap v2 \\_. tcb_at' x\" + apply (simp add: finaliseCapTrue_standin_def Let_def) + apply (safe) + apply (wp getObject_ntfn_inv + | wpc + | simp)+ + done + +lemma finaliseCapTrue_standin_cur': + "\\s. cur_tcb' s\ finaliseCapTrue_standin cap v2 \\_ s'. cur_tcb' s'\" + apply (simp add: cur_tcb'_def) + apply (rule hoare_lift_Pf2 [OF _ finaliseCapTrue_standin_ct']) + apply (wp) + done + +lemma cteDeleteOne_cur' [wp]: + "\\s. cur_tcb' s\ cteDeleteOne slot \\_ s'. cur_tcb' s'\" + apply (simp add: cteDeleteOne_def unless_def when_def) + apply (wp hoare_drop_imps finaliseCapTrue_standin_cur' isFinalCapability_cur' + | simp add: split_def | wp (once) cur_tcb_lift)+ + done + +lemma handleFaultReply_cur' [wp]: + "\\s. cur_tcb' s\ handleFaultReply x0 thread label msg \\_ s'. cur_tcb' s'\" + apply (clarsimp simp add: cur_tcb'_def) + apply (rule hoare_lift_Pf2 [OF _ handleFaultReply_ct']) + apply (wp) + done + +lemma capClass_Reply: + "capClass cap = ReplyClass tcb \ isReplyCap cap \ capTCBPtr cap = tcb" + apply (cases cap, simp_all add: isCap_simps) + apply (rename_tac arch_capability) + apply (case_tac arch_capability, simp_all) + done + +lemma reply_cap_end_mdb_chain: + "\ cte_wp_at (is_reply_cap_to t) slot s; invs s; + invs' s'; + (s, s') \ state_relation; ctes_of s' (cte_map slot) = Some cte \ + \ (mdbPrev (cteMDBNode cte) \ nullPointer + \ mdbNext (cteMDBNode cte) = nullPointer) + \ cte_wp_at' (\cte. isReplyCap (cteCap cte) \ capReplyMaster (cteCap cte)) + (mdbPrev (cteMDBNode cte)) s'" + apply (clarsimp simp only: cte_wp_at_reply_cap_to_ex_rights) + apply (frule(1) pspace_relation_ctes_ofI[OF state_relation_pspace_relation], + clarsimp+) + apply (subgoal_tac "\slot' rights'. caps_of_state s slot' = Some (cap.ReplyCap t True rights') + \ descendants_of slot' (cdt s) = {slot}") + apply (elim state_relationE exE) + apply (clarsimp simp: cdt_relation_def + simp del: split_paired_All) + apply (drule spec, drule(1) mp[OF _ caps_of_state_cte_at]) + apply (frule(1) pspace_relation_cte_wp_at[OF _ caps_of_state_cteD], + clarsimp+) + apply (clarsimp simp: descendants_of'_def cte_wp_at_ctes_of) + apply (frule_tac f="\S. 
cte_map slot \ S" in arg_cong, simp(no_asm_use)) + apply (frule invs_mdb'[unfolded valid_mdb'_def]) + apply (rule context_conjI) + apply (clarsimp simp: nullPointer_def valid_mdb_ctes_def) + apply (erule(4) subtree_prev_0) + apply (rule conjI) + apply (rule ccontr) + apply (frule valid_mdb_no_loops, simp add: no_loops_def) + apply (drule_tac x="cte_map slot" in spec) + apply (erule notE, rule r_into_trancl, rule ccontr) + apply (clarsimp simp: mdb_next_unfold valid_mdb_ctes_def nullPointer_def) + apply (rule valid_dlistEn, assumption+) + apply (subgoal_tac "ctes_of s' \ cte_map slot \ mdbNext (cteMDBNode cte)") + apply (frule(3) class_linksD) + apply (clarsimp simp: isCap_simps dest!: capClass_Reply[OF sym]) + apply (drule_tac f="\S. mdbNext (cteMDBNode cte) \ S" in arg_cong) + apply (simp, erule notE, rule subtree.trans_parent, assumption+) + apply (case_tac ctea, case_tac cte') + apply (clarsimp simp add: parentOf_def isMDBParentOf_CTE) + apply (simp add: sameRegionAs_def2 isCap_simps) + apply (erule subtree.cases) + apply (clarsimp simp: parentOf_def isMDBParentOf_CTE) + apply (clarsimp simp: parentOf_def isMDBParentOf_CTE) + apply (simp add: mdb_next_unfold) + apply (erule subtree.cases) + apply (clarsimp simp: valid_mdb_ctes_def) + apply (erule_tac cte=ctea in valid_dlistEn, assumption) + apply (simp add: mdb_next_unfold) + apply (clarsimp simp: mdb_next_unfold isCap_simps) + apply (drule_tac f="\S. c' \ S" in arg_cong) + apply (clarsimp simp: no_loops_direct_simp valid_mdb_no_loops) + apply (frule invs_mdb) + apply (drule invs_valid_reply_caps) + apply (clarsimp simp: valid_mdb_def reply_mdb_def + valid_reply_caps_def reply_caps_mdb_def + cte_wp_at_caps_of_state + simp del: split_paired_All) + apply (erule_tac x=slot in allE, erule_tac x=t in allE, erule impE, fast) + apply (elim exEI) + apply clarsimp + apply (subgoal_tac "P" for P, rule sym, rule equalityI, assumption) + apply clarsimp + apply (erule(4) unique_reply_capsD) + apply (simp add: descendants_of_def) + apply (rule r_into_trancl) + apply (simp add: cdt_parent_rel_def is_cdt_parent_def) + done + +lemma unbindNotification_valid_objs'_strengthen: + "valid_tcb' tcb s \ valid_tcb' (tcbBoundNotification_update Map.empty tcb) s" + "valid_ntfn' ntfn s \ valid_ntfn' (ntfnBoundTCB_update Map.empty ntfn) s" + by (simp_all add: unbindNotification_valid_objs'_helper' unbindNotification_valid_objs'_helper) + +crunch valid_objs'[wp]: cteDeleteOne "valid_objs'" + (simp: crunch_simps unless_def + wp: crunch_wps getObject_inv loadObject_default_inv) + +crunch nosch[wp]: handleFaultReply "\s. P (ksSchedulerAction s)" + +lemma emptySlot_weak_sch_act[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + emptySlot slot irq + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + by (wp weak_sch_act_wf_lift tcb_in_cur_domain'_lift) + +lemma cancelAllIPC_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + cancelAllIPC epptr + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: cancelAllIPC_def) + apply (wp rescheduleRequired_weak_sch_act_wf hoare_drop_imp | wpc | simp)+ + done + +lemma cancelAllSignals_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + cancelAllSignals ntfnptr + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: cancelAllSignals_def) + apply (wp rescheduleRequired_weak_sch_act_wf hoare_drop_imp | wpc | simp)+ + done + +crunch weak_sch_act_wf[wp]: finaliseCapTrue_standin "\s. 
weak_sch_act_wf (ksSchedulerAction s) s" + (ignore: setThreadState + simp: crunch_simps + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma cteDeleteOne_weak_sch_act[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + cteDeleteOne sl + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: cteDeleteOne_def unless_def) + apply (wp hoare_drop_imps finaliseCapTrue_standin_cur' isFinalCapability_cur' + | simp add: split_def)+ + done + +crunch pred_tcb_at'[wp]: handleFaultReply "pred_tcb_at' proj P t" +crunch valid_queues[wp]: handleFaultReply "Invariants_H.valid_queues" +crunch valid_queues'[wp]: handleFaultReply "valid_queues'" +crunch tcb_in_cur_domain'[wp]: handleFaultReply "tcb_in_cur_domain' t" + +crunch sch_act_wf[wp]: unbindNotification "\s. sch_act_wf (ksSchedulerAction s) s" +(wp: sbn_sch_act') + +crunch valid_queues'[wp]: cteDeleteOne valid_queues' + (simp: crunch_simps unless_def inQ_def + wp: crunch_wps sts_st_tcb' getObject_inv loadObject_default_inv + threadSet_valid_queues' rescheduleRequired_valid_queues'_weak) + +lemma cancelSignal_valid_queues'[wp]: + "\valid_queues'\ cancelSignal t ntfn \\rv. valid_queues'\" + apply (simp add: cancelSignal_def) + apply (rule hoare_pre) + apply (wp getNotification_wp| wpc | simp)+ + done + +lemma cancelIPC_valid_queues'[wp]: + "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) \ cancelIPC t \\rv. valid_queues'\" + apply (simp add: cancelIPC_def Let_def getThreadReplySlot_def locateSlot_conv liftM_def) + apply (rule hoare_seq_ext[OF _ gts_sp']) + apply (case_tac state, simp_all) defer 2 + apply (rule hoare_pre) + apply ((wp getEndpoint_wp getCTE_wp | wpc | simp)+)[8] + apply (wp cteDeleteOne_valid_queues') + apply (rule_tac Q="\_. valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) + apply (clarsimp simp: capHasProperty_def cte_wp_at_ctes_of) + apply (wp threadSet_valid_queues' threadSet_sch_act| simp)+ + apply (clarsimp simp: inQ_def) + done + +crunch valid_objs'[wp]: handleFaultReply valid_objs' + +lemma cte_wp_at_is_reply_cap_toI: + "cte_wp_at ((=) (cap.ReplyCap t False rights)) ptr s + \ cte_wp_at (is_reply_cap_to t) ptr s" + by (fastforce simp: cte_wp_at_reply_cap_to_ex_rights) + +crunches handle_fault_reply + for pspace_alignedp[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +lemma doReplyTransfer_corres: + "corres dc + (einvs and tcb_at receiver and tcb_at sender + and cte_wp_at ((=) (cap.ReplyCap receiver False rights)) slot) + (invs' and tcb_at' sender and tcb_at' receiver + and valid_pspace' and cte_at' (cte_map slot)) + (do_reply_transfer sender receiver slot grant) + (doReplyTransfer sender receiver (cte_map slot) grant)" + apply (simp add: do_reply_transfer_def doReplyTransfer_def cong: option.case_cong) + apply (rule corres_underlying_split [OF _ _ gts_sp gts_sp']) + apply (rule corres_guard_imp) + apply (rule getThreadState_corres, (clarsimp simp add: st_tcb_at_tcb_at invs_distinct invs_psp_aligned)+) + apply (rule_tac F = "awaiting_reply state" in corres_req) + apply (clarsimp simp add: st_tcb_at_def obj_at_def is_tcb) + apply (fastforce simp: invs_def valid_state_def intro: has_reply_cap_cte_wpD + dest: has_reply_cap_cte_wpD + dest!: valid_reply_caps_awaiting_reply cte_wp_at_is_reply_cap_toI) + apply (case_tac state, simp_all add: bind_assoc) + apply (simp add: isReply_def liftM_def) + apply (rule corres_symb_exec_r[OF _ getCTE_sp getCTE_inv, rotated]) + apply (rule no_fail_pre, wp) + apply clarsimp + apply (rename_tac mdbnode) + apply 
(rule_tac P="Q" and Q="Q" and P'="Q'" and Q'="(\s. Q' s \ R' s)" for Q Q' R' + in stronger_corres_guard_imp[rotated]) + apply assumption + apply (rule conjI, assumption) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule cte_wp_at_is_reply_cap_toI) + apply (erule(4) reply_cap_end_mdb_chain) + apply (rule corres_assert_assume[rotated], simp) + apply (simp add: getSlotCap_def) + apply (rule corres_symb_exec_r[OF _ getCTE_sp getCTE_inv, rotated]) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule corres_assert_assume[rotated]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule corres_guard_imp) + apply (rule corres_split[OF threadget_fault_corres]) + apply (case_tac rv, simp_all add: fault_rel_optionation_def bind_assoc)[1] + apply (rule corres_split[OF doIPCTransfer_corres]) + apply (rule corres_split[OF cap_delete_one_corres]) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule possibleSwitchTo_corres) + apply (wp set_thread_state_runnable_valid_sched set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' sts_valid_queues sts_valid_objs' delete_one_tcbDomain_obj_at' + | simp add: valid_tcb_state'_def)+ + apply (strengthen cte_wp_at_reply_cap_can_fast_finalise) + apply (wp hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post [OF do_ipc_transfer_non_null_cte_wp_at]) + prefer 2 + apply (erule cte_wp_at_weakenE) + apply (fastforce) + apply (clarsimp simp:is_cap_simps) + apply (wp weak_valid_sched_action_lift)+ + apply (rule_tac Q="\_. valid_queues' and valid_objs' and cur_tcb' and tcb_at' receiver and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp, simp add: sch_act_wf_weak) + apply (wp tcb_in_cur_domain'_lift) + defer + apply (simp) + apply (wp)+ + apply (clarsimp simp: invs_psp_aligned invs_distinct) + apply (rule conjI, erule invs_valid_objs) + apply (rule conjI, clarsimp)+ + apply (rule conjI) + apply (erule cte_wp_at_weakenE) + apply (clarsimp) + apply (rule conjI, rule refl) + apply (fastforce) + apply (clarsimp simp: invs_def valid_sched_def valid_sched_action_def invs_psp_aligned invs_distinct) + apply (simp) + apply (auto simp: invs'_def valid_state'_def)[1] + + apply (rule corres_guard_imp) + apply (rule corres_split[OF cap_delete_one_corres]) + apply (rule corres_split_mapr[OF getMessageInfo_corres]) + apply (rule corres_split_eqr[OF lookupIPCBuffer_corres']) + apply (rule corres_split_eqr[OF getMRs_corres]) + apply (simp(no_asm) del: dc_simp) + apply (rule corres_split_eqr[OF handleFaultReply_corres]) + apply simp + apply (rule corres_split) + apply (rule threadset_corresT; + clarsimp simp add: tcb_relation_def fault_rel_optionation_def cteSizeBits_def + tcb_cap_cases_def tcb_cte_cases_def exst_same_def) + apply (rule_tac Q="valid_sched and cur_tcb and tcb_at receiver and pspace_aligned and pspace_distinct" + and Q'="tcb_at' receiver and cur_tcb' + and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s) + and Invariants_H.valid_queues and valid_queues' and valid_objs'" + in corres_guard_imp) + apply (case_tac rvb, simp_all)[1] + apply (rule corres_guard_imp) + apply (rule corres_split[OF setThreadState_corres]) + apply (clarsimp simp: tcb_relation_def) + apply (fold dc_def, rule possibleSwitchTo_corres) + apply simp + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_st_tcb' sts_valid_queues + | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ + apply (rule corres_guard_imp) + apply (rule setThreadState_corres) + apply (clarsimp simp: tcb_relation_def) + apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state + thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' + threadSet_tcbDomain_triv threadSet_valid_objs' + | simp add: valid_tcb_state'_def)+ + apply (wp threadSet_cur weak_sch_act_wf_lift_linear threadSet_pred_tcb_no_state + thread_set_not_state_valid_sched threadSet_valid_queues threadSet_valid_queues' + | simp add: runnable_def inQ_def valid_tcb'_def)+ + apply (rule_tac Q="\_. valid_sched and cur_tcb and tcb_at sender and tcb_at receiver and + valid_objs and pspace_aligned and pspace_distinct" + in hoare_strengthen_post [rotated], clarsimp) + apply (wp) + apply (rule hoare_chain [OF cap_delete_one_invs]) + apply (assumption) + apply (rule conjI, clarsimp) + apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def) + apply (rule_tac Q="\_. tcb_at' sender and tcb_at' receiver and invs'" + in hoare_strengthen_post [rotated]) + apply (solves\auto simp: invs'_def valid_state'_def\) + apply wp + apply clarsimp + apply (rule conjI) + apply (erule cte_wp_at_weakenE) + apply (clarsimp simp add: can_fast_finalise_def) + apply (erule(1) emptyable_cte_wp_atD) + apply (rule allI, rule impI) + apply (clarsimp simp add: is_master_reply_cap_def) + apply (clarsimp) + done + +(* when we cannot talk about reply cap rights explicitly (for instance, when a schematic ?rights + would be generated too early *) +lemma doReplyTransfer_corres': + "corres dc + (einvs and tcb_at receiver and tcb_at sender + and cte_wp_at (is_reply_cap_to receiver) slot) + (invs' and tcb_at' sender and tcb_at' receiver + and valid_pspace' and cte_at' (cte_map slot)) + (do_reply_transfer sender receiver slot grant) + (doReplyTransfer sender receiver (cte_map slot) grant)" + using doReplyTransfer_corres[of receiver sender _ slot] + by (fastforce simp add: cte_wp_at_reply_cap_to_ex_rights corres_underlying_def) + +lemma valid_pspace'_splits[elim!]: (* FIXME AARCH64: clean up duplicates *) + "valid_pspace' s \ valid_objs' s" + "valid_pspace' s \ pspace_aligned' s" + "valid_pspace' s \ pspace_distinct' s" + "valid_pspace' s \ valid_mdb' s" + "valid_pspace' s \ no_0_obj' s" + by (simp add: valid_pspace'_def)+ + +lemma sts_valid_pspace_hangers: + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. valid_objs'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. pspace_distinct'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. pspace_aligned'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. valid_mdb'\" + "\valid_pspace' and tcb_at' t and valid_tcb_state' st\ setThreadState st t \\rv. 
no_0_obj'\" + by (safe intro!: hoare_strengthen_post [OF sts'_valid_pspace'_inv]) + +declare no_fail_getSlotCap [wp] + +lemma setupCallerCap_corres: + "corres dc + (st_tcb_at (Not \ halted) sender and tcb_at receiver and + st_tcb_at (Not \ awaiting_reply) sender and valid_reply_caps and + valid_objs and pspace_distinct and pspace_aligned and valid_mdb + and valid_list and + valid_reply_masters and cte_wp_at (\c. c = cap.NullCap) (receiver, tcb_cnode_index 3)) + (tcb_at' sender and tcb_at' receiver and valid_pspace' + and (\s. weak_sch_act_wf (ksSchedulerAction s) s)) + (setup_caller_cap sender receiver grant) + (setupCallerCap sender receiver grant)" + supply if_split[split del] + apply (simp add: setup_caller_cap_def setupCallerCap_def + getThreadReplySlot_def locateSlot_conv + getThreadCallerSlot_def) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split_nor) + apply (rule setThreadState_corres) + apply (simp split: option.split) + apply (rule corres_symb_exec_r) + apply (rule_tac F="\r. cteCap masterCTE = capability.ReplyCap sender True r + \ mdbNext (cteMDBNode masterCTE) = nullPointer" + in corres_gen_asm2, clarsimp simp add: isCap_simps) + apply (rule corres_symb_exec_r) + apply (rule_tac F="rv = capability.NullCap" + in corres_gen_asm2, simp) + apply (rule cteInsert_corres) + apply (simp split: if_splits) + apply (simp add: cte_map_def tcbReplySlot_def + tcb_cnode_index_def cte_level_bits_def) + apply (simp add: cte_map_def tcbCallerSlot_def + tcb_cnode_index_def cte_level_bits_def) + apply (rule_tac R="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" + in hoare_post_add) + + apply (wp, (wp getSlotCap_wp)+) + apply blast + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at'_def cte_at'_def) + apply (rule_tac R="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" + in hoare_post_add) + apply (wp, (wp getCTE_wp')+) + apply blast + apply (rule no_fail_pre, wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp sts_valid_pspace_hangers + | simp add: cte_wp_at_ctes_of)+ + apply (clarsimp simp: valid_tcb_state_def st_tcb_at_reply_cap_valid + st_tcb_at_tcb_at st_tcb_at_caller_cap_null + split: option.split) + apply (clarsimp simp: valid_tcb_state'_def valid_cap'_def capAligned_reply_tcbI) + apply (frule(1) st_tcb_at_reply_cap_valid, simp, clarsimp) + apply (clarsimp simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state) + apply (drule pspace_relation_cte_wp_at[rotated, OF caps_of_state_cteD], + erule valid_pspace'_splits, clarsimp+)+ + apply (clarsimp simp: cte_wp_at_ctes_of cte_map_def tcbReplySlot_def + tcbCallerSlot_def tcb_cnode_index_def + is_cap_simps) + apply (auto intro: reply_no_descendants_mdbNext_null[OF not_waiting_reply_slot_no_descendants] + simp: cte_level_bits_def) + done + +crunch tcb_at'[wp]: getThreadCallerSlot "tcb_at' t" + +lemma getThreadReplySlot_tcb_at'[wp]: + "\tcb_at' t\ getThreadReplySlot tcb \\_. tcb_at' t\" + by (simp add: getThreadReplySlot_def, wp) + +lemma setupCallerCap_tcb_at'[wp]: + "\tcb_at' t\ setupCallerCap sender receiver grant \\_. tcb_at' t\" + by (simp add: setupCallerCap_def, wp hoare_drop_imp) + +crunch ct'[wp]: setupCallerCap "\s. P (ksCurThread s)" + (wp: crunch_wps) + +lemma cteInsert_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + cteInsert newCap srcSlot destSlot + \\_ s. sch_act_wf (ksSchedulerAction s) s\" +by (wp sch_act_wf_lift tcb_in_cur_domain'_lift) + +lemma setupCallerCap_sch_act [wp]: + "\\s. sch_act_not t s \ sch_act_wf (ksSchedulerAction s) s\ + setupCallerCap t r g \\_ s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setupCallerCap_def getSlotCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv) + apply (wp getCTE_wp' sts_sch_act' hoare_drop_imps hoare_vcg_all_lift) + apply clarsimp + done + +lemma possibleSwitchTo_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s \ st_tcb_at' runnable' t s\ + possibleSwitchTo t \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: possibleSwitchTo_def setSchedulerAction_def threadGet_def curDomain_def + bitmap_fun_defs) + apply (wp rescheduleRequired_weak_sch_act_wf + weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"] + getObject_tcb_wp hoare_weak_lift_imp + | wpc)+ + apply (clarsimp simp: obj_at'_def weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def) + done + +lemmas transferCapsToSlots_pred_tcb_at' = + transferCapsToSlots_pres1 [OF cteInsert_pred_tcb_at'] + +crunches doIPCTransfer, possibleSwitchTo + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + (wp: mapM_wp' crunch_wps simp: zipWithM_x_mapM) + +lemma setSchedulerAction_ct_in_domain: + "\\s. ct_idle_or_in_cur_domain' s + \ p \ ResumeCurrentThread \ setSchedulerAction p + \\_. ct_idle_or_in_cur_domain'\" + by (simp add:setSchedulerAction_def | wp)+ + +crunches setupCallerCap, doIPCTransfer, possibleSwitchTo + for ct_idle_or_in_cur_domain'[wp]: ct_idle_or_in_cur_domain' + (wp: crunch_wps setSchedulerAction_ct_in_domain simp: zipWithM_x_mapM) +crunches setupCallerCap, doIPCTransfer, possibleSwitchTo + for ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + (wp: crunch_wps simp: zipWithM_x_mapM) + +crunch tcbDomain_obj_at'[wp]: doIPCTransfer "obj_at' (\tcb. P (tcbDomain tcb)) t" + (wp: crunch_wps constOnFailure_wp simp: crunch_simps) + +crunch tcb_at'[wp]: possibleSwitchTo "tcb_at' t" + (wp: crunch_wps) + +crunch valid_pspace'[wp]: possibleSwitchTo valid_pspace' + (wp: crunch_wps) + +lemma sendIPC_corres: +(* call is only true if called in handleSyscall SysCall, which + is always blocking. *) + assumes "call \ bl" + shows + "corres dc (einvs and st_tcb_at active t and ep_at ep and ex_nonz_cap_to t) + (invs' and sch_act_not t and tcb_at' t and ep_at' ep) + (send_ipc bl call bg cg cgr t ep) (sendIPC bl call bg cg cgr t ep)" +proof - + show ?thesis + apply (insert assms) + apply (unfold send_ipc_def sendIPC_def Let_def) + apply (case_tac bl) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres, + where + R="\rv. einvs and st_tcb_at active t and ep_at ep and + valid_ep rv and obj_at (\ob. ob = Endpoint rv) ep + and ex_nonz_cap_to t" + and + R'="\rv'. 
invs' and tcb_at' t and sch_act_not t + and ep_at' ep and valid_ep' rv'"]) + apply (case_tac rv) + apply (simp add: ep_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setEndpoint_corres) + apply (simp add: ep_relation_def) + apply wp+ + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_distinct) + apply clarsimp + \ \concludes IdleEP if bl branch\ + apply (simp add: ep_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setEndpoint_corres) + apply (simp add: ep_relation_def) + apply wp+ + apply (clarsimp simp: st_tcb_at_tcb_at valid_tcb_state_def invs_distinct) + apply clarsimp + \ \concludes SendEP if bl branch\ + apply (simp add: ep_relation_def) + apply (rename_tac list) + apply (rule_tac F="list \ []" in corres_req) + apply (simp add: valid_ep_def) + apply (case_tac list) + apply simp + apply (clarsimp split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def split: list.split) + apply (simp add: isReceive_def split del:if_split) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac + F="\data. recv_state = Structures_A.BlockedOnReceive ep data" + in corres_gen_asm) + apply (clarsimp simp: case_bool_If case_option_If if3_fold + simp del: dc_simp split del: if_split cong: if_cong) + apply (rule corres_split[OF doIPCTransfer_corres]) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule corres_split[OF possibleSwitchTo_corres]) + apply (fold when_def)[1] + apply (rule_tac P="call" and P'="call" + in corres_symmetric_bool_cases, blast) + apply (simp add: when_def dc_def[symmetric] split del: if_split) + apply (rule corres_if2, simp) + apply (rule setupCallerCap_corres) + apply (rule setThreadState_corres, simp) + apply (rule corres_trivial) + apply (simp add: when_def dc_def[symmetric] split del: if_split) + apply (simp split del: if_split add: if_apply_def2) + apply (wp hoare_drop_imps)[1] + apply (simp split del: if_split add: if_apply_def2) + apply (wp hoare_drop_imps)[1] + apply (wp | simp)+ + apply (wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases) + apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases)[1] + apply (simp add: valid_tcb_state_def pred_conj_def) + apply (strengthen reply_cap_doesnt_exist_strg disjI2_strg) + apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift + | clarsimp simp: is_cap_simps)+)[1] + apply (simp add: pred_conj_def) + apply (strengthen sch_act_wf_weak) + apply (simp add: valid_tcb_state'_def) + apply (wp weak_sch_act_wf_lift_linear tcb_in_cur_domain'_lift hoare_drop_imps)[1] + apply (wp gts_st_tcb_at)+ + apply (simp add: pred_conj_def cong: conj_cong) + apply (wp hoare_post_taut) + apply (simp) + apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb')+ + apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def ep_redux_simps + ep_redux_simps' st_tcb_at_tcb_at valid_ep_def + cong: list.case_cong) + apply (drule(1) sym_refs_obj_atD[where P="\ob. 
ob = e" for e]) + apply (clarsimp simp: st_tcb_at_refs_of_rev st_tcb_at_reply_cap_valid + st_tcb_def2 valid_sched_def valid_sched_action_def) + apply (force simp: st_tcb_def2 dest!: st_tcb_at_caller_cap_null[simplified,rotated]) + subgoal by (auto simp: valid_ep'_def invs'_def valid_state'_def split: list.split) + apply wp+ + apply (clarsimp simp: ep_at_def2)+ + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres, + where + R="\rv. einvs and st_tcb_at active t and ep_at ep and + valid_ep rv and obj_at (\k. k = Endpoint rv) ep" + and + R'="\rv'. invs' and tcb_at' t and sch_act_not t + and ep_at' ep and valid_ep' rv'"]) + apply (rename_tac rv rv') + apply (case_tac rv) + apply (simp add: ep_relation_def) + \ \concludes IdleEP branch if not bl and no ft\ + apply (simp add: ep_relation_def) + \ \concludes SendEP branch if not bl and no ft\ + apply (simp add: ep_relation_def) + apply (rename_tac list) + apply (rule_tac F="list \ []" in corres_req) + apply (simp add: valid_ep_def) + apply (case_tac list) + apply simp + apply (rule_tac F="a \ t" in corres_req) + apply (clarsimp simp: invs_def valid_state_def + valid_pspace_def) + apply (drule(1) sym_refs_obj_atD[where P="\ob. ob = e" for e]) + apply (clarsimp simp: st_tcb_at_def obj_at_def tcb_bound_refs_def2) + apply fastforce + apply (clarsimp split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (simp add: ep_relation_def split: list.split) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac + F="\data. recv_state = Structures_A.BlockedOnReceive ep data" + in corres_gen_asm) + apply (clarsimp simp: isReceive_def case_bool_If + split del: if_split cong: if_cong) + apply (rule corres_split[OF doIPCTransfer_corres]) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule possibleSwitchTo_corres) + apply (simp add: if_apply_def2) + apply ((wp sts_cur_tcb set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at_cases | + simp add: if_apply_def2 split del: if_split)+)[1] + apply (wp setThreadState_valid_queues' sts_valid_queues sts_weak_sch_act_wf + sts_cur_tcb' setThreadState_tcb' sts_st_tcb_at'_cases) + apply (simp add: valid_tcb_state_def pred_conj_def) + apply ((wp hoare_drop_imps do_ipc_transfer_tcb_caps weak_valid_sched_action_lift + | clarsimp simp:is_cap_simps)+)[1] + apply (simp add: valid_tcb_state'_def pred_conj_def) + apply (strengthen sch_act_wf_weak) + apply (wp weak_sch_act_wf_lift_linear hoare_drop_imps) + apply (wp gts_st_tcb_at)+ + apply (simp add: pred_conj_def cong: conj_cong) + apply (wp hoare_post_taut) + apply simp + apply (wp weak_sch_act_wf_lift_linear set_ep_valid_objs' setEndpoint_valid_mdb') + apply (clarsimp simp add: invs_def valid_state_def + valid_pspace_def ep_redux_simps ep_redux_simps' + st_tcb_at_tcb_at + cong: list.case_cong) + apply (clarsimp simp: valid_ep_def) + apply (drule(1) sym_refs_obj_atD[where P="\ob. 
ob = e" for e]) + apply (clarsimp simp: st_tcb_at_refs_of_rev st_tcb_at_reply_cap_valid + st_tcb_at_caller_cap_null) + apply (fastforce simp: st_tcb_def2 valid_sched_def valid_sched_action_def) + subgoal by (auto simp: valid_ep'_def + split: list.split; + clarsimp simp: invs'_def valid_state'_def) + apply wp+ + apply (clarsimp simp: ep_at_def2)+ + done +qed + +lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at'] + +(* Annotation added by Simon Winwood (Thu Jul 1 20:54:41 2010) using taint-mode *) +declare tl_drop_1[simp] + +crunch cur[wp]: cancel_ipc "cur_tcb" + (wp: crunch_wps simp: crunch_simps) + +lemma valid_sched_weak_strg: + "valid_sched s \ weak_valid_sched_action s" + by (simp add: valid_sched_def valid_sched_action_def) + +lemma sendSignal_corres: + "corres dc (einvs and ntfn_at ep) (invs' and ntfn_at' ep) + (send_signal ep bg) (sendSignal ep bg)" + apply (simp add: send_signal_def sendSignal_def Let_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getNotification_corres, + where + R = "\rv. einvs and ntfn_at ep and valid_ntfn rv and + ko_at (Structures_A.Notification rv) ep" and + R' = "\rv'. invs' and ntfn_at' ep and + valid_ntfn' rv' and ko_at' rv' ep"]) + defer + apply (wp get_simple_ko_ko_at get_ntfn_ko')+ + apply (simp add: invs_valid_objs)+ + apply (case_tac "ntfn_obj ntfn") + \ \IdleNtfn\ + apply (clarsimp simp add: ntfn_relation_def) + apply (case_tac "ntfnBoundTCB nTFN") + apply clarsimp + apply (rule corres_guard_imp[OF setNotification_corres]) + apply (clarsimp simp add: ntfn_relation_def)+ + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule corres_if) + apply (fastforce simp: receive_blocked_def receiveBlocked_def + thread_state_relation_def + split: Structures_A.thread_state.splits + Structures_H.thread_state.splits) + apply (rule corres_split[OF cancel_ipc_corres]) + apply (rule corres_split[OF setThreadState_corres]) + apply (clarsimp simp: thread_state_relation_def) + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule possibleSwitchTo_corres) + apply wp + apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_valid_queues sts_st_tcb' hoare_disjI2 + cancel_ipc_cte_wp_at_not_reply_state + | strengthen invs_vobjs_strgs invs_psp_aligned_strg valid_sched_weak_strg + | simp add: valid_tcb_state_def)+ + apply (rule_tac Q="\rv. 
invs' and tcb_at' a" in hoare_strengthen_post) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak + valid_tcb_state'_def) + apply (rule setNotification_corres) + apply (clarsimp simp add: ntfn_relation_def) + apply (wp gts_wp gts_wp' | clarsimp)+ + apply (auto simp: valid_ntfn_def receive_blocked_def valid_sched_def invs_cur + elim: pred_tcb_weakenE + intro: st_tcb_at_reply_cap_valid + split: Structures_A.thread_state.splits)[1] + apply (clarsimp simp: valid_ntfn'_def invs'_def valid_state'_def valid_pspace'_def sch_act_wf_weak) + \ \WaitingNtfn\ + apply (clarsimp simp add: ntfn_relation_def Let_def) + apply (simp add: update_waiting_ntfn_def) + apply (rename_tac list) + apply (case_tac "tl list = []") + \ \tl list = []\ + apply (rule corres_guard_imp) + apply (rule_tac F="list \ []" in corres_gen_asm) + apply (simp add: list_case_helper split del: if_split) + apply (rule corres_split[OF setNotification_corres]) + apply (simp add: ntfn_relation_def) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule possibleSwitchTo_corres) + apply ((wp | simp)+)[1] + apply (rule_tac Q="\_. Invariants_H.valid_queues and valid_queues' and + (\s. sch_act_wf (ksSchedulerAction s) s) and + cur_tcb' and + st_tcb_at' runnable' (hd list) and valid_objs'" + in hoare_post_imp, clarsimp simp: pred_tcb_at') + apply (wp | simp)+ + apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action + | simp)+ + apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' + setThreadState_st_tcb + | simp)+ + apply (wp set_simple_ko_valid_objs set_ntfn_aligned' set_ntfn_valid_objs' + hoare_vcg_disj_lift weak_sch_act_wf_lift_linear + | simp add: valid_tcb_state_def valid_tcb_state'_def)+ + apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def ntfn_queued_st_tcb_at valid_sched_def + valid_sched_action_def) + apply (auto simp: valid_ntfn'_def )[1] + apply (clarsimp simp: invs'_def valid_state'_def) + + \ \tl list \ []\ + apply (rule corres_guard_imp) + apply (rule_tac F="list \ []" in corres_gen_asm) + apply (simp add: list_case_helper) + apply (rule corres_split[OF setNotification_corres]) + apply (simp add: ntfn_relation_def split:list.splits) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule possibleSwitchTo_corres) + apply (wp cur_tcb_lift | simp)+ + apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action + | simp)+ + apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' + setThreadState_st_tcb + | simp)+ + apply (wp set_ntfn_aligned' set_simple_ko_valid_objs set_ntfn_valid_objs' + hoare_vcg_disj_lift weak_sch_act_wf_lift_linear + | simp add: valid_tcb_state_def valid_tcb_state'_def)+ + apply (clarsimp simp: invs_def valid_state_def valid_ntfn_def + valid_pspace_def neq_Nil_conv + ntfn_queued_st_tcb_at valid_sched_def valid_sched_action_def + split: option.splits) + apply (auto simp: valid_ntfn'_def neq_Nil_conv invs'_def valid_state'_def + weak_sch_act_wf_def + split: option.splits)[1] + \ \ActiveNtfn\ + apply (clarsimp simp add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (rule setNotification_corres) + apply (simp add: ntfn_relation_def combine_ntfn_badges_def + combine_ntfn_msgs_def) + apply (simp 
add: invs_def valid_state_def valid_ntfn_def) + apply (simp add: invs'_def valid_state'_def valid_ntfn'_def) + done + +lemma valid_Running'[simp]: + "valid_tcb_state' Running = \" + by (rule ext, simp add: valid_tcb_state'_def) + +crunch typ'[wp]: setMRs "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: zipWithM_x_mapM) + +lemma possibleSwitchTo_sch_act[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s \ st_tcb_at' runnable' t s\ + possibleSwitchTo t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) + apply (wp hoare_weak_lift_imp threadSet_sch_act setQueue_sch_act threadGet_wp + | simp add: unless_def | wpc)+ + apply (auto simp: obj_at'_def tcb_in_cur_domain'_def) + done + +lemma possibleSwitchTo_valid_queues[wp]: + "\Invariants_H.valid_queues and valid_objs' and (\s. sch_act_wf (ksSchedulerAction s) s) and st_tcb_at' runnable' t\ + possibleSwitchTo t + \\rv. Invariants_H.valid_queues\" + apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) + apply (wp hoare_drop_imps | wpc | simp)+ + apply (auto simp: valid_tcb'_def weak_sch_act_wf_def + dest: pred_tcb_at' + elim!: valid_objs_valid_tcbE) + done + +lemma possibleSwitchTo_ksQ': + "\(\s. t' \ set (ksReadyQueues s p) \ sch_act_not t' s) and K(t' \ t)\ + possibleSwitchTo t + \\_ s. t' \ set (ksReadyQueues s p)\" + apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) + apply (wp hoare_weak_lift_imp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp + | wpc + | simp split del: if_split)+ + apply (auto simp: obj_at'_def) + done + +lemma possibleSwitchTo_valid_queues'[wp]: + "\valid_queues' and (\s. sch_act_wf (ksSchedulerAction s) s) + and st_tcb_at' runnable' t\ + possibleSwitchTo t + \\rv. valid_queues'\" + apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) + apply (wp hoare_weak_lift_imp threadGet_wp | wpc | simp)+ + apply (auto simp: obj_at'_def) + done + +crunch st_refs_of'[wp]: possibleSwitchTo "\s. P (state_refs_of' s)" + (wp: crunch_wps) +crunch st_hyp_refs_of'[wp]: possibleSwitchTo "\s. P (state_hyp_refs_of' s)" + (wp: crunch_wps) +crunch cap_to'[wp]: possibleSwitchTo "ex_nonz_cap_to' p" + (wp: crunch_wps) +crunch objs'[wp]: possibleSwitchTo valid_objs' + (wp: crunch_wps) +crunch ct[wp]: possibleSwitchTo cur_tcb' + (wp: cur_tcb_lift crunch_wps) + +lemma possibleSwitchTo_iflive[wp]: + "\if_live_then_nonz_cap' and ex_nonz_cap_to' t + and (\s. sch_act_wf (ksSchedulerAction s) s)\ + possibleSwitchTo t + \\rv. if_live_then_nonz_cap'\" + apply (simp add: possibleSwitchTo_def curDomain_def) + apply (wp | wpc | simp)+ + apply (simp only: imp_conv_disj, wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (wp threadGet_wp)+ + apply (auto simp: obj_at'_def) + done + +crunch ifunsafe[wp]: possibleSwitchTo if_unsafe_then_cap' + (wp: crunch_wps) +crunch idle'[wp]: possibleSwitchTo valid_idle' + (wp: crunch_wps) +crunch global_refs'[wp]: possibleSwitchTo valid_global_refs' + (wp: crunch_wps) +crunch arch_state'[wp]: possibleSwitchTo valid_arch_state' + (wp: crunch_wps) +crunch irq_node'[wp]: possibleSwitchTo "\s. P (irq_node' s)" + (wp: crunch_wps) +crunch typ_at'[wp]: possibleSwitchTo "\s. P (typ_at' T p s)" + (wp: crunch_wps) +crunch irq_handlers'[wp]: possibleSwitchTo valid_irq_handlers' + (simp: unless_def tcb_cte_cases_def cteSizeBits_def wp: crunch_wps) +crunch irq_states'[wp]: possibleSwitchTo valid_irq_states' + (wp: crunch_wps) +crunch ct'[wp]: sendSignal "\s. 
P (ksCurThread s)" + (wp: crunch_wps simp: crunch_simps o_def) +crunch it'[wp]: sendSignal "\s. P (ksIdleThread s)" + (wp: crunch_wps simp: crunch_simps) + +crunch irqs_masked'[wp]: setBoundNotification "irqs_masked'" + (wp: irqs_masked_lift) + +crunch irqs_masked'[wp]: sendSignal "irqs_masked'" + (wp: crunch_wps getObject_inv loadObject_default_inv + simp: crunch_simps unless_def o_def + rule: irqs_masked_lift) + +lemma sts_running_valid_queues: + "runnable' st \ \ Invariants_H.valid_queues \ setThreadState st t \\_. Invariants_H.valid_queues \" + by (wp sts_valid_queues, clarsimp) + +lemma ct_in_state_activatable_imp_simple'[simp]: + "ct_in_state' activatable' s \ ct_in_state' simple' s" + apply (simp add: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply (case_tac st; simp) + done + +lemma setThreadState_nonqueued_state_update: + "\\s. invs' s \ st_tcb_at' simple' t s + \ st \ {Inactive, Running, Restart, IdleThreadState} + \ (st \ Inactive \ ex_nonz_cap_to' t s) + \ (t = ksIdleThread s \ idle' st) + + \ (\ runnable' st \ sch_act_simple s) + \ (\ runnable' st \ (\p. t \ set (ksReadyQueues s p)))\ + setThreadState st t \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wp valid_irq_node_lift + sts_valid_queues + setThreadState_ct_not_inQ) + apply (clarsimp simp: pred_tcb_at') + apply (rule conjI, fastforce simp: valid_tcb_state'_def) + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (rule conjI, fastforce) + apply clarsimp + apply (erule delta_sym_refs) + apply (fastforce split: if_split_asm) + apply (fastforce simp: symreftype_inverse' tcb_bound_refs'_def + split: if_split_asm) + done + +lemma cteDeleteOne_reply_cap_to'[wp]: + "\ex_nonz_cap_to' p and + cte_wp_at' (\c. isReplyCap (cteCap c)) slot\ + cteDeleteOne slot + \\rv. ex_nonz_cap_to' p\" + apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) + apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule hoare_assume_pre) + apply (subgoal_tac "isReplyCap (cteCap cte)") + apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv + | clarsimp simp: finaliseCap_def isCap_simps + | wp (once) hoare_drop_imps)+ + apply (fastforce simp: cte_wp_at_ctes_of) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + done + +crunches setupCallerCap, possibleSwitchTo, asUser, doIPCTransfer + for vms'[wp]: "valid_machine_state'" + (wp: crunch_wps simp: zipWithM_x_mapM_x) + +crunch nonz_cap_to'[wp]: cancelSignal "ex_nonz_cap_to' p" + (wp: crunch_wps simp: crunch_simps) + +lemma cancelIPC_nonz_cap_to'[wp]: + "\ex_nonz_cap_to' p\ cancelIPC t \\rv. ex_nonz_cap_to' p\" + apply (simp add: cancelIPC_def getThreadReplySlot_def Let_def + capHasProperty_def) + apply (wp threadSet_cap_to' + | wpc + | simp + | clarsimp elim!: cte_wp_at_weakenE' + | rule hoare_post_imp[where Q="\rv. ex_nonz_cap_to' p"])+ + done + + +crunches activateIdleThread, getThreadReplySlot, isFinalCapability + for nosch[wp]: "\s. P (ksSchedulerAction s)" + (simp: Let_def) + +crunches setupCallerCap, asUser, setMRs, doIPCTransfer, possibleSwitchTo + for pspace_domain_valid[wp]: "pspace_domain_valid" + (wp: crunch_wps simp: zipWithM_x_mapM_x) + +crunches setupCallerCap, doIPCTransfer, possibleSwitchTo + for ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + (wp: crunch_wps simp: zipWithM_x_mapM) + +lemma setThreadState_not_rct[wp]: + "\\s. ksSchedulerAction s \ ResumeCurrentThread \ + setThreadState st t + \\_ s. 
ksSchedulerAction s \ ResumeCurrentThread \" + apply (simp add: setThreadState_def) + apply (wp) + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply (simp) + apply (wp)+ + apply simp + done + +lemma cancelAllIPC_not_rct[wp]: + "\\s. ksSchedulerAction s \ ResumeCurrentThread \ + cancelAllIPC epptr + \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" + apply (simp add: cancelAllIPC_def) + apply (wp | wpc)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wp)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wp)+ + apply (wp hoare_vcg_all_lift hoare_drop_imp) + apply (simp_all) + done + +lemma cancelAllSignals_not_rct[wp]: + "\\s. ksSchedulerAction s \ ResumeCurrentThread \ + cancelAllSignals epptr + \\_ s. ksSchedulerAction s \ ResumeCurrentThread \" + apply (simp add: cancelAllSignals_def) + apply (wp | wpc)+ + apply (rule hoare_post_imp [OF _ rescheduleRequired_notresume], simp) + apply simp + apply (rule mapM_x_wp_inv) + apply (wp)+ + apply (wp hoare_vcg_all_lift hoare_drop_imp) + apply (simp_all) + done + +crunch not_rct[wp]: finaliseCapTrue_standin "\s. ksSchedulerAction s \ ResumeCurrentThread" +(simp: Let_def) + +declare setEndpoint_ct' [wp] + +lemma cancelIPC_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + cancelIPC t + \\_ s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + (is "\?PRE t'\ _ \_\") +proof - + have aipc: "\t t' ntfn. + \\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + cancelSignal t ntfn + \\_ s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + apply (simp add: cancelSignal_def) + apply (wp)[1] + apply (wp hoare_convert_imp)+ + apply (rule_tac P="\s. ksSchedulerAction s \ ResumeCurrentThread" + in hoare_weaken_pre) + apply (wpc) + apply (wp | simp)+ + apply (wpc, wp+) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp) + apply simp + done + have cdo: "\t t' slot. + \\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + cteDeleteOne slot + \\_ s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + apply (simp add: cteDeleteOne_def unless_def split_def) + apply (wp) + apply (wp hoare_convert_imp)[1] + apply (wp) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp hoare_convert_imp | simp)+ + done + show ?thesis + apply (simp add: cancelIPC_def Let_def) + apply (wp, wpc) + prefer 4 \ \state = Running\ + apply wp + prefer 7 \ \state = Restart\ + apply wp + apply (wp)+ + apply (wp hoare_convert_imp)[1] + apply (wpc, wp+) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp cdo)+ + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply ((wp aipc hoare_convert_imp)+)[6] + apply (wp) + apply (wp hoare_convert_imp)[1] + apply (wpc, wp+) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp) + apply (rule_tac Q="\_. ?PRE t'" in hoare_post_imp, clarsimp) + apply (wp) + apply simp + done +qed + +lemma sai_invs'[wp]: + "\invs' and ex_nonz_cap_to' ntfnptr\ + sendSignal ntfnptr badge \\y. 
invs'\" + unfolding sendSignal_def + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (case_tac "ntfnObj nTFN", simp_all) + prefer 3 + apply (rename_tac list) + apply (case_tac list, + simp_all split del: if_split + add: setMessageInfo_def)[1] + apply (wp hoare_convert_imp [OF asUser_nosch] + hoare_convert_imp [OF setMRs_sch_act])+ + apply (clarsimp simp:conj_comms) + apply (simp add: invs'_def valid_state'_def) + apply (wp valid_irq_node_lift sts_valid_objs' setThreadState_ct_not_inQ + sts_valid_queues [where st="Structures_H.thread_state.Running", simplified] + set_ntfn_valid_objs' cur_tcb_lift sts_st_tcb' + hoare_convert_imp [OF setNotification_nosch] + | simp split del: if_split)+ + + apply (intro conjI[rotated]; + (solves \clarsimp simp: invs'_def valid_state'_def valid_pspace'_def\)?) + apply (clarsimp simp: invs'_def valid_state'_def split del: if_split) + apply (drule(1) ct_not_in_ntfnQueue, simp+) + apply clarsimp + apply (frule ko_at_valid_objs', clarsimp) + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def + split: list.splits) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (clarsimp simp: st_tcb_at_refs_of_rev' valid_idle'_def pred_tcb_at'_def idle_tcb'_def + dest!: sym_refs_ko_atD' sym_refs_st_tcb_atD' sym_refs_obj_atD' + split: list.splits) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply (frule(1) ko_at_valid_objs') + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def + split: list.splits option.splits) + apply (clarsimp elim!: if_live_then_nonz_capE' simp:invs'_def valid_state'_def) + apply (drule(1) sym_refs_ko_atD') + apply (clarsimp elim!: ko_wp_at'_weakenE + intro!: refs_of_live') + apply (clarsimp split del: if_split)+ + apply (frule ko_at_valid_objs', clarsimp) + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def split del: if_split) + apply (frule invs_sym') + apply (drule(1) sym_refs_obj_atD') + apply (clarsimp split del: if_split cong: if_cong + simp: st_tcb_at_refs_of_rev' ep_redux_simps' ntfn_bound_refs'_def) + apply (frule st_tcb_at_state_refs_ofD') + apply (erule delta_sym_refs) + apply (fastforce simp: split: if_split_asm) + apply (fastforce simp: tcb_bound_refs'_def set_eq_subset symreftype_inverse' + split: if_split_asm) + apply (clarsimp simp:invs'_def) + apply (frule ko_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def valid_state'_def) + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def split del: if_split) + apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def) + apply (frule(1) ko_at_valid_objs') + apply simp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def + split: list.splits option.splits) + apply (case_tac "ntfnBoundTCB nTFN", simp_all) + apply (wp set_ntfn_minor_invs') + apply (fastforce simp: valid_ntfn'_def invs'_def valid_state'_def + elim!: obj_at'_weakenE + dest!: global'_no_ex_cap) + apply (wp add: hoare_convert_imp [OF asUser_nosch] + hoare_convert_imp [OF setMRs_sch_act] + setThreadState_nonqueued_state_update sts_st_tcb' + del: cancelIPC_simple) + apply (clarsimp | wp cancelIPC_ct')+ + apply (wp set_ntfn_minor_invs' gts_wp' | clarsimp)+ + apply (frule pred_tcb_at') + by (wp set_ntfn_minor_invs' + | rule conjI + | clarsimp elim!: st_tcb_ex_cap'' + | fastforce simp: receiveBlocked_def pred_tcb_at'_def obj_at'_def + dest!: invs_rct_ct_activatable' + split: thread_state.splits + | fastforce simp: invs'_def valid_state'_def receiveBlocked_def + valid_obj'_def valid_ntfn'_def + split: thread_state.splits + 
dest!: global'_no_ex_cap st_tcb_ex_cap'' ko_at_valid_objs')+ + +lemma replyFromKernel_corres: + "corres dc (tcb_at t and invs) (invs') + (reply_from_kernel t r) (replyFromKernel t r)" + apply (case_tac r) + apply (clarsimp simp: replyFromKernel_def reply_from_kernel_def + badge_register_def badgeRegister_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF lookupIPCBuffer_corres]) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule corres_split_eqr[OF setMRs_corres]) + apply clarsimp + apply (rule setMessageInfo_corres) + apply (wp hoare_case_option_wp hoare_valid_ipc_buffer_ptr_typ_at' + | clarsimp simp: invs_distinct invs_psp_aligned)+ + apply fastforce + done + +lemma rfk_invs': + "\invs' and tcb_at' t\ replyFromKernel t r \\rv. invs'\" + apply (simp add: replyFromKernel_def) + apply (cases r) + apply (wp | clarsimp)+ + done + +crunch nosch[wp]: replyFromKernel "\s. P (ksSchedulerAction s)" + +lemma completeSignal_corres: + "corres dc (ntfn_at ntfnptr and tcb_at tcb and pspace_aligned and valid_objs and pspace_distinct) + (ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace' and obj_at' isActive ntfnptr) + (complete_signal ntfnptr tcb) (completeSignal ntfnptr tcb)" + apply (simp add: complete_signal_def completeSignal_def) + apply (rule corres_guard_imp) + apply (rule_tac R'="\ntfn. ntfn_at' ntfnptr and tcb_at' tcb and valid_pspace' + and valid_ntfn' ntfn and (\_. isActive ntfn)" + in corres_split[OF getNotification_corres]) + apply (rule corres_gen_asm2) + apply (case_tac "ntfn_obj rv") + apply (clarsimp simp: ntfn_relation_def isActive_def + split: ntfn.splits Structures_H.notification.splits)+ + apply (rule corres_guard2_imp) + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres setNotification_corres]) + apply (clarsimp simp: ntfn_relation_def) + apply (wp set_simple_ko_valid_objs get_simple_ko_wp getNotification_wp | clarsimp simp: valid_ntfn'_def)+ + apply (clarsimp simp: valid_pspace'_def) + apply (frule_tac P="(\k. k = ntfn)" in obj_at_valid_objs', assumption) + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def obj_at'_def) + done + + +lemma doNBRecvFailedTransfer_corres: + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) \ + (do_nbrecv_failed_transfer thread) + (doNBRecvFailedTransfer thread)" + unfolding do_nbrecv_failed_transfer_def doNBRecvFailedTransfer_def + by (simp add: badgeRegister_def badge_register_def, rule asUser_setRegister_corres) + +lemma receiveIPC_corres: + assumes "is_ep_cap cap" and "cap_relation cap cap'" + shows " + corres dc (einvs and valid_sched and tcb_at thread and valid_cap cap and ex_nonz_cap_to thread + and cte_wp_at (\c. 
c = cap.NullCap) (thread, tcb_cnode_index 3)) + (invs' and tcb_at' thread and valid_cap' cap') + (receive_ipc thread cap isBlocking) (receiveIPC thread cap' isBlocking)" + apply (insert assms) + apply (simp add: receive_ipc_def receiveIPC_def + split del: if_split) + apply (case_tac cap, simp_all add: isEndpointCap_def) + apply (rename_tac word1 word2 right) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split[OF getEndpoint_corres]) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getBoundNotification_corres]) + apply (rule_tac r'="ntfn_relation" in corres_split) + apply (rule corres_option_split[rotated 2]) + apply (rule getNotification_corres) + apply clarsimp + apply (rule corres_trivial, simp add: ntfn_relation_def default_notification_def + default_ntfn_def) + apply (rule corres_if) + apply (clarsimp simp: ntfn_relation_def Ipc_A.isActive_def Endpoint_H.isActive_def + split: Structures_A.ntfn.splits Structures_H.notification.splits) + apply clarsimp + apply (rule completeSignal_corres) + apply (rule_tac P="einvs and valid_sched and tcb_at thread and + ep_at word1 and valid_ep ep and + obj_at (\k. k = Endpoint ep) word1 + and cte_wp_at (\c. c = cap.NullCap) (thread, tcb_cnode_index 3) + and ex_nonz_cap_to thread" and + P'="invs' and tcb_at' thread and ep_at' word1 and + valid_ep' epa" + in corres_inst) + apply (case_tac ep) + \ \IdleEP\ + apply (simp add: ep_relation_def) + apply (rule corres_guard_imp) + apply (case_tac isBlocking; simp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setEndpoint_corres) + apply (simp add: ep_relation_def) + apply wp+ + apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp) + apply simp + apply (clarsimp simp add: invs_def valid_state_def valid_pspace_def + valid_tcb_state_def st_tcb_at_tcb_at) + apply auto[1] + \ \SendEP\ + apply (simp add: ep_relation_def) + apply (rename_tac list) + apply (rule_tac F="list \ []" in corres_req) + apply (clarsimp simp: valid_ep_def) + apply (case_tac list, simp_all split del: if_split)[1] + apply (rule corres_guard_imp) + apply (rule corres_split[OF setEndpoint_corres]) + apply (case_tac lista, simp_all add: ep_relation_def)[1] + apply (rule corres_split[OF getThreadState_corres]) + apply (rule_tac + F="\data. + sender_state = + Structures_A.thread_state.BlockedOnSend word1 data" + in corres_gen_asm) + apply (clarsimp simp: isSend_def case_bool_If + case_option_If if3_fold + split del: if_split cong: if_cong) + apply (rule corres_split[OF doIPCTransfer_corres]) + apply (simp split del: if_split cong: if_cong) + apply (fold dc_def)[1] + apply (rule_tac P="valid_objs and valid_mdb and valid_list + and valid_sched + and cur_tcb + and valid_reply_caps + and pspace_aligned and pspace_distinct + and st_tcb_at (Not \ awaiting_reply) a + and st_tcb_at (Not \ halted) a + and tcb_at thread and valid_reply_masters + and cte_wp_at (\c. c = cap.NullCap) + (thread, tcb_cnode_index 3)" + and P'="tcb_at' a and tcb_at' thread and cur_tcb' + and Invariants_H.valid_queues + and valid_queues' + and valid_pspace' + and valid_objs' + and (\s. 
weak_sch_act_wf (ksSchedulerAction s) s)" + in corres_guard_imp [OF corres_if]) + apply (simp add: fault_rel_optionation_def) + apply (rule corres_if2 [OF _ setupCallerCap_corres setThreadState_corres]) + apply simp + apply simp + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule possibleSwitchTo_corres) + apply (wp sts_st_tcb_at' set_thread_state_runnable_weak_valid_sched_action + | simp)+ + apply (wp sts_st_tcb_at'_cases sts_valid_queues setThreadState_valid_queues' + setThreadState_st_tcb + | simp)+ + apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 valid_sched_def + valid_sched_action_def) + apply (clarsimp split: if_split_asm) + apply (clarsimp | wp do_ipc_transfer_tcb_caps)+ + apply (rule_tac Q="\_ s. sch_act_wf (ksSchedulerAction s) s" + in hoare_post_imp, erule sch_act_wf_weak) + apply (wp sts_st_tcb' gts_st_tcb_at | simp)+ + apply (simp cong: list.case_cong) + apply wp + apply simp + apply (wp weak_sch_act_wf_lift_linear setEndpoint_valid_mdb' set_ep_valid_objs') + apply (clarsimp split: list.split) + apply (clarsimp simp add: invs_def valid_state_def st_tcb_at_tcb_at) + apply (clarsimp simp add: valid_ep_def valid_pspace_def) + apply (drule(1) sym_refs_obj_atD[where P="\ko. ko = Endpoint e" for e]) + apply (fastforce simp: st_tcb_at_refs_of_rev elim: st_tcb_weakenE) + apply (auto simp: valid_ep'_def invs'_def valid_state'_def split: list.split)[1] + \ \RecvEP\ + apply (simp add: ep_relation_def) + apply (rule_tac corres_guard_imp) + apply (case_tac isBlocking; simp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setEndpoint_corres) + apply (simp add: ep_relation_def) + apply wp+ + apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres, simp) + apply simp + apply (clarsimp simp: valid_tcb_state_def invs_distinct) + apply (clarsimp simp add: valid_tcb_state'_def) + apply (wp get_simple_ko_wp[where f=Notification] getNotification_wp gbn_wp gbn_wp' + hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_if_lift + | wpc | simp add: ep_at_def2[symmetric, simplified] | clarsimp)+ + apply (clarsimp simp: valid_cap_def invs_psp_aligned invs_valid_objs pred_tcb_at_def + valid_obj_def valid_tcb_def valid_bound_ntfn_def invs_distinct + dest!: invs_valid_objs + elim!: obj_at_valid_objsE + split: option.splits) + apply clarsimp + apply (auto simp: valid_cap'_def invs_valid_pspace' valid_obj'_def valid_tcb'_def + valid_bound_ntfn'_def obj_at'_def pred_tcb_at'_def + dest!: invs_valid_objs' obj_at_valid_objs' + split: option.splits)[1] + done + +lemma receiveSignal_corres: + "\ is_ntfn_cap cap; cap_relation cap cap' \ \ + corres dc (invs and st_tcb_at active thread and valid_cap cap and ex_nonz_cap_to thread) + (invs' and tcb_at' thread and valid_cap' cap') + (receive_signal thread cap isBlocking) (receiveSignal thread cap' isBlocking)" + apply (simp add: receive_signal_def receiveSignal_def) + apply (case_tac cap, simp_all add: isEndpointCap_def) + apply (rename_tac word1 word2 rights) + apply (rule corres_guard_imp) + apply (rule_tac R="\rv. invs and tcb_at thread and st_tcb_at active thread and + ntfn_at word1 and ex_nonz_cap_to thread and + valid_ntfn rv and + obj_at (\k. k = Notification rv) word1" and + R'="\rv'. 
invs' and tcb_at' thread and ntfn_at' word1 and + valid_ntfn' rv'" + in corres_split[OF getNotification_corres]) + apply clarsimp + apply (case_tac "ntfn_obj rv") + \ \IdleNtfn\ + apply (simp add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (case_tac isBlocking; simp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setNotification_corres) + apply (simp add: ntfn_relation_def) + apply wp+ + apply (rule corres_guard_imp, rule doNBRecvFailedTransfer_corres; simp) + apply (clarsimp simp: invs_distinct) + apply simp + \ \WaitingNtfn\ + apply (simp add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (case_tac isBlocking; simp) + apply (rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule setNotification_corres) + apply (simp add: ntfn_relation_def) + apply wp+ + apply (rule corres_guard_imp) + apply (rule doNBRecvFailedTransfer_corres; simp) + apply (clarsimp simp: invs_distinct)+ + \ \ActiveNtfn\ + apply (simp add: ntfn_relation_def) + apply (rule corres_guard_imp) + apply (simp add: badgeRegister_def badge_register_def) + apply (rule corres_split[OF asUser_setRegister_corres]) + apply (rule setNotification_corres) + apply (simp add: ntfn_relation_def) + apply wp+ + apply (fastforce simp: invs_def valid_state_def valid_pspace_def + elim!: st_tcb_weakenE) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def) + apply wp+ + apply (clarsimp simp add: ntfn_at_def2 valid_cap_def st_tcb_at_tcb_at) + apply (clarsimp simp add: valid_cap'_def) + done + +lemma tg_sp': + "\P\ threadGet f p \\t. obj_at' (\t'. f t' = t) p and P\" + including no_pre + apply (simp add: threadGet_def) + apply wp + apply (rule hoare_strengthen_post) + apply (rule getObject_tcb_sp) + apply clarsimp + apply (erule obj_at'_weakenE) + apply simp + done + +declare lookup_cap_valid' [wp] + +lemma sendFaultIPC_corres: + "valid_fault f \ fr f f' \ + corres (fr \ dc) + (einvs and st_tcb_at active thread and ex_nonz_cap_to thread) + (invs' and sch_act_not thread and tcb_at' thread) + (send_fault_ipc thread f) (sendFaultIPC thread f')" + apply (simp add: send_fault_ipc_def sendFaultIPC_def + liftE_bindE Let_def) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="\fh fh'. 
fh = to_bl fh'"]) + apply (rule threadGet_corres) + apply (simp add: tcb_relation_def) + apply simp + apply (rule corres_splitEE) + apply (rule corres_cap_fault) + apply (rule lookup_cap_corres, rule refl) + apply (rule_tac P="einvs and st_tcb_at active thread + and valid_cap handler_cap and ex_nonz_cap_to thread" + and P'="invs' and tcb_at' thread and sch_act_not thread + and valid_cap' handlerCap" + in corres_inst) + apply (case_tac handler_cap, + simp_all add: isCap_defs lookup_failure_map_def + case_bool_If If_rearrage + split del: if_split cong: if_cong)[1] + apply (rule corres_guard_imp) + apply (rule corres_if2 [OF refl]) + apply (simp add: dc_def[symmetric]) + apply (rule corres_split[OF threadset_corres sendIPC_corres], simp_all)[1] + apply (simp add: tcb_relation_def fault_rel_optionation_def exst_same_def)+ + apply (wp thread_set_invs_trivial thread_set_no_change_tcb_state + thread_set_typ_at ep_at_typ_at ex_nonz_cap_to_pres + thread_set_cte_wp_at_trivial thread_set_not_state_valid_sched + | simp add: tcb_cap_cases_def)+ + apply ((wp threadSet_invs_trivial threadSet_tcb' + | simp add: tcb_cte_cases_def + | wp (once) sch_act_sane_lift)+)[1] + apply (rule corres_trivial, simp add: lookup_failure_map_def) + apply (clarsimp simp: st_tcb_at_tcb_at split: if_split) + apply (clarsimp simp: valid_cap_def invs_distinct) + apply (clarsimp simp: valid_cap'_def inQ_def) + apply auto[1] + apply (clarsimp simp: lookup_failure_map_def) + apply wp+ + apply (fastforce elim: st_tcb_at_tcb_at) + apply fastforce + done + +lemma gets_the_noop_corres: + assumes P: "\s. P s \ f s \ None" + shows "corres dc P P' (gets_the f) (return x)" + apply (clarsimp simp: corres_underlying_def gets_the_def + return_def gets_def bind_def get_def) + apply (clarsimp simp: assert_opt_def return_def dest!: P) + done + +lemma handleDoubleFault_corres: + "corres dc (tcb_at thread and pspace_aligned and pspace_distinct) + \ + (handle_double_fault thread f ft) + (handleDoubleFault thread f' ft')" + apply (rule corres_cross_over_guard[where Q="tcb_at' thread"]) + apply (fastforce intro!: tcb_at_cross) + apply (simp add: handle_double_fault_def handleDoubleFault_def) + apply (rule corres_guard_imp) + apply (subst bind_return [symmetric], + rule corres_split[OF setThreadState_corres]) + apply simp + apply (rule corres_noop2) + apply (simp add: exs_valid_def return_def) + apply (rule hoare_eq_P) + apply wp + apply (rule asUser_inv) + apply (rule getRestartPC_inv) + apply (wp no_fail_getRestartPC)+ + apply (wp|simp)+ + done + +crunch tcb' [wp]: sendFaultIPC "tcb_at' t" (wp: crunch_wps) + +crunch typ_at'[wp]: receiveIPC "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +lemmas receiveIPC_typ_ats[wp] = typ_at_lifts [OF receiveIPC_typ_at'] + +crunch typ_at'[wp]: receiveSignal "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +lemmas receiveAIPC_typ_ats[wp] = typ_at_lifts [OF receiveSignal_typ_at'] + +crunch aligned'[wp]: setupCallerCap "pspace_aligned'" + (wp: crunch_wps) +crunch distinct'[wp]: setupCallerCap "pspace_distinct'" + (wp: crunch_wps) +crunch cur_tcb[wp]: setupCallerCap "cur_tcb'" + (wp: crunch_wps) + +lemma setupCallerCap_state_refs_of[wp]: + "\\s. P ((state_refs_of' s) (sender := {r \ state_refs_of' s sender. snd r = TCBBound}))\ + setupCallerCap sender rcvr grant + \\rv s. 
P (state_refs_of' s)\" + apply (simp add: setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def) + apply (wp hoare_drop_imps) + apply (simp add: fun_upd_def cong: if_cong) + done + +lemma setupCallerCap_state_hyp_refs_of[wp]: + "setupCallerCap sender rcvr canGrant \\s. P (state_hyp_refs_of' s)\" + apply (simp add: setupCallerCap_def getThreadCallerSlot_def getThreadReplySlot_def) + apply (wp hoare_drop_imps) + done + +lemma setCTE_valid_queues[wp]: + "\Invariants_H.valid_queues\ setCTE ptr val \\rv. Invariants_H.valid_queues\" + by (wp valid_queues_lift setCTE_pred_tcb_at') + +crunch vq[wp]: cteInsert "Invariants_H.valid_queues" + (wp: crunch_wps) + +crunch vq[wp]: getThreadCallerSlot "Invariants_H.valid_queues" + (wp: crunch_wps) + +crunch vq[wp]: getThreadReplySlot "Invariants_H.valid_queues" + (wp: crunch_wps) + +lemma setupCallerCap_vq[wp]: + "\Invariants_H.valid_queues and (\s. \p. send \ set (ksReadyQueues s p))\ + setupCallerCap send recv grant \\_. Invariants_H.valid_queues\" + apply (simp add: setupCallerCap_def) + apply (wp crunch_wps sts_valid_queues) + apply (fastforce simp: valid_queues_def obj_at'_def inQ_def) + done + +crunch vq'[wp]: setupCallerCap "valid_queues'" + (wp: crunch_wps) + +lemma is_derived_ReplyCap' [simp]: + "\m p g. is_derived' m p (capability.ReplyCap t False g) = + (\c. \ g. c = capability.ReplyCap t True g)" + apply (subst fun_eq_iff) + apply clarsimp + apply (case_tac x, simp_all add: is_derived'_def isCap_simps + badge_derived'_def + vs_cap_ref'_def) + done + +lemma unique_master_reply_cap': + "\c t. isReplyCap c \ capReplyMaster c \ capTCBPtr c = t \ + (\g . c = capability.ReplyCap t True g)" + by (fastforce simp: isCap_simps conj_comms) + +lemma getSlotCap_cte_wp_at: + "\\\ getSlotCap sl \\rv. cte_wp_at' (\c. cteCap c = rv) sl\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma setupCallerCap_vp[wp]: + "\valid_pspace' and tcb_at' sender and tcb_at' rcvr\ + setupCallerCap sender rcvr grant \\rv. valid_pspace'\" + apply (simp add: valid_pspace'_def setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv getSlotCap_def) + apply (wp getCTE_wp) + apply (rule_tac Q="\_. valid_pspace' and + tcb_at' sender and tcb_at' rcvr" + in hoare_post_imp) + apply (clarsimp simp: valid_cap'_def o_def cte_wp_at_ctes_of isCap_simps + valid_pspace'_def) + apply (frule(1) ctes_of_valid', simp add: valid_cap'_def capAligned_def) + apply clarsimp + apply (wp | simp add: valid_pspace'_def valid_tcb_state'_def)+ + done + +declare haskell_assert_inv[wp del] + +lemma setupCallerCap_iflive[wp]: + "\if_live_then_nonz_cap' and ex_nonz_cap_to' sender\ + setupCallerCap sender rcvr grant + \\rv. if_live_then_nonz_cap'\" + unfolding setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + by (wp getSlotCap_cte_wp_at + | simp add: unique_master_reply_cap' + | strengthen eq_imp_strg + | wp (once) hoare_drop_imp[where f="getCTE rs" for rs])+ + +lemma setupCallerCap_ifunsafe[wp]: + "\if_unsafe_then_cap' and valid_objs' and + ex_nonz_cap_to' rcvr and tcb_at' rcvr\ + setupCallerCap sender rcvr grant + \\rv. if_unsafe_then_cap'\" + unfolding setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + apply (wp getSlotCap_cte_wp_at + | simp add: unique_master_reply_cap' | strengthen eq_imp_strg + | wp (once) hoare_drop_imp[where f="getCTE rs" for rs])+ + apply (rule_tac Q="\rv. 
valid_objs' and tcb_at' rcvr and ex_nonz_cap_to' rcvr" + in hoare_post_imp) + apply (clarsimp simp: ex_nonz_tcb_cte_caps' tcbCallerSlot_def + objBits_def objBitsKO_def dom_def cte_level_bits_def) + apply (wp sts_valid_objs' | simp)+ + apply (clarsimp simp: valid_tcb_state'_def)+ + done + +lemma setupCallerCap_global_refs'[wp]: + "\valid_global_refs'\ + setupCallerCap sender rcvr grant + \\rv. valid_global_refs'\" + unfolding setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + apply (wp getSlotCap_cte_wp_at + | simp add: o_def unique_master_reply_cap' + | strengthen eq_imp_strg + | wp (once) getCTE_wp | clarsimp simp: cte_wp_at_ctes_of)+ + (* at setThreadState *) + apply (rule_tac Q="\_. valid_global_refs'" in hoare_post_imp, wpsimp+) + done + +crunch valid_arch'[wp]: setupCallerCap "valid_arch_state'" + (wp: hoare_drop_imps) + +crunch irq_node'[wp]: setupCallerCap "\s. P (irq_node' s)" + (wp: hoare_drop_imps) + +lemma setupCallerCap_irq_handlers'[wp]: + "\valid_irq_handlers'\ + setupCallerCap sender rcvr grant + \\rv. valid_irq_handlers'\" + unfolding setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def locateSlot_conv + by (wp hoare_drop_imps | simp)+ + +lemma cteInsert_cap_to': + "\ex_nonz_cap_to' p and cte_wp_at' (\c. cteCap c = NullCap) dest\ + cteInsert cap src dest + \\rv. ex_nonz_cap_to' p\" + apply (simp add: cteInsert_def ex_nonz_cap_to'_def + updateCap_def setUntypedCapAsFull_def + split del: if_split) + apply (rule hoare_pre, rule hoare_vcg_ex_lift) + apply (wp updateMDB_weak_cte_wp_at + setCTE_weak_cte_wp_at + | simp + | rule hoare_drop_imps)+ + apply (wp getCTE_wp) + apply clarsimp + apply (rule_tac x=cref in exI) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_ctes_of)+ + done + +crunch cap_to'[wp]: setExtraBadge "ex_nonz_cap_to' p" + +crunch cap_to'[wp]: doIPCTransfer "ex_nonz_cap_to' p" + (ignore: transferCapsToSlots + wp: crunch_wps transferCapsToSlots_pres2 cteInsert_cap_to' hoare_vcg_const_Ball_lift + simp: zipWithM_x_mapM ball_conj_distrib) + +lemma st_tcb_idle': + "\valid_idle' s; st_tcb_at' P t s\ \ + (t = ksIdleThread s) \ P IdleThreadState" + by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + +crunch idle'[wp]: getThreadCallerSlot "valid_idle'" +crunch idle'[wp]: getThreadReplySlot "valid_idle'" + +crunch it[wp]: setupCallerCap "\s. P (ksIdleThread s)" + (simp: updateObject_cte_inv wp: crunch_wps) + +lemma setupCallerCap_idle'[wp]: + "\valid_idle' and valid_pspace' and + (\s. st \ ksIdleThread s \ rt \ ksIdleThread s)\ + setupCallerCap st rt gr + \\_. valid_idle'\" + by (simp add: setupCallerCap_def capRange_def | wp hoare_drop_imps)+ + +crunch it[wp]: setExtraBadge "\s. P (ksIdleThread s)" +crunch it[wp]: receiveIPC "\s. P (ksIdleThread s)" + (ignore: transferCapsToSlots + wp: transferCapsToSlots_pres2 crunch_wps hoare_vcg_const_Ball_lift + simp: crunch_simps ball_conj_distrib) + +crunch irq_states' [wp]: setupCallerCap valid_irq_states' + (wp: crunch_wps) + +crunch irqs_masked' [wp]: receiveIPC "irqs_masked'" + (wp: crunch_wps rule: irqs_masked_lift) + +crunch ct_not_inQ[wp]: getThreadCallerSlot "ct_not_inQ" +crunch ct_not_inQ[wp]: getThreadReplySlot "ct_not_inQ" + +lemma setupCallerCap_ct_not_inQ[wp]: + "\ct_not_inQ\ setupCallerCap sender receiver grant \\_. ct_not_inQ\" + apply (simp add: setupCallerCap_def) + apply (wp hoare_drop_imp setThreadState_ct_not_inQ) + done + +crunch ksQ'[wp]: copyMRs "\s. 
P (ksReadyQueues s)" + (wp: mapM_wp' hoare_drop_imps simp: crunch_simps) + +crunch ksQ[wp]: doIPCTransfer "\s. P (ksReadyQueues s)" + (wp: hoare_drop_imps hoare_vcg_split_case_option + mapM_wp' + simp: split_def zipWithM_x_mapM) + +crunch ct'[wp]: doIPCTransfer "\s. P (ksCurThread s)" + (wp: hoare_drop_imps hoare_vcg_split_case_option + mapM_wp' + simp: split_def zipWithM_x_mapM) + +lemma asUser_ct_not_inQ[wp]: + "\ct_not_inQ\ asUser t m \\rv. ct_not_inQ\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps threadSet_not_inQ | simp)+ + done + +crunch ct_not_inQ[wp]: copyMRs "ct_not_inQ" + (wp: mapM_wp' hoare_drop_imps simp: crunch_simps) + +crunch ct_not_inQ[wp]: doIPCTransfer "ct_not_inQ" + (ignore: transferCapsToSlots + wp: hoare_drop_imps hoare_vcg_split_case_option + mapM_wp' + simp: split_def zipWithM_x_mapM) + +lemma ntfn_q_refs_no_bound_refs': "rf : ntfn_q_refs_of' (ntfnObj ob) \ rf \ ntfn_bound_refs' (ntfnBoundTCB ob')" + by (auto simp add: ntfn_q_refs_of'_def ntfn_bound_refs'_def + split: Structures_H.ntfn.splits) + +lemma completeSignal_invs: + "\invs' and tcb_at' tcb\ + completeSignal ntfnptr tcb + \\_. invs'\" + apply (simp add: completeSignal_def) + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp set_ntfn_minor_invs' | wpc | simp)+ + apply (rule_tac Q="\_ s. (state_refs_of' s ntfnptr = ntfn_bound_refs' (ntfnBoundTCB ntfn)) + \ ntfn_at' ntfnptr s + \ valid_ntfn' (ntfnObj_update (\_. Structures_H.ntfn.IdleNtfn) ntfn) s + \ ((\y. ntfnBoundTCB ntfn = Some y) \ ex_nonz_cap_to' ntfnptr s) + \ ntfnptr \ ksIdleThread s" + in hoare_strengthen_post) + apply ((wp hoare_vcg_ex_lift hoare_weak_lift_imp | wpc | simp add: valid_ntfn'_def)+)[1] + apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def live'_def + split: option.splits) + apply (blast dest: ntfn_q_refs_no_bound_refs') + apply wp + apply (subgoal_tac "valid_ntfn' ntfn s") + apply (subgoal_tac "ntfnptr \ ksIdleThread s") + apply (fastforce simp: valid_ntfn'_def valid_bound_tcb'_def ko_at_state_refs_ofD' live'_def + elim: obj_at'_weakenE + if_live_then_nonz_capD'[OF invs_iflive' + obj_at'_real_def[THEN meta_eq_to_obj_eq, + THEN iffD1]]) + apply (fastforce simp: valid_idle'_def pred_tcb_at'_def obj_at'_def dest!: invs_valid_idle') + apply (fastforce dest: invs_valid_objs' ko_at_valid_objs' simp: valid_obj'_def) + done + +lemma setupCallerCap_urz[wp]: + "\untyped_ranges_zero' and valid_pspace' and tcb_at' sender\ + setupCallerCap sender t g \\rv. untyped_ranges_zero'\" + apply (simp add: setupCallerCap_def getSlotCap_def + getThreadCallerSlot_def getThreadReplySlot_def + locateSlot_conv) + apply (wp getCTE_wp') + apply (rule_tac Q="\_. untyped_ranges_zero' and valid_mdb' and valid_objs'" in hoare_post_imp) + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def untyped_derived_eq_def + isCap_simps) + apply (wp sts_valid_pspace_hangers) + apply (clarsimp simp: valid_tcb_state'_def) + done + +lemmas threadSet_urz = untyped_ranges_zero_lift[where f="cteCaps_of", OF _ threadSet_cteCaps_of] + +crunch urz[wp]: doIPCTransfer "untyped_ranges_zero'" + (ignore: threadSet wp: threadSet_urz crunch_wps simp: zipWithM_x_mapM) + +crunch gsUntypedZeroRanges[wp]: receiveIPC "\s. P (gsUntypedZeroRanges s)" + (wp: crunch_wps transferCapsToSlots_pres1 simp: zipWithM_x_mapM ignore: constOnFailure) + +crunch ctes_of[wp]: possibleSwitchTo "\s. 
P (ctes_of s)" + (wp: crunch_wps ignore: constOnFailure) + +lemmas possibleSwitchToTo_cteCaps_of[wp] + = cteCaps_of_ctes_of_lift[OF possibleSwitchTo_ctes_of] + +crunches possibleSwitchTo + for ksArch[wp]: "\s. P (ksArchState s)" + (wp: possibleSwitchTo_ctes_of crunch_wps ignore: constOnFailure) + +(* t = ksCurThread s *) +lemma ri_invs' [wp]: + "\invs' and sch_act_not t + and ct_in_state' simple' + and st_tcb_at' simple' t + and (\s. \p. t \ set (ksReadyQueues s p)) + and ex_nonz_cap_to' t + and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s)\ + receiveIPC t cap isBlocking + \\_. invs'\" (is "\?pre\ _ \_\") + apply (clarsimp simp: receiveIPC_def) + apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (rule hoare_seq_ext [OF _ gbn_sp']) + apply (rule hoare_seq_ext) + (* set up precondition for old proof *) + apply (rule_tac R="ko_at' ep (capEPPtr cap) and ?pre" in hoare_vcg_if_split) + apply (wp completeSignal_invs) + apply (case_tac ep) + \ \endpoint = RecvEP\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wpc, wp valid_irq_node_lift) + apply (simp add: valid_ep'_def) + apply (wp sts_sch_act' hoare_vcg_const_Ball_lift valid_irq_node_lift + sts_valid_queues setThreadState_ct_not_inQ + asUser_urz + | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ + apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at' o_def) + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (frule obj_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply (drule(1) sym_refs_ko_atD') + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (clarsimp simp: st_tcb_at_refs_of_rev' valid_ep'_def + valid_obj'_def tcb_bound_refs'_def + dest!: isCapDs) + apply (rule conjI, clarsimp) + apply (drule (1) bspec) + apply (clarsimp dest!: st_tcb_at_state_refs_ofD') + apply (clarsimp simp: set_eq_subset) + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply ((case_tac tp; fastforce elim: nonempty_cross_distinct_singleton_elim)+)[2] + apply (clarsimp split: if_split_asm) + apply (fastforce simp: valid_pspace'_def global'_no_ex_cap idle'_not_queued) + \ \endpoint = IdleEP\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wpc, wp valid_irq_node_lift) + apply (simp add: valid_ep'_def) + apply (wp sts_sch_act' valid_irq_node_lift + sts_valid_queues setThreadState_ct_not_inQ + asUser_urz + | simp add: doNBRecvFailedTransfer_def cteCaps_of_def)+ + apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def o_def) + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (subgoal_tac "t \ capEPPtr cap") + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule ko_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (clarsimp dest!: isCapDs) + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (clarsimp simp: tcb_bound_refs'_def + dest: symreftype_inverse' + split: if_split_asm) + apply (fastforce simp: global'_no_ex_cap) + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + \ \endpoint = SendEP\ + apply (simp add: invs'_def valid_state'_def) + apply (rename_tac list) + apply (case_tac list, simp_all split del: if_split) + apply (rename_tac sender queue) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift hoare_drop_imps setEndpoint_valid_mdb' + set_ep_valid_objs' sts_st_tcb' sts_sch_act' sts_valid_queues + setThreadState_ct_not_inQ possibleSwitchTo_valid_queues + possibleSwitchTo_valid_queues' + 
possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift + setEndpoint_ksQ setEndpoint_ct' + | simp add: valid_tcb_state'_def case_bool_If + case_option_If + split del: if_split cong: if_cong + | wp (once) sch_act_sane_lift hoare_vcg_conj_lift hoare_vcg_all_lift + untyped_ranges_zero_lift)+ + apply (clarsimp split del: if_split simp: pred_tcb_at') + apply (frule obj_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply (frule(1) ct_not_in_epQueue, clarsimp, clarsimp) + apply (drule(1) sym_refs_ko_atD') + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (clarsimp simp: valid_obj'_def valid_ep'_def st_tcb_at_refs_of_rev' conj_ac + split del: if_split + cong: if_cong) + apply (frule_tac t=sender in valid_queues_not_runnable'_not_ksQ) + apply (erule pred_tcb'_weakenE, clarsimp) + apply (subgoal_tac "sch_act_not sender s") + prefer 2 + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (drule st_tcb_at_state_refs_ofD') + apply (simp only: conj_ac(1, 2)[where Q="sym_refs R" for R]) + apply (subgoal_tac "distinct (ksIdleThread s # capEPPtr cap # t # sender # queue)") + apply (rule conjI) + apply (clarsimp simp: ep_redux_simps' cong: if_cong) + apply (erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: tcb_bound_refs'_def + dest: symreftype_inverse' + split: if_split_asm) + apply (clarsimp simp: singleton_tuple_cartesian split: list.split + | rule conjI | drule(1) bspec + | drule st_tcb_at_state_refs_ofD' bound_tcb_at_state_refs_ofD' + | clarsimp elim!: if_live_state_refsE)+ + apply (case_tac cap, simp_all add: isEndpointCap_def) + apply (clarsimp simp: global'_no_ex_cap) + apply (rule conjI + | clarsimp simp: singleton_tuple_cartesian split: list.split + | clarsimp elim!: if_live_state_refsE + | clarsimp simp: global'_no_ex_cap idle'_not_queued' idle'_no_refs tcb_bound_refs'_def + | drule(1) bspec | drule st_tcb_at_state_refs_ofD' + | clarsimp simp: set_eq_subset dest!: bound_tcb_at_state_refs_ofD' )+ + apply (rule hoare_pre) + apply (wp getNotification_wp | wpc | clarsimp)+ + done + +(* t = ksCurThread s *) +lemma rai_invs'[wp]: + "\invs' and sch_act_not t + and st_tcb_at' simple' t + and (\s. \p. t \ set (ksReadyQueues s p)) + and ex_nonz_cap_to' t + and (\s. \r \ zobj_refs' cap. ex_nonz_cap_to' r s) + and (\s. \ntfnptr. isNotificationCap cap + \ capNtfnPtr cap = ntfnptr + \ obj_at' (\ko. ntfnBoundTCB ko = None \ ntfnBoundTCB ko = Some t) + ntfnptr s)\ + receiveSignal t cap isBlocking + \\_. 
invs'\" + apply (simp add: receiveSignal_def) + apply (rule hoare_seq_ext [OF _ get_ntfn_sp']) + apply (rename_tac ep) + apply (case_tac "ntfnObj ep") + \ \ep = IdleNtfn\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift sts_sch_act' typ_at_lifts + sts_valid_queues setThreadState_ct_not_inQ + asUser_urz + | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def live'_def | wpc)+ + apply (clarsimp simp: pred_tcb_at' valid_tcb_state'_def) + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (subgoal_tac "capNtfnPtr cap \ t") + apply (frule valid_pspace_valid_objs') + apply (frule (1) ko_at_valid_objs') + apply clarsimp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def) + apply (rule conjI, clarsimp simp: obj_at'_def split: option.split) + apply (drule simple_st_tcb_at_state_refs_ofD' + ko_at_state_refs_ofD' bound_tcb_at_state_refs_ofD')+ + apply (clarsimp dest!: isCapDs) + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse' + split: if_split_asm) + apply (clarsimp dest!: global'_no_ex_cap) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + \ \ep = ActiveNtfn\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts hoare_weak_lift_imp + asUser_urz + | simp add: valid_ntfn'_def)+ + apply (clarsimp simp: pred_tcb_at' valid_pspace'_def) + apply (frule (1) ko_at_valid_objs') + apply clarsimp + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def isCap_simps) + apply (drule simple_st_tcb_at_state_refs_ofD' + ko_at_state_refs_ofD')+ + apply (erule delta_sym_refs) + apply (clarsimp split: if_split_asm simp: global'_no_ex_cap)+ + \ \ep = WaitingNtfn\ + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' + sts_valid_queues setThreadState_ct_not_inQ typ_at_lifts + asUser_urz + | simp add: valid_ntfn'_def doNBRecvFailedTransfer_def live'_def | wpc)+ + apply (clarsimp simp: valid_tcb_state'_def) + apply (frule_tac t=t in not_in_ntfnQueue) + apply (simp) + apply (simp) + apply (erule pred_tcb'_weakenE, clarsimp) + apply (frule ko_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply simp + apply (clarsimp simp: valid_obj'_def) + apply (clarsimp simp: valid_ntfn'_def pred_tcb_at') + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (rule conjI, clarsimp simp: obj_at'_def split: option.split) + apply (drule(1) sym_refs_ko_atD') + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (clarsimp simp: st_tcb_at_refs_of_rev' + dest!: isCapDs) + apply (rule conjI, erule delta_sym_refs) + apply (clarsimp split: if_split_asm) + apply (rename_tac list one two three four five six seven eight nine) + apply (subgoal_tac "set list \ {NTFNSignal} \ {}") + apply safe[1] + apply (auto simp: symreftype_inverse' ntfn_bound_refs'_def tcb_bound_refs'_def)[5] + apply (fastforce simp: tcb_bound_refs'_def + split: if_split_asm) + apply (clarsimp dest!: global'_no_ex_cap) + done + +lemma getCTE_cap_to_refs[wp]: + "\\\ getCTE p \\rv s. \r\zobj_refs' (cteCap rv). ex_nonz_cap_to' r s\" + apply (rule hoare_strengthen_post [OF getCTE_sp]) + apply (clarsimp simp: ex_nonz_cap_to'_def) + apply (fastforce elim: cte_wp_at_weakenE') + done + +lemma lookupCap_cap_to_refs[wp]: + "\\\ lookupCap t cref \\rv s. \r\zobj_refs' rv. 
ex_nonz_cap_to' r s\,-" + apply (simp add: lookupCap_def lookupCapAndSlot_def split_def + getSlotCap_def) + apply (wp | simp)+ + done + +crunches setVMRoot + for valid_objs'[wp]: valid_objs' + (wp: getASID_wp crunch_wps simp: getPoolPtr_def) + +lemma arch_stt_objs' [wp]: + "\valid_objs'\ Arch.switchToThread t \\rv. valid_objs'\" + apply (simp add: AARCH64_H.switchToThread_def) + apply wp + done + +lemma possibleSwitchTo_sch_act_not: + "\sch_act_not t' and K (t \ t')\ possibleSwitchTo t \\rv. sch_act_not t'\" + apply (simp add: possibleSwitchTo_def setSchedulerAction_def curDomain_def) + apply (wp hoare_drop_imps | wpc | simp)+ + done + +crunch urz[wp]: possibleSwitchTo "untyped_ranges_zero'" + (simp: crunch_simps unless_def wp: crunch_wps) + +declare zipWithM_x_mapM[simp] (* FIXME AARCH64: remove? *) + +lemma si_invs'[wp]: + "\invs' and st_tcb_at' simple' t + and (\s. \p. t \ set (ksReadyQueues s p)) + and sch_act_not t + and ex_nonz_cap_to' ep and ex_nonz_cap_to' t\ + sendIPC bl call ba cg cgr t ep + \\rv. invs'\" + supply if_split[split del] + apply (simp add: sendIPC_def) + apply (rule hoare_seq_ext [OF _ get_ep_sp']) + apply (case_tac epa) + \ \epa = RecvEP\ + apply simp + apply (rename_tac list) + apply (case_tac list) + apply simp + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (rule_tac P="a\t" in hoare_gen_asm) + apply (wp valid_irq_node_lift + sts_valid_objs' set_ep_valid_objs' setEndpoint_valid_mdb' sts_st_tcb' sts_sch_act' + possibleSwitchTo_sch_act_not sts_valid_queues setThreadState_ct_not_inQ + possibleSwitchTo_ksQ' possibleSwitchTo_ct_not_inQ hoare_vcg_all_lift sts_ksQ' + hoare_convert_imp [OF doIPCTransfer_sch_act doIPCTransfer_ct'] + hoare_convert_imp [OF setEndpoint_nosch setEndpoint_ct'] + hoare_drop_imp [where f="threadGet tcbFault t"] + | rule_tac f="getThreadState a" in hoare_drop_imp + | wp (once) hoare_drop_imp[where R="\_ _. call"] + hoare_drop_imp[where R="\_ _. \ call"] + hoare_drop_imp[where R="\_ _. 
cg"] + | simp add: valid_tcb_state'_def case_bool_If + case_option_If + cong: if_cong + | wp (once) sch_act_sane_lift tcb_in_cur_domain'_lift hoare_vcg_const_imp_lift)+ + apply (clarsimp simp: pred_tcb_at' cong: conj_cong imp_cong) + apply (frule obj_at_valid_objs', clarsimp) + apply (frule(1) sym_refs_ko_atD') + apply (clarsimp simp: valid_obj'_def valid_ep'_def + st_tcb_at_refs_of_rev' pred_tcb_at' + conj_comms fun_upd_def[symmetric]) + apply (frule pred_tcb_at') + apply (drule simple_st_tcb_at_state_refs_ofD' st_tcb_at_state_refs_ofD')+ + apply (clarsimp simp: valid_pspace'_splits) + apply (subst fun_upd_idem[where x=t]) + apply (clarsimp split: if_split) + apply (rule conjI, clarsimp simp: obj_at'_def) + apply (drule bound_tcb_at_state_refs_ofD') + apply (fastforce simp: tcb_bound_refs'_def) + apply (subgoal_tac "ex_nonz_cap_to' a s") + prefer 2 + apply (clarsimp elim!: if_live_state_refsE) + apply clarsimp + apply (rule conjI) + apply (drule bound_tcb_at_state_refs_ofD') + apply (fastforce simp: tcb_bound_refs'_def set_eq_subset) + apply (clarsimp simp: conj_ac) + apply (rule conjI, clarsimp simp: idle'_no_refs) + apply (rule conjI, clarsimp simp: global'_no_ex_cap) + apply (rule conjI) + apply (rule impI) + apply (frule(1) ct_not_in_epQueue, clarsimp, clarsimp) + apply (clarsimp) + apply (simp add: ep_redux_simps') + apply (rule conjI, clarsimp split: if_split) + apply (rule conjI, fastforce simp: tcb_bound_refs'_def set_eq_subset) + apply (clarsimp, erule delta_sym_refs; + solves\auto simp: symreftype_inverse' tcb_bound_refs'_def split: if_split_asm\) + apply (solves\clarsimp split: list.splits\) + \ \epa = IdleEP\ + apply (cases bl) + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wp valid_irq_node_lift) + apply (simp add: valid_ep'_def) + apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues + setThreadState_ct_not_inQ) + apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (subgoal_tac "ep \ t") + apply (drule simple_st_tcb_at_state_refs_ofD' ko_at_state_refs_ofD' + bound_tcb_at_state_refs_ofD')+ + apply (rule conjI, erule delta_sym_refs) + apply (auto simp: tcb_bound_refs'_def symreftype_inverse' + split: if_split_asm)[2] + apply (fastforce simp: global'_no_ex_cap) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply simp + apply wp + apply simp + \ \epa = SendEP\ + apply (cases bl) + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre, wp valid_irq_node_lift) + apply (simp add: valid_ep'_def) + apply (wp hoare_vcg_const_Ball_lift valid_irq_node_lift sts_sch_act' + sts_valid_queues setThreadState_ct_not_inQ) + apply (clarsimp simp: valid_tcb_state'_def pred_tcb_at') + apply (rule conjI, clarsimp elim!: obj_at'_weakenE) + apply (frule obj_at_valid_objs', clarsimp) + apply (frule(1) sym_refs_ko_atD') + apply (frule pred_tcb_at') + apply (drule simple_st_tcb_at_state_refs_ofD') + apply (drule bound_tcb_at_state_refs_ofD') + apply (clarsimp simp: valid_obj'_def valid_ep'_def st_tcb_at_refs_of_rev') + apply (rule conjI, clarsimp) + apply (drule (1) bspec) + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' bound_tcb_at_state_refs_ofD' + simp: tcb_bound_refs'_def) + apply (clarsimp simp: set_eq_subset) + apply (rule conjI, erule delta_sym_refs) + subgoal by (fastforce simp: obj_at'_def symreftype_inverse' + split: if_split_asm) + apply (fastforce simp: tcb_bound_refs'_def symreftype_inverse' + split: if_split_asm) + apply (fastforce simp: global'_no_ex_cap 
idle'_not_queued) + apply (simp | wp)+ + done + +lemma sfi_invs_plus': + "\invs' and st_tcb_at' simple' t + and sch_act_not t + and (\s. \p. t \ set (ksReadyQueues s p)) + and ex_nonz_cap_to' t\ + sendFaultIPC t f + \\rv. invs'\, \\rv. invs' and st_tcb_at' simple' t + and (\s. \p. t \ set (ksReadyQueues s p)) + and sch_act_not t and (\s. ksIdleThread s \ t)\" + apply (simp add: sendFaultIPC_def) + apply (wp threadSet_invs_trivial threadSet_pred_tcb_no_state + threadSet_cap_to' + | wpc | simp)+ + apply (rule_tac Q'="\rv s. invs' s \ sch_act_not t s + \ st_tcb_at' simple' t s + \ (\p. t \ set (ksReadyQueues s p)) + \ ex_nonz_cap_to' t s + \ t \ ksIdleThread s + \ (\r\zobj_refs' rv. ex_nonz_cap_to' r s)" + in hoare_post_imp_R) + apply wp + apply (clarsimp simp: inQ_def pred_tcb_at') + apply (wp | simp)+ + apply (clarsimp simp: eq_commute) + apply (subst(asm) global'_no_ex_cap, auto) + done + +crunches send_fault_ipc + for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (simp: crunch_simps wp: crunch_wps) + +lemma handleFault_corres: + "fr f f' \ + corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread + and (\_. valid_fault f)) + (invs' and sch_act_not thread + and (\s. \p. thread \ set(ksReadyQueues s p)) + and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) + (handle_fault thread f) (handleFault thread f')" + apply (simp add: handle_fault_def handleFault_def) + apply (rule corres_guard_imp) + apply (subst return_bind [symmetric], + rule corres_split[where P="tcb_at thread", + OF gets_the_noop_corres [where x="()"]]) + apply (simp add: tcb_at_def) + apply (rule corres_split_catch) + apply (rule_tac F="valid_fault f" in corres_gen_asm) + apply (rule sendFaultIPC_corres, assumption) + apply simp + apply (rule handleDoubleFault_corres) + apply wpsimp+ + apply (clarsimp simp: st_tcb_at_tcb_at st_tcb_def2 invs_def valid_state_def valid_idle_def) + apply auto + done + +lemma sts_invs_minor'': + "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st + \ (st \ Inactive \ \ idle' st \ + st' \ Inactive \ \ idle' st')) t + and (\s. t = ksIdleThread s \ idle' st) + and (\s. (\p. t \ set (ksReadyQueues s p)) \ runnable' st) + and (\s. runnable' st \ obj_at' tcbQueued t s + \ st_tcb_at' runnable' t s) + and (\s. \ runnable' st \ sch_act_not t s) + and invs'\ + setThreadState st t + \\rv. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp valid_irq_node_lift sts_sch_act' sts_valid_queues + setThreadState_ct_not_inQ) + apply clarsimp + apply (rule conjI) + apply fastforce + apply (rule conjI) + apply (clarsimp simp: pred_tcb_at'_def) + apply (drule obj_at_valid_objs') + apply (clarsimp simp: valid_pspace'_def) + apply (clarsimp simp: valid_obj'_def valid_tcb'_def) + subgoal by (cases st, auto simp: valid_tcb_state'_def + split: Structures_H.thread_state.splits)[1] + apply (rule conjI) + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (clarsimp elim!: st_tcb_ex_cap'') + done + +lemma hf_invs' [wp]: + "\invs' and sch_act_not t + and (\s. \p. t \ set(ksReadyQueues s p)) + and st_tcb_at' simple' t + and ex_nonz_cap_to' t and (\s. t \ ksIdleThread s)\ + handleFault t f \\r. 
invs'\" + apply (simp add: handleFault_def) + apply wp + apply (simp add: handleDoubleFault_def) + apply (wp sts_invs_minor'' dmo_invs')+ + apply (rule hoare_post_impErr, rule sfi_invs_plus', + simp_all) + apply (strengthen no_refs_simple_strg') + apply clarsimp + done + +declare zipWithM_x_mapM [simp del] + +lemma gts_st_tcb': + "\\\ getThreadState t \\r. st_tcb_at' (\st. st = r) t\" + apply (rule hoare_strengthen_post) + apply (rule gts_sp') + apply simp + done + +declare setEndpoint_ct' [wp] + +lemma setupCallerCap_pred_tcb_unchanged: + "\pred_tcb_at' proj P t and K (t \ t')\ + setupCallerCap t' t'' g + \\rv. pred_tcb_at' proj P t\" + apply (simp add: setupCallerCap_def getThreadCallerSlot_def + getThreadReplySlot_def) + apply (wp sts_pred_tcb_neq' hoare_drop_imps) + apply clarsimp + done + +lemma si_blk_makes_simple': + "\st_tcb_at' simple' t and K (t \ t')\ + sendIPC True call bdg x x' t' ep + \\rv. st_tcb_at' simple' t\" + apply (simp add: sendIPC_def) + apply (rule hoare_seq_ext [OF _ get_ep_inv']) + apply (case_tac xa, simp_all) + apply (rename_tac list) + apply (case_tac list, simp_all add: case_bool_If case_option_If + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp sts_st_tcb_at'_cases setupCallerCap_pred_tcb_unchanged + hoare_drop_imps) + apply (clarsimp simp: pred_tcb_at' del: disjCI) + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + done + +lemma si_blk_makes_runnable': + "\st_tcb_at' runnable' t and K (t \ t')\ + sendIPC True call bdg x x' t' ep + \\rv. st_tcb_at' runnable' t\" + apply (simp add: sendIPC_def) + apply (rule hoare_seq_ext [OF _ get_ep_inv']) + apply (case_tac xa, simp_all) + apply (rename_tac list) + apply (case_tac list, simp_all add: case_bool_If case_option_If + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp sts_st_tcb_at'_cases setupCallerCap_pred_tcb_unchanged + hoare_vcg_const_imp_lift hoare_drop_imps + | simp)+ + apply (clarsimp del: disjCI simp: pred_tcb_at' elim!: pred_tcb'_weakenE) + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + apply (wp sts_st_tcb_at'_cases) + apply clarsimp + done + +lemma sfi_makes_simple': + "\st_tcb_at' simple' t and K (t \ t')\ + sendFaultIPC t' ft + \\rv. st_tcb_at' simple' t\" + apply (rule hoare_gen_asm) + apply (simp add: sendFaultIPC_def + cong: if_cong capability.case_cong bool.case_cong) + apply (wpsimp wp: si_blk_makes_simple' threadSet_pred_tcb_no_state hoare_drop_imps + hoare_vcg_all_lift_R) + done + +lemma sfi_makes_runnable': + "\st_tcb_at' runnable' t and K (t \ t')\ + sendFaultIPC t' ft + \\rv. st_tcb_at' runnable' t\" + apply (rule hoare_gen_asm) + apply (simp add: sendFaultIPC_def + cong: if_cong capability.case_cong bool.case_cong) + apply (wpsimp wp: si_blk_makes_runnable' threadSet_pred_tcb_no_state hoare_drop_imps + hoare_vcg_all_lift_R) + done + +lemma hf_makes_runnable_simple': + "\st_tcb_at' P t' and K (t \ t') and K (P = runnable' \ P = simple')\ + handleFault t ft + \\rv. st_tcb_at' P t'\" + apply (safe intro!: hoare_gen_asm) + apply (simp_all add: handleFault_def handleDoubleFault_def) + apply (wp sfi_makes_runnable' sfi_makes_simple' sts_st_tcb_at'_cases + | simp add: handleDoubleFault_def)+ + done + +crunches possibleSwitchTo, completeSignal + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + +lemma ri_makes_runnable_simple': + "\st_tcb_at' P t' and K (t \ t') and K (P = runnable' \ P = simple')\ + receiveIPC t cap isBlocking + \\rv. 
st_tcb_at' P t'\" + including no_pre + apply (rule hoare_gen_asm)+ + apply (simp add: receiveIPC_def) + apply (case_tac cap, simp_all add: isEndpointCap_def) + apply (rule hoare_seq_ext [OF _ get_ep_inv']) + apply (rule hoare_seq_ext [OF _ gbn_sp']) + apply wp + apply (rename_tac ep q r) + apply (case_tac ep, simp_all) + apply (wp sts_st_tcb_at'_cases | wpc | simp add: doNBRecvFailedTransfer_def)+ + apply (rename_tac list) + apply (case_tac list, simp_all add: case_bool_If case_option_If + split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp sts_st_tcb_at'_cases setupCallerCap_pred_tcb_unchanged + hoare_vcg_const_imp_lift)+ + apply (simp, simp only: imp_conv_disj) + apply (wp hoare_vcg_disj_lift)+ + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (fastforce simp: pred_tcb_at'_def obj_at'_def isSend_def + split: Structures_H.thread_state.split_asm) + apply (rule hoare_pre) + apply wpsimp+ + done + +lemma rai_makes_runnable_simple': + "\st_tcb_at' P t' and K (t \ t') and K (P = runnable' \ P = simple')\ + receiveSignal t cap isBlocking + \\rv. st_tcb_at' P t'\" + apply (rule hoare_gen_asm) + apply (simp add: receiveSignal_def) + apply (rule hoare_pre) + by (wp sts_st_tcb_at'_cases getNotification_wp | wpc | simp add: doNBRecvFailedTransfer_def)+ + +lemma sendSignal_st_tcb'_Running: + "\st_tcb_at' (\st. st = Running \ P st) t\ + sendSignal ntfnptr bdg + \\_. st_tcb_at' (\st. st = Running \ P st) t\" + apply (simp add: sendSignal_def) + apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp hoare_weak_lift_imp + | wpc | clarsimp simp: pred_tcb_at')+ + done + +lemma sai_st_tcb': + "\st_tcb_at' P t and K (P Running)\ + sendSignal ntfn bdg + \\rv. st_tcb_at' P t\" + apply (rule hoare_gen_asm) + apply (subgoal_tac "\Q. P = (\st. st = Running \ Q st)") + apply (clarsimp intro!: sendSignal_st_tcb'_Running) + apply (fastforce intro!: exI[where x=P]) + done + +end + +end diff --git a/proof/refine/AARCH64/KHeap_R.thy b/proof/refine/AARCH64/KHeap_R.thy new file mode 100644 index 0000000000..2ede9e72a9 --- /dev/null +++ b/proof/refine/AARCH64/KHeap_R.thy @@ -0,0 +1,2237 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory KHeap_R +imports + Machine_R +begin + +lemma lookupAround2_known1: + "m x = Some y \ fst (lookupAround2 x m) = Some (x, y)" + by (fastforce simp: lookupAround2_char1) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma obj_at_getObject: + assumes R: + "\a b n ko s obj::'a::pspace_storable. + \ (a, b) \ fst (loadObject t t n ko s); projectKO_opt ko = Some obj \ \ a = obj" + shows "\obj_at' P t\ getObject t \\(rv::'a::pspace_storable) s. P rv\" + by (auto simp: getObject_def obj_at'_def in_monad valid_def + split_def lookupAround2_known1 + dest: R) + +declare projectKO_inv [wp] + +lemma loadObject_default_inv: + "\P\ loadObject_default addr addr' next obj \\rv. P\" + apply (simp add: loadObject_default_def magnitudeCheck_def + alignCheck_def unless_def alignError_def + | wp hoare_vcg_split_case_option + hoare_drop_imps hoare_vcg_all_lift)+ + done + +lemma getObject_inv: + assumes x: "\p q n ko. \P\ loadObject p q n ko \\(rv :: 'a :: pspace_storable). P\" + shows "\P\ getObject p \\(rv :: 'a :: pspace_storable). P\" + by (simp add: getObject_def split_def | wp x)+ + +lemma getObject_inv_vcpu [wp]: "\P\ getObject l \\(rv :: ArchStructures_H.vcpu). 
P\" + apply (rule getObject_inv) + apply simp + apply (rule loadObject_default_inv) + done + +lemma getObject_inv_tcb [wp]: "\P\ getObject l \\(rv :: Structures_H.tcb). P\" + apply (rule getObject_inv) + apply simp + apply (rule loadObject_default_inv) + done +end + +(* FIXME: this should go somewhere in spec *) +translations + (type) "'a kernel" <=(type) "kernel_state \ ('a \ kernel_state) set \ bool" + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma no_fail_loadObject_default [wp]: + "no_fail (\s. \obj. projectKO_opt ko = Some (obj::'a) \ + is_aligned p (objBits obj) \ q = p + \ case_option True (\x. 2 ^ (objBits obj) \ x - p) n) + (loadObject_default p q n ko :: ('a::pre_storable) kernel)" + apply (simp add: loadObject_default_def split_def projectKO_def + alignCheck_def alignError_def magnitudeCheck_def + unless_def) + apply (rule no_fail_pre) + apply (wp case_option_wp) + apply (clarsimp simp: is_aligned_mask) + apply (clarsimp split: option.split_asm) + apply (clarsimp simp: is_aligned_mask[symmetric]) + apply simp + done + +lemma no_fail_getObject_tcb [wp]: + "no_fail (tcb_at' t) (getObject t :: tcb kernel)" + apply (simp add: getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps' + cong: conj_cong) + apply (rule ps_clear_lookupAround2, assumption+) + apply simp + apply (simp add: field_simps) + apply (erule is_aligned_no_wrap') + apply simp + apply (fastforce split: option.split_asm simp: objBits_simps') + done + +lemma typ_at_to_obj_at': + "typ_at' (koType (TYPE ('a :: pspace_storable))) p s + = obj_at' (\ :: 'a \ bool) p s" + by (simp add: typ_at'_def obj_at'_real_def project_koType[symmetric]) + +lemmas typ_at_to_obj_at_arches + = typ_at_to_obj_at'[where 'a=pte, simplified] + typ_at_to_obj_at'[where 'a=asidpool, simplified] + typ_at_to_obj_at'[where 'a=user_data, simplified] + typ_at_to_obj_at'[where 'a=user_data_device, simplified] + typ_at_to_obj_at'[where 'a=vcpu, simplified] + +lemmas page_table_at_obj_at' + = page_table_at'_def[unfolded typ_at_to_obj_at_arches] + +lemma no_fail_getObject_vcpu[wp]: "no_fail (vcpu_at' vcpu) (getObject vcpu :: vcpu kernel)" + apply (simp add: getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps typ_at_to_obj_at_arches + cong: conj_cong option.case_cong) + apply (rule ps_clear_lookupAround2; assumption?) + apply simp + apply (erule is_aligned_no_overflow) + apply clarsimp + done + +lemma vcpu_at_ko: "typ_at (AArch AVCPU) p s \ \vcpu. 
ko_at (ArchObj (arch_kernel_obj.VCPU vcpu)) p s " + by (clarsimp simp add: obj_at_def) + +lemma corres_get_tcb: + "corres (tcb_relation \ the) (tcb_at t) (tcb_at' t) (gets (get_tcb t)) (getObject t)" + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp add: gets_def get_def return_def bind_def get_tcb_def) + apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) + apply (clarsimp simp add: obj_at_def is_tcb obj_at'_def projectKO_def + projectKO_opt_tcb split_def + getObject_def loadObject_default_def in_monad) + apply (case_tac koa) + apply (simp_all add: fail_def return_def) + apply (case_tac bb) + apply (simp_all add: fail_def return_def) + apply (clarsimp simp add: state_relation_def pspace_relation_def) + apply (drule bspec) + apply clarsimp + apply blast + apply (clarsimp simp add: other_obj_relation_def + lookupAround2_known1) + done + +lemma lookupAround2_same1[simp]: + "(fst (lookupAround2 x s) = Some (x, y)) = (s x = Some y)" + apply (rule iffI) + apply (simp add: lookupAround2_char1) + apply (simp add: lookupAround2_known1) + done + + (* If we ever copy this: consider lifting Haskell precondition to \ here first. Not strictly + necessary since the rest of the proofs manage to lift later, but might be more convenient + for new proofs. *) +lemma getObject_vcpu_corres: + "corres vcpu_relation (vcpu_at vcpu) (vcpu_at' vcpu) + (get_vcpu vcpu) (getObject vcpu)" + apply (simp add: getObject_def get_vcpu_def get_object_def split_def) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object, simp_all)[1] + apply (clarsimp simp: lookupAround2_known1) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (erule (1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_no_overflow) + apply simp + apply (clarsimp simp: objBits_simps split: option.split) + apply (clarsimp simp: in_monad loadObject_default_def) + apply (simp add: bind_assoc exec_gets gets_map_def assert_opt_def fail_def split: option.split) + apply (drule vcpu_at_ko) + apply (clarsimp simp: obj_at_def in_omonad) + apply (simp add: return_def) + apply (simp add: in_magnitude_check objBits_simps pageBits_def) + apply (frule in_inv_by_hoareD [OF magnitudeCheck_inv]) + apply (clarsimp simp: state_relation_def pspace_relation_def) + apply (drule bspec, blast) + apply (clarsimp simp: other_obj_relation_def) + done + +lemma getObject_tcb_at': + "\ \ \ getObject t \\r::tcb. tcb_at' t\" + by (clarsimp simp: valid_def getObject_def in_monad + loadObject_default_def obj_at'_def split_def + in_magnitude_check objBits_simps') + +text \updateObject_cte lemmas\ + +lemma koType_objBitsKO: + "koTypeOf k = koTypeOf k' \ objBitsKO k = objBitsKO k'" + by (auto simp: objBitsKO_def archObjSize_def + split: kernel_object.splits arch_kernel_object.splits) + +lemma updateObject_objBitsKO: + "(ko', t') \ fst (updateObject (val :: 'a :: pspace_storable) ko p q n t) + \ objBitsKO ko' = objBitsKO ko" + apply (drule updateObject_type) + apply (erule koType_objBitsKO) + done + +lemma updateObject_cte_is_tcb_or_cte: + fixes cte :: cte and ptr :: machine_word + shows "\ fst (lookupAround2 p (ksPSpace s)) = Some (q, ko); + snd (lookupAround2 p (ksPSpace s)) = n; + (ko', s') \ fst (updateObject cte ko p q n s) \ \ + (\tcb getF setF. ko = KOTCB tcb \ s' = s \ tcb_cte_cases (p - q) = Some (getF, setF) + \ ko' = KOTCB (setF (\x. 
cte) tcb) \ is_aligned q tcbBlockSizeBits \ ps_clear q tcbBlockSizeBits s) \ + (\cte'. ko = KOCTE cte' \ ko' = KOCTE cte \ s' = s + \ p = q \ is_aligned p cte_level_bits \ ps_clear p cte_level_bits s)" + apply (clarsimp simp: updateObject_cte typeError_def alignError_def + tcbVTableSlot_def tcbCTableSlot_def to_bl_1 rev_take objBits_simps' + in_monad map_bits_to_bl cte_level_bits_def in_magnitude_check + lookupAround2_char1 + split: kernel_object.splits) + apply (subst(asm) in_magnitude_check3, simp+) + apply (simp add: in_monad tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def tcbCallerSlot_def tcbIPCBufferSlot_def + split: if_split_asm) + apply (simp add: in_monad tcbCTableSlot_def tcbVTableSlot_def + tcbReplySlot_def tcbCallerSlot_def tcbIPCBufferSlot_def + split: if_split_asm) + done + +declare plus_1_less[simp] + +lemma updateObject_default_result: + "(x, s'') \ fst (updateObject_default e ko p q n s) \ x = injectKO e" + by (clarsimp simp add: updateObject_default_def in_monad) + +lemma obj_at_setObject1: + assumes R: "\(v::'a::pspace_storable) p q n ko s x s''. + (x, s'') \ fst (updateObject v ko p q n s) \ x = injectKO v" + assumes Q: "\(v::'a::pspace_storable) (v'::'a). objBits v = objBits v'" + shows + "\ obj_at' (\x::'a::pspace_storable. True) t \ + setObject p (v::'a::pspace_storable) + \ \rv. obj_at' (\x::'a::pspace_storable. True) t \" + apply (simp add: setObject_def split_def) + apply (rule hoare_seq_ext [OF _ hoare_gets_sp]) + apply (clarsimp simp: valid_def in_monad obj_at'_def lookupAround2_char1 project_inject + dest!: R) + apply (subgoal_tac "objBitsKO (injectKO v) = objBitsKO (injectKO obj)") + apply (intro conjI impI, simp_all) + apply fastforce+ + apply (fold objBits_def) + apply (rule Q) + done + +lemma obj_at_setObject2: + fixes v :: "'a::pspace_storable" + fixes P :: "'b::pspace_storable \ bool" + assumes R: "\ko s' (v :: 'a) oko x y n s. (ko, s') \ fst (updateObject v oko x y n s) + \ koTypeOf ko \ koType TYPE('b)" + shows + "\ obj_at' P t \ + setObject p (v::'a) + \ \rv. obj_at' P t \" + apply (simp add: setObject_def split_def) + apply (rule hoare_seq_ext [OF _ hoare_gets_sp]) + apply (clarsimp simp: valid_def in_monad) + apply (frule updateObject_type) + apply (drule R) + apply (clarsimp simp: obj_at'_def) + apply (rule conjI) + apply (clarsimp simp: lookupAround2_char1) + apply (drule iffD1 [OF project_koType, OF exI]) + apply simp + apply (clarsimp simp: ps_clear_upd lookupAround2_char1) + done + +lemma updateObject_ep_eta: + "updateObject (v :: endpoint) = updateObject_default v" + by ((rule ext)+, simp) + +lemma updateObject_tcb_eta: + "updateObject (v :: tcb) = updateObject_default v" + by ((rule ext)+, simp) + +lemma updateObject_ntfn_eta: + "updateObject (v :: Structures_H.notification) = updateObject_default v" + by ((rule ext)+, simp) + +lemmas updateObject_eta = + updateObject_ep_eta updateObject_tcb_eta updateObject_ntfn_eta + +lemma objBits_type: + "koTypeOf k = koTypeOf k' \ objBitsKO k = objBitsKO k'" + by (erule koType_objBitsKO) + +lemma setObject_typ_at_inv: + "\typ_at' T p'\ setObject p v \\r. typ_at' T p'\" + apply (clarsimp simp: setObject_def split_def) + apply (clarsimp simp: valid_def typ_at'_def ko_wp_at'_def in_monad + lookupAround2_char1 ps_clear_upd) + apply (drule updateObject_type) + apply clarsimp + apply (drule objBits_type) + apply (simp add: ps_clear_upd) + done + +lemma setObject_typ_at_not: + "\\s. \ (typ_at' T p' s)\ setObject p v \\r s. 
\ (typ_at' T p' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def) + apply (erule notE) + apply (clarsimp simp: typ_at'_def ko_wp_at'_def lookupAround2_char1 + split: if_split_asm) + apply (drule updateObject_type) + apply clarsimp + apply (drule objBits_type) + apply (clarsimp elim!: ps_clear_domE) + apply fastforce + apply (clarsimp elim!: ps_clear_domE) + apply fastforce + done + +lemma setObject_typ_at': + "\\s. P (typ_at' T p' s)\ setObject p v \\r s. P (typ_at' T p' s)\" + by (blast intro: P_bool_lift setObject_typ_at_inv setObject_typ_at_not) + +lemmas setObject_typ_ats [wp] = typ_at_lifts [OF setObject_typ_at'] + +lemma setObject_cte_wp_at2': + assumes x: "\x n tcb s t. \ t \ fst (updateObject v (KOTCB tcb) ptr x n s); Q s; + lookupAround2 ptr (ksPSpace s) = (Some (x, KOTCB tcb), n) \ + \ \tcb'. t = (KOTCB tcb', s) \ (\(getF, setF) \ ran tcb_cte_cases. getF tcb' = getF tcb)" + assumes y: "\x n cte s. fst (updateObject v (KOCTE cte) ptr x n s) = {}" + shows "\\s. P' (cte_wp_at' P p s) \ Q s\ setObject ptr v \\rv s. P' (cte_wp_at' P p s)\" + apply (clarsimp simp add: setObject_def valid_def in_monad split_def) + apply (simp add: cte_wp_at_cases' split del: if_split) + apply (erule rsubst[where P=P']) + apply (rule iffI) + apply (erule disjEI) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1 y) + apply (erule exEI [where 'a=machine_word]) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1) + apply (drule(1) x) + apply (clarsimp simp: lookupAround2_char1 prod_eqI) + apply (fastforce dest: bspec [OF _ ranI]) + apply (erule disjEI) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1 + split: if_split_asm) + apply (frule updateObject_type) + apply (case_tac ba, simp_all add: y)[1] + apply (erule exEI) + apply (clarsimp simp: ps_clear_upd lookupAround2_char1 + split: if_split_asm) + apply (frule updateObject_type) + apply (case_tac ba, simp_all) + apply (drule(1) x) + apply (clarsimp simp: prod_eqI lookupAround2_char1) + apply (fastforce dest: bspec [OF _ ranI]) + done + +lemma setObject_cte_wp_at': + assumes x: "\x n tcb s t. \ t \ fst (updateObject v (KOTCB tcb) ptr x n s); Q s; + lookupAround2 ptr (ksPSpace s) = (Some (x, KOTCB tcb), n) \ + \ \tcb'. t = (KOTCB tcb', s) \ (\(getF, setF) \ ran tcb_cte_cases. getF tcb' = getF tcb)" + assumes y: "\x n cte s. fst (updateObject v (KOCTE cte) ptr x n s) = {}" + shows "\cte_wp_at' P p and Q\ setObject ptr v \\rv. 
cte_wp_at' P p\" + unfolding pred_conj_def + by (rule setObject_cte_wp_at2'[OF x y], assumption+) + +lemma setObject_ep_pre: + assumes "\P and ep_at' p\ setObject p (e::endpoint) \Q\" + shows "\P\ setObject p (e::endpoint) \Q\" using assms + apply (clarsimp simp: valid_def setObject_def in_monad + split_def updateObject_default_def in_magnitude_check objBits_simps') + apply (drule spec, drule mp, erule conjI) + apply (simp add: obj_at'_def objBits_simps') + apply (simp add: split_paired_Ball) + apply (drule spec, erule mp) + apply (clarsimp simp: in_monad in_magnitude_check) + done + +lemma setObject_ntfn_pre: + assumes "\P and ntfn_at' p\ setObject p (e::Structures_H.notification) \Q\" + shows "\P\ setObject p (e::Structures_H.notification) \Q\" using assms + apply (clarsimp simp: valid_def setObject_def in_monad + split_def updateObject_default_def in_magnitude_check objBits_simps') + apply (drule spec, drule mp, erule conjI) + apply (simp add: obj_at'_def objBits_simps') + apply (simp add: split_paired_Ball) + apply (drule spec, erule mp) + apply (clarsimp simp: in_monad in_magnitude_check) + done + +lemma setObject_tcb_pre: + assumes "\P and tcb_at' p\ setObject p (t::tcb) \Q\" + shows "\P\ setObject p (t::tcb) \Q\" using assms + apply (clarsimp simp: valid_def setObject_def in_monad + split_def updateObject_default_def in_magnitude_check objBits_simps') + apply (drule spec, drule mp, erule conjI) + apply (simp add: obj_at'_def objBits_simps') + apply (simp add: split_paired_Ball) + apply (drule spec, erule mp) + apply (clarsimp simp: in_monad in_magnitude_check) + done + +lemma setObject_tcb_ep_at: + shows + "\ ep_at' t \ + setObject p (x::tcb) + \ \rv. ep_at' t \" + apply (rule obj_at_setObject2) + apply (auto dest: updateObject_default_result) + done + +lemma obj_at_setObject3: + fixes Q::"'a::pspace_storable \ bool" + fixes P::"'a::pspace_storable \ bool" + assumes R: "\ko s y n. (updateObject v ko p y n s) + = (updateObject_default v ko p y n s)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + shows "\(\s. P v)\ setObject p v \\rv. obj_at' P p\" + apply (clarsimp simp add: valid_def in_monad obj_at'_def + setObject_def split_def project_inject objBits_def[symmetric] + R updateObject_default_def in_magnitude_check P ps_clear_upd) + apply fastforce + done + +lemma setObject_tcb_strongest: + "\\s. if t = t' then P tcb else obj_at' P t' s\ + setObject t (tcb :: tcb) + \\rv. obj_at' P t'\" + apply (cases "t = t'") + apply simp + apply (rule hoare_weaken_pre) + apply (rule obj_at_setObject3) + apply simp + apply (simp add: objBits_simps') + apply simp + apply (simp add: setObject_def split_def) + apply (clarsimp simp: valid_def obj_at'_def split_def in_monad + updateObject_default_def ps_clear_upd) + done + +lemma getObject_obj_at': + assumes x: "\q n ko. loadObject p q n ko = + (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + shows "\ \ \ getObject p \\r::'a::pspace_storable. obj_at' ((=) r) p\" + by (clarsimp simp: valid_def getObject_def in_monad + loadObject_default_def obj_at'_def + split_def in_magnitude_check lookupAround2_char1 + x P project_inject objBits_def[symmetric]) + +lemma getObject_valid_obj: + assumes x: "\p q n ko. loadObject p q n ko = + (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" + assumes P: "\(v::'a::pspace_storable). 
(1 :: machine_word) < 2 ^ (objBits v)" + shows "\ valid_objs' \ getObject p \\rv::'a::pspace_storable. valid_obj' (injectKO rv) \" + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule getObject_obj_at' [OF x P]) + apply (rule getObject_inv) + apply (subst x) + apply (rule loadObject_default_inv) + apply (clarsimp, assumption) + apply clarsimp + apply (drule(1) obj_at_valid_objs') + apply (clarsimp simp: project_inject) + done + +declare fail_inv[simp] + +lemma typeError_inv [wp]: + "\P\ typeError x y \\rv. P\" + by (simp add: typeError_def|wp)+ + +lemma getObject_cte_inv [wp]: "\P\ (getObject addr :: cte kernel) \\rv. P\" + apply (simp add: getObject_def loadObject_cte split_def) + apply (clarsimp simp: valid_def in_monad) + apply (clarsimp simp: typeError_def in_monad magnitudeCheck_def + split: kernel_object.split_asm if_split_asm option.split_asm) + done + +lemma getObject_ko_at: + assumes x: "\q n ko. loadObject p q n ko = + (loadObject_default p q n ko :: ('a :: pspace_storable) kernel)" + assumes P: "\(v::'a::pspace_storable). (1 :: machine_word) < 2 ^ (objBits v)" + shows "\ \ \ getObject p \\r::'a::pspace_storable. ko_at' r p\" + by (subst eq_commute, rule getObject_obj_at' [OF x P]) + +lemma getObject_ko_at_tcb [wp]: + "\\\ getObject p \\rv::tcb. ko_at' rv p\" + by (rule getObject_ko_at | simp add: objBits_simps')+ + +lemma OMG_getObject_tcb: + "\obj_at' P t\ getObject t \\(tcb :: tcb) s. P tcb\" + apply (rule obj_at_getObject) + apply (clarsimp simp: loadObject_default_def in_monad) + done + +lemma setObject_nosch: + assumes x: "\p q n ko. \\s. P (ksSchedulerAction s)\ updateObject val p q n ko \\rv s. P (ksSchedulerAction s)\" + shows "\\s. P (ksSchedulerAction s)\ setObject t val \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +lemma getObject_ep_inv: "\P\ (getObject addr :: endpoint kernel) \\rv. P\" + apply (rule getObject_inv) + apply (simp add: loadObject_default_inv) + done + +lemma getObject_ntfn_inv: + "\P\ (getObject addr :: Structures_H.notification kernel) \\rv. P\" + apply (rule getObject_inv) + apply (simp add: loadObject_default_inv) + done + +lemma get_ep_inv'[wp]: "\P\ getEndpoint ep \\rv. P\" + by (simp add: getEndpoint_def getObject_ep_inv) + +lemma get_ntfn_inv'[wp]: "\P\ getNotification ntfn \\rv. P\" + by (simp add: getNotification_def getObject_ntfn_inv) + +lemma get_ep'_valid_ep[wp]: + "\ invs' and ep_at' ep \ getEndpoint ep \ valid_ep' \" + apply (simp add: getEndpoint_def) + apply (rule hoare_chain) + apply (rule getObject_valid_obj) + apply simp + apply (simp add: objBits_simps') + apply clarsimp + apply (simp add: valid_obj'_def) + done + +lemma get_ntfn'_valid_ntfn[wp]: + "\ invs' and ntfn_at' ep \ getNotification ep \ valid_ntfn' \" + apply (simp add: getNotification_def) + apply (rule hoare_chain) + apply (rule getObject_valid_obj) + apply simp + apply (simp add: objBits_simps') + apply clarsimp + apply (simp add: valid_obj'_def) + done + +lemma setObject_distinct[wp]: + shows "\pspace_distinct'\ setObject p val \\rv. pspace_distinct'\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad lookupAround2_char1 + pspace_distinct'_def ps_clear_upd objBits_def[symmetric] + split: if_split_asm + dest!: updateObject_objBitsKO) + apply (fastforce dest: bspec[OF _ domI]) + apply (fastforce dest: bspec[OF _ domI]) + done + +lemma setObject_aligned[wp]: + shows "\pspace_aligned'\ setObject p val \\rv. 
pspace_aligned'\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad lookupAround2_char1 + pspace_aligned'_def ps_clear_upd objBits_def[symmetric] + split: if_split_asm + dest!: updateObject_objBitsKO) + apply (fastforce dest: bspec[OF _ domI]) + apply (fastforce dest: bspec[OF _ domI]) + done + +lemma set_ep_aligned' [wp]: + "\pspace_aligned'\ setEndpoint ep v \\rv. pspace_aligned'\" + unfolding setEndpoint_def by wp + +lemma set_ep_distinct' [wp]: + "\pspace_distinct'\ setEndpoint ep v \\rv. pspace_distinct'\" + unfolding setEndpoint_def by wp + +lemma setEndpoint_cte_wp_at': + "\cte_wp_at' P p\ setEndpoint ptr v \\rv. cte_wp_at' P p\" + unfolding setEndpoint_def + apply (rule setObject_cte_wp_at'[where Q="\", simplified]) + apply (clarsimp simp add: updateObject_default_def in_monad + intro!: set_eqI)+ + done + +lemma setEndpoint_pred_tcb_at'[wp]: + "\pred_tcb_at' proj P t\ setEndpoint ptr val \\rv. pred_tcb_at' proj P t\" + apply (simp add: pred_tcb_at'_def setEndpoint_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma get_ntfn_ko': + "\\\ getNotification ep \\rv. ko_at' rv ep\" + apply (simp add: getNotification_def) + apply (rule getObject_ko_at) + apply simp + apply (simp add: objBits_simps') + done + +lemma set_ntfn_aligned'[wp]: + "\pspace_aligned'\ setNotification p ntfn \\rv. pspace_aligned'\" + unfolding setNotification_def by wp + +lemma set_ntfn_distinct'[wp]: + "\pspace_distinct'\ setNotification p ntfn \\rv. pspace_distinct'\" + unfolding setNotification_def by wp + +lemma setNotification_cte_wp_at': + "\cte_wp_at' P p\ setNotification ptr v \\rv. cte_wp_at' P p\" + unfolding setNotification_def + apply (rule setObject_cte_wp_at'[where Q="\", simplified]) + apply (clarsimp simp add: updateObject_default_def in_monad + intro!: set_eqI)+ + done + +lemma setObject_ep_tcb': + "\tcb_at' t\ setObject p (e::endpoint) \\_. tcb_at' t\" + by (rule setObject_typ_ats) + +lemma setObject_ntfn_tcb': + "\tcb_at' t\ setObject p (e::Structures_H.notification) \\_. tcb_at' t\" + by (rule setObject_typ_ats) + +lemma set_ntfn_tcb' [wp]: + "\ tcb_at' t \ setNotification ntfn v \ \rv. tcb_at' t \" + by (simp add: setNotification_def setObject_ntfn_tcb') + +lemma pspace_dom_update: + "\ ps ptr = Some x; a_type x = a_type v \ \ pspace_dom (ps(ptr \ v)) = pspace_dom ps" + apply (simp add: pspace_dom_def dom_fun_upd2 del: dom_fun_upd) + apply (rule SUP_cong [OF refl]) + apply clarsimp + apply (simp add: obj_relation_cuts_def3) + done + +lemmas ps_clear_def3 = ps_clear_def2 [OF order_less_imp_le [OF aligned_less_plus_1]] + + +declare diff_neg_mask[simp del] + +lemma cte_wp_at_ctes_of: + "cte_wp_at' P p s = (\cte. ctes_of s p = Some cte \ P cte)" + apply (simp add: cte_wp_at_cases' map_to_ctes_def Let_def + cte_level_bits_def objBits_simps' + split del: if_split) + apply (safe del: disjCI) + apply (clarsimp simp: ps_clear_def3 field_simps) + apply (clarsimp simp: ps_clear_def3 field_simps + split del: if_split) + apply (frule is_aligned_sub_helper) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def split: if_split_asm) + apply (case_tac "n = 0") + apply (clarsimp simp: field_simps) + apply (subgoal_tac "ksPSpace s p = None") + apply clarsimp + apply (clarsimp simp: field_simps) + apply (elim conjE) + apply (subst(asm) mask_in_range, assumption) + apply (drule arg_cong[where f="\S. 
p \ S"]) + apply (simp add: dom_def field_simps) + apply (erule mp) + apply (rule ccontr, simp add: linorder_not_le) + apply (drule word_le_minus_one_leq) + apply clarsimp + apply (simp add: field_simps) + apply (clarsimp split: if_split_asm del: disjCI) + apply (simp add: ps_clear_def3 field_simps) + apply (rule disjI2, rule exI[where x="(p - (p && ~~ mask tcb_bits))"]) + apply (clarsimp simp: ps_clear_def3[where na=tcb_bits] is_aligned_mask add_ac + word_bw_assocs) + done + +lemma tcb_cte_cases_small: + "\ tcb_cte_cases v = Some (getF, setF) \ + \ v < 2 ^ tcbBlockSizeBits" + by (simp add: tcb_cte_cases_def objBits_defs split: if_split_asm) + +lemmas tcb_cte_cases_aligned_helpers = + is_aligned_add_helper [OF _ tcb_cte_cases_small] + is_aligned_sub_helper [OF _ tcb_cte_cases_small] + +lemma ctes_of_from_cte_wp_at: + assumes x: "\P P' p. \\s. P (cte_wp_at' P' p s) \ Q s\ f \\r s. P (cte_wp_at' P' p s)\" + shows "\\s. P (ctes_of s) \ Q s\ f \\rv s. P (ctes_of s)\" + apply (clarsimp simp: valid_def + elim!: rsubst[where P=P] + intro!: ext) + apply (case_tac "ctes_of s x", simp_all) + apply (drule_tac P1=Not and P'1="\" and p1=x in use_valid [OF _ x], + simp_all add: cte_wp_at_ctes_of) + apply (drule_tac P1=id and P'1="(=) aa" and p1=x in use_valid [OF _ x], + simp_all add: cte_wp_at_ctes_of) + done + +lemmas setObject_ctes_of = ctes_of_from_cte_wp_at [OF setObject_cte_wp_at2'] + +lemma map_to_ctes_upd_cte: + "\ s p = Some (KOCTE cte'); is_aligned p cte_level_bits; + {p + 1..p + mask cte_level_bits} \ dom s = {} \ \ + map_to_ctes (s (p \ (KOCTE cte))) = ((map_to_ctes s) (p \ cte))" + apply (rule ext) + apply (simp add: map_to_ctes_def Let_def dom_fun_upd2 + split del: if_split del: dom_fun_upd) + apply (case_tac "x = p") + apply (simp add: objBits_simps' cte_level_bits_def mask_def field_simps) + apply (case_tac "(x && ~~ mask (objBitsKO (KOTCB undefined))) = p") + apply clarsimp + apply (simp del: dom_fun_upd split del: if_split cong: if_cong + add: dom_fun_upd2 field_simps objBits_simps) + done + +declare overflow_plus_one_self[simp] + +lemma map_to_ctes_upd_tcb: + "\ s p = Some (KOTCB tcb'); is_aligned p tcbBlockSizeBits; {p + 1..p + mask tcbBlockSizeBits} \ dom s = {} \ \ + map_to_ctes (s (p \ (KOTCB tcb))) = + (\x. if \getF setF. 
tcb_cte_cases (x - p) = Some (getF, setF) + \ getF tcb \ getF tcb' + then (case tcb_cte_cases (x - p) of Some (getF, setF) \ Some (getF tcb)) + else map_to_ctes s x)" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (subgoal_tac "p && ~~ (mask tcbBlockSizeBits) = p") + apply (rule ext) + apply (simp add: map_to_ctes_def Let_def dom_fun_upd2 + split del: if_split del: dom_fun_upd + cong: option.case_cong if_cong) + apply (case_tac "x = p") + apply (simp add: objBits_simps' field_simps map_to_ctes_def mask_def) + apply (case_tac "x && ~~ mask (objBitsKO (KOTCB undefined)) = p") + apply (case_tac "tcb_cte_cases (x - p)") + apply (simp split del: if_split cong: if_cong option.case_cong) + apply (subgoal_tac "s x = None") + apply (simp add: field_simps objBits_simps' mask_def split del: if_split + cong: if_cong option.case_cong) + apply clarsimp + apply (subst(asm) mask_in_range[where bits="objBitsKO v" for v]) + apply (simp add: objBits_simps') + apply (drule_tac a=x in equals0D) + apply (simp add: dom_def objBits_simps' mask_def field_simps) + apply (erule mp) + apply (rule ccontr, simp add: linorder_not_le) + apply (drule word_le_minus_one_leq, simp) + apply (case_tac "tcb_cte_cases (x - p)") + apply (simp split del: if_split cong: if_cong option.case_cong) + apply (rule FalseE) + apply (subst(asm) mask_in_range[where bits="objBitsKO v" for v]) + apply (simp add: objBitsKO_def) + apply (subgoal_tac "x - p < 2 ^ tcbBlockSizeBits") + apply (frule word_le_minus_one_leq) + apply (frule(1) is_aligned_no_wrap') + apply (drule word_plus_mono_right[where x=p]) + apply (simp only: field_simps) + apply (erule is_aligned_no_overflow) + apply (simp add: objBits_simps' field_simps) + apply (clarsimp simp: tcb_cte_cases_def objBits_simps' mask_def field_simps + split: if_split_asm) + apply (subst mask_in_range, assumption) + apply (simp only: atLeastAtMost_iff order_refl simp_thms) + apply (erule is_aligned_no_overflow) + done + +lemma map_to_ctes_upd_other: + "\ s p = Some ko; case ko of KOTCB tcb \ False | KOCTE cte \ False | _ \ True; + case ko' of KOTCB tcb \ False | KOCTE cte \ False | _ \ True \ \ + map_to_ctes (s (p \ ko')) = (map_to_ctes s)" + apply (rule ext) + apply (simp add: map_to_ctes_def Let_def dom_fun_upd2 + split del: if_split del: dom_fun_upd + cong: if_cong) + apply (rule if_cong) + apply clarsimp + apply fastforce + apply clarsimp + apply (rule if_cong) + apply clarsimp + apply fastforce + apply clarsimp + apply (rule refl) + done + +lemma ctes_of_eq_cte_wp_at': + "cte_wp_at' ((=) cte) x s \ ctes_of s x = Some cte" + by (simp add: cte_wp_at_ctes_of) + +lemma tcb_cte_cases_change: + "tcb_cte_cases x = Some (getF, setF) \ + (\getF. (\setF. tcb_cte_cases y = Some (getF, setF)) \ getF (setF f tcb) \ getF tcb) + = (x = y \ f (getF tcb) \ getF tcb)" + apply (rule iffI) + apply (clarsimp simp: tcb_cte_cases_def split: if_split_asm) + apply (clarsimp simp: tcb_cte_cases_def split: if_split_asm) + done + +lemma cte_level_bits_nonzero [simp]: "0 < cte_level_bits" + by (simp add: cte_level_bits_def) + +lemma ctes_of_setObject_cte: + "\\s. P ((ctes_of s) (p \ cte))\ setObject p (cte :: cte) \\rv s. P (ctes_of s)\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad) + apply (drule(1) updateObject_cte_is_tcb_or_cte[OF _ refl, rotated]) + apply (elim exE conjE disjE rsubst[where P=P]) + apply (clarsimp simp: lookupAround2_char1) + apply (subst map_to_ctes_upd_tcb; assumption?) 
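+   (* At this point updateObject_cte_is_tcb_or_cte has split the goal into the two ways a
+      cte can sit in the heap: as a slot inside a KOTCB (rewritten with map_to_ctes_upd_tcb,
+      whose alignment and ps_clear side conditions are discharged in the next steps) or as a
+      standalone KOCTE object (handled further below via map_to_ctes_upd_cte). *)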
+ apply (clarsimp simp: mask_def objBits_defs field_simps ps_clear_def3) + apply (clarsimp simp: tcb_cte_cases_change) + apply (rule ext, clarsimp) + apply (intro conjI impI) + apply (clarsimp simp: tcb_cte_cases_def split: if_split_asm) + apply (drule(1) cte_wp_at_tcbI'[where P="(=) cte"]) + apply (simp add: ps_clear_def3 field_simps) + apply assumption+ + apply (simp add: cte_wp_at_ctes_of) + apply (clarsimp simp: map_to_ctes_upd_cte ps_clear_def3 field_simps mask_def) + done + +declare foldl_True[simp] + +lemma real_cte_at': + "real_cte_at' p s \ cte_at' p s" + by (clarsimp simp add: cte_wp_at_cases' obj_at'_def objBits_simps' cte_level_bits_def + del: disjCI) + +lemma no_fail_getEndpoint [wp]: + "no_fail (ep_at' ptr) (getEndpoint ptr)" + apply (simp add: getEndpoint_def getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps' lookupAround2_known1) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (simp add: field_simps) + apply (erule is_aligned_no_wrap') + apply (simp add: word_bits_conv) + apply (clarsimp split: option.split_asm simp: objBits_simps') + done + +lemma getEndpoint_corres: + "corres ep_relation (ep_at ptr) (ep_at' ptr) + (get_endpoint ptr) (getEndpoint ptr)" + apply (rule corres_no_failI) + apply wp + apply (simp add: get_simple_ko_def getEndpoint_def get_object_def + getObject_def bind_assoc ep_at_def2) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def return_def) + apply (clarsimp simp: assert_def fail_def obj_at_def return_def is_ep partial_inv_def) + apply (clarsimp simp: loadObject_default_def in_monad in_magnitude_check objBits_simps') + apply (clarsimp simp add: state_relation_def pspace_relation_def) + apply (drule bspec) + apply blast + apply (simp add: other_obj_relation_def) + done + +declare magnitudeCheck_inv [wp] + +declare alignCheck_inv [wp] + +lemma setObject_ct_inv: + "\\s. P (ksCurThread s)\ setObject t (v::tcb) \\rv s. P (ksCurThread s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_cd_inv: + "\\s. P (ksCurDomain s)\ setObject t (v::tcb) \\rv s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_it_inv: +"\\s. P (ksIdleThread s)\ setObject t (v::tcb) \\rv s. P (ksIdleThread s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_ksDomSchedule_inv: + "\\s. P (ksDomSchedule s)\ setObject t (v::tcb) \\rv s. P (ksDomSchedule s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma projectKO_def2: + "projectKO ko = assert_opt (projectKO_opt ko)" + by (simp add: projectKO_def assert_opt_def) + +lemma no_fail_magnitudeCheck[wp]: + "no_fail (\s. case y of None \ True | Some z \ 2 ^ n \ z - x) + (magnitudeCheck x y n)" + apply (clarsimp simp add: magnitudeCheck_def split: option.splits) + apply (rule no_fail_pre, wp) + apply simp + done + +lemma no_fail_setObject_other [wp]: + fixes ob :: "'a :: pspace_storable" + assumes x: "updateObject ob = updateObject_default ob" + shows "no_fail (obj_at' (\k::'a. 
objBits k = objBits ob) ptr) + (setObject ptr ob)" + apply (simp add: setObject_def x split_def updateObject_default_def + projectKO_def2 alignCheck_def alignError_def) + apply (rule no_fail_pre) + apply (wp ) + apply (clarsimp simp: is_aligned_mask[symmetric] obj_at'_def + objBits_def[symmetric] project_inject lookupAround2_known1) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (erule is_aligned_get_word_bits) + apply (subst add_diff_eq[symmetric]) + apply (erule is_aligned_no_wrap') + apply simp + apply simp + apply fastforce + done + +lemma obj_relation_cut_same_type: + "\ (y, P) \ obj_relation_cuts ko x; P ko z; + (y', P') \ obj_relation_cuts ko' x'; P' ko' z \ + \ (a_type ko = a_type ko') + \ (\n n'. a_type ko = ACapTable n \ a_type ko' = ACapTable n') + \ (\sz sz'. a_type ko = AArch (AUserData sz) \ a_type ko' = AArch (AUserData sz')) + \ (\sz sz'. a_type ko = AArch (ADeviceData sz) \ a_type ko' = AArch (ADeviceData sz')) + \ (\pt_t pt_t'. a_type ko = AArch (APageTable pt_t) \ a_type ko' = AArch (APageTable pt_t'))" + apply (rule ccontr) + apply (simp add: obj_relation_cuts_def2 a_type_def) + apply (auto simp: other_obj_relation_def cte_relation_def pte_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + Structures_H.kernel_object.split_asm + arch_kernel_obj.split_asm arch_kernel_object.split_asm) + done + +definition exst_same :: "Structures_H.tcb \ Structures_H.tcb \ bool" +where + "exst_same tcb tcb' \ tcbPriority tcb = tcbPriority tcb' + \ tcbTimeSlice tcb = tcbTimeSlice tcb' + \ tcbDomain tcb = tcbDomain tcb'" + +fun exst_same' :: "Structures_H.kernel_object \ Structures_H.kernel_object \ bool" +where + "exst_same' (KOTCB tcb) (KOTCB tcb') = exst_same tcb tcb'" | + "exst_same' _ _ = True" + +lemma setObject_other_corres: + fixes ob' :: "'a :: pspace_storable" + assumes x: "updateObject ob' = updateObject_default ob'" + assumes z: "\s. obj_at' P ptr s + \ map_to_ctes ((ksPSpace s) (ptr \ injectKO ob')) = map_to_ctes (ksPSpace s)" + assumes t: "is_other_obj_relation_type (a_type ob)" + assumes b: "\ko. P ko \ objBits ko = objBits ob'" + assumes e: "\ko. P ko \ exst_same' (injectKO ko) (injectKO ob')" + assumes P: "\v::'a::pspace_storable. (1 :: machine_word) < 2 ^ objBits v" + shows "other_obj_relation ob (injectKO (ob' :: 'a :: pspace_storable)) \ + corres dc (obj_at (\ko. 
a_type ko = a_type ob) ptr and obj_at (same_caps ob) ptr) + (obj_at' (P :: 'a \ bool) ptr) + (set_object ptr ob) (setObject ptr ob')" + supply image_cong_simp [cong del] projectKOs[simp del] + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (rule x) + apply (clarsimp simp: b elim!: obj_at'_weakenE) + apply (unfold set_object_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def x + projectKOs obj_at_def + updateObject_default_def in_magnitude_check [OF _ P]) + apply (rename_tac ko) + apply (clarsimp simp add: state_relation_def z) + apply (clarsimp simp add: caps_of_state_after_update cte_wp_at_after_update + swp_def fun_upd_def obj_at_def) + apply (subst conj_assoc[symmetric]) + apply (rule conjI[rotated]) + apply (clarsimp simp add: ghost_relation_def) + apply (erule_tac x=ptr in allE)+ + apply (clarsimp simp: obj_at_def a_type_def + split: Structures_A.kernel_object.splits if_split_asm) + apply (simp split: arch_kernel_obj.splits if_splits) + apply (fold fun_upd_def) + apply (simp only: pspace_relation_def pspace_dom_update dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply (rule conjI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: is_other_obj_relation_type t) + apply (drule(1) bspec) + apply clarsimp + apply (frule_tac ko'=ko and x'=ptr in obj_relation_cut_same_type, + (fastforce simp add: is_other_obj_relation_type t)+) + apply (erule disjE) + apply (simp add: is_other_obj_relation_type t) + apply (erule disjE) + apply (insert t, clarsimp simp: is_other_obj_relation_type_CapTable a_type_def) + apply (erule disjE) + apply (insert t, clarsimp simp: is_other_obj_relation_type_UserData a_type_def) + apply (erule disjE) + apply (insert t, clarsimp simp: is_other_obj_relation_type_DeviceData a_type_def) + apply (insert t, clarsimp simp: is_other_obj_relation_type_PageTable a_type_def) + apply (simp only: ekheap_relation_def) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (insert e) + apply atomize + apply (clarsimp simp: obj_at'_def) + apply (erule_tac x=obj in allE) + apply (clarsimp simp: projectKO_eq project_inject) + (* FIXME AARCH64: slow due to multiple (too many?) splits *) + apply (case_tac ob; + simp add: a_type_def other_obj_relation_def etcb_relation_def + is_other_obj_relation_type t exst_same_def) + apply (clarsimp simp: is_other_obj_relation_type t exst_same_def + split: Structures_A.kernel_object.splits Structures_H.kernel_object.splits + arch_kernel_obj.splits)+ + done + +lemmas obj_at_simps = obj_at_def obj_at'_def map_to_ctes_upd_other + is_other_obj_relation_type_def + a_type_def objBits_simps other_obj_relation_def pageBits_def + +lemma setEndpoint_corres: + "ep_relation e e' \ + corres dc (ep_at ptr) (ep_at' ptr) + (set_endpoint ptr e) (setEndpoint ptr e')" + apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ + by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) + +lemma setNotification_corres: + "ntfn_relation ae ae' \ + corres dc (ntfn_at ptr) (ntfn_at' ptr) + (set_notification ptr ae) (setNotification ptr ae')" + apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) + apply (corresK_search search: setObject_other_corres[where P="\_. 
True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ + by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) + +lemma no_fail_getNotification [wp]: + "no_fail (ntfn_at' ptr) (getNotification ptr)" + apply (simp add: getNotification_def getObject_def split_def) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp add: obj_at'_def objBits_simps' lookupAround2_known1) + apply (erule(1) ps_clear_lookupAround2) + apply simp + apply (simp add: field_simps) + apply (erule is_aligned_no_wrap') + apply (simp add: word_bits_conv) + apply (clarsimp split: option.split_asm simp: objBits_simps') + done + +lemma getNotification_corres: + "corres ntfn_relation (ntfn_at ptr) (ntfn_at' ptr) + (get_notification ptr) (getNotification ptr)" + apply (rule corres_no_failI) + apply wp + apply (simp add: get_simple_ko_def getNotification_def get_object_def + getObject_def bind_assoc) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def return_def) + apply (clarsimp simp: assert_def fail_def obj_at_def return_def is_ntfn partial_inv_def) + apply (clarsimp simp: loadObject_default_def in_monad in_magnitude_check objBits_simps') + apply (clarsimp simp add: state_relation_def pspace_relation_def) + apply (drule bspec) + apply blast + apply (simp add: other_obj_relation_def) + done + +lemma setObject_ko_wp_at: + fixes v :: "'a :: pspace_storable" + assumes R: "\ko s y n. (updateObject v ko p y n s) + = (updateObject_default v ko p y n s)" + assumes n: "\v' :: 'a. objBits v' = n" + assumes m: "(1 :: machine_word) < 2 ^ n" + shows "\\s. obj_at' (\x :: 'a. True) p s \ + P (ko_wp_at' (if p = p' then K (P' (injectKO v)) else P')p' s)\ + setObject p v + \\rv s. P (ko_wp_at' P' p' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad + ko_wp_at'_def split_def + R updateObject_default_def + obj_at'_real_def + split del: if_split) + apply (clarsimp simp: project_inject objBits_def[symmetric] n + in_magnitude_check [OF _ m] + elim!: rsubst[where P=P] + split del: if_split) + apply (rule iffI) + apply (clarsimp simp: n ps_clear_upd objBits_def[symmetric] + split: if_split_asm) + apply (clarsimp simp: n project_inject objBits_def[symmetric] + ps_clear_upd + split: if_split_asm) + done + +lemma typ_at'_valid_obj'_lift: + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + notes [wp] = hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_const_Ball_lift typ_at_lifts [OF P] + shows "\\s. valid_obj' obj s\ f \\rv s. valid_obj' obj s\" + apply (cases obj; simp add: valid_obj'_def hoare_TrueI) + apply (rename_tac endpoint) + apply (case_tac endpoint; simp add: valid_ep'_def, wp) + apply (rename_tac notification) + apply (case_tac "ntfnObj notification"; + simp add: valid_ntfn'_def valid_bound_tcb'_def split: option.splits, + (wpsimp|rule conjI)+) + apply (rename_tac tcb) + apply (case_tac "tcbState tcb"; + simp add: valid_tcb'_def valid_tcb_state'_def split_def valid_bound_ntfn'_def + split: option.splits, + wpsimp) + apply (wpsimp simp: valid_cte'_def) + done + +lemmas setObject_valid_obj = typ_at'_valid_obj'_lift [OF setObject_typ_at'] + +lemma setObject_valid_objs': + assumes x: "\x n ko s ko' s'. + \ (ko', s') \ fst (updateObject val ko ptr x n s); P s; + valid_obj' ko s; lookupAround2 ptr (ksPSpace s) = (Some (x, ko), n) \ + \ valid_obj' ko' s" + shows "\valid_objs' and P\ setObject ptr val \\rv. valid_objs'\" + apply (clarsimp simp: valid_def) + apply (subgoal_tac "\ko. 
valid_obj' ko s \ valid_obj' ko b") + defer + apply clarsimp + apply (erule(1) use_valid [OF _ setObject_valid_obj]) + apply (clarsimp simp: setObject_def split_def in_monad + lookupAround2_char1) + apply (simp add: valid_objs'_def) + apply clarsimp + apply (drule spec, erule mp) + apply (drule(1) x) + apply (simp add: ranI) + apply (simp add: prod_eqI lookupAround2_char1) + apply (clarsimp elim!: ranE split: if_split_asm simp: ranI) + done + +lemma setObject_iflive': + fixes v :: "'a :: pspace_storable" + assumes R: "\ko s x y n. (updateObject v ko ptr y n s) + = (updateObject_default v ko ptr y n s)" + assumes n: "\x :: 'a. objBits x = n" + assumes m: "(1 :: machine_word) < 2 ^ n" + assumes x: "\x n tcb s t. \ t \ fst (updateObject v (KOTCB tcb) ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, KOTCB tcb), n) \ + \ \tcb'. t = (KOTCB tcb', s) \ (\(getF, setF) \ ran tcb_cte_cases. getF tcb' = getF tcb)" + assumes y: "\x n cte s. fst (updateObject v (KOCTE cte) ptr x n s) = {}" + shows "\\s. if_live_then_nonz_cap' s \ (live' (injectKO v) \ ex_nonz_cap_to' ptr s) \ P s\ + setObject ptr v + \\rv s. if_live_then_nonz_cap' s\" + unfolding if_live_then_nonz_cap'_def ex_nonz_cap_to'_def + apply (rule hoare_pre) + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (rule setObject_ko_wp_at [OF R n m]) + apply (rule hoare_vcg_ex_lift) + apply (rule setObject_cte_wp_at'[where Q = P, OF x y]) + apply assumption+ + apply clarsimp + apply (clarsimp simp: ko_wp_at'_def) + done + +lemma setObject_qs[wp]: + assumes x: "\q n obj. \\s. P (ksReadyQueues s)\ updateObject v obj p q n \\rv s. P (ksReadyQueues s)\" + shows "\\s. P (ksReadyQueues s)\ setObject p v \\rv s. P (ksReadyQueues s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +lemma setObject_qsL1[wp]: + assumes x: "\q n obj. \\s. P (ksReadyQueuesL1Bitmap s)\ updateObject v obj p q n \\rv s. P (ksReadyQueuesL1Bitmap s)\" + shows "\\s. P (ksReadyQueuesL1Bitmap s)\ setObject p v \\rv s. P (ksReadyQueuesL1Bitmap s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +lemma setObject_qsL2[wp]: + assumes x: "\q n obj. \\s. P (ksReadyQueuesL2Bitmap s)\ updateObject v obj p q n \\rv s. P (ksReadyQueuesL2Bitmap s)\" + shows "\\s. P (ksReadyQueuesL2Bitmap s)\ setObject p v \\rv s. P (ksReadyQueuesL2Bitmap s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +lemma setObject_ifunsafe': + fixes v :: "'a :: pspace_storable" + assumes x: "\x n tcb s t. \ t \ fst (updateObject v (KOTCB tcb) ptr x n s); P s; + lookupAround2 ptr (ksPSpace s) = (Some (x, KOTCB tcb), n) \ + \ \tcb'. t = (KOTCB tcb', s) \ (\(getF, setF) \ ran tcb_cte_cases. getF tcb' = getF tcb)" + assumes y: "\x n cte s. fst (updateObject v (KOCTE cte) ptr x n s) = {}" + assumes z: "\P. \\s. P (intStateIRQNode (ksInterruptState s))\ + setObject ptr v \\rv s. P (intStateIRQNode (ksInterruptState s))\" + shows "\\s. if_unsafe_then_cap' s \ P s\ + setObject ptr v + \\rv s. if_unsafe_then_cap' s\" + apply (simp only: if_unsafe_then_cap'_def ex_cte_cap_to'_def + cte_wp_at_ctes_of) + apply (rule hoare_use_eq_irq_node' [OF z]) + apply (rule setObject_ctes_of [OF x y], assumption+) + done + +lemma setObject_it[wp]: + assumes x: "\p q n ko. \\s. P (ksIdleThread s)\ updateObject val p q n ko \\rv s. P (ksIdleThread s)\" + shows "\\s. P (ksIdleThread s)\ setObject t val \\rv s. 
P (ksIdleThread s)\" + apply (simp add: setObject_def split_def) + apply (wp x | simp)+ + done + +\\ + `idle_tcb_ps val` asserts that `val` is a pspace_storable value + which corresponds to an idle TCB. +\ +definition idle_tcb_ps :: "('a :: pspace_storable) \ bool" where + "idle_tcb_ps val \ (\tcb. projectKO_opt (injectKO val) = Some tcb \ idle_tcb' tcb)" + +lemma setObject_idle': + fixes v :: "'a :: pspace_storable" + assumes R: "\ko s y n. (updateObject v ko ptr y n s) + = (updateObject_default v ko ptr y n s)" + assumes n: "\x :: 'a. objBits x = n" + assumes m: "(1 :: machine_word) < 2 ^ n" + assumes z: "\P p q n ko. + \\s. P (ksIdleThread s)\ updateObject v p q n ko + \\rv s. P (ksIdleThread s)\" + shows "\\s. valid_idle' s \ + (ptr = ksIdleThread s + \ (\val :: 'a. idle_tcb_ps val) + \ idle_tcb_ps v)\ + setObject ptr v + \\rv s. valid_idle' s\" + apply (simp add: valid_idle'_def pred_tcb_at'_def o_def) + apply (rule hoare_pre) + apply (rule hoare_lift_Pf2 [where f="ksIdleThread"]) + apply (simp add: pred_tcb_at'_def obj_at'_real_def) + apply (rule setObject_ko_wp_at [OF R n m]) + apply (wp z) + apply (clarsimp simp add: pred_tcb_at'_def obj_at'_real_def ko_wp_at'_def idle_tcb_ps_def) + apply (clarsimp simp add: project_inject) + done + +lemma setObject_no_0_obj' [wp]: + "\no_0_obj'\ setObject p v \\r. no_0_obj'\" + apply (clarsimp simp: setObject_def split_def) + apply (clarsimp simp: valid_def no_0_obj'_def ko_wp_at'_def in_monad + lookupAround2_char1 ps_clear_upd) + done + +lemma valid_updateCapDataI: + "s \' c \ s \' updateCapData b x c" + apply (unfold updateCapData_def Let_def AARCH64_H.updateCapData_def) + apply (cases c) + apply (simp_all add: isCap_defs valid_cap'_def capUntypedPtr_def isCap_simps + capAligned_def word_size word_bits_def word_bw_assocs + split: arch_capability.splits capability.splits) + done + +lemma no_fail_threadGet [wp]: + "no_fail (tcb_at' t) (threadGet f t)" + by (simp add: threadGet_def, wp) + +lemma no_fail_getThreadState [wp]: + "no_fail (tcb_at' t) (getThreadState t)" + by (simp add: getThreadState_def, wp) + +lemma no_fail_setObject_tcb [wp]: + "no_fail (tcb_at' t) (setObject t (t'::tcb))" + apply (rule no_fail_pre, wp) + apply (rule ext)+ + apply simp + apply (simp add: objBits_simps) + done + +lemma no_fail_threadSet [wp]: + "no_fail (tcb_at' t) (threadSet f t)" + apply (simp add: threadSet_def) + apply (rule no_fail_pre, wp) + apply simp + done + +lemma dmo_return' [simp]: + "doMachineOp (return x) = return x" + apply (simp add: doMachineOp_def select_f_def return_def gets_def get_def + bind_def modify_def put_def) + done + +lemma dmo_storeWordVM' [simp]: + "doMachineOp (storeWordVM x y) = return ()" + by (simp add: storeWordVM_def) + +declare mapM_x_return [simp] + +lemma no_fail_dmo' [wp]: + "no_fail P f \ no_fail (P o ksMachineState) (doMachineOp f)" + apply (simp add: doMachineOp_def split_def) + apply (rule no_fail_pre, wp) + apply simp + apply (simp add: no_fail_def) + done + +lemma setEndpoint_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ + setEndpoint val ptr + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setEndpoint_def) + apply (rule setObject_nosch) + apply (simp add: updateObject_default_def) + apply wp + apply simp + done + +lemma setNotification_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ + setNotification val ptr + \\rv s. 
P (ksSchedulerAction s)\" + apply (simp add: setNotification_def) + apply (rule setObject_nosch) + apply (simp add: updateObject_default_def) + apply wp + apply simp + done + +lemma set_ep_valid_objs': + "\valid_objs' and valid_ep' ep\ + setEndpoint epptr ep + \\r s. valid_objs' s\" + apply (simp add: setEndpoint_def) + apply (rule setObject_valid_objs') + apply (clarsimp simp: updateObject_default_def in_monad valid_obj'_def) + done + +lemma set_ep_ctes_of[wp]: + "\\s. P (ctes_of s)\ setEndpoint p val \\rv s. P (ctes_of s)\" + apply (simp add: setEndpoint_def) + apply (rule setObject_ctes_of[where Q="\", simplified]) + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def bind_def) + done + +lemma set_ep_valid_mdb' [wp]: + "\valid_mdb'\ + setObject epptr (ep::endpoint) + \\_. valid_mdb'\" + apply (simp add: valid_mdb'_def) + apply (rule set_ep_ctes_of[simplified setEndpoint_def]) + done + +lemma setEndpoint_valid_mdb': + "\valid_mdb'\ setEndpoint p v \\rv. valid_mdb'\" + unfolding setEndpoint_def + by (rule set_ep_valid_mdb') + +lemma set_ep_valid_pspace'[wp]: + "\valid_pspace' and valid_ep' ep\ + setEndpoint epptr ep + \\r. valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (wp set_ep_aligned' [simplified] set_ep_valid_objs') + apply (wp hoare_vcg_conj_lift) + apply (simp add: setEndpoint_def) + apply (wp setEndpoint_valid_mdb')+ + apply auto + done + +lemma set_ep_valid_bitmapQ[wp]: + "\Invariants_H.valid_bitmapQ\ setEndpoint epptr ep \\rv. Invariants_H.valid_bitmapQ\" + apply (unfold setEndpoint_def) + apply (rule setObject_ep_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_ep_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ setEndpoint epptr ep \\rv. bitmapQ_no_L1_orphans \" + apply (unfold setEndpoint_def) + apply (rule setObject_ep_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_ep_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ setEndpoint epptr ep \\rv. bitmapQ_no_L2_orphans \" + apply (unfold setEndpoint_def) + apply (rule setObject_ep_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_ep_valid_queues[wp]: + "\Invariants_H.valid_queues\ setEndpoint epptr ep \\rv. Invariants_H.valid_queues\" + apply (simp add: Invariants_H.valid_queues_def) + apply (wp hoare_vcg_conj_lift) + apply (simp add: setEndpoint_def valid_queues_no_bitmap_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift) + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad) + apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] + | simp add: valid_queues_no_bitmap_def)+ + done + +lemma set_ep_valid_queues'[wp]: + "\valid_queues'\ setEndpoint epptr ep \\rv. valid_queues'\" + apply (unfold setEndpoint_def) + apply (simp only: valid_queues'_def imp_conv_disj + obj_at'_real_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (rule setObject_ko_wp_at) + apply simp + apply (simp add: objBits_simps') + apply simp + apply (wp updateObject_default_inv | simp)+ + apply (clarsimp simp: ko_wp_at'_def) + done + +lemma ct_in_state_thread_state_lift': + assumes ct: "\P. \\s. 
P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes st: "\t. \st_tcb_at' P t\ f \\_. st_tcb_at' P t\" + shows "\ct_in_state' P\ f \\_. ct_in_state' P\" + apply (clarsimp simp: ct_in_state'_def) + apply (clarsimp simp: valid_def) + apply (frule (1) use_valid [OF _ ct]) + apply (drule (1) use_valid [OF _ st], assumption) + done + +lemma sch_act_wf_lift: + assumes tcb: "\P t. \st_tcb_at' P t\ f \\rv. st_tcb_at' P t\" + assumes tcb_cd: "\P t. \ tcb_in_cur_domain' t\ f \\_ . tcb_in_cur_domain' t \" + assumes kCT: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes ksA: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + shows + "\\s. sch_act_wf (ksSchedulerAction s) s\ + f + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (clarsimp simp: valid_def) + apply (frule (1) use_valid [OF _ ksA]) + apply (case_tac "ksSchedulerAction b", simp_all) + apply (drule (2) use_valid [OF _ ct_in_state_thread_state_lift' [OF kCT tcb]]) + apply (clarsimp) + apply (rule conjI) + apply (drule (2) use_valid [OF _ tcb]) + apply (drule (2) use_valid [OF _ tcb_cd]) + done + +lemma tcb_in_cur_domain'_lift: + assumes a: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes b: "\x. \obj_at' (\tcb. x = tcbDomain tcb) t\ f \\_. obj_at' (\tcb. x = tcbDomain tcb) t\" + shows "\ tcb_in_cur_domain' t \ f \ \_. tcb_in_cur_domain' t \" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (rule b) + apply (rule a) + done + +lemma ct_idle_or_in_cur_domain'_lift: + assumes a: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes b: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + assumes c: "\P. \\s. P (ksIdleThread s)\ f \\_ s. P (ksIdleThread s)\" + assumes d: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes e: "\d a t t'. \\s. t = t' \ obj_at' (\tcb. d = tcbDomain tcb) t s\ + f + \\_ s. t = t' \ obj_at' (\tcb. d = tcbDomain tcb) t s\" + shows "\ ct_idle_or_in_cur_domain' \ f \ \_. ct_idle_or_in_cur_domain' \" + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (rule_tac f="ksCurThread" in hoare_lift_Pf) + apply (rule_tac f="ksIdleThread" in hoare_lift_Pf) + apply (rule_tac f="ksSchedulerAction" in hoare_lift_Pf) + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp hoare_vcg_imp_lift) + apply (rule e) + apply simp + apply (rule a) + apply (rule b) + apply (rule c) + apply (rule d) + done + + +lemma setObject_ep_obj_at'_tcb[wp]: + "\obj_at' (P :: tcb \ bool) t \ setObject ptr (e::endpoint) \\_. obj_at' (P :: tcb \ bool) t\" + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setObject_ep_cur_domain[wp]: + "\\s. P (ksCurDomain s)\ setObject ptr (e::endpoint) \\_ s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ setEndpoint epptr ep \\_. tcb_in_cur_domain' t\" + apply (clarsimp simp: setEndpoint_def) + apply (rule tcb_in_cur_domain'_lift; wp) + done + +lemma setEndpoint_obj_at'_tcb[wp]: + "\obj_at' (P :: tcb \ bool) t \ setEndpoint ptr (e::endpoint) \\_. obj_at' (P :: tcb \ bool) t\" + by (clarsimp simp: setEndpoint_def, wp) + +lemma set_ep_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setEndpoint epptr ep + \\rv s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (wp sch_act_wf_lift) + apply (simp add: setEndpoint_def split_def setObject_def + | wp updateObject_default_inv)+ + done + +lemma setObject_state_refs_of': + assumes x: "updateObject val = updateObject_default val" + assumes y: "(1 :: machine_word) < 2 ^ objBits val" + shows + "\\s. P ((state_refs_of' s) (ptr := refs_of' (injectKO val)))\ + setObject ptr val + \\rv s. P (state_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def x in_magnitude_check y + elim!: rsubst[where P=P] intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (clarsimp simp: state_refs_of'_def objBits_def[symmetric] + ps_clear_upd + cong: if_cong option.case_cong) + done + +lemma setObject_state_refs_of_eq: + assumes x: "\s s' obj obj' ptr' ptr''. + (obj', s') \ fst (updateObject val obj ptr ptr' ptr'' s) + \ refs_of' obj' = refs_of' obj" + shows + "\\s. P (state_refs_of' s)\ + setObject ptr val + \\rv s. P (state_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def in_magnitude_check lookupAround2_char1 + elim!: rsubst[where P=P] + intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (frule x, drule updateObject_objBitsKO) + apply (simp add: state_refs_of'_def ps_clear_upd + cong: option.case_cong if_cong) + done + +lemma set_ep_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (epptr := ep_q_refs_of' ep))\ + setEndpoint epptr ep + \\rv s. P (state_refs_of' s)\" + unfolding setEndpoint_def + by (wp setObject_state_refs_of', + simp_all add: objBits_simps' fun_upd_def[symmetric]) + +lemma setObject_state_hyp_refs_of': + assumes x: "updateObject val = updateObject_default val" + assumes y: "(1 :: machine_word) < 2 ^ objBits val" + shows + "\\s. P ((state_hyp_refs_of' s) (ptr := hyp_refs_of' (injectKO val)))\ + setObject ptr val + \\rv s. P (state_hyp_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def x in_magnitude_check y + elim!: rsubst[where P=P] intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (clarsimp simp: state_hyp_refs_of'_def objBits_def[symmetric] + ps_clear_upd + cong: if_cong option.case_cong) + done + +lemma setObject_state_hyp_refs_of_eq: + assumes x: "\s s' obj obj' ptr' ptr''. + (obj', s') \ fst (updateObject val obj ptr ptr' ptr'' s) + \ hyp_refs_of' obj' = hyp_refs_of' obj" + shows + "\\s. P (state_hyp_refs_of' s)\ + setObject ptr val + \\rv s. P (state_hyp_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def in_magnitude_check + lookupAround2_char1 + elim!: rsubst[where P=P] intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (frule x, drule updateObject_objBitsKO) + apply (simp add: state_hyp_refs_of'_def ps_clear_upd + cong: option.case_cong if_cong) + done + +lemma state_hyp_refs_of'_ep: + "ep_at' epptr s \ (state_hyp_refs_of' s)(epptr := {}) = state_hyp_refs_of' s" + by (rule ext) (clarsimp simp: state_hyp_refs_of'_def obj_at'_def) + +lemma setObject_gen_obj_at: + fixes v :: "'a :: pspace_storable" + assumes R: "\ko s y n. updateObject v ko p y n s = updateObject_default v ko p y n s" + assumes n: "\v' :: 'a. objBits v' = n" + assumes m: "(1 :: machine_word) < 2 ^ n" + assumes o: "\\s. obj_at' (\x :: 'a. 
True) p s \ P s\ setObject p v \Q\" + shows "\P\ setObject p v \Q\" + using o + apply (clarsimp simp: setObject_def valid_def in_monad split_def split_paired_Ball + R updateObject_default_def project_inject objBits_def[symmetric] n + in_magnitude_check [OF _ m]) + apply (erule allE, erule impE) + apply (fastforce simp: obj_at'_def objBits_def[symmetric] n project_inject) + apply (auto simp: project_inject objBits_def[symmetric] n in_magnitude_check [OF _ m]) + done + +lemma set_ep_state_hyp_refs_of'[wp]: + "setEndpoint epptr ep \\s. P (state_hyp_refs_of' s)\" + unfolding setEndpoint_def + apply (rule setObject_gen_obj_at, simp, simp add: objBits_simps', simp) + apply (wp setObject_state_hyp_refs_of'; simp add: objBits_simps' state_hyp_refs_of'_ep) + done + +lemma set_ntfn_ctes_of[wp]: + "\\s. P (ctes_of s)\ setNotification p val \\rv s. P (ctes_of s)\" + apply (simp add: setNotification_def) + apply (rule setObject_ctes_of[where Q="\", simplified]) + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def bind_def) + done + +lemma set_ntfn_valid_mdb' [wp]: + "\valid_mdb'\ + setObject epptr (ntfn::Structures_H.notification) + \\_. valid_mdb'\" + apply (simp add: valid_mdb'_def) + apply (rule set_ntfn_ctes_of[simplified setNotification_def]) + done + +lemma set_ntfn_valid_objs': + "\valid_objs' and valid_ntfn' ntfn\ + setNotification p ntfn + \\r s. valid_objs' s\" + apply (simp add: setNotification_def) + apply (rule setObject_valid_objs') + apply (clarsimp simp: updateObject_default_def in_monad + valid_obj'_def) + done + +lemma set_ntfn_valid_pspace'[wp]: + "\valid_pspace' and valid_ntfn' ntfn\ + setNotification p ntfn + \\r. valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (wp set_ntfn_aligned' [simplified] set_ntfn_valid_objs') + apply (simp add: setNotification_def,wp) + apply auto + done + +lemma set_ntfn_valid_bitmapQ[wp]: + "\Invariants_H.valid_bitmapQ\ setNotification p ntfn \\rv. Invariants_H.valid_bitmapQ\" + apply (unfold setNotification_def) + apply (rule setObject_ntfn_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ + done + +lemma set_ntfn_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ setNotification p ntfn \\rv. bitmapQ_no_L1_orphans \" + apply (unfold setNotification_def) + apply (rule setObject_ntfn_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ + done + +lemma set_ntfn_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ setNotification p ntfn \\rv. bitmapQ_no_L2_orphans \" + apply (unfold setNotification_def) + apply (rule setObject_ntfn_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp)+ + done + +lemma set_ntfn_valid_queues[wp]: + "\Invariants_H.valid_queues\ setNotification p ntfn \\rv. 
Invariants_H.valid_queues\" + apply (simp add: Invariants_H.valid_queues_def) + apply (rule hoare_pre) + apply (wp hoare_vcg_conj_lift) + apply (simp add: setNotification_def valid_queues_no_bitmap_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift) + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad) + apply (wp updateObject_default_inv set_ep_valid_bitmapQ[unfolded setEndpoint_def] + | simp add: valid_queues_no_bitmap_def)+ + done + +lemma set_ntfn_valid_queues'[wp]: + "\valid_queues'\ setNotification p ntfn \\rv. valid_queues'\" + apply (unfold setNotification_def) + apply (rule setObject_ntfn_pre) + apply (simp only: valid_queues'_def imp_conv_disj + obj_at'_real_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (rule setObject_ko_wp_at) + apply simp + apply (simp add: objBits_simps') + apply simp + apply (wp updateObject_default_inv | simp)+ + apply (clarsimp simp: ko_wp_at'_def) + done + +lemma set_ntfn_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (epptr := ntfn_q_refs_of' (ntfnObj ntfn) + \ ntfn_bound_refs' (ntfnBoundTCB ntfn)))\ + setNotification epptr ntfn + \\rv s. P (state_refs_of' s)\" + unfolding setNotification_def + by (wp setObject_state_refs_of', + simp_all add: objBits_simps' fun_upd_def) + +lemma state_hyp_refs_of'_ntfn: + "ntfn_at' ntfn s \ (state_hyp_refs_of' s) (ntfn := {}) = state_hyp_refs_of' s" + by (rule ext) (clarsimp simp: state_hyp_refs_of'_def obj_at'_def) + +lemma set_ntfn_state_hyp_refs_of'[wp]: + "setNotification epptr ntfn \\s. P (state_hyp_refs_of' s)\" + unfolding setNotification_def + apply (rule setObject_gen_obj_at, simp, simp add: objBits_simps', simp) + apply (wp setObject_state_hyp_refs_of'; simp add: objBits_simps' state_hyp_refs_of'_ntfn) + done + +lemma setNotification_pred_tcb_at'[wp]: + "\pred_tcb_at' proj P t\ setNotification ptr val \\rv. pred_tcb_at' proj P t\" + apply (simp add: pred_tcb_at'_def setNotification_def) + apply (rule obj_at_setObject2) + apply simp + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setObject_ntfn_cur_domain[wp]: + "\ \s. P (ksCurDomain s) \ setObject ptr (ntfn::Structures_H.notification) \ \_s . P (ksCurDomain s) \" + apply (clarsimp simp: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_ntfn_obj_at'_tcb[wp]: + "\obj_at' (P :: tcb \ bool) t \ setObject ptr (ntfn::Structures_H.notification) \\_. obj_at' (P :: tcb \ bool) t\" + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setNotification_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s) \ setNotification ptr (ntfn::Structures_H.notification) \ \_s . P (ksCurDomain s) \" + apply (simp add: setNotification_def) + apply wp + done + +lemma setNotification_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ setNotification epptr ep \\_. tcb_in_cur_domain' t\" + apply (clarsimp simp: setNotification_def) + apply (rule tcb_in_cur_domain'_lift; wp) + done + +lemma set_ntfn_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setNotification ntfnptr ntfn + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (wp sch_act_wf_lift | clarsimp simp: setNotification_def)+ + apply (simp add: setNotification_def split_def setObject_def + | wp updateObject_default_inv)+ + done + +lemmas cur_tcb_lift = + hoare_lift_Pf [where f = ksCurThread and P = tcb_at', folded cur_tcb'_def] + +lemma set_ntfn_cur_tcb'[wp]: + "\cur_tcb'\ setNotification ptr ntfn \\rv. 
cur_tcb'\" + apply (wp cur_tcb_lift) + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ setEndpoint ptr val \\rv s. P (typ_at' T p s)\" + unfolding setEndpoint_def + by (rule setObject_typ_at') + +lemmas setEndpoint_typ_ats[wp] = typ_at_lifts [OF setEndpoint_typ_at'] + +lemma get_ep_sp': + "\P\ getEndpoint r \\t. P and ko_at' t r\" + by (clarsimp simp: getEndpoint_def getObject_def loadObject_default_def + in_monad valid_def obj_at'_def objBits_simps' in_magnitude_check split_def) + +lemma setEndpoint_cur_tcb'[wp]: + "\cur_tcb'\ setEndpoint p v \\rv. cur_tcb'\" + apply (wp cur_tcb_lift) + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ (v \ IdleEP \ ex_nonz_cap_to' p s)\ + setEndpoint p v + \\rv. if_live_then_nonz_cap'\" + unfolding setEndpoint_def + apply (wp setObject_iflive'[where P="\"]) + apply simp + apply (simp add: objBits_simps') + apply simp + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def in_monad bind_def) + apply (clarsimp simp: live'_def) + done + +declare setEndpoint_cte_wp_at'[wp] + +lemma ex_nonz_cap_to_pres': + assumes y: "\P p. \cte_wp_at' P p\ f \\rv. cte_wp_at' P p\" + shows "\ex_nonz_cap_to' p\ f \\rv. ex_nonz_cap_to' p\" + apply (simp only: ex_nonz_cap_to'_def) + apply (intro hoare_vcg_disj_lift hoare_vcg_ex_lift + y hoare_vcg_all_lift) + done + +lemma setEndpoint_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setEndpoint p' v \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma setEndpoint_ifunsafe'[wp]: + "\if_unsafe_then_cap'\ setEndpoint p v \\rv. if_unsafe_then_cap'\" + unfolding setEndpoint_def + apply (rule setObject_ifunsafe'[where P="\", simplified]) + apply (clarsimp simp: updateObject_default_def in_monad + intro!: equals0I)+ + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setEndpoint_idle'[wp]: + "\\s. valid_idle' s\ + setEndpoint p v + \\_. valid_idle'\" + unfolding setEndpoint_def + apply (wp setObject_idle') + apply (simp add: objBits_simps' updateObject_default_inv idle_tcb_ps_def)+ + done + +crunch it[wp]: setEndpoint "\s. P (ksIdleThread s)" + (simp: updateObject_default_inv) + +lemma setObject_ksPSpace_only: + "\ \p q n ko. \P\ updateObject val p q n ko \\rv. P \; + \f s. P (ksPSpace_update f s) = P s \ + \ \P\ setObject ptr val \\rv. P\" + apply (simp add: setObject_def split_def) + apply (wp | simp | assumption)+ + done + +lemma setObject_ksMachine: + "\ \p q n ko. \\s. P (ksMachineState s)\ updateObject val p q n ko \\rv s. P (ksMachineState s)\ \ + \ \\s. P (ksMachineState s)\ setObject ptr val \\rv s. P (ksMachineState s)\" + by (simp add: setObject_ksPSpace_only) + +lemma setObject_ksInterrupt: + "\ \p q n ko. \\s. P (ksInterruptState s)\ updateObject val p q n ko \\rv s. P (ksInterruptState s)\ \ + \ \\s. P (ksInterruptState s)\ setObject ptr val \\rv s. P (ksInterruptState s)\" + by (simp add: setObject_ksPSpace_only) + +lemma valid_irq_handlers_lift': + assumes x: "\P. \\s. P (cteCaps_of s)\ f \\rv s. P (cteCaps_of s)\" + assumes y: "\P. \\s. P (ksInterruptState s)\ f \\rv s. P (ksInterruptState s)\" + shows "\valid_irq_handlers'\ f \\rv. 
valid_irq_handlers'\" + apply (simp add: valid_irq_handlers'_def irq_issued'_def) + apply (rule hoare_use_eq [where f=cteCaps_of, OF x y]) + done + +lemmas valid_irq_handlers_lift'' = valid_irq_handlers_lift' [unfolded cteCaps_of_def] + +crunch ksInterruptState[wp]: setEndpoint "\s. P (ksInterruptState s)" + (wp: setObject_ksInterrupt updateObject_default_inv) + +lemmas setEndpoint_irq_handlers[wp] + = valid_irq_handlers_lift'' [OF set_ep_ctes_of setEndpoint_ksInterruptState] + +declare set_ep_arch' [wp] + +lemma set_ep_maxObj [wp]: + "\\s. P (gsMaxObjectSize s)\ setEndpoint ptr val \\rv s. P (gsMaxObjectSize s)\" + by (simp add: setEndpoint_def | wp setObject_ksPSpace_only updateObject_default_inv)+ + +lemma valid_global_refs_lift': + assumes ctes: "\P. \\s. P (ctes_of s)\ f \\_ s. P (ctes_of s)\" + assumes arch: "\P. \\s. P (ksArchState s)\ f \\_ s. P (ksArchState s)\" + assumes idle: "\P. \\s. P (ksIdleThread s)\ f \\_ s. P (ksIdleThread s)\" + assumes irqn: "\P. \\s. P (irq_node' s)\ f \\_ s. P (irq_node' s)\" + assumes maxObj: "\P. \\s. P (gsMaxObjectSize s)\ f \\_ s. P (gsMaxObjectSize s)\" + shows "\valid_global_refs'\ f \\_. valid_global_refs'\" + apply (simp add: valid_global_refs'_def valid_refs'_def global_refs'_def valid_cap_sizes'_def) + apply (rule hoare_lift_Pf [where f="ksArchState"]) + apply (rule hoare_lift_Pf [where f="ksIdleThread"]) + apply (rule hoare_lift_Pf [where f="irq_node'"]) + apply (rule hoare_lift_Pf [where f="gsMaxObjectSize"]) + apply (wp ctes hoare_vcg_const_Ball_lift arch idle irqn maxObj)+ + done + +lemma valid_arch_state_lift': + assumes typs: "\T p P. f \\s. P (typ_at' T p s)\" + assumes arch: "\P. f \\s. P (ksArchState s)\" + assumes vcpu: "\P p. f \\s. P (ko_wp_at' (is_vcpu' and hyp_live') p s)\" + shows "f \valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def vspace_table_at'_defs) + apply (wp_pre, wps arch) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_const_imp_lift vcpu[unfolded pred_conj_def] + split: option.split) + apply (clarsimp simp: pred_conj_def) + done + +lemma setObject_ep_ct: + "\\s. P (ksCurThread s)\ setObject p (e::endpoint) \\_ s. P (ksCurThread s)\" + apply (simp add: setObject_def updateObject_ep_eta split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_ntfn_ct: + "\\s. P (ksCurThread s)\ setObject p (e::Structures_H.notification) + \\_ s. P (ksCurThread s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma get_ntfn_sp': + "\P\ getNotification r \\t. P and ko_at' t r\" + by (clarsimp simp: getNotification_def getObject_def loadObject_default_def + in_monad valid_def obj_at'_def objBits_simps' in_magnitude_check split_def) + +lemma set_ntfn_pred_tcb_at' [wp]: + "\ pred_tcb_at' proj P t \ + setNotification ep v + \ \rv. pred_tcb_at' proj P t \" + apply (simp add: setNotification_def pred_tcb_at'_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma set_ntfn_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ (live' (KONotification v) \ ex_nonz_cap_to' p s)\ + setNotification p v + \\rv. 
if_live_then_nonz_cap'\" + apply (simp add: setNotification_def) + apply (wp setObject_iflive'[where P="\"]) + apply simp + apply (simp add: objBits_simps) + apply (simp add: objBits_simps') + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def bind_def) + apply clarsimp + done + +declare setNotification_cte_wp_at'[wp] + +lemma set_ntfn_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setNotification p' v \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma setNotification_ifunsafe'[wp]: + "\if_unsafe_then_cap'\ setNotification p v \\rv. if_unsafe_then_cap'\" + unfolding setNotification_def + apply (rule setObject_ifunsafe'[where P="\", simplified]) + apply (clarsimp simp: updateObject_default_def in_monad + intro!: equals0I)+ + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setNotification_idle'[wp]: + "\\s. valid_idle' s\ setNotification p v \\rv. valid_idle'\" + unfolding setNotification_def + apply (wp setObject_idle') + apply (simp add: objBits_simps' updateObject_default_inv idle_tcb_ps_def)+ + done + +crunch it[wp]: setNotification "\s. P (ksIdleThread s)" + (wp: updateObject_default_inv) + +lemma set_ntfn_arch' [wp]: + "\\s. P (ksArchState s)\ setNotification ntfn p \\_ s. P (ksArchState s)\" + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv|simp)+ + done + +lemma set_ntfn_ksInterrupt[wp]: + "\\s. P (ksInterruptState s)\ setNotification ptr val \\rv s. P (ksInterruptState s)\" + by (simp add: setNotification_def | wp setObject_ksInterrupt updateObject_default_inv)+ + +lemma set_ntfn_ksMachine[wp]: + "\\s. P (ksMachineState s)\ setNotification ptr val \\rv s. P (ksMachineState s)\" + by (simp add: setNotification_def | wp setObject_ksMachine updateObject_default_inv)+ + +lemma set_ntfn_maxObj [wp]: + "\\s. P (gsMaxObjectSize s)\ setNotification ptr val \\rv s. P (gsMaxObjectSize s)\" + by (simp add: setNotification_def | wp setObject_ksPSpace_only updateObject_default_inv)+ + +lemma set_ntfn_global_refs' [wp]: + "\valid_global_refs'\ setNotification ptr val \\_. valid_global_refs'\" + by (rule valid_global_refs_lift'; wp) + +crunch typ_at' [wp]: setNotification "\s. P (typ_at' T p s)" (ignore_del: setObject) + +lemma set_ntfn_hyp[wp]: + "setNotification ptr val \\s. P (ko_wp_at' (is_vcpu' and hyp_live') p s)\" + unfolding setNotification_def + by (wpsimp wp: setObject_ko_wp_at simp: objBits_simps', rule refl, simp) + (clarsimp simp: is_vcpu'_def ko_wp_at'_def obj_at'_def) + +lemma set_ep_hyp[wp]: + "setEndpoint ptr val \\s. P (ko_wp_at' (is_vcpu' and hyp_live') p s)\" + unfolding setEndpoint_def + by (wpsimp wp: setObject_ko_wp_at simp: objBits_simps', rule refl, simp) + (clarsimp simp: is_vcpu'_def ko_wp_at'_def obj_at'_def) + +crunches setEndpoint, setNotification + for valid_arch'[wp]: valid_arch_state' + (wp: valid_arch_state_lift') + +lemmas valid_irq_node_lift = + hoare_use_eq_irq_node' [OF _ typ_at_lift_valid_irq_node'] + +lemmas untyped_ranges_zero_lift + = hoare_use_eq[where f="gsUntypedZeroRanges" + and Q="\v s. untyped_ranges_zero_inv (f s) v" for f] + +lemma valid_irq_states_lift': + assumes x: "\P. \\s. P (intStateIRQTable (ksInterruptState s))\ f \\rv s. P (intStateIRQTable (ksInterruptState s))\" + assumes y: "\P. \\s. P (irq_masks (ksMachineState s))\ f \\rv s. P (irq_masks (ksMachineState s))\" + shows "\valid_irq_states'\ f \\rv. valid_irq_states'\" + apply (rule hoare_use_eq [where f="\s. 
irq_masks (ksMachineState s)"], rule y) + apply (rule hoare_use_eq [where f="\s. intStateIRQTable (ksInterruptState s)"], rule x) + apply wp + done + +lemmas set_ntfn_irq_handlers'[wp] = valid_irq_handlers_lift'' [OF set_ntfn_ctes_of set_ntfn_ksInterrupt] + +lemmas set_ntfn_irq_states' [wp] = valid_irq_states_lift' [OF set_ntfn_ksInterrupt set_ntfn_ksMachine] + +lemma set_ntfn_vms'[wp]: + "\valid_machine_state'\ setNotification ptr val \\rv. valid_machine_state'\" + apply (simp add: setNotification_def valid_machine_state'_def pointerInDeviceData_def pointerInUserData_def) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift) + by (wp setObject_typ_at_inv setObject_ksMachine updateObject_default_inv | + simp)+ + +lemma irqs_masked_lift: + assumes "\P. \\s. P (intStateIRQTable (ksInterruptState s))\ f + \\rv s. P (intStateIRQTable (ksInterruptState s))\" + shows "\irqs_masked'\ f \\_. irqs_masked'\" + apply (simp add: irqs_masked'_def) + apply (wp assms) + done + +lemma setObject_pspace_domain_valid[wp]: + "\pspace_domain_valid\ + setObject ptr val + \\rv. pspace_domain_valid\" + apply (clarsimp simp: setObject_def split_def pspace_domain_valid_def + valid_def in_monad + split: if_split_asm) + apply (drule updateObject_objBitsKO) + apply (clarsimp simp: lookupAround2_char1) + done + +crunches setNotification, setEndpoint + for pspace_domain_valid[wp]: "pspace_domain_valid" + +lemma ct_not_inQ_lift: + assumes sch_act: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + and not_inQ: "\\s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\ + f \\_ s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" + shows "\ct_not_inQ\ f \\_. ct_not_inQ\" + unfolding ct_not_inQ_def + by (rule hoare_convert_imp [OF sch_act not_inQ]) + +lemma setNotification_ct_not_inQ[wp]: + "\ct_not_inQ\ setNotification ptr rval \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF setNotification_nosch]) + apply (simp add: setNotification_def ct_not_inQ_def) + apply (rule hoare_weaken_pre) + apply (wps setObject_ntfn_ct) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad)+ + done + +lemma setNotification_ksCurThread[wp]: + "\\s. P (ksCurThread s)\ setNotification a b \\rv s. P (ksCurThread s)\" + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setNotification_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ setNotification a b \\rv s. P (ksDomSchedule s)\" + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setNotification_ksDomScheduleId[wp]: + "\\s. P (ksDomScheduleIdx s)\ setNotification a b \\rv s. P (ksDomScheduleIdx s)\" + apply (simp add: setNotification_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setNotification_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ setNotification ptr ntfn \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift| rule obj_at_setObject2 + | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ + done + +crunch gsUntypedZeroRanges[wp]: setNotification "\s. P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma set_ntfn_minor_invs': + "\invs' and obj_at' (\ntfn. 
ntfn_q_refs_of' (ntfnObj ntfn) = ntfn_q_refs_of' (ntfnObj val) + \ ntfn_bound_refs' (ntfnBoundTCB ntfn) = ntfn_bound_refs' (ntfnBoundTCB val)) + ptr + and valid_ntfn' val + and (\s. live' (KONotification val) \ ex_nonz_cap_to' ptr s) + and (\s. ptr \ ksIdleThread s) \ + setNotification ptr val + \\rv. invs'\" + apply (clarsimp simp add: invs'_def valid_state'_def cteCaps_of_def) + apply (wp irqs_masked_lift valid_irq_node_lift untyped_ranges_zero_lift, + simp_all add: o_def) + apply (clarsimp elim!: rsubst[where P=sym_refs] + intro!: ext + dest!: obj_at_state_refs_ofD')+ + done + +lemma getEndpoint_wp: + "\\s. \ep. ko_at' ep e s \ P ep s\ getEndpoint e \P\" + apply (rule hoare_strengthen_post) + apply (rule get_ep_sp') + apply simp + done + +lemma getNotification_wp: + "\\s. \ntfn. ko_at' ntfn e s \ P ntfn s\ getNotification e \P\" + apply (rule hoare_strengthen_post) + apply (rule get_ntfn_sp') + apply simp + done + +lemma ep_redux_simps': + "ep_q_refs_of' (case xs of [] \ IdleEP | y # ys \ SendEP xs) + = (set xs \ {EPSend})" + "ep_q_refs_of' (case xs of [] \ IdleEP | y # ys \ RecvEP xs) + = (set xs \ {EPRecv})" + "ntfn_q_refs_of' (case xs of [] \ IdleNtfn | y # ys \ WaitingNtfn xs) + = (set xs \ {NTFNSignal})" + by (fastforce split: list.splits + simp: valid_ep_def valid_ntfn_def)+ + + +(* There are two wp rules for preserving valid_ioc over set_object. + First, the more involved rule for CNodes and TCBs *) +(* Second, the simpler rule suitable for all objects except CNodes and TCBs. *) +lemma valid_refs'_def2: + "valid_refs' R (ctes_of s) = (\cref. \cte_wp_at' (\c. R \ capRange (cteCap c) \ {}) cref s)" + by (auto simp: valid_refs'_def cte_wp_at_ctes_of ran_def) + +lemma idle_is_global [intro!]: + "ksIdleThread s \ global_refs' s" + by (simp add: global_refs'_def) + +lemma valid_globals_cte_wpD': + "\ valid_global_refs' s; cte_wp_at' P p s \ + \ \cte. P cte \ ksIdleThread s \ capRange (cteCap cte)" + by (fastforce simp: valid_global_refs'_def valid_refs'_def cte_wp_at_ctes_of) + +lemma dmo_aligned'[wp]: + "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + done + +lemma dmo_distinct'[wp]: + "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + done + +lemma dmo_valid_objs'[wp]: + "\valid_objs'\ doMachineOp f \\_. valid_objs'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + done + +lemma dmo_inv': + assumes R: "\P. \P\ f \\_. P\" + shows "\P\ doMachineOp f \\_. P\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (drule in_inv_by_hoareD [OF R]) + apply simp + done + +crunch cte_wp_at'2[wp]: doMachineOp "\s. P (cte_wp_at' P' p s)" + +crunch typ_at'[wp]: doMachineOp "\s. P (typ_at' T p s)" + +lemmas doMachineOp_typ_ats[wp] = typ_at_lifts [OF doMachineOp_typ_at'] + +lemma doMachineOp_invs_bits[wp]: + "\valid_pspace'\ doMachineOp m \\rv. valid_pspace'\" + "\\s. sch_act_wf (ksSchedulerAction s) s\ + doMachineOp m \\rv s. sch_act_wf (ksSchedulerAction s) s\" + "\Invariants_H.valid_queues\ doMachineOp m \\rv. Invariants_H.valid_queues\" + "\valid_queues'\ doMachineOp m \\rv. valid_queues'\" + "\\s. P (state_refs_of' s)\ doMachineOp m \\rv s. P (state_refs_of' s)\" + "\\s. P (state_hyp_refs_of' s)\ doMachineOp m \\rv s. P (state_hyp_refs_of' s)\" + "\if_live_then_nonz_cap'\ doMachineOp m \\rv. if_live_then_nonz_cap'\" + "\cur_tcb'\ doMachineOp m \\rv. 
cur_tcb'\" + "\if_unsafe_then_cap'\ doMachineOp m \\rv. if_unsafe_then_cap'\" + by (simp add: doMachineOp_def split_def + valid_pspace'_def valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs + | wp cur_tcb_lift sch_act_wf_lift tcb_in_cur_domain'_lift + | fastforce elim: state_refs_of'_pspaceI)+ + +crunch obj_at'[wp]: doMachineOp "\s. P (obj_at' P' p s)" + +crunch it[wp]: doMachineOp "\s. P (ksIdleThread s)" +crunch idle'[wp]: doMachineOp "valid_idle'" + (wp: crunch_wps simp: crunch_simps valid_idle'_pspace_itI) + +lemma setEndpoint_ksMachine: + "\\s. P (ksMachineState s)\ setEndpoint ptr val \\rv s. P (ksMachineState s)\" + by (simp add: setEndpoint_def | wp setObject_ksMachine updateObject_default_inv)+ + +lemmas setEndpoint_valid_irq_states' = + valid_irq_states_lift' [OF setEndpoint_ksInterruptState setEndpoint_ksMachine] + +lemma setEndpoint_ct': + "\\s. P (ksCurThread s)\ setEndpoint a b \\rv s. P (ksCurThread s)\" + apply (simp add: setEndpoint_def setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemmas setEndpoint_valid_globals[wp] + = valid_global_refs_lift' [OF set_ep_ctes_of set_ep_arch' + setEndpoint_it setEndpoint_ksInterruptState] + +end +end diff --git a/proof/refine/AARCH64/KernelInit_R.thy b/proof/refine/AARCH64/KernelInit_R.thy new file mode 100644 index 0000000000..517a1a8a83 --- /dev/null +++ b/proof/refine/AARCH64/KernelInit_R.thy @@ -0,0 +1,41 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Kernel init refinement. Currently axiomatised. +*) + +theory KernelInit_R +imports + IncKernelInit + "AInvs.KernelInit_AI" +begin + +(* Axiomatisation of the rest of the initialisation code *) +axiomatization where + init_refinement: + "Init_H \ lift_state_relation state_relation `` Init_A" + +axiomatization where + ckernel_init_invs: + "\((tc,s),x) \ Init_H. invs' s" + +axiomatization where + ckernel_init_sch_norm: + "((tc,s),x) \ Init_H \ ksSchedulerAction s = ResumeCurrentThread" + +axiomatization where + ckernel_init_ctr: + "((tc,s),x) \ Init_H \ ct_running' s" + +axiomatization where + ckernel_init_domain_time: + "((tc,s),x) \ Init_H \ ksDomainTime s \ 0" + +axiomatization where + ckernel_init_domain_list: + "((tc,s),x) \ Init_H \ length (ksDomSchedule s) > 0 \ (\(d,time) \ set (ksDomSchedule s). 
time > 0)" + +end diff --git a/proof/refine/AARCH64/LevityCatch.thy b/proof/refine/AARCH64/LevityCatch.thy new file mode 100644 index 0000000000..29272dce95 --- /dev/null +++ b/proof/refine/AARCH64/LevityCatch.thy @@ -0,0 +1,57 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory LevityCatch +imports + "BaseRefine.Include" + "Lib.AddUpdSimps" + "Lib.LemmaBucket" + "Lib.SimpStrategy" + "Lib.Corres_Method" +begin + +no_notation bind_drop (infixl ">>" 60) + +lemma magnitudeCheck_assert: + "magnitudeCheck x y n = assert (case y of None \ True | Some z \ 1 << n \ z - x)" + by (fastforce simp: magnitudeCheck_def assert_def when_def + split: option.split) + +lemma projectKO_inv: "projectKO ko \P\" + by (simp add: projectKO_def fail_def valid_def return_def + split: option.splits) + +lemma alignCheck_assert: + "alignCheck ptr n = assert (is_aligned ptr n)" + by (simp add: is_aligned_mask alignCheck_def assert_def + alignError_def unless_def when_def) + +lemma magnitudeCheck_inv: + "magnitudeCheck x y n \P\" + by (wpsimp simp: magnitudeCheck_def) + +lemma alignCheck_inv: + "alignCheck x n \P\" + by (wpsimp simp: alignCheck_def alignError_def) + +lemma updateObject_default_inv: + "updateObject_default obj ko ptr ptr' next \P\" + unfolding updateObject_default_def + by (wpsimp wp: magnitudeCheck_inv alignCheck_inv projectKO_inv) + + +context begin interpretation Arch . + +lemmas makeObject_simps = + makeObject_endpoint makeObject_notification makeObject_cte + makeObject_tcb makeObject_user_data makeObject_pte makeObject_asidpool + +lemma to_from_apiType[simp]: "toAPIType (fromAPIType x) = Some x" + by (cases x) (auto simp: fromAPIType_def toAPIType_def) + +end + +end diff --git a/proof/refine/AARCH64/Machine_R.thy b/proof/refine/AARCH64/Machine_R.thy new file mode 100644 index 0000000000..34709d376a --- /dev/null +++ b/proof/refine/AARCH64/Machine_R.thy @@ -0,0 +1,83 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Properties of machine operations. +*) + +theory Machine_R +imports Bits_R +begin + +definition "irq_state_independent_H (P :: kernel_state \ bool)\ + \(f :: nat \ nat) (s :: kernel_state). P s \ P (s\ksMachineState := ksMachineState s + \irq_state := f (irq_state (ksMachineState s))\\)" + +lemma irq_state_independent_HI[intro!, simp]: + "\\s f. P (s\ksMachineState := ksMachineState s + \irq_state := f (irq_state (ksMachineState s))\\) = P s\ + \ irq_state_independent_H P" + by (simp add: irq_state_independent_H_def) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma dmo_getirq_inv[wp]: + "irq_state_independent_H P \ \P\ doMachineOp (getActiveIRQ in_kernel) \\rv. P\" + apply (simp add: getActiveIRQ_def doMachineOp_def split_def exec_gets + select_f_select[simplified liftM_def] + select_modify_comm gets_machine_state_modify) + apply wp + apply (clarsimp simp: irq_state_independent_H_def in_monad return_def split: if_splits) + done + +lemma getActiveIRQ_masked: + "\\s. valid_irq_masks' table (irq_masks s)\ getActiveIRQ in_kernel + \\rv s. \irq. rv = Some irq \ table irq \ IRQInactive\" + apply (simp add: getActiveIRQ_def) + apply wp + apply (clarsimp simp: valid_irq_masks'_def) + done + +lemma dmo_maskInterrupt: + "\\s. P (ksMachineState_update (irq_masks_update (\t. t (irq := m))) s)\ + doMachineOp (maskInterrupt m irq) \\_. 
P\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply (clarsimp simp: maskInterrupt_def in_monad) + apply (erule rsubst [where P=P]) + apply simp + done + +lemma dmo_maskInterrupt_True: + "\invs'\ doMachineOp (maskInterrupt True irq) \\_. invs'\" + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: invs'_def valid_state'_def) + apply (simp add: valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + done + +lemma setIRQState_irq_states': + "\valid_irq_states'\ + setIRQState state irq + \\rv. valid_irq_states'\" + apply (simp add: setIRQState_def setInterruptState_def getInterruptState_def) + apply (wp dmo_maskInterrupt) + apply (simp add: valid_irq_masks'_def) + done + +lemma getActiveIRQ_le_maxIRQ: + "\irqs_masked' and valid_irq_states'\ doMachineOp (getActiveIRQ in_kernel) \\rv s. \x. rv = Some x \ x \ maxIRQ\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (drule use_valid, rule getActiveIRQ_le_maxIRQ') + prefer 2 + apply simp + apply (simp add: irqs_masked'_def valid_irq_states'_def) + done + +end +end diff --git a/proof/refine/AARCH64/PageTableDuplicates.thy b/proof/refine/AARCH64/PageTableDuplicates.thy new file mode 100644 index 0000000000..08eb06bcc3 --- /dev/null +++ b/proof/refine/AARCH64/PageTableDuplicates.thy @@ -0,0 +1,42 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory PageTableDuplicates +imports Syscall_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma doMachineOp_ksPSpace_inv[wp]: + "\\s. P (ksPSpace s)\ doMachineOp f \\ya s. P (ksPSpace s)\" + by (simp add:doMachineOp_def split_def | wp)+ + +lemma foldr_data_map_insert[simp]: + "foldr (\addr map a. if a = addr then Some b else map a) = foldr (\addr. data_map_insert addr b)" + apply (rule ext)+ + apply (simp add:data_map_insert_def[abs_def] fun_upd_def) + done + +lemma mapM_x_mapM_valid: + "\ P \ mapM_x f xs \\r. Q\ \ \P\mapM f xs \\r. Q\" + apply (simp add: mapM_x_mapM) + apply (clarsimp simp:valid_def return_def bind_def) + apply (drule spec) + apply (erule impE) + apply simp + apply (drule(1) bspec) + apply fastforce + done + +declare withoutPreemption_lift [wp del] + +crunch valid_cap'[wp]: + isFinalCapability "\s. valid_cap' cap s" + (wp: crunch_wps filterM_preserved simp: crunch_simps unless_def) + +end + +end diff --git a/proof/refine/AARCH64/RAB_FN.thy b/proof/refine/AARCH64/RAB_FN.thy new file mode 100644 index 0000000000..cbe2a89c2f --- /dev/null +++ b/proof/refine/AARCH64/RAB_FN.thy @@ -0,0 +1,147 @@ +(* + * Copyright 2014, General Dynamics C4 Systems + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory RAB_FN + +imports + "CSpace1_R" + "Lib.MonadicRewrite" + +begin + +definition + "only_cnode_caps ctes = + option_map ((\x. if isCNodeCap x then x else NullCap) o cteCap) o ctes" + +definition locateSlotFun_def: +"locateSlotFun cnode offset \ cnode + 2 ^ cte_level_bits * offset" + +definition + "cnode_caps_gsCNodes cts cns + = (\cap \ ran cts. isCNodeCap cap + \ cns (capCNodePtr cap) = Some (capCNodeBits cap))" + +abbreviation (input) + "cnode_caps_gsCNodes' s \ cnode_caps_gsCNodes (only_cnode_caps (ctes_of s)) (gsCNodes s)" + +function + resolveAddressBitsFn :: + "capability \ cptr \ nat \ (machine_word \ capability option) + \ (lookup_failure + (machine_word * nat))" +where + "resolveAddressBitsFn a b c = +(\x0 capptr bits caps. 
(let nodeCap = x0 in + if isCNodeCap nodeCap + then (let + radixBits = capCNodeBits nodeCap; + guardBits = capCNodeGuardSize nodeCap; + levelBits = radixBits + guardBits; + offset = (fromCPtr capptr `~shiftR~` (bits-levelBits)) && + (mask radixBits); + guard = (fromCPtr capptr `~shiftR~` (bits-guardBits)) && + (mask guardBits); + bitsLeft = bits - levelBits; + slot = locateSlotFun (capCNodePtr nodeCap) offset + in + if levelBits = 0 then Inr (0, 0) + else if \ (guardBits \ bits \ guard = capCNodeGuard nodeCap) + then Inl $ GuardMismatch_ \ + guardMismatchBitsLeft= bits, + guardMismatchGuardFound= capCNodeGuard nodeCap, + guardMismatchGuardSize= guardBits \ + else if (levelBits > bits) then Inl $ DepthMismatch_ \ + depthMismatchBitsLeft= bits, + depthMismatchBitsFound= levelBits \ + else if (bitsLeft = 0) + then Inr (slot, 0) + else (case caps slot of Some NullCap + \ Inr (slot, bitsLeft) + | Some nextCap + \ resolveAddressBitsFn nextCap capptr bitsLeft caps + | None \ Inr (0, 0)) + ) + else Inl InvalidRoot + )) + +a b c" + by auto + +termination + apply (relation "measure (snd o snd)") + apply (auto split: if_split_asm) + done + +declare resolveAddressBitsFn.simps[simp del] + +lemma isCNodeCap_capUntypedPtr_capCNodePtr: + "isCNodeCap c \ capUntypedPtr c = capCNodePtr c" + by (clarsimp simp: isCap_simps) + +lemma resolveAddressBitsFn_eq: + "monadic_rewrite F E (\s. (isCNodeCap cap \ (\slot. cte_wp_at' (\cte. cteCap cte = cap) slot s)) + \ valid_objs' s \ cnode_caps_gsCNodes' s) + (resolveAddressBits cap capptr bits) + (gets (resolveAddressBitsFn cap capptr bits o only_cnode_caps o ctes_of))" + (is "monadic_rewrite F E (?P cap) (?f cap bits) (?g cap capptr bits)") +proof (induct cap capptr bits rule: resolveAddressBits.induct) + case (1 cap cref depth) + show ?case + apply (subst resolveAddressBits.simps, subst resolveAddressBitsFn.simps) + apply (simp only: Let_def haskell_assertE_def K_bind_def) + apply (rule monadic_rewrite_name_pre) + apply (rule monadic_rewrite_guard_imp) + apply (rule_tac P="(=) s" in monadic_rewrite_trans) + (* step 1, apply the induction hypothesis on the lhs *) + apply (rule monadic_rewrite_named_if monadic_rewrite_named_bindE + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="returnOk y" for y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="x $ y" for x y] + monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="assertE P" for P s] + TrueI)+ + apply (rule_tac g="case nextCap of CNodeCap a b c d + \ ?g nextCap cref bitsLeft + | _ \ returnOk (slot, bitsLeft)" in monadic_rewrite_guard_imp) + apply (wpc | rule monadic_rewrite_refl "1.hyps" + | simp only: capability.case haskell_assertE_def simp_thms)+ + apply (clarsimp simp: in_monad locateSlot_conv getSlotCap_def + dest!: in_getCTE fst_stateAssertD) + apply (fastforce elim: cte_wp_at_weakenE') + apply (rule monadic_rewrite_refl[THEN monadic_rewrite_guard_imp], simp) + (* step 2, split and match based on the lhs structure *) + apply (simp add: locateSlot_conv liftE_bindE unlessE_def whenE_def + if_to_top_of_bindE assertE_def stateAssert_def bind_assoc + assert_def if_to_top_of_bind getSlotCap_def + split del: if_split cong: if_cong) + apply (rule monadic_rewrite_if_l monadic_rewrite_symb_exec_l'[OF _ get_wp, rotated] + empty_fail_get no_fail_get impI + monadic_rewrite_refl get_wp + | simp add: throwError_def returnOk_def locateSlotFun_def if_not_P + isCNodeCap_capUntypedPtr_capCNodePtr + cong: if_cong split del: if_split)+ + apply (rule monadic_rewrite_symb_exec_l'[OF _ getCTE_inv _ 
_ getCTE_cte_wp_at, rotated]) + apply simp + apply (rule impI, rule no_fail_getCTE) + apply (simp add: monadic_rewrite_def simpler_gets_def return_def returnOk_def + only_cnode_caps_def cte_wp_at_ctes_of isCap_simps + locateSlotFun_def isCNodeCap_capUntypedPtr_capCNodePtr + split: capability.split) + apply (rule monadic_rewrite_name_pre[where P="\_. False" and f=fail] + monadic_rewrite_refl get_wp + | simp add: throwError_def returnOk_def locateSlotFun_def if_not_P + isCNodeCap_capUntypedPtr_capCNodePtr + cong: if_cong split del: if_split)+ + (* step 3, prove the non-failure conditions *) + apply (clarsimp simp: isCap_simps) + apply (frule(1) cte_wp_at_valid_objs_valid_cap') + apply (clarsimp simp: cte_level_bits_def valid_cap_simps' + real_cte_at' isCap_simps cteSizeBits_def objBits_simps) + apply (clarsimp simp: cte_wp_at_ctes_of only_cnode_caps_def ball_Un + cnode_caps_gsCNodes_def ran_map_option o_def) + apply (drule bspec, rule IntI, erule ranI, simp add: isCap_simps) + apply (simp add: isCap_simps capAligned_def word_bits_def and_mask_less') + done +qed + +end diff --git a/proof/refine/AARCH64/Refine.thy b/proof/refine/AARCH64/Refine.thy new file mode 100644 index 0000000000..ab4825d870 --- /dev/null +++ b/proof/refine/AARCH64/Refine.thy @@ -0,0 +1,1014 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + The main theorem +*) + +theory Refine +imports + KernelInit_R + ADT_H + InitLemmas + PageTableDuplicates +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +text \User memory content is the same on both levels\ +lemma typ_at_AUserDataI: + "\ typ_at (AArch (AUserData sz)) p s; pspace_relation (kheap s) (ksPSpace s'); + pspace_aligned' s'; pspace_distinct' s'; n < 2 ^ (pageBitsForSize sz - pageBits) \ + \ typ_at' UserDataT (p + n * 2 ^ pageBits) s'" + apply (clarsimp simp add: obj_at_def a_type_def ) + apply (simp split: Structures_A.kernel_object.split_asm + arch_kernel_obj.split_asm split: if_split_asm) + apply (drule(1) pspace_relation_absD) + apply (clarsimp) + apply (drule_tac x = "p + n * 2 ^ pageBits" in spec) + apply (drule_tac x = "\_ obj. obj = KOUserData" in spec) + apply (clarsimp simp: obj_at'_def typ_at'_def ko_wp_at'_def) + apply (rule exI [where x = KOUserData]) + apply (drule mp) + apply (rule exI [where x = n]) + apply (simp add: shiftl_t2n) + apply (clarsimp simp: pspace_aligned'_def) + apply (drule (1) bspec [OF _ domI]) + apply (clarsimp simp: objBits_simps) + apply (fastforce dest!: pspace_distinctD' simp: objBits_simps) + done + +lemma typ_at_ADeviceDataI: + "\ typ_at (AArch (ADeviceData sz)) p s; pspace_relation (kheap s) (ksPSpace s'); + pspace_aligned' s'; pspace_distinct' s'; n < 2 ^ (pageBitsForSize sz - pageBits) \ + \ typ_at' UserDataDeviceT (p + n * 2 ^ pageBits) s'" + apply (clarsimp simp add: obj_at_def a_type_def ) + apply (simp split: Structures_A.kernel_object.split_asm + arch_kernel_obj.split_asm split: if_split_asm) + apply (drule(1) pspace_relation_absD) + apply (clarsimp) + apply (drule_tac x = "p + n * 2 ^ pageBits" in spec) + apply (drule_tac x = "\_ obj. 
obj = KOUserDataDevice" in spec) + apply (clarsimp simp: obj_at'_def typ_at'_def ko_wp_at'_def) + apply (rule exI [where x = KOUserDataDevice]) + apply (drule mp) + apply (rule exI [where x = n]) + apply (simp add: shiftl_t2n) + apply (clarsimp simp: pspace_aligned'_def) + apply (drule (1) bspec [OF _ domI]) + apply (clarsimp simp: objBits_simps) + apply (fastforce dest!: pspace_distinctD' simp: objBits_simps) + done + +lemma typ_at_UserDataI: + "\ typ_at' UserDataT (p && ~~ mask pageBits) s'; + pspace_relation (kheap s) (ksPSpace s'); pspace_aligned s \ + \ \sz. typ_at (AArch (AUserData sz)) (p && ~~ mask (pageBitsForSize sz)) s" + apply (clarsimp simp: exists_disj obj_at'_def typ_at'_def ko_wp_at'_def) + apply (frule (1) in_related_pspace_dom) + apply (clarsimp simp: pspace_dom_def) + apply (clarsimp simp: pspace_relation_def dom_def) + apply (erule allE, erule impE, blast) + apply clarsimp + apply (drule (1) bspec) + apply clarsimp + apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) + apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def + cte_relation_def other_obj_relation_def + split: Structures_A.kernel_object.split_asm + Structures_H.kernel_object.split_asm + if_split_asm arch_kernel_obj.split_asm) + apply (rename_tac vmpage_size n) + apply (rule_tac x = vmpage_size in exI) + apply (subst conjunct2 [OF is_aligned_add_helper]) + apply (drule (1) pspace_alignedD) + apply simp + apply (simp add: shiftl_t2n mult_ac) + apply (erule word_less_power_trans2 [OF _ pbfs_atleast_pageBits]) + apply (case_tac vmpage_size, simp_all add: word_bits_conv bit_simps)[1] + apply (simp add: obj_at_def a_type_def) + done + +lemma typ_at_DeviceDataI: + "\ typ_at' UserDataDeviceT (p && ~~ mask pageBits) s'; + pspace_relation (kheap s) (ksPSpace s'); pspace_aligned s \ + \ \sz. 
typ_at (AArch (ADeviceData sz)) (p && ~~ mask (pageBitsForSize sz)) s" + apply (clarsimp simp: exists_disj obj_at'_def typ_at'_def ko_wp_at'_def) + apply (frule (1) in_related_pspace_dom) + apply (clarsimp simp: pspace_dom_def) + apply (clarsimp simp: pspace_relation_def dom_def) + apply (erule allE, erule impE, blast) + apply clarsimp + apply (drule (1) bspec) + apply clarsimp + apply (subst mask_lower_twice [where n = pageBits, OF pbfs_atleast_pageBits, symmetric]) + apply (clarsimp simp: obj_relation_cuts_def2 pte_relation_def + cte_relation_def other_obj_relation_def + split: Structures_A.kernel_object.split_asm + Structures_H.kernel_object.split_asm + if_split_asm arch_kernel_obj.split_asm) + apply (rename_tac vmpage_size n) + apply (rule_tac x = vmpage_size in exI) + apply (subst conjunct2 [OF is_aligned_add_helper]) + apply (drule (1) pspace_alignedD) + apply simp + apply (simp add: shiftl_t2n mult_ac) + apply (erule word_less_power_trans2 [OF _ pbfs_atleast_pageBits]) + apply (case_tac vmpage_size, simp_all add: word_bits_conv bit_simps)[1] + apply (simp add: obj_at_def a_type_def) + done + +lemma pointerInUserData_relation: + "\ (s,s') \ state_relation; valid_state' s'; valid_state s\ + \ pointerInUserData p s' = in_user_frame p s" + apply (simp add: pointerInUserData_def in_user_frame_def) + apply (rule iffI) + apply (erule typ_at_UserDataI, (clarsimp simp: valid_state_def)+)[1] + apply clarsimp + apply (drule_tac sz = sz and + n = "(p && mask (pageBitsForSize sz)) >> pageBits" + in typ_at_AUserDataI [where s = s and s' = s']) + apply (fastforce simp: valid_state'_def)+ + apply (rule shiftr_less_t2n') + apply (simp add: pbfs_atleast_pageBits mask_twice) + apply (case_tac sz, simp_all add: bit_simps)[1] + apply (subgoal_tac "(p && ~~ mask (pageBitsForSize sz)) + (p && mask (pageBitsForSize sz) >> pageBits) * 2 ^ pageBits = (p && ~~ mask pageBits)") + apply simp + apply (subst mult.commute) + apply (subst shiftl_t2n [symmetric]) + apply (simp add: shiftr_shiftl1) + apply (subst mask_out_add_aligned) + apply (rule is_aligned_neg_mask) + apply (simp add: pbfs_atleast_pageBits) + apply (subst add.commute) + apply (simp add: word_plus_and_or_coroll2) + done + +lemma pointerInDeviceData_relation: + "\ (s,s') \ state_relation; valid_state' s'; valid_state s\ + \ pointerInDeviceData p s' = in_device_frame p s" + apply (simp add: pointerInDeviceData_def in_device_frame_def) + apply (rule iffI) + apply (erule typ_at_DeviceDataI, (clarsimp simp: valid_state_def)+)[1] + apply clarsimp + apply (drule_tac sz = sz and + n = "(p && mask (pageBitsForSize sz)) >> pageBits" + in typ_at_ADeviceDataI [where s = s and s' = s']) + apply (fastforce simp: valid_state'_def)+ + apply (rule shiftr_less_t2n') + apply (simp add: pbfs_atleast_pageBits mask_twice) + apply (case_tac sz, simp_all add: bit_simps)[1] + apply (subgoal_tac "(p && ~~ mask (pageBitsForSize sz)) + (p && mask (pageBitsForSize sz) >> pageBits) * 2 ^ pageBits = (p && ~~ mask pageBits)") + apply simp + apply (subst mult.commute) + apply (subst shiftl_t2n [symmetric]) + apply (simp add: shiftr_shiftl1) + apply (subst mask_out_add_aligned) + apply (rule is_aligned_neg_mask) + apply (simp add: pbfs_atleast_pageBits) + apply (subst add.commute) + apply (simp add: word_plus_and_or_coroll2) + done + +lemma user_mem_relation: + "\(s,s') \ state_relation; valid_state' s'; valid_state s\ + \ user_mem' s' = user_mem s" + apply (rule ext) + apply (clarsimp simp: user_mem_def user_mem'_def pointerInUserData_relation pointerInDeviceData_relation) + apply 
(simp add: state_relation_def) + done + +lemma device_mem_relation: + "\(s,s') \ state_relation; valid_state' s'; valid_state s\ + \ device_mem' s' = device_mem s" + apply (rule ext) + apply (clarsimp simp: device_mem_def device_mem'_def pointerInUserData_relation + pointerInDeviceData_relation) + done + +lemma absKState_correct: + assumes invs: "einvs (s :: det_ext state)" and invs': "invs' s'" + assumes rel: "(s,s') \ state_relation" + shows "absKState s' = abs_state s" + using assms + apply (intro state.equality, simp_all add: absKState_def abs_state_def) + apply (rule absHeap_correct, clarsimp+) + apply (clarsimp elim!: state_relationE) + apply (rule absCDT_correct, clarsimp+) + apply (rule absIsOriginalCap_correct, clarsimp+) + apply (simp add: state_relation_def) + apply (simp add: state_relation_def) + apply (clarsimp simp: user_mem_relation invs_def invs'_def) + apply (simp add: state_relation_def) + apply (rule absInterruptIRQNode_correct, simp add: state_relation_def) + apply (rule absInterruptStates_correct, simp add: state_relation_def) + apply (rule absArchState_correct, simp) + apply (rule absExst_correct, simp+) + done + +text \The top-level invariance\ + +lemma set_thread_state_sched_act: + "\(\s. runnable state) and (\s. P (scheduler_action s))\ + set_thread_state thread state + \\rs s. P (scheduler_action (s::det_state))\" + apply (simp add: set_thread_state_def) + apply wp + apply (simp add: set_thread_state_ext_def) + apply wp + apply (rule hoare_pre_cont) + apply (rule_tac Q="\rv. (\s. runnable ts) and (\s. P (scheduler_action s))" + in hoare_strengthen_post) + apply wp + apply force + apply (wp gts_st_tcb_at)+ + apply (rule_tac Q="\rv. st_tcb_at ((=) state) thread and (\s. runnable state) and (\s. P (scheduler_action s))" in hoare_strengthen_post) + apply (simp add: st_tcb_at_def) + apply (wp obj_set_prop_at)+ + apply (force simp: st_tcb_at_def obj_at_def) + apply wp + apply clarsimp + done + +lemma activate_thread_sched_act: + "\ct_in_state activatable and (\s. P (scheduler_action s))\ + activate_thread + \\rs s. P (scheduler_action (s::det_state))\" + by (simp add: activate_thread_def set_thread_state_def arch_activate_idle_thread_def + | (wp set_thread_state_sched_act gts_wp)+ | wpc)+ + +lemma schedule_sched_act_rct[wp]: + "\\\ Schedule_A.schedule + \\rs (s::det_state). scheduler_action s = resume_cur_thread\" + unfolding Schedule_A.schedule_def + by (wpsimp) + +lemma call_kernel_sched_act_rct[wp]: + "\einvs and (\s. e \ Interrupt \ ct_running s) + and (\s. scheduler_action s = resume_cur_thread)\ + call_kernel e + \\rs (s::det_state). scheduler_action s = resume_cur_thread\" + apply (simp add: call_kernel_def) + apply (wp activate_thread_sched_act | simp)+ + apply (clarsimp simp: active_from_running) + done + +lemma kernel_entry_invs: + "\einvs and (\s. e \ Interrupt \ ct_running s) + and (\s. 0 < domain_time s) and valid_domain_list and (ct_running or ct_idle) + and (\s. scheduler_action s = resume_cur_thread)\ + kernel_entry e us + \\rv. einvs and (\s. ct_running s \ ct_idle s) + and (\s. 0 < domain_time s) and valid_domain_list + and (\s. scheduler_action s = resume_cur_thread)\" + apply (rule_tac Q="\rv. invs and (\s. ct_running s \ ct_idle s) and valid_sched and + (\s. 0 < domain_time s) and valid_domain_list and + valid_list and (\s. 
scheduler_action s = resume_cur_thread)" + in hoare_post_imp) + apply clarsimp + apply (simp add: kernel_entry_def) + apply (wp akernel_invs_det_ext call_kernel_valid_sched thread_set_invs_trivial + thread_set_ct_running thread_set_not_state_valid_sched + hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state + call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext + hoare_weak_lift_imp + | clarsimp simp add: tcb_cap_cases_def active_from_running)+ + done + +definition + "full_invs \ {((tc, s :: det_ext state), m, e). einvs s \ + (ct_running s \ ct_idle s) \ + (m = KernelMode \ e \ None) \ + (m = UserMode \ ct_running s) \ + (m = IdleMode \ ct_idle s) \ + (e \ None \ e \ Some Interrupt \ ct_running s) \ + 0 < domain_time s \ valid_domain_list s \ + (scheduler_action s = resume_cur_thread)}" + +lemma do_user_op_valid_list:"\valid_list\ do_user_op f tc \\_. valid_list\" + unfolding do_user_op_def + apply (wp | simp add: split_def)+ + done + +lemma do_user_op_valid_sched:"\valid_sched\ do_user_op f tc \\_. valid_sched\" + unfolding do_user_op_def + apply (wp | simp add: split_def)+ + done + +lemma do_user_op_sched_act: + "\\s. P (scheduler_action s)\ do_user_op f tc \\_ s. P (scheduler_action s)\" + unfolding do_user_op_def + apply (wp | simp add: split_def)+ + done + +lemma do_user_op_invs2: + "\einvs and ct_running and (\s. scheduler_action s = resume_cur_thread) + and (\s. 0 < domain_time s) and valid_domain_list \ + do_user_op f tc + \\_. (einvs and ct_running and (\s. scheduler_action s = resume_cur_thread)) + and (\s. 0 < domain_time s) and valid_domain_list \" + apply (rule_tac Q="\_. valid_list and valid_sched and + (\s. scheduler_action s = resume_cur_thread) and (invs and ct_running) and + (\s. 0 < domain_time s) and valid_domain_list" + in hoare_strengthen_post) + apply (wp do_user_op_valid_list do_user_op_valid_sched do_user_op_sched_act + do_user_op_invs | simp | force)+ + done + +lemmas ext_init_def = ext_init_det_ext_ext_def ext_init_unit_def + +lemma valid_list_init[simp]: + "valid_list init_A_st" + by (simp add: valid_list_2_def init_A_st_def ext_init_def init_cdt_def) + +lemmas valid_list_inits[simp] = valid_list_init[simplified] + +lemma valid_sched_init[simp]: + "valid_sched init_A_st" + apply (simp add: valid_sched_def init_A_st_def ext_init_def) + apply (clarsimp simp: valid_etcbs_def init_kheap_def st_tcb_at_kh_def obj_at_kh_def + obj_at_def is_etcb_at_def idle_thread_ptr_def + valid_queues_2_def ct_not_in_q_def not_queued_def + valid_sched_action_def is_activatable_def init_irq_node_ptr_def + arm_global_pt_ptr_def + ct_in_cur_domain_2_def valid_blocked_2_def valid_idle_etcb_def + etcb_at'_def default_etcb_def) + done + +lemma valid_domain_list_init[simp]: + "valid_domain_list init_A_st" + by (simp add: init_A_st_def ext_init_def valid_domain_list_def) + +lemma akernel_invariant: + "ADT_A uop \ full_invs" + unfolding full_invs_def + apply (rule invariantI) + apply (clarsimp simp: ADT_A_def subset_iff) + apply (frule bspec[OF akernel_init_invs]) + apply (simp add: Let_def Init_A_def) + apply (simp add: init_A_st_def ext_init_def) + apply (clarsimp simp: ADT_A_def global_automaton_def) + + apply (rename_tac tc' s' mode' e' tc s mode e) + apply (elim disjE) + apply ((clarsimp simp: kernel_call_A_def + | drule use_valid[OF _ kernel_entry_invs])+)[2] + apply ((clarsimp simp: do_user_op_A_def monad_to_transition_def + check_active_irq_A_def + | drule use_valid[OF _ do_user_op_invs2] + | drule use_valid[OF _ 
check_active_irq_invs_just_running])+)[2] + apply ((clarsimp simp add: check_active_irq_A_def + | drule use_valid[OF _ check_active_irq_invs])+)[1] + apply (clarsimp simp: ct_in_state_def st_tcb_at_def obj_at_def) + apply ((clarsimp simp add: do_user_op_A_def check_active_irq_A_def + | drule use_valid[OF _ do_user_op_invs2] + | drule use_valid[OF _ check_active_irq_invs_just_running])+)[1] + apply (clarsimp simp: ct_in_state_def st_tcb_at_def obj_at_def) + apply (clarsimp simp: ct_in_state_def st_tcb_at_def obj_at_def) + apply ((clarsimp simp add: check_active_irq_A_def + | drule use_valid[OF _ check_active_irq_invs])+)[1] + apply ((clarsimp simp add: check_active_irq_A_def + | drule use_valid[OF _ check_active_irq_invs_just_idle])+)[1] + apply ((clarsimp simp add: check_active_irq_A_def + | drule use_valid[OF _ check_active_irq_invs])+)[1] + done + +lemma dmo_getActiveIRQ_notin_non_kernel_IRQs[wp]: + "\\\ doMachineOp (getActiveIRQ True) \\irq _. irq \ Some ` non_kernel_IRQs\" + unfolding doMachineOp_def + by (wpsimp simp: getActiveIRQ_def in_monad split: if_split_asm) + +lemma non_kernel_IRQs_strg: + "invs' s \ irq \ Some ` non_kernel_IRQs \ Q \ + (\y. irq = Some y) \ invs' s \ (the irq \ non_kernel_IRQs \ P) \ Q" + by auto + +lemma ckernel_invs: + "\invs' and + (\s. e \ Interrupt \ ct_running' s) and + (\s. ksSchedulerAction s = ResumeCurrentThread)\ + callKernel e + \\rs. (\s. ksSchedulerAction s = ResumeCurrentThread) + and (invs' and (ct_running' or ct_idle'))\" + apply (simp add: callKernel_def) + apply (rule hoare_pre) + apply (wp activate_invs' activate_sch_act schedule_sch + schedule_sch_act_simple he_invs' schedule_invs' hoare_vcg_if_lift3 + hoare_drop_imp[where R="\_. kernelExitAssertions"] + | simp add: no_irq_getActiveIRQ + | strengthen non_kernel_IRQs_strg[where Q=True, simplified], simp cong: conj_cong)+ + done + +(* abstract and haskell have identical domain list fields *) +abbreviation valid_domain_list' :: "'a kernel_state_scheme \ bool" where + "valid_domain_list' \ \s. valid_domain_list_2 (ksDomSchedule s)" + +lemmas valid_domain_list'_def = valid_domain_list_2_def + +defs kernelExitAssertions_def: + "kernelExitAssertions s \ 0 < ksDomainTime s \ valid_domain_list' s" + +lemma callKernel_domain_time_left: + "\ \ \ callKernel e \\_ s. 0 < ksDomainTime s \ valid_domain_list' s \" + unfolding callKernel_def kernelExitAssertions_def by wpsimp + +lemma doMachineOp_sch_act_simple: + "doMachineOp f \sch_act_simple\" + by (wp sch_act_simple_lift) + +lemma kernelEntry_invs': + "\ invs' and (\s. e \ Interrupt \ ct_running' s) and + (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread) and + (\s. 0 < ksDomainTime s) and valid_domain_list' \ + kernelEntry e tc + \\rs. (\s. ksSchedulerAction s = ResumeCurrentThread) and + (invs' and (ct_running' or ct_idle')) and + (\s. 
0 < ksDomainTime s) and valid_domain_list' \" + apply (simp add: kernelEntry_def) + apply (wp ckernel_invs callKernel_domain_time_left + threadSet_invs_trivial threadSet_ct_running' + TcbAcc_R.dmo_invs' hoare_weak_lift_imp + doMachineOp_ct_in_state' doMachineOp_sch_act_simple + callKernel_domain_time_left + | clarsimp simp: user_memory_update_def no_irq_def tcb_at_invs' + valid_domain_list'_def)+ + done + +lemma absKState_correct': + "\einvs s; invs' s'; (s,s') \ state_relation\ + \ absKState s' = abs_state s" + apply (intro state.equality, simp_all add: absKState_def abs_state_def) + apply (rule absHeap_correct) + apply (clarsimp simp: valid_state_def valid_pspace_def)+ + apply (clarsimp dest!: state_relationD) + apply (rule absCDT_correct) + apply (clarsimp simp: valid_state_def valid_pspace_def + valid_state'_def valid_pspace'_def)+ + apply (rule absIsOriginalCap_correct, clarsimp+) + apply (simp add: state_relation_def) + apply (simp add: state_relation_def) + apply (clarsimp simp: user_mem_relation invs_def invs'_def) + apply (simp add: state_relation_def) + apply (rule absInterruptIRQNode_correct, simp add: state_relation_def) + apply (rule absInterruptStates_correct, simp add: state_relation_def) + apply (erule absArchState_correct) + apply (rule absExst_correct, simp, assumption+) + done + +lemma ptable_lift_abs_state[simp]: + "ptable_lift t (abs_state s) = ptable_lift t s" + by (simp add: ptable_lift_def abs_state_def) + +lemma ptable_rights_abs_state[simp]: + "ptable_rights t (abs_state s) = ptable_rights t s" + by (simp add: ptable_rights_def abs_state_def) + +lemma ptable_rights_imp_UserData: + assumes invs: "einvs s" and invs': "invs' s'" + assumes rel: "(s,s') : state_relation" + assumes rights: "ptable_rights t (absKState s') x \ {}" + assumes trans: + "ptable_lift t (absKState s') x = Some (AARCH64.addrFromPPtr y)" + shows "pointerInUserData y s' \ pointerInDeviceData y s'" +proof - + from invs invs' rel have [simp]: "absKState s' = abs_state s" + by - (rule absKState_correct', simp_all) + from invs have valid: "valid_state s" by auto + from invs' have valid': "valid_state' s'" by auto + have "in_user_frame y s \ in_device_frame y s " + by (rule ptable_rights_imp_frame[OF valid rights[simplified] + trans[simplified]]) + thus ?thesis + by (auto simp add: pointerInUserData_relation[OF rel valid' valid] + pointerInDeviceData_relation[OF rel valid' valid]) +qed + +definition + "ex_abs G \ \s'. \s. ((s :: (det_ext) state),s') \ state_relation \ G s" + +lemma device_update_invs': + "\invs'\doMachineOp (device_memory_update ds) + \\_. invs'\" + apply (simp add: doMachineOp_def device_memory_update_def simpler_modify_def select_f_def + gets_def get_def bind_def valid_def return_def) + by (clarsimp simp: invs'_def valid_state'_def valid_irq_states'_def valid_machine_state'_def) + +crunches doMachineOp + for ksDomainTime[wp]: "\s. P (ksDomainTime s)" + +lemma doUserOp_invs': + "\invs' and ex_abs einvs and + (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and + (\s. 0 < ksDomainTime s) and valid_domain_list'\ + doUserOp f tc + \\_. invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and + (\s. 
0 < ksDomainTime s) and valid_domain_list'\" + apply (simp add: doUserOp_def split_def ex_abs_def) + apply (wp device_update_invs' doMachineOp_ct_in_state' + | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def + user_memory_update_def))+ + apply (clarsimp simp: user_memory_update_def simpler_modify_def + restrict_map_def + split: option.splits) + apply (frule ptable_rights_imp_UserData[rotated 2], auto) + done + + +text \The top-level correspondence\ + +lemma None_drop: + "P \ x = None \ P" + by simp + +lemma Ex_Some_conv: + "((\y. x = Some y) \ P x) = (\y. x = Some y \ P (Some y))" + by auto + + +lemma kernel_corres': + "corres dc (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) + and (\s. scheduler_action s = resume_cur_thread)) + (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (call_kernel event) + (do _ \ runExceptT $ + handleEvent event `~catchError~` + (\_. withoutPreemption $ do + irq <- doMachineOp (getActiveIRQ True); + when (isJust irq) $ handleInterrupt (fromJust irq) + od); + _ \ ThreadDecls_H.schedule; + activateThread + od)" + unfolding call_kernel_def callKernel_def + apply (simp add: call_kernel_def callKernel_def) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule corres_split_handle[OF handleEvent_corres]) + apply simp + apply (rule corres_split[OF corres_machine_op]) + apply (rule corres_underlying_trivial) + apply (rule no_fail_getActiveIRQ) + apply clarsimp + apply (rule_tac x=irq in option_corres) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply (simp add: when_def) + apply (rule corres_when[simplified dc_def], simp) + apply simp + apply (rule handleInterrupt_corres[simplified dc_def]) + apply simp + apply (wp hoare_drop_imps hoare_vcg_all_lift)[1] + apply simp + apply (rule_tac Q="\irq s. invs' s \ + (\irq'. irq = Some irq' \ + intStateIRQTable (ksInterruptState s ) irq' \ + IRQInactive)" + in hoare_post_imp) + apply simp + apply (wp doMachineOp_getActiveIRQ_IRQ_active handle_event_valid_sched | simp)+ + apply (rule_tac Q="\_. \" and E="\_. invs'" in hoare_post_impErr) + apply wpsimp+ + apply (simp add: invs'_def valid_state'_def) + apply (rule corres_split[OF schedule_corres]) + apply (rule activateThread_corres) + apply (wp schedule_invs' hoare_vcg_if_lift2 dmo_getActiveIRQ_non_kernel + | simp cong: rev_conj_cong | strengthen None_drop | subst Ex_Some_conv)+ + apply (rule_tac Q="\_. valid_sched and invs and valid_list" and + E="\_. valid_sched and invs and valid_list" + in hoare_post_impErr) + apply (wp handle_event_valid_sched hoare_vcg_imp_lift' |simp)+ + apply (wp handle_event_valid_sched hoare_vcg_if_lift3 + | simp + | strengthen non_kernel_IRQs_strg[where Q=True, simplified], simp cong: conj_cong)+ + apply (clarsimp simp: active_from_running) + apply (clarsimp simp: active_from_running') + done + +lemma kernel_corres: + "corres dc (einvs and (\s. event \ Interrupt \ ct_running s) and (ct_running or ct_idle) and + (\s. scheduler_action s = resume_cur_thread) and + (\s. 0 < domain_time s \ valid_domain_list s)) + (invs' and (\s. event \ Interrupt \ ct_running' s) and (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (call_kernel event) (callKernel event)" + unfolding callKernel_def K_bind_def + apply (rule corres_guard_imp) + apply (rule corres_add_noop_lhs2) + apply (simp only: bind_assoc[symmetric]) + apply (rule corres_split[where r'=dc and + R="\_ s. 
0 < domain_time s \ valid_domain_list s" and + R'="\_. \"]) + apply (simp only: bind_assoc) + apply (rule kernel_corres') + apply (rule corres_bind_return2, rule corres_stateAssert_assume_stronger) + apply simp + apply (simp add: kernelExitAssertions_def state_relation_def) + apply (wp call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext) + apply wp + apply clarsimp + apply clarsimp + done + +lemma user_mem_corres: + "corres (=) invs invs' (gets (\x. g (user_mem x))) (gets (\x. g (user_mem' x)))" + by (clarsimp simp add: gets_def get_def return_def bind_def + invs_def invs'_def + corres_underlying_def user_mem_relation) + +lemma device_mem_corres: + "corres (=) invs invs' (gets (\x. g (device_mem x))) (gets (\x. g (device_mem' x)))" + by (clarsimp simp add: gets_def get_def return_def bind_def + invs_def invs'_def + corres_underlying_def device_mem_relation) + +lemma entry_corres: + "corres (=) (einvs and (\s. event \ Interrupt \ ct_running s) and + (\s. 0 < domain_time s) and valid_domain_list and (ct_running or ct_idle) and + (\s. scheduler_action s = resume_cur_thread)) + (invs' and (\s. event \ Interrupt \ ct_running' s) and + (\s. 0 < ksDomainTime s) and valid_domain_list' and (ct_running' or ct_idle') and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (kernel_entry event tc) (kernelEntry event tc)" + apply (simp add: kernel_entry_def kernelEntry_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split) + apply simp + apply (rule threadset_corresT) + apply (simp add: tcb_relation_def arch_tcb_relation_def + arch_tcb_context_set_def atcbContextSet_def) + apply (clarsimp simp: tcb_cap_cases_def cteSizeBits_def) + apply (clarsimp simp: tcb_cte_cases_def cteSizeBits_def) + apply (simp add: exst_same_def) + apply (rule corres_split[OF kernel_corres]) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule threadGet_corres) + apply (simp add: tcb_relation_def arch_tcb_relation_def + arch_tcb_context_get_def atcbContextGet_def) + apply wp+ + apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, + simp add: invs_def valid_state_def valid_pspace_def cur_tcb_def) + apply (rule hoare_strengthen_post, rule ckernel_invs, simp add: invs'_def cur_tcb'_def) + apply (wp thread_set_invs_trivial thread_set_ct_running + threadSet_invs_trivial threadSet_ct_running' + thread_set_not_state_valid_sched hoare_weak_lift_imp + hoare_vcg_disj_lift ct_in_state_thread_state_lift + | simp add: tcb_cap_cases_def ct_in_state'_def thread_set_no_change_tcb_state + | (wps, wp threadSet_st_tcb_at2) )+ + apply (clarsimp simp: invs_def cur_tcb_def valid_state_def valid_pspace_def) + apply (clarsimp simp: ct_in_state'_def) + done + +lemma corres_gets_machine_state: + "corres (=) \ \ (gets (f \ machine_state)) (gets (f \ ksMachineState))" + by (clarsimp simp: gets_def corres_underlying_def + in_monad bind_def get_def return_def state_relation_def) + +lemma do_user_op_corres: + "corres (=) (einvs and ct_running) + (invs' and (%s. 
ksSchedulerAction s = ResumeCurrentThread) and + ct_running') + (do_user_op f tc) (doUserOp f tc)" + apply (simp add: do_user_op_def doUserOp_def split_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule_tac r'="(=)" and P=einvs and P'=invs' in corres_split) + apply (fastforce dest: absKState_correct [rotated]) + apply (rule_tac r'="(=)" and P=einvs and P'=invs' in corres_split) + apply (fastforce dest: absKState_correct [rotated]) + apply (rule_tac r'="(=)" and P=invs and P'=invs' in corres_split) + apply (rule user_mem_corres) + apply (rule_tac r'="(=)" and P=invs and P'=invs' in corres_split) + apply (rule device_mem_corres) + apply (rule_tac r'="(=)" in corres_split) + apply (rule corres_gets_machine_state) + apply (rule_tac F = "dom (rvb \ addrFromPPtr) \ - dom rvd" in corres_gen_asm) + apply (rule_tac F = "dom (rvc \ addrFromPPtr) \ dom rvd" in corres_gen_asm) + apply simp + apply (rule_tac r'="(=)" in corres_split[OF corres_select]) + apply simp + apply (rule corres_underlying_split[OF corres_machine_op]) + apply simp + apply (rule corres_underlying_trivial) + apply (simp add: user_memory_update_def) + apply (wp | simp)+ + apply (rule corres_underlying_split[OF corres_machine_op,where Q = dc and Q'=dc]) + apply (rule corres_underlying_trivial) + apply (wp | simp add: dc_def device_memory_update_def)+ + apply (clarsimp simp: invs_def valid_state_def pspace_respects_device_region_def) + apply fastforce + done + +lemma ct_running_related: + "\ (a, c) \ state_relation; ct_running' c \ + \ ct_running a" + apply (clarsimp simp: ct_in_state_def ct_in_state'_def + curthread_relation) + apply (frule(1) st_tcb_at_coerce_abstract) + apply (erule st_tcb_weakenE) + apply (case_tac st, simp_all)[1] + done + +lemma ct_idle_related: + "\ (a, c) \ state_relation; ct_idle' c \ + \ ct_idle a" + apply (clarsimp simp: ct_in_state_def ct_in_state'_def + curthread_relation) + apply (frule(1) st_tcb_at_coerce_abstract) + apply (erule st_tcb_weakenE) + apply (case_tac st, simp_all)[1] + done + +definition + "full_invs' \ {((tc,s),m,e). invs' s \ + ex_abs (einvs::det_ext state \ bool) s \ + ksSchedulerAction s = ResumeCurrentThread \ + (ct_running' s \ ct_idle' s) \ + (m = KernelMode \ e \ None) \ + (m = UserMode \ ct_running' s) \ + (m = IdleMode \ ct_idle' s) \ + (e \ None \ e \ Some Interrupt \ ct_running' s) \ + 0 < ksDomainTime s \ valid_domain_list' s}" + +lemma check_active_irq_corres': + "corres (=) \ \ (check_active_irq) (checkActiveIRQ)" + apply (simp add: check_active_irq_def checkActiveIRQ_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_machine_op[OF corres_underlying_trivial], where R="\_. \" and R'="\_. \"]) + apply simp + apply (rule no_fail_getActiveIRQ) + apply (wp | simp )+ + done + +lemma check_active_irq_corres: + "corres (=) + (invs and (ct_running or ct_idle) and einvs and (\s. scheduler_action s = resume_cur_thread) + and (\s. 0 < domain_time s) and valid_domain_list) + (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) + and (\s. 0 < ksDomainTime s) and valid_domain_list' and (ct_running' or ct_idle')) + (check_active_irq) (checkActiveIRQ)" + apply (rule corres_guard_imp) + apply (rule check_active_irq_corres', auto) + done + +lemma checkActiveIRQ_just_running_corres: + "corres (=) + (invs and ct_running and einvs and (\s. scheduler_action s = resume_cur_thread) + and (\s. 0 < domain_time s) and valid_domain_list) + (invs' and ct_running' + and (\s. 
0 < ksDomainTime s) and valid_domain_list' + and (\s. ksSchedulerAction s = ResumeCurrentThread)) + (check_active_irq) (checkActiveIRQ)" + apply (rule corres_guard_imp) + apply (rule check_active_irq_corres', auto) + done + +lemma checkActiveIRQ_just_idle_corres: + "corres (=) + (invs and ct_idle and einvs and (\s. scheduler_action s = resume_cur_thread) + and (\s. 0 < domain_time s) and valid_domain_list) + (invs' and ct_idle' + and (\s. 0 < ksDomainTime s) and valid_domain_list' + and (\s. ksSchedulerAction s = ResumeCurrentThread)) + (check_active_irq) (checkActiveIRQ)" + apply (rule corres_guard_imp) + apply (rule check_active_irq_corres', auto) + done + +lemma checkActiveIRQ_invs': + "\invs' and ex_abs invs and (ct_running' or ct_idle') + and (\s. ksSchedulerAction s = ResumeCurrentThread)\ + checkActiveIRQ + \\_. invs' and (ct_running' or ct_idle') + and (\s. ksSchedulerAction s = ResumeCurrentThread)\" + apply (simp add: checkActiveIRQ_def ex_abs_def) + apply (wp dmo_invs' | simp)+ + done + +lemma checkActiveIRQ_invs'_just_running: + "\invs' and ex_abs invs and ct_running' + and (\s. ksSchedulerAction s = ResumeCurrentThread)\ + checkActiveIRQ + \\_. invs' and ct_running' + and (\s. ksSchedulerAction s = ResumeCurrentThread)\" + apply (simp add: checkActiveIRQ_def) + apply (wp | simp)+ + done + +lemma checkActiveIRQ_invs'_just_idle: + "\invs' and ex_abs invs and ct_idle' + and (\s. ksSchedulerAction s = ResumeCurrentThread)\ + checkActiveIRQ + \\_. invs' and ct_idle' + and (\s. ksSchedulerAction s = ResumeCurrentThread)\" + apply (simp add: checkActiveIRQ_def) + apply (wp | simp)+ + done + +lemma sched_act_rct_related: + "\ (a, c) \ state_relation; ksSchedulerAction c = ResumeCurrentThread\ + \ scheduler_action a = resume_cur_thread" + by (case_tac "scheduler_action a", simp_all add: state_relation_def) + +lemma domain_time_rel_eq: + "(a, c) \ state_relation \ P (ksDomainTime c) = P (domain_time a)" + by (clarsimp simp: state_relation_def) + +lemma domain_list_rel_eq: + "(a, c) \ state_relation \ P (ksDomSchedule c) = P (domain_list a)" + by (clarsimp simp: state_relation_def) + +crunch valid_objs': doUserOp, checkActiveIRQ valid_objs' + (wp: crunch_wps) + +lemma ckernel_invariant: + "ADT_H uop \ full_invs'" + unfolding full_invs'_def + supply word_neq_0_conv[simp] + supply domain_time_rel_eq[simp] domain_list_rel_eq[simp] + apply (rule invariantI) + apply (clarsimp simp add: ADT_H_def) + apply (subst conj_commute, simp) + apply (rule conjI) + apply (frule init_refinement[simplified subset_eq, THEN bspec]) + apply (clarsimp simp: ex_abs_def lift_state_relation_def) + apply (frule akernel_init_invs[THEN bspec]) + apply (rule_tac x = s in exI) + apply (clarsimp simp: Init_A_def) + apply (insert ckernel_init_invs)[1] + apply clarsimp + apply (frule ckernel_init_sch_norm) + apply (frule ckernel_init_ctr) + apply (frule ckernel_init_domain_time) + apply (frule ckernel_init_domain_list) + apply (fastforce simp: Init_H_def valid_domain_list'_def) + apply (clarsimp simp: ADT_A_def ADT_H_def global_automaton_def) + + apply (erule_tac P="a \ (\x. 
b x)" for a b in disjE) + + apply (clarsimp simp: kernel_call_H_def) + + apply (drule use_valid[OF _ valid_corres_combined + [OF kernel_entry_invs entry_corres], + OF _ kernelEntry_invs'[THEN hoare_weaken_pre]]) + apply fastforce + apply (fastforce simp: ex_abs_def sch_act_simple_def ct_running_related ct_idle_related + sched_act_rct_related) + apply (clarsimp simp: kernel_call_H_def) + apply (fastforce simp: ex_abs_def sch_act_simple_def ct_running_related ct_idle_related + sched_act_rct_related) + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp add: do_user_op_H_def monad_to_transition_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule doUserOp_valid_objs') + apply (rule valid_corres_combined[OF do_user_op_invs2 corres_guard_imp2[OF do_user_op_corres]]) + apply clarsimp + apply (rule doUserOp_invs'[THEN hoare_weaken_pre]) + apply (fastforce simp: ex_abs_def) + apply (clarsimp simp: invs_valid_objs' ex_abs_def, rule_tac x=s in exI, + clarsimp simp: ct_running_related sched_act_rct_related) + apply (clarsimp simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_running_related sched_act_rct_related) + + apply (erule_tac P="a \ b \ c \ (\x. d x)" for a b c d in disjE) + apply (clarsimp simp add: do_user_op_H_def monad_to_transition_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule doUserOp_valid_objs') + apply (rule valid_corres_combined[OF do_user_op_invs2 corres_guard_imp2[OF do_user_op_corres]]) + apply clarsimp + apply (rule doUserOp_invs'[THEN hoare_weaken_pre]) + apply (fastforce simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_running_related sched_act_rct_related) + apply (fastforce simp: ex_abs_def) + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: check_active_irq_H_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule checkActiveIRQ_valid_objs') + apply (rule valid_corres_combined[OF check_active_irq_invs_just_running checkActiveIRQ_just_running_corres]) + apply (rule checkActiveIRQ_invs'_just_running[THEN hoare_weaken_pre]) + apply (fastforce simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_running_related sched_act_rct_related) + apply (fastforce simp: ex_abs_def) + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: check_active_irq_H_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule checkActiveIRQ_valid_objs') + apply (rule valid_corres_combined[OF check_active_irq_invs_just_idle checkActiveIRQ_just_idle_corres]) + apply (rule checkActiveIRQ_invs'_just_idle[THEN hoare_weaken_pre]) + apply clarsimp + apply (fastforce simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_idle_related sched_act_rct_related) + apply (fastforce simp: ex_abs_def) + + apply (clarsimp simp: check_active_irq_H_def) + apply (drule use_valid) + apply (rule hoare_vcg_conj_lift) + apply (rule checkActiveIRQ_valid_objs') + apply (rule valid_corres_combined[OF check_active_irq_invs check_active_irq_corres]) + apply (rule checkActiveIRQ_invs'[THEN hoare_weaken_pre]) + apply clarsimp + apply (fastforce simp: ex_abs_def) + apply (fastforce simp: ex_abs_def ct_running_related ct_idle_related sched_act_rct_related) + apply (fastforce simp: ex_abs_def) + done + +text \The top-level theorem\ + +lemma fw_sim_A_H: + "LI (ADT_A uop) + (ADT_H uop) + (lift_state_relation state_relation) + (full_invs \ full_invs')" + apply (unfold LI_def full_invs_def full_invs'_def) + apply (simp add: ADT_H_def ADT_A_def) + apply (intro 
conjI) + apply (rule init_refinement) + apply (clarsimp simp: rel_semi_def relcomp_unfold in_lift_state_relation_eq) + apply (rename_tac tc ak m ev tc' ck' m' ev' ck) + apply (simp add: global_automaton_def) + + apply (erule_tac P="a \ (\x. b x)" for a b in disjE) + apply (clarsimp simp add: kernel_call_H_def kernel_call_A_def) + apply (rule rev_mp, rule_tac tc=tc and event=x in entry_corres) + apply (clarsimp simp: corres_underlying_def) + apply (drule (1) bspec) + apply (clarsimp simp: sch_act_simple_def) + apply (drule (1) bspec) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule_tac x=b in exI) + apply (rule conjI) + apply (rule impI, simp) + apply (frule (2) ct_running_related) + apply clarsimp + apply (rule_tac x=b in exI) + apply (drule use_valid, rule kernelEntry_invs') + apply (simp add: sch_act_simple_def) + apply clarsimp + apply (frule (1) ct_idle_related) + apply (clarsimp simp: ct_in_state_def st_tcb_at_def obj_at_def) + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: do_user_op_H_def do_user_op_A_def monad_to_transition_def) + apply (rule rev_mp, rule_tac tc1=tc and f1=uop and P="ct_running and einvs" in corres_guard_imp2[OF do_user_op_corres]) + apply simp + apply (clarsimp simp add: corres_underlying_def) + apply (drule (1) bspec, clarsimp) + apply (drule (1) bspec, clarsimp) + apply fastforce + + apply (erule_tac P="a \ b \ c \ (\x. d x)" for a b c d in disjE) + apply (clarsimp simp: do_user_op_H_def do_user_op_A_def monad_to_transition_def) + apply (rule rev_mp, rule_tac tc1=tc and f1=uop and P="ct_running and einvs" in corres_guard_imp2[OF do_user_op_corres]) + apply simp + apply (clarsimp simp add: corres_underlying_def) + apply (drule (1) bspec, clarsimp) + apply (drule (1) bspec, clarsimp) + apply fastforce + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: check_active_irq_H_def check_active_irq_A_def) + apply (rule rev_mp, rule check_active_irq_corres) + apply (clarsimp simp: corres_underlying_def) + apply fastforce + + apply (erule_tac P="a \ b" for a b in disjE) + apply (clarsimp simp: check_active_irq_H_def check_active_irq_A_def) + apply (rule rev_mp, rule check_active_irq_corres) + apply (clarsimp simp: corres_underlying_def) + apply fastforce + + apply (clarsimp simp: check_active_irq_H_def check_active_irq_A_def) + apply (rule rev_mp, rule check_active_irq_corres) + apply (clarsimp simp: corres_underlying_def) + apply fastforce + + apply (clarsimp simp: absKState_correct dest!: lift_state_relationD) + done + +theorem refinement: + "ADT_H uop \ ADT_A uop" + apply (rule sim_imp_refines) + apply (rule L_invariantI) + apply (rule akernel_invariant) + apply (rule ckernel_invariant) + apply (rule fw_sim_A_H) + done + +end + +end diff --git a/proof/refine/AARCH64/Retype_R.thy b/proof/refine/AARCH64/Retype_R.thy new file mode 100644 index 0000000000..0df5280d1d --- /dev/null +++ b/proof/refine/AARCH64/Retype_R.thy @@ -0,0 +1,5300 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Retype refinement +*) + +theory Retype_R +imports VSpace_R +begin + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +definition + APIType_map2 :: "kernel_object + AARCH64_H.object_type \ Structures_A.apiobject_type" +where + "APIType_map2 ty \ case ty of + Inr (APIObjectType ArchTypes_H.Untyped) \ Structures_A.Untyped + | Inr (APIObjectType ArchTypes_H.TCBObject) \ Structures_A.TCBObject + | Inr (APIObjectType ArchTypes_H.EndpointObject) \ Structures_A.EndpointObject + | Inr (APIObjectType ArchTypes_H.NotificationObject) \ Structures_A.NotificationObject + | Inr (APIObjectType ArchTypes_H.CapTableObject) \ Structures_A.CapTableObject + | Inr LargePageObject \ ArchObject LargePageObj + | Inr HugePageObject \ ArchObject HugePageObj + | Inr PageTableObject \ ArchObject PageTableObj + | Inr VSpaceObject \ ArchObject VSpaceObj + | Inl (KOArch (KOASIDPool _)) \ ArchObject ASIDPoolObj + | Inr VCPUObject \ ArchObject AARCH64_A.VCPUObj + | _ \ ArchObject SmallPageObj" + +lemma placeNewObject_def2: + "placeNewObject ptr val gb = createObjects' ptr 1 (injectKO val) gb" + apply (clarsimp simp:placeNewObject_def placeNewObject'_def + createObjects'_def shiftL_nat) + done + +lemma createObjects_ret: + "\n < 2^word_bits;n\ 0\ \ + \\\ createObjects y n ko gbits + \\r s. r = map (\p. ptr_add y (p * 2 ^ objBitsKO ko * 2 ^ gbits)) + [0..< n]\" + unfolding createObjects_def createObjects'_def + apply (simp add: split_def) + apply (wp|simp cong: if_cong)+ + apply (clarsimp simp: ptr_add_def upto_enum_def o_def + unat_sub word_le_nat_alt + power_sub[symmetric] + objBits_def[symmetric] + simp del: upt_Suc) + apply (clarsimp simp: unat_of_nat_minus_1 word_bits_def + shiftl_t2n power_add) + done + +lemma objBitsKO_bounded2[simp]: + "objBitsKO ko < word_bits" + by (simp add: objBits_simps' word_bits_def pageBits_def pte_bits_def word_size_bits_def + split: kernel_object.split arch_kernel_object.split) + +definition + APIType_capBits :: "AARCH64_H.object_type \ nat \ nat" +where + "APIType_capBits ty us \ case ty of + APIObjectType ArchTypes_H.Untyped \ us + | APIObjectType ArchTypes_H.TCBObject \ objBits (makeObject :: tcb) + | APIObjectType ArchTypes_H.EndpointObject \ objBits (makeObject :: endpoint) + | APIObjectType ArchTypes_H.NotificationObject \ objBits (makeObject :: Structures_H.notification) + | APIObjectType ArchTypes_H.CapTableObject \ objBits (makeObject :: cte) + us + | SmallPageObject \ pageBitsForSize ARMSmallPage + | LargePageObject \ pageBitsForSize ARMLargePage + | HugePageObject \ pageBitsForSize ARMHugePage + | PageTableObject \ ptBits NormalPT_T + | VSpaceObject \ ptBits VSRootPT_T + | VCPUObject \ vcpuBits" + +definition + makeObjectKO :: "bool \ (kernel_object + AARCH64_H.object_type) \ kernel_object" +where + "makeObjectKO dev ty \ case ty of + Inl KOUserData \ Some KOUserData + | Inl (KOArch (KOASIDPool _)) \ Some (KOArch (KOASIDPool makeObject)) + | Inl (KOArch (KOVCPU _)) \ Some (KOArch (KOVCPU makeObject)) + | Inr VCPUObject \ Some (KOArch (KOVCPU makeObject)) + | Inr (APIObjectType ArchTypes_H.TCBObject) \ Some (KOTCB makeObject) + | Inr (APIObjectType ArchTypes_H.EndpointObject) \ Some (KOEndpoint makeObject) + | Inr (APIObjectType ArchTypes_H.NotificationObject) \ Some (KONotification makeObject) + | Inr (APIObjectType ArchTypes_H.CapTableObject) \ Some (KOCTE makeObject) + | Inr PageTableObject \ Some (KOArch (KOPTE makeObject)) + | Inr VSpaceObject \ Some (KOArch (KOPTE makeObject)) + | Inr SmallPageObject \ Some (if dev then KOUserDataDevice else KOUserData) + | Inr LargePageObject \ Some(if dev then KOUserDataDevice else KOUserData) + | Inr HugePageObject \ Some (if dev 
then KOUserDataDevice else KOUserData) + | _ \ None" + +text \makeObject etc. lemmas\ + +lemma NullCap_valid' [iff]: "s \' capability.NullCap" + unfolding valid_cap'_def by simp + +lemma valid_obj_makeObject_cte [simp]: + "valid_obj' (KOCTE makeObject) s" + unfolding valid_obj'_def valid_cte'_def + by (clarsimp simp: makeObject_cte) + +lemma valid_obj_makeObject_tcb [simp]: + "valid_obj' (KOTCB makeObject) s" + unfolding valid_obj'_def valid_tcb'_def valid_tcb_state'_def valid_arch_tcb'_def + by (clarsimp simp: makeObject_tcb makeObject_cte tcb_cte_cases_def minBound_word newArchTCB_def + cteSizeBits_def) + +lemma valid_obj_makeObject_endpoint [simp]: + "valid_obj' (KOEndpoint makeObject) s" + unfolding valid_obj'_def valid_ep'_def + by (clarsimp simp: makeObject_endpoint) + +lemma valid_obj_makeObject_notification [simp]: + "valid_obj' (KONotification makeObject) s" + unfolding valid_obj'_def valid_ntfn'_def + by (clarsimp simp: makeObject_notification) + +lemma valid_obj_makeObject_user_data [simp]: + "valid_obj' (KOUserData) s" + unfolding valid_obj'_def by simp + +lemma valid_obj_makeObject_user_data_device [simp]: + "valid_obj' (KOUserDataDevice) s" + unfolding valid_obj'_def by simp + +lemma valid_obj_makeObject_pte[simp]: + "valid_obj' (KOArch (KOPTE makeObject)) s" + unfolding valid_obj'_def by (simp add: makeObject_pte) + +lemma valid_obj_makeObject_asid_pool[simp]: + "valid_obj' (KOArch (KOASIDPool makeObject)) s" + unfolding valid_obj'_def + by (simp add: makeObject_asidpool Let_def ran_def dom_def) + +lemma valid_obj_makeObject_vcpu[simp]: + "valid_obj' (KOArch (KOVCPU makeObject)) s" + unfolding valid_obj'_def + by (simp add: makeObject_vcpu makeVCPUObject_def) + +lemmas valid_obj_makeObject_rules = + valid_obj_makeObject_user_data valid_obj_makeObject_tcb + valid_obj_makeObject_endpoint valid_obj_makeObject_notification + valid_obj_makeObject_cte valid_obj_makeObject_pte + valid_obj_makeObject_asid_pool valid_obj_makeObject_user_data_device + valid_obj_makeObject_vcpu + +text \On the abstract side\ + +text \Lemmas for createNewObjects etc.\ + +lemma pspace_dom_upd: + assumes orth: "set as \ dom ps = {}" + shows "pspace_dom (foldr (\p ps. ps(p \ ko)) as ps) = + pspace_dom ps \ (\x \ set as. fst ` obj_relation_cuts ko x)" + using orth + apply (subst foldr_upd_app_if) + apply (rule set_eqI, simp add: pspace_dom_def) + apply (rule iffI) + apply (clarsimp split: if_split_asm) + apply (rule rev_bexI, erule domI) + apply (fastforce simp: image_def) + apply (erule disjE) + apply clarsimp + apply (rule rev_bexI) + apply (clarsimp simp: domIff) + apply (erule exI) + apply clarsimp + apply (intro conjI impI) + apply (drule equals0D, erule notE, erule IntI, erule domI) + apply (fastforce simp: image_def) + apply clarsimp + apply (rule rev_bexI) + apply (clarsimp simp: domIff) + apply (erule(1) notE) + apply clarsimp + apply (fastforce simp: image_def) + done + +definition + "new_cap_addrs \ \n ptr ko. map (\p. ptr + ((of_nat p :: machine_word) << (objBitsKO ko))) + [0 ..< n]" + +definition + null_filter' :: "('a \ cte) \ ('a \ cte)" +where + "null_filter' f \ \x. 
if f x = Some (CTE NullCap nullMDBNode) then None else f x" + +lemma across_null_filter_eq': + assumes eq: "null_filter' xs = null_filter' ys" + shows "\ xs x = Some v; ys x = Some v \ R; + \ v = CTE NullCap nullMDBNode; ys x = None \ \ R \ + \ R" + apply (cases "null_filter' xs x") + apply (subgoal_tac "null_filter' ys x = None") + apply (simp add: null_filter'_def split: if_split_asm) + apply (simp add: eq) + apply (subgoal_tac "null_filter' ys x = Some a") + apply (simp add: null_filter'_def split: if_split_asm) + apply (simp add: eq) + done + +lemma null_filter_parent_of'': + "\ null_filter' xs = null_filter' ys; xs \ x \ c; c \ 0 \ + \ ys \ x \ c" + apply (clarsimp simp add: mdb_next_unfold) + apply (drule arg_cong[where f="\xs. xs x"]) + apply (simp add: null_filter'_def nullPointer_def split: if_split_asm) + done + +lemma null_filter_parentOf: + "\ null_filter' xs = null_filter' ys; xs \ x parentOf y \ + \ ys \ x parentOf y" + apply (clarsimp simp add: parentOf_def) + apply (rule across_null_filter_eq'[where x=x], assumption+) + apply (erule(1) across_null_filter_eq') + apply clarsimp + apply simp + apply simp + done + +lemma null_filter_descendant: + "\ null_filter' xs = null_filter' ys; xs \ x \ y \ + \ ys \ x \ y" + apply (erule subtree.induct) + apply (rule subtree.direct_parent) + apply (erule(2) null_filter_parent_of'') + apply assumption + apply (erule(1) null_filter_parentOf) + apply (erule subtree.trans_parent) + apply (erule(2) null_filter_parent_of'') + apply assumption + apply (erule(1) null_filter_parentOf) + done + +lemma null_filter_descendants_of': + "null_filter' xs = null_filter' ys + \ descendants_of' x xs = descendants_of' x ys" + apply (simp add: descendants_of'_def) + apply (rule set_eqI, rule iffI) + apply simp + apply (erule(1) null_filter_descendant) + apply simp + apply (erule(1) null_filter_descendant[OF sym]) + done + +lemma descendants_of_cte_at': + "\ p \ descendants_of x (cdt s); valid_mdb s \ + \ cte_wp_at (\c. c \ cap.NullCap) p s" + apply (simp add: descendants_of_def) + apply (drule tranclD2) + apply (clarsimp simp: cdt_parent_defs valid_mdb_def mdb_cte_at_def + simp del: split_paired_All) + apply (fastforce elim: cte_wp_at_weakenE) + done + + +lemma descendants_of_cte_at2': + "\ p \ descendants_of x (cdt s); valid_mdb s \ + \ cte_wp_at (\c. c \ cap.NullCap) x s" + apply (simp add: descendants_of_def) + apply (drule tranclD) + apply (clarsimp simp: cdt_parent_defs valid_mdb_def mdb_cte_at_def + simp del: split_paired_All) + apply (fastforce elim: cte_wp_at_weakenE) + done + +lemma cte_at_next_slot'': + notes split_paired_All[simp del] split_paired_Ex[simp del] + shows "\valid_list s; valid_mdb s; finite_depth (cdt s)\ + \ next_slot p (cdt_list s) (cdt s) = Some n \ cte_wp_at (\c. 
c \ cap.NullCap) p s" + apply(simp add: next_slot_def) + apply(simp split: if_split_asm) + apply(drule next_childD, simp) + apply(rule_tac p=n in descendants_of_cte_at2') + apply(simp add: child_descendant) + apply(simp) + apply(subgoal_tac "next_not_child_dom (p, cdt_list s, cdt s)") + prefer 2 + apply(simp add: next_not_child_termination valid_mdb_def valid_list_def) + apply(simp split: if_split_asm) + apply(case_tac "cdt s p") + apply(simp) + apply(rule descendants_of_cte_at') + apply(simp add: descendants_of_def cdt_parent_defs) + apply(rule r_into_trancl, simp) + apply(simp) + apply(drule next_sibD) + apply(elim exE conjE) + apply(drule after_in_list_in_list) + apply(rule descendants_of_cte_at') + apply(simp add: descendants_of_def cdt_parent_defs) + apply(rule r_into_trancl, simp) + apply(simp) + done + + +lemma state_relation_null_filterE: + "\ (s, s') \ state_relation; t = kheap_update f (ekheap_update ef s); + \f' g' h' pt_fn'. + t' = s'\ksPSpace := f' (ksPSpace s'), gsUserPages := g' (gsUserPages s'), + gsCNodes := h' (gsCNodes s'), + ksArchState := (ksArchState s') \gsPTTypes := pt_fn' (gsPTTypes (ksArchState s'))\\; + null_filter (caps_of_state t) = null_filter (caps_of_state s); + null_filter' (ctes_of t') = null_filter' (ctes_of s'); + pspace_relation (kheap t) (ksPSpace t'); + ekheap_relation (ekheap t) (ksPSpace t'); + ghost_relation (kheap t) (gsUserPages t') (gsCNodes t') (gsPTTypes (ksArchState t')); + valid_list s; + pspace_aligned' s'; pspace_distinct' s'; valid_objs s; valid_mdb s; + pspace_aligned' t'; pspace_distinct' t'; + mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s) \ + \ (t, t') \ state_relation" + apply (clarsimp simp: state_relation_def) + apply (intro conjI) + apply (simp add: cdt_relation_def cte_wp_at_caps_of_state) + apply (elim allEI) + apply clarsimp + apply (erule(1) across_null_filter_eq) + apply simp + apply (rule null_filter_descendants_of', simp) + apply simp + apply (case_tac "cdt s (a, b)") + apply (subst mdb_cte_at_no_descendants, assumption) + apply (simp add: cte_wp_at_caps_of_state swp_def) + apply (cut_tac s="kheap_update f (ekheap_update ef s)" and + s'="s'\ksPSpace := f' (ksPSpace s'), + gsUserPages := g' (gsUserPages s'), + gsCNodes := h' (gsCNodes s'), + ksArchState := ksArchState s' \gsPTTypes := pt_fn' (gsPTTypes (ksArchState s'))\\" + in pspace_relation_ctes_ofI, simp_all)[1] + apply (simp add: trans_state_update[symmetric] del: trans_state_update) + apply (erule caps_of_state_cteD) + apply (clarsimp simp: descendants_of'_def) + apply (case_tac cte) + apply (erule Null_not_subtree[rotated]) + apply simp + apply (drule(1) mdb_cte_atD) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply(simp add: cdt_list_relation_def cte_wp_at_caps_of_state) + apply(elim allEI) + apply(clarsimp) + apply(case_tac "next_slot (a, b) (cdt_list (s)) (cdt s)") + apply(simp) + apply(subgoal_tac "cte_wp_at (\c. c \ cap.NullCap) (a, b) s") + apply(drule_tac f="\cs. cs (a, b)" in arg_cong) + apply(clarsimp simp: cte_wp_at_caps_of_state) + apply(clarsimp simp: null_filter_def split: if_split_asm) + apply(drule_tac f="\ctes. 
ctes (cte_map (a, b))" in arg_cong) + apply(simp add: null_filter'_def cte_wp_at_ctes_of split: if_split_asm) + apply(frule pspace_relation_cte_wp_at) + apply(simp add: cte_wp_at_caps_of_state) + apply(simp) + apply(simp) + apply(simp add: cte_wp_at_ctes_of) + apply (simp add: mdb_cte_at_def) + apply(frule finite_depth) + apply(frule(3) cte_at_next_slot'') + apply simp + apply (simp add: revokable_relation_def) + apply (elim allEI, rule impI, drule(1) mp, elim allEI) + apply (clarsimp elim!: null_filterE) + apply (drule(3) pspace_relation_cte_wp_at [OF _ caps_of_state_cteD]) + apply (drule_tac f="\ctes. ctes (cte_map (a, b))" in arg_cong) + apply (clarsimp simp: null_filter'_def cte_wp_at_ctes_of + split: if_split_asm) + apply (simp add: arch_state_relation_def) + done + +lemma lookupAround2_pspace_no: + "is_aligned ptr sz \ + (case fst (lookupAround2 (ptr + 2 ^ sz - 1) ps) of None \ return () + | Some (x, y) \ haskell_assert (x < fromPPtr ptr) []) + = assert ({ptr..ptr + 2 ^ sz - 1} \ dom ps = {})" + apply (simp add: assert_def split: option.split) + apply safe + apply (clarsimp simp: lookupAround2_None1) + apply (clarsimp simp: lookupAround2_char1) + apply (clarsimp simp: lookupAround2_char1) + apply (drule_tac a=a in equals0D) + apply (simp add: linorder_not_less) + apply fastforce + done + +lemma pspace_no_overlap_disjoint': + "\pspace_aligned' s;pspace_no_overlap' x n s\ + \ {x .. (x && ~~ mask n) + 2 ^ n - 1} \ dom (ksPSpace s) = {}" + unfolding pspace_no_overlap'_def + apply (rule disjointI) + apply (rule ccontr) + apply (clarsimp simp: mask_def add_diff_eq) + apply (elim allE impE notE) + apply (simp add:field_simps)+ + apply (erule(2) order_trans[OF _ is_aligned_no_overflow,OF _ pspace_alignedD']) + apply (erule(1) is_aligned_no_overflow[OF pspace_alignedD']) + apply (erule order_trans) + apply (simp add:p_assoc_help) +done + +lemma foldr_update_ko_wp_at': + assumes pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + shows + "ko_wp_at' P p (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s) + = (if p \ set addrs then P obj + else ko_wp_at' P p s)" + (is "ko_wp_at' P p ?s' = ?Q") + apply (clarsimp simp: ko_wp_at'_def al) + apply (intro conjI impI) + apply safe[1] + apply (rule pspace_distinctD' [OF _ pv'(2)]) + apply simp + apply safe[1] + apply (simp add: ps_clear_def dom_if_Some) + apply blast + apply simp + apply (rule pspace_distinctD' [OF _ pv'(2)]) + apply simp + done + +lemma foldr_update_obj_at': + assumes pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + shows + "obj_at' P p (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s) + = (if p \ set addrs then (\obj'. 
projectKO_opt obj = Some obj' \ P obj') + else obj_at' P p s)" + apply (simp only: obj_at'_real_def) + apply (rule foldr_update_ko_wp_at' [OF pv pv' al]) + done + +lemma makeObjectKO_eq: + assumes x: "makeObjectKO dev tp = Some v" + shows + "(v = KOCTE cte) = + (tp = Inr (APIObjectType ArchTypes_H.CapTableObject) \ cte = makeObject)" + "(v = KOTCB tcb) = + (tp = Inr (APIObjectType ArchTypes_H.TCBObject) \ tcb = makeObject)" + using x + by (simp add: makeObjectKO_def eq_commute + split: apiobject_type.split_asm sum.split_asm kernel_object.split_asm + AARCH64_H.object_type.split_asm arch_kernel_object.split_asm)+ + +lemma pspace_no_overlap_base': + "\pspace_aligned' s;pspace_no_overlap' x n s; is_aligned x n \ \ ksPSpace s x = None" + apply (drule(1) pspace_no_overlap_disjoint') + apply (drule equals0D[where a=x]) + apply (rule ccontr, clarsimp) + apply (erule is_aligned_get_word_bits) + apply (erule impE) + apply (frule mask_out_add_aligned[where q = 0,simplified,symmetric]) + apply (fastforce simp add: is_aligned_no_overflow) + apply clarsimp+ + done + +lemma the_ctes_makeObject: + "fst (the (tcb_cte_cases n)) makeObject + = (if tcb_cte_cases n = None + then fst (the None :: (Structures_H.tcb \ cte) \ ((cte \ cte) \ Structures_H.tcb \ Structures_H.tcb)) + (makeObject :: tcb) + else makeObject)" + apply (simp add: makeObject_tcb) + apply (clarsimp simp: tcb_cte_cases_def) + done + +lemma cte_wp_at_obj_cases_mask: + "cte_wp_at' P p s = + (obj_at' P p s \ + (p && mask tcbBlockSizeBits \ dom tcb_cte_cases + \ obj_at' (P \ fst (the (tcb_cte_cases (p && mask tcbBlockSizeBits)))) + (p && ~~ mask tcbBlockSizeBits) s))" + apply (simp add: cte_wp_at_obj_cases') + apply (rule arg_cong [where f="\x. F \ x" for F]) + apply (rule iffI) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (frule(1) tcb_cte_cases_aligned_helpers) + apply fastforce + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (rule bexI[where x="p && mask tcbBlockSizeBits"]) + apply (clarsimp simp: subtract_mask) + apply fastforce + done + +lemma ps_clearD: + "\ ps_clear x n s; ksPSpace s y = Some v; x < y; y \ x + 2 ^ n - 1 \ \ False" + apply (clarsimp simp: ps_clear_def) + apply (drule_tac a=y in equals0D) + apply (simp add: dom_def mask_def add_diff_eq) + apply fastforce + done + +lemma cte_wp_at_retype': + assumes ko: "makeObjectKO dev tp = Some obj" + and pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + and pn: "\x \ set addrs. ksPSpace s x = None" + shows + "cte_wp_at' P p (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s) + = (if tp = Inr (APIObjectType ArchTypes_H.CapTableObject) \ p \ set addrs + \ tp = Inr (APIObjectType ArchTypes_H.TCBObject) + \ (p && ~~ mask tcbBlockSizeBits \ set addrs) \ (p && mask tcbBlockSizeBits \ dom tcb_cte_cases) + then P (CTE NullCap nullMDBNode) + else cte_wp_at' P p s)" + (is "cte_wp_at' P p ?s' = ?Q") + apply (subgoal_tac "\p \ set addrs. \(P :: cte \ bool). \ obj_at' P p s") + apply (subgoal_tac "\p \ set addrs. \(P :: tcb \ bool). \ obj_at' P p s") + apply (subgoal_tac "(\P :: cte \ bool. obj_at' P p ?s') + \ (\ (\P :: tcb \ bool. 
obj_at' P (p && ~~ mask tcbBlockSizeBits) ?s'))") + apply (simp only: cte_wp_at_obj_cases_mask foldr_update_obj_at'[OF pv pv' al]) + apply (simp add: the_ctes_makeObject makeObjectKO_eq [OF ko] makeObject_cte dom_def + split del: if_split + cong: if_cong) + apply (insert al ko) + apply (simp, safe, simp_all) + apply fastforce + apply fastforce + apply (clarsimp elim!: obj_atE' simp: objBits_simps) + apply (drule ps_clearD[where y=p and n=tcbBlockSizeBits]) + apply simp + apply (rule order_trans_rules(17)) + apply (clarsimp cong: if_cong) + apply (rule word_and_le2) + apply (simp add: word_neg_and_le[simplified field_simps]) + apply simp + apply (clarsimp elim!: obj_atE' simp: pn) + apply (clarsimp elim!: obj_atE' simp: pn) + done + +lemma ctes_of_retype: + assumes ko: "makeObjectKO dev tp = Some obj" + and pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + and pn: "\x \ set addrs. ksPSpace s x = None" + shows + "map_to_ctes (\ xa. if xa \ set addrs then Some obj else ksPSpace s xa) + = (\x. if tp = Inr (APIObjectType ArchTypes_H.CapTableObject) \ x \ set addrs + \ tp = Inr (APIObjectType ArchTypes_H.TCBObject) + \ (x && ~~ mask tcbBlockSizeBits \ set addrs) \ (x && mask tcbBlockSizeBits \ dom tcb_cte_cases) + then Some (CTE NullCap nullMDBNode) + else map_to_ctes (ksPSpace s) x)" + (is "map_to_ctes ?ps' = ?map'") + using cte_wp_at_retype' [where P="(=) cte" for cte, OF ko pv pv' al pn] + arg_cong [where f=Not, OF cte_wp_at_retype' [OF ko pv pv' al pn, where P="\"]] + apply (simp(no_asm_use) add: cte_wp_at_ctes_of cong: if_cong) + apply (rule ext) + apply (case_tac "map_to_ctes ?ps' x") + apply (simp(no_asm_simp)) + apply (simp split: if_split_asm) + apply simp + done + +lemma None_ctes_of_cte_at: + "(None = ctes_of s x) = (\ cte_at' x s)" + by (fastforce simp add: cte_wp_at_ctes_of) + +lemma null_filter_ctes_retype: + assumes ko: "makeObjectKO dev tp = Some obj" + and pv: "pspace_aligned' s" "pspace_distinct' s" + and pv': "pspace_aligned' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + "pspace_distinct' (ksPSpace_update (\x xa. if xa \ set addrs then Some obj else ksPSpace s xa) s)" + and al: "\x \ set addrs. is_aligned x (objBitsKO obj)" + and pn: "\x \ set addrs. ksPSpace s x = None" + shows + "null_filter' (map_to_ctes (foldr (\addr. 
data_map_insert addr obj) addrs (ksPSpace s))) + = null_filter' (map_to_ctes (ksPSpace s))" + apply (subst foldr_upd_app_if[folded data_map_insert_def]) + apply (subst ctes_of_retype[OF ko pv pv' al pn]) + apply (rule ext) + apply (clarsimp simp: null_filter'_def None_ctes_of_cte_at) + apply (intro conjI impI notI) + apply (elim cte_wp_atE' disjE conjE) + apply (simp_all add: pn) + apply (cut_tac x="ptr'" and v="if ptr' \ set addrs then obj else KOTCB tcb" + in pspace_distinctD'[OF _ pv'(2)])[1] + apply simp + apply (insert ko[symmetric], + simp add: makeObjectKO_def objBits_simps pn + split: if_split_asm)[1] + apply (drule(2) tcb_ctes_clear[where s="ksPSpace_update f s" for f s]) + apply simp + apply fastforce + apply (cut_tac x="x && ~~ mask tcbBlockSizeBits" in pspace_distinctD'[OF _ pv'(2)])[1] + apply simp + apply (elim cte_wp_atE' disjE conjE) + apply (insert ko[symmetric], simp add: makeObjectKO_def objBits_simps) + apply clarsimp + apply (subst(asm) subtract_mask[symmetric], + erule_tac v="if x \ set addrs then KOTCB makeObject else KOCTE cte" + in tcb_space_clear) + apply (simp add: is_aligned_mask word_bw_assocs) + apply assumption + apply simp + apply simp + apply (simp add: pn) + apply (clarsimp simp: makeObjectKO_def) + apply (drule(1) tcb_cte_cases_aligned_helpers) + apply (clarsimp simp: pn) + done + +lemma new_cap_addrs_aligned: + "\ is_aligned ptr (objBitsKO ko) \ + \ \x \ set (new_cap_addrs n ptr ko). is_aligned x (objBitsKO ko)" + apply (clarsimp simp: new_cap_addrs_def) + apply (erule aligned_add_aligned[OF _ is_aligned_shift]) + apply simp + done + +lemma new_cap_addrs_distinct: + assumes cover: "range_cover ptr sz (objBitsKO ko) n" + shows "distinct (new_cap_addrs n ptr ko)" + unfolding new_cap_addrs_def + apply (simp add: distinct_map) + apply (rule comp_inj_on[where f=of_nat, unfolded o_def]) + apply (rule subset_inj_on) + apply (rule word_unat.Abs_inj_on) + apply (clarsimp simp only: unats_def atLeastLessThan_iff + dest!: less_two_pow_divD) + apply (insert cover) + apply (erule less_le_trans) + apply (insert range_cover.range_cover_n_le[OF cover]) + apply (erule le_trans) + apply (cases "objBitsKO ko = 0") + apply (simp add:word_bits_def) + apply (rule less_imp_le) + apply (rule power_strict_increasing) + apply (simp add:word_bits_def) + apply simp + apply (rule inj_onI) + apply clarsimp + apply (drule arg_cong[where f="\x. x >> objBitsKO ko"]) + apply (cases "objBitsKO ko = 0") + apply simp + apply (subst(asm) shiftl_shiftr_id, simp add: range_cover_def) + apply (subst word_unat_power, rule of_nat_mono_maybe) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def) + apply simp + apply (erule order_less_le_trans) + apply simp + apply (subst(asm) shiftl_shiftr_id, simp add: range_cover_def) + apply (subst word_unat_power, rule of_nat_mono_maybe) + apply (rule power_strict_increasing) + apply (simp add: word_bits_def) + apply simp + apply (erule order_less_le_trans) + apply simp + apply assumption + done + +lemma new_cap_addrs_subset: + assumes range_cover:"range_cover ptr sz (objBitsKO ko) n" + shows "set (new_cap_addrs n ptr ko) \ {ptr .. 
ptr_add (ptr && ~~ mask sz) (2 ^ sz - 1)}" + apply (clarsimp simp add: new_cap_addrs_def shiftl_t2n + field_simps + dest!: less_two_pow_divD) + apply (intro conjI) + apply (insert range_cover) + apply (rule machine_word_plus_mono_right_split[OF range_cover.range_cover_compare]) + apply assumption + apply simp + apply (simp add:range_cover_def word_bits_def) + apply (clarsimp simp:ptr_add_def) + apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz"]) + apply (subst add.commute) + apply (subst add.assoc) + apply (rule word_plus_mono_right) + apply (drule(1) range_cover.range_cover_compare) + apply (rule iffD1[OF le_m1_iff_lt,THEN iffD2]) + using range_cover + apply (simp add: p2_gt_0 range_cover_def word_bits_def) + apply (rule iffD2[OF word_less_nat_alt]) + apply (rule le_less_trans[OF unat_plus_gt]) + using range_cover + apply (clarsimp simp: range_cover_def) + apply (insert range_cover) + apply (rule is_aligned_no_wrap'[OF is_aligned_neg_mask,OF le_refl ]) + apply (simp add:range_cover_def)+ +done + +definition + obj_relation_retype :: "Structures_A.kernel_object \ + Structures_H.kernel_object \ bool" +where + "obj_relation_retype ko ko' \ + obj_bits ko \ objBitsKO ko' + \ (\p. fst ` obj_relation_cuts ko p + = {p + x * 2 ^ (objBitsKO ko') | x. x < 2 ^ (obj_bits ko - objBitsKO ko')} + \ (\x \ obj_relation_cuts ko p. snd x ko ko'))" + +lemma obj_relation_retype_cutsD: + "\ (x, P) \ obj_relation_cuts ko p; obj_relation_retype ko ko' \ + \ \y. x = p + y * 2 ^ (objBitsKO ko') \ y < 2 ^ (obj_bits ko - objBitsKO ko') + \ P ko ko'" + apply (clarsimp simp: obj_relation_retype_def) + apply (drule spec[where x=p]) + apply clarsimp + apply (drule(1) bspec) + apply (drule arg_cong[where f="\S. x \ S"]) + apply clarsimp + apply (fastforce simp: image_def) + done + +lemma APIType_map2_Untyped[simp]: + "(APIType_map2 tp = Structures_A.Untyped) + = (tp = Inr (APIObjectType ArchTypes_H.Untyped))" + by (simp add: APIType_map2_def + split: sum.split object_type.split kernel_object.split arch_kernel_object.splits + apiobject_type.split) + +lemma obj_relation_retype_leD: + "\ obj_relation_retype ko ko' \ + \ objBitsKO ko' \ obj_bits ko" + by (simp add: obj_relation_retype_def) + +lemma obj_relation_retype_default_leD: + "\ obj_relation_retype (default_object (APIType_map2 ty) dev us) ko; + ty \ Inr (APIObjectType ArchTypes_H.Untyped) \ + \ objBitsKO ko \ obj_bits_api (APIType_map2 ty) us" + by (simp add: obj_relation_retype_def objBits_def obj_bits_dev_irr) + +lemma makeObjectKO_Untyped: + "makeObjectKO dev ty = Some v \ ty \ Inr (APIObjectType ArchTypes_H.Untyped)" + by (clarsimp simp: makeObjectKO_def) + +lemma obj_relation_cuts_trivial: + "ptr \ fst ` obj_relation_cuts ty ptr" + apply (case_tac ty) + apply (rename_tac sz cs) + apply (clarsimp simp:image_def cte_map_def well_formed_cnode_n_def) + apply (rule_tac x = "replicate sz False" in exI) + apply clarsimp+ + apply (rename_tac arch_kernel_obj) + apply (case_tac arch_kernel_obj; simp add: image_def pageBits_def) + apply (rule exI, rule_tac x=0 in bexI, simp, simp) + apply (rule_tac x=0 in exI, simp) + apply (rule p2_gt_0[THEN iffD2]) + apply (rename_tac vmpage_size) + apply (case_tac vmpage_size; clarsimp simp:pageBitsForSize_def bit_simps) + done + +lemma obj_relation_retype_addrs_eq: + assumes not_unt:"ty \ Inr (APIObjectType ArchTypes_H.Untyped)" + assumes amp: "m = 2^ ((obj_bits_api (APIType_map2 ty) us) - (objBitsKO ko)) * n" + assumes orr: "obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + shows "\ range_cover ptr sz 
(obj_bits_api (APIType_map2 ty) us) n \ \ + (\x \ set (retype_addrs ptr (APIType_map2 ty) n us). + fst ` obj_relation_cuts (default_object (APIType_map2 ty) dev us) x) + = set (new_cap_addrs m ptr ko)" + apply (rule set_eqI, rule iffI) + apply (clarsimp simp: retype_addrs_def) + apply (rename_tac p a b) + apply (drule obj_relation_retype_cutsD[OF _ orr]) + apply (cut_tac obj_relation_retype_default_leD[OF orr not_unt]) + apply (clarsimp simp: new_cap_addrs_def image_def + dest!: less_two_pow_divD) + apply (rule_tac x="p * 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) + unat y" + in rev_bexI) + apply (simp add: amp obj_bits_api_default_object not_unt obj_bits_dev_irr) + apply (rule less_le_trans[OF nat_add_left_cancel_less[THEN iffD2]]) + apply (erule unat_mono) + apply (subst unat_power_lower) + apply (rule le_less_trans[OF diff_le_self]) + apply (clarsimp simp: range_cover_def + split: Structures_A.apiobject_type.splits) + apply (simp add:field_simps,subst mult_Suc[symmetric]) + apply (rule mult_le_mono1) + apply simp + apply (simp add: ptr_add_def shiftl_t2n field_simps + objBits_def[symmetric] word_unat_power[symmetric]) + apply (simp add: power_add[symmetric]) + apply (clarsimp simp: new_cap_addrs_def retype_addrs_def + dest!: less_two_pow_divD) + apply (rename_tac p) + apply (cut_tac obj_relation_retype_default_leD[OF orr not_unt]) + apply (cut_tac obj_relation_retype_leD[OF orr]) + apply (case_tac "n = 0") + apply (simp add:amp) + apply (case_tac "p = 0") + apply simp + apply (rule_tac x = 0 in rev_bexI) + apply simp+ + apply (rule obj_relation_cuts_trivial) + apply (rule_tac x="p div (2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko))" + in rev_bexI) + apply (simp add:amp) + apply (rule td_gal_lt[THEN iffD1]) + apply (simp add:field_simps)+ + using orr + apply (clarsimp simp: obj_relation_retype_def ptr_add_def) + apply (thin_tac "\x. 
P x" for P) + apply (rule_tac x="of_nat (p mod (2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko)))" in exI) + apply (simp only: word_unat_power Abs_fnat_homs shiftl_t2n) + apply (rule conjI) + apply (rule arg_cong[where f=of_nat]) + apply (subst mult_div_rearrange) + apply simp + apply (subst minus_mod_eq_mult_div[symmetric]) + apply (simp add:diff_mult_distrib2) + apply (rule of_nat_mono_maybe) + apply (rule power_strict_increasing) + apply (rule le_less_trans[OF diff_le_self]) + apply (clarsimp simp: range_cover_def obj_bits_api_default_object obj_bits_dev_irr + not_unt word_bits_def)+ +done + +lemma objBits_le_obj_bits_api: + "makeObjectKO dev ty = Some ko \ + objBitsKO ko \ obj_bits_api (APIType_map2 ty) us" + apply (case_tac ty) + apply (auto simp: default_arch_object_def bit_simps + makeObjectKO_def objBits_simps' APIType_map2_def obj_bits_api_def slot_bits_def + split: Structures_H.kernel_object.splits arch_kernel_object.splits object_type.splits + Structures_H.kernel_object.splits arch_kernel_object.splits apiobject_type.splits) + done + + +lemma obj_relation_retype_other_obj: + "\ is_other_obj_relation_type (a_type ko); other_obj_relation ko ko' \ + \ obj_relation_retype ko ko'" + apply (simp add: obj_relation_retype_def) + apply (subgoal_tac "objBitsKO ko' = obj_bits ko") + apply (clarsimp simp: is_other_obj_relation_type) + apply (fastforce simp: other_obj_relation_def objBits_simps' + split: Structures_A.kernel_object.split_asm + Structures_H.kernel_object.split_asm + Structures_H.kernel_object.split + arch_kernel_obj.split_asm arch_kernel_object.split) + done + +lemma retype_pspace_relation: + assumes sr: "pspace_relation (kheap s) (ksPSpace s')" + and vs: "valid_pspace s" "valid_mdb s" + and vs': "pspace_aligned' s'" "pspace_distinct' s'" + and pn: "pspace_no_overlap_range_cover ptr sz s" + and pn': "pspace_no_overlap' ptr sz s'" + and ko: "makeObjectKO dev ty = Some ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + and orr: "obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + and num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "pspace_relation (foldr (\p ps. ps(p \ default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)) + (foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s'))" + (is "pspace_relation ?ps ?ps'") + unfolding pspace_relation_def +proof + have not_unt: "ty \ Inr (APIObjectType ArchTypes_H.Untyped)" + by (rule makeObjectKO_Untyped[OF ko]) + + have dom_not_ra: + "\x \ dom (kheap s). x \ set (retype_addrs ptr (APIType_map2 ty) n us)" + apply clarsimp + apply (erule(1) pspace_no_overlapC[OF pn _ _ cover vs(1)]) + done + + hence dom_Int_ra: + "set (retype_addrs ptr (APIType_map2 ty) n us) \ dom (kheap s) = {}" + by auto + + note pdom = pspace_dom_upd [OF dom_Int_ra, where ko="default_object (APIType_map2 ty) dev us"] + + have pdom': "dom ?ps' = dom (ksPSpace s') \ set (new_cap_addrs m ptr ko)" + by (clarsimp simp add: foldr_upd_app_if[folded data_map_insert_def] + dom_if_Some Un_commute + split del: if_split) + + note not_unt = makeObjectKO_Untyped [OF ko] + + have "pspace_dom (kheap s) = dom (ksPSpace s')" + using sr by (simp add: pspace_relation_def) + + thus "pspace_dom ?ps = dom ?ps'" + apply (simp add: pdom pdom') + apply (rule arg_cong[where f="\T. S \ T" for S]) + apply (rule obj_relation_retype_addrs_eq[OF not_unt num_r orr cover]) + done + + have dom_same: + "\x v. 
kheap s x = Some v \ ?ps x = Some v" + apply (frule bspec [OF dom_not_ra, OF domI]) + apply (simp add: foldr_upd_app_if) + done + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + have dom_same': + "\x v. ksPSpace s' x = Some v \ ?ps' x = Some v" + apply (clarsimp simp:foldr_upd_app_if[folded data_map_insert_def]) + apply (drule domI[where m = "ksPSpace s'"]) + apply (drule(1) IntI) + apply (erule_tac A = "A \ B" for A B in in_emptyE[rotated]) + apply (rule disjoint_subset[OF new_cap_addrs_subset[OF cover']]) + apply (clarsimp simp:ptr_add_def field_simps) + apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn']) + done + + show "\x \ dom ?ps. \(y, P) \ obj_relation_cuts (the (?ps x)) x. + P (the (?ps x)) (the (?ps' y))" + using sr + apply (clarsimp simp: pspace_relation_def) + apply (simp add: foldr_upd_app_if split: if_split_asm) + apply (clarsimp simp: foldr_upd_app_if[folded data_map_insert_def]) + apply (rule conjI) + apply (drule obj_relation_retype_cutsD [OF _ orr], clarsimp) + apply (rule impI, erule notE) + apply (simp add: obj_relation_retype_addrs_eq[OF not_unt num_r orr cover,symmetric]) + apply (erule rev_bexI) + apply (simp add: image_def) + apply (erule rev_bexI, simp) + apply (drule bspec, erule domI) + apply clarsimp + apply (drule(1) bspec, simp) + apply (subgoal_tac "a \ pspace_dom (kheap s)") + apply clarsimp + apply (frule dom_same', simp) + apply (simp(no_asm) add: pspace_dom_def) + apply (rule rev_bexI, erule domI) + apply (simp add: image_def) + apply (erule rev_bexI, simp) + done +qed + + +(*Clagged from Retype_AC*) +lemma foldr_upd_app_if': "foldr (\p ps. ps(p := f p)) as g = (\x. if x \ set as then (f x) else g x)" + apply (induct as) + apply simp + apply simp + apply (rule ext) + apply simp + done + +lemma etcb_rel_makeObject: "etcb_relation default_etcb makeObject" + apply (simp add: etcb_relation_def default_etcb_def) + apply (simp add: makeObject_tcb default_priority_def default_domain_def) + done + + +lemma ekh_at_tcb_at: "valid_etcbs_2 ekh kh \ ekh x = Some y \ \tcb. kh x = Some (TCB tcb)" + apply (simp add: valid_etcbs_2_def + st_tcb_at_kh_def obj_at_kh_def + is_etcb_at'_def obj_at_def) + apply force + done + +lemma default_etcb_default_domain_futz [simp]: + "default_etcb\tcb_domain := default_domain\ = default_etcb" +unfolding default_etcb_def by simp + +lemma retype_ekheap_relation: + assumes sr: "ekheap_relation (ekheap s) (ksPSpace s')" + and sr': "pspace_relation (kheap s) (ksPSpace s')" + and vs: "valid_pspace s" "valid_mdb s" + and et: "valid_etcbs s" + and vs': "pspace_aligned' s'" "pspace_distinct' s'" + and pn: "pspace_no_overlap_range_cover ptr sz s" + and pn': "pspace_no_overlap' ptr sz s'" + and ko: "makeObjectKO dev ty = Some ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + and orr: "obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + and num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "ekheap_relation (foldr (\p ps. ps(p := default_ext (APIType_map2 ty) default_domain)) + (retype_addrs ptr (APIType_map2 ty) n us) (ekheap s)) + (foldr (\addr. 
data_map_insert addr ko) (new_cap_addrs m ptr ko) (ksPSpace s'))" + (is "ekheap_relation ?ps ?ps'") + proof - + have not_unt: "ty \ Inr (APIObjectType ArchTypes_H.Untyped)" + by (rule makeObjectKO_Untyped[OF ko]) + show ?thesis + apply (case_tac "ty \ Inr (APIObjectType apiobject_type.TCBObject)") + apply (insert ko) + apply (cut_tac retype_pspace_relation[OF sr' vs vs' pn pn' ko cover orr num_r]) + apply (simp add: foldr_upd_app_if' foldr_upd_app_if[folded data_map_insert_def]) + apply (simp add: obj_relation_retype_addrs_eq[OF not_unt num_r orr cover,symmetric]) + apply (insert sr) + apply (clarsimp simp add: ekheap_relation_def + pspace_relation_def default_ext_def cong: if_cong + split: if_split_asm) + subgoal by (clarsimp simp add: makeObjectKO_def APIType_map2_def cong: if_cong + split: sum.splits Structures_H.kernel_object.splits + arch_kernel_object.splits AARCH64_H.object_type.splits apiobject_type.splits) + + apply (frule ekh_at_tcb_at[OF et]) + apply (intro impI conjI) + apply clarsimp + apply (drule_tac x=a in bspec,force) + apply (clarsimp simp add: other_obj_relation_def split: if_split_asm) + apply (case_tac ko,simp_all) + apply (clarsimp simp add: makeObjectKO_def cong: if_cong split: sum.splits Structures_H.kernel_object.splits + arch_kernel_object.splits AARCH64_H.object_type.splits + apiobject_type.splits if_split_asm) + apply (drule_tac x=xa in bspec,simp) + subgoal by force + subgoal by force + apply (simp add: foldr_upd_app_if' foldr_upd_app_if[folded data_map_insert_def]) + apply (simp add: obj_relation_retype_addrs_eq[OF not_unt num_r orr cover,symmetric]) + apply (clarsimp simp add: APIType_map2_def default_ext_def ekheap_relation_def + default_object_def makeObjectKO_def etcb_rel_makeObject + cong: if_cong + split: if_split_asm) + apply force + done +qed + +lemma pspace_no_overlapD': + "\ ksPSpace s x = Some ko; pspace_no_overlap' p bits s \ + \ {x .. x + 2 ^ objBitsKO ko - 1} \ {p .. (p && ~~ mask bits) + 2 ^ bits - 1} = {}" + by (simp add:pspace_no_overlap'_def mask_def add_diff_eq) + +lemma new_range_subset: + assumes + cover: "range_cover ptr sz (objBitsKO ko) n" + and addr: "x \ set (new_cap_addrs n ptr ko)" + shows "mask_range x (objBitsKO ko) \ {ptr .. 
(ptr && ~~ mask sz) + 2 ^ sz - 1}" + (is "?lhs \ ?rhs") +proof - + have base_in: "x \ {ptr..ptr_add (ptr && ~~ mask sz) (2 ^ sz - 1)}" + by (rule set_mp[OF new_cap_addrs_subset[OF cover] addr]) + have aligned: "is_aligned x (objBitsKO ko)" + apply (insert cover) + apply (clarsimp simp:range_cover_def) + apply (drule new_cap_addrs_aligned) + apply (erule bspec[OF _ addr]) + done + show ?thesis using base_in aligned addr + apply (intro range_subsetI) + apply (clarsimp simp:ptr_add_def field_simps)+ + apply (simp add:x_power_minus_1) + apply (clarsimp simp:new_cap_addrs_def) + apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz"]) + apply (subst add.commute) + apply (subst add.assoc) + apply (subst add.assoc) + apply (rule word_plus_mono_right) + apply (simp add:mask_2pm1[symmetric]) + apply (rule iffD2[OF shiftr_mask_cmp[where c = "objBitsKO ko"]]) + apply (insert cover) + apply (simp add:range_cover_def) + apply (simp add:range_cover_def word_bits_def) + apply (subst aligned_shift') + apply (simp add:mask_lt_2pn range_cover_def word_bits_def ) + apply (drule is_aligned_addD1) + apply (simp add:range_cover_def) + apply (rule aligned_add_aligned) + apply (rule aligned_already_mask) + apply (fastforce simp:range_cover_def) + apply (simp_all add: range_cover_def)[3] + apply (subst shiftr_mask2[symmetric]) + apply (simp add:range_cover_def word_bits_def) + apply (rule le_shiftr) + apply (subst le_mask_iff_lt_2n[THEN iffD1]) + apply (simp add:range_cover_def word_bits_def) + apply (clarsimp simp:word_less_nat_alt) + apply (rule le_less_trans[OF unat_plus_gt]) + apply (frule(1) range_cover.range_cover_compare) + apply (clarsimp simp:shiftl_t2n mult.commute range_cover_def) + apply (rule is_aligned_no_wrap'[OF is_aligned_neg_mask]) + apply (rule le_refl) + apply (simp add:range_cover_def) + done +qed + +lemma retype_aligned_distinct': + assumes vs': "pspace_aligned' s'" "pspace_distinct' s'" + and pn': "pspace_no_overlap' ptr sz s'" + and cover: "range_cover ptr sz (objBitsKO ko) n " + shows + "pspace_distinct' (s' \ksPSpace := foldr (\addr. data_map_insert addr ko) + (new_cap_addrs n ptr ko) (ksPSpace s')\)" + "pspace_aligned' (s' \ksPSpace := foldr (\addr. data_map_insert addr ko) + (new_cap_addrs n ptr ko) (ksPSpace s')\)" + (is "pspace_aligned' (s'\ksPSpace := ?ps\)") +proof - + have al: "is_aligned ptr (objBitsKO ko)" + using cover + by (simp add:cover range_cover_def) + let ?s' = "s'\ksPSpace := ?ps\" + note nc_al = bspec [OF new_cap_addrs_aligned [OF al]] + note nc_al' = nc_al[unfolded objBits_def] + + show pa': "pspace_aligned' ?s'" using vs'(1) + apply (subst foldr_upd_app_if[folded data_map_insert_def]) + apply (clarsimp simp add: pspace_aligned'_def nc_al' + split: if_split_asm) + apply (drule bspec, erule domI, simp) + done + + have okov: "objBitsKO ko < word_bits" + by (simp add: objBits_def) + + have new_range_disjoint: + "\x. x \ set (new_cap_addrs n ptr ko) \ + ({x .. 
x + 2 ^ (objBitsKO ko) - 1} - {x}) \ set (new_cap_addrs n ptr ko) = {}"
+    apply safe
+    apply (rule ccontr)
+    apply (frule(2) aligned_neq_into_no_overlap [OF _ nc_al nc_al])
+    apply (drule_tac a=xa in equals0D)
+    apply (clarsimp simp: field_simps is_aligned_no_overflow [OF nc_al])
+    done
+  note new_range_sub = new_range_subset [OF cover]
+
+  show pd': "pspace_distinct' ?s'" using vs'(2)
+    apply (subst foldr_upd_app_if[folded data_map_insert_def])
+    apply (simp add: pspace_distinct'_def dom_if_Some ball_Un)
+    apply (intro conjI ballI impI)
+      apply (simp add: ps_clear_def dom_if_Some Int_Un_distrib mask_def add_diff_eq
+                       objBits_def[symmetric])
+      apply (rule conjI)
+       apply (erule new_range_disjoint)
+       apply (rule disjoint_subset[OF Diff_subset])
+       apply (simp only: add_mask_fold)
+      apply (erule disjoint_subset[OF new_range_sub])
+      apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn'])
+     apply (clarsimp simp add: ps_clear_def dom_if_Some Int_Un_distrib mask_def add_diff_eq)
+     apply (rule conjI)
+      apply (erule new_range_disjoint)
+      apply (rule disjoint_subset[OF Diff_subset])
+      apply (simp only: add_mask_fold)
+     apply (erule disjoint_subset[OF new_range_sub])
+     apply (rule pspace_no_overlap_disjoint'[OF vs'(1) pn'])
+    apply (clarsimp simp add: ps_clear_def dom_if_Some Int_Un_distrib)
+    apply (subst Int_commute)
+    apply (rule disjoint_subset[OF new_cap_addrs_subset,OF cover])
+    apply (subst Int_commute)
+    apply (simp add:ptr_add_def field_simps)
+    apply (rule disjoint_subset[OF Diff_subset])
+    apply (drule pspace_no_overlapD' [OF _ pn'])
+    apply (simp add: mask_def add_diff_eq)
+    done
+qed
+
+definition
+  update_gs :: "Structures_A.apiobject_type \ nat \ machine_word set
+                \ 'a kernel_state_scheme \ 'a kernel_state_scheme"
+where
+  "update_gs ty us ptrs \
+     case ty of
+       Structures_A.CapTableObject \ gsCNodes_update
+         (\cns x. if x \ ptrs then Some us else cns x)
+     | ArchObject SmallPageObj \ gsUserPages_update
+         (\ups x. if x \ ptrs then Some ARMSmallPage else ups x)
+     | ArchObject LargePageObj \ gsUserPages_update
+         (\ups x. if x \ ptrs then Some ARMLargePage else ups x)
+     | ArchObject HugePageObj \ gsUserPages_update
+         (\ups x. if x \ ptrs then Some ARMHugePage else ups x)
+     | ArchObject PageTableObj \ ksArchState_update
+         (\as. gsPTTypes_update (\pt_types x. if x \ ptrs then Some NormalPT_T else pt_types x) as)
+     | ArchObject VSpaceObj \ ksArchState_update
+         (\as. gsPTTypes_update (\pt_types x. if x \ ptrs then Some VSRootPT_T else pt_types x) as)
+     | _ \ id"
+
+lemma ksPSpace_update_gs_eq[simp]:
+  "ksPSpace (update_gs ty us ptrs s) = ksPSpace s"
+  by (simp add: update_gs_def
+          split: Structures_A.apiobject_type.splits aobject_type.splits)
+
+end
+
+global_interpretation update_gs: PSpace_update_eq "update_gs ty us ptrs"
+  by (simp add: PSpace_update_eq_def)
+
+context begin interpretation Arch .
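+(* update_gs above only changes ghost state (gsCNodes, gsUserPages, gsPTTypes); the lemmas
+   that follow record that it leaves ksMachineState unchanged, commutes with ksMachineState
+   updates, is the identity on no_gs_types, and unfolds to the per-type updates. *)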
(*FIXME: arch_split*) + +lemma ksMachineState_update_gs[simp]: + "ksMachineState (update_gs tp us addrs s) = ksMachineState s" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + +lemma update_gs_ksMachineState_update_swap: + "update_gs tp us addrs (ksMachineState_update f s) = + ksMachineState_update f (update_gs tp us addrs s)" + by (simp add: update_gs_def + split: aobject_type.splits Structures_A.apiobject_type.splits) + +lemma update_gs_id: + "tp \ no_gs_types \ update_gs tp us addrs = id" + by (simp add: no_gs_types_def update_gs_def + split: Structures_A.apiobject_type.splits aobject_type.splits) + +lemma update_gs_simps[simp]: + "update_gs Structures_A.apiobject_type.CapTableObject us ptrs = + gsCNodes_update (\cns x. if x \ ptrs then Some us else cns x)" + "update_gs (ArchObject SmallPageObj) us ptrs = + gsUserPages_update (\ups x. if x \ ptrs then Some ARMSmallPage else ups x)" + "update_gs (ArchObject LargePageObj) us ptrs = + gsUserPages_update (\ups x. if x \ ptrs then Some ARMLargePage else ups x)" + "update_gs (ArchObject HugePageObj) us ptrs = + gsUserPages_update (\ups x. if x \ ptrs then Some ARMHugePage else ups x)" + "update_gs (ArchObject PageTableObj) us ptrs = ksArchState_update + (\as. gsPTTypes_update (\pt_types x. if x \ ptrs then Some NormalPT_T else pt_types x) as)" + "update_gs (ArchObject VSpaceObj) us ptrs = ksArchState_update + (\as. gsPTTypes_update (\pt_types x. if x \ ptrs then Some VSRootPT_T else pt_types x) as)" + by (simp_all add: update_gs_def) + +lemma retype_state_relation: + notes data_map_insert_def[simp del] + assumes sr: "(s, s') \ state_relation" + and vs: "valid_pspace s" "valid_mdb s" + and et: "valid_etcbs s" "valid_list s" + and vs': "pspace_aligned' s'" "pspace_distinct' s'" + and pn: "pspace_no_overlap_range_cover ptr sz s" + and pn': "pspace_no_overlap' ptr sz s'" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + and ko: "makeObjectKO dev ty = Some ko" + and api: "obj_bits_api (APIType_map2 ty) us \ sz" + and orr: "obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + and num_r: "m = 2 ^ (obj_bits_api (APIType_map2 ty) us - objBitsKO ko) * n" + shows + "(ekheap_update + (\_. foldr (\p ekh a. if a = p then default_ext (APIType_map2 ty) default_domain else ekh a) + (retype_addrs ptr (APIType_map2 ty) n us) (ekheap s)) + s + \kheap := + foldr (\p. data_map_insert p (default_object (APIType_map2 ty) dev us)) + (retype_addrs ptr (APIType_map2 ty) n us) (kheap s)\, + update_gs (APIType_map2 ty) us (set (retype_addrs ptr (APIType_map2 ty) n us)) + (s'\ksPSpace := + foldr (\addr. data_map_insert addr ko) (new_cap_addrs m ptr ko) + (ksPSpace s')\)) + \ state_relation" + (is "(ekheap_update (\_. 
?eps) s\kheap := ?ps\, update_gs _ _ _ (s'\ksPSpace := ?ps'\)) + \ state_relation") + proof (rule state_relation_null_filterE[OF sr refl _ _ _ _ _ _ _ vs'], simp_all add: trans_state_update[symmetric] del: trans_state_update) + + have cover':"range_cover ptr sz (objBitsKO ko) m" + by (rule range_cover_rel[OF cover objBits_le_obj_bits_api[OF ko] num_r]) + have al':"is_aligned ptr (objBitsKO ko)" + using cover' + by (simp add:range_cover_def) + have sz:"sz < word_bits" + using cover' + by (simp add:range_cover_def word_bits_def) + let ?t = "s\kheap := ?ps\" + let ?tp = "APIType_map2 ty" + let ?al = "retype_addrs ptr ?tp n us" + let ?t' = "update_gs ?tp us (set ?al) (s'\ksPSpace := ?ps'\)" + + note pad' = retype_aligned_distinct' [OF vs' pn' cover'] + thus pa': "pspace_aligned' (s'\ksPSpace := ?ps'\)" + and pd': "pspace_distinct' (s'\ksPSpace := ?ps'\)" + by simp_all + + note pa'' = pa'[simplified foldr_upd_app_if[folded data_map_insert_def]] + note pd'' = pd'[simplified foldr_upd_app_if[folded data_map_insert_def]] + + note not_unt = makeObjectKO_Untyped [OF ko] + show "null_filter (caps_of_state ?t) = null_filter (caps_of_state s)" + apply (rule null_filter_caps_of_state_foldr[folded data_map_insert_def]) + apply (simp add: not_unt) + apply (rule ballI) + apply (erule pspace_no_overlapD2 [OF pn _ cover vs(1)]) + done + + have nc_dis: "distinct (new_cap_addrs m ptr ko)" + by (rule new_cap_addrs_distinct [OF cover']) + + note nc_al = bspec [OF new_cap_addrs_aligned [OF al']] + note nc_al' = nc_al[unfolded objBits_def] + show "null_filter' (map_to_ctes ?ps') = null_filter' (ctes_of s')" + apply (rule null_filter_ctes_retype [OF ko vs' pa'' pd'']) + apply (simp add: nc_al) + apply clarsimp + apply (drule subsetD [OF new_cap_addrs_subset [OF cover']]) + apply (insert pspace_no_overlap_disjoint'[OF vs'(1) pn']) + apply (drule orthD1) + apply (simp add:ptr_add_def field_simps) + apply clarsimp + done + + show "valid_objs s" using vs + by (clarsimp simp: valid_pspace_def) + + show "valid_mdb s" using vs + by (clarsimp) + + show "valid_list s" using et + by (clarsimp) + + show "mdb_cte_at (swp (cte_wp_at ((\) cap.NullCap)) s) (cdt s)" using vs + by (clarsimp simp: valid_mdb_def) + + have pspr: "pspace_relation (kheap s) (ksPSpace s')" + using sr by (simp add: state_relation_def) + + thus "pspace_relation ?ps ?ps'" + by (rule retype_pspace_relation [OF _ vs vs' pn pn' ko cover orr num_r, + folded data_map_insert_def]) + + have "ekheap_relation (ekheap (s)) (ksPSpace s')" + using sr by (simp add: state_relation_def) + + thus "ekheap_relation ?eps ?ps'" + by (fold fun_upd_apply) (rule retype_ekheap_relation[OF _ pspr vs et(1) vs' pn pn' ko cover orr num_r]) + + have pn2: "\a\set ?al. 
kheap s a = None" + by (rule ccontr) (clarsimp simp: pspace_no_overlapD1[OF pn _ cover vs(1)]) + + from sr + have gr: "ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s'))" + by (rule state_relationE) + + show "ghost_relation ?ps (gsUserPages ?t') (gsCNodes ?t') (gsPTTypes (ksArchState ?t'))" + proof (cases ?tp) + case Untyped thus ?thesis by (simp add: not_unt) + next + note data_map_insert_def[simp] + + case TCBObject + from pn2 + have [simp]: "ups_of_heap ?ps = ups_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: ups_of_heap_def default_object_def TCBObject) + from pn2 + have [simp]: "cns_of_heap ?ps = cns_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: cns_of_heap_def default_object_def TCBObject) + from pn2 + have [simp]: "pt_types_of_heap ?ps = pt_types_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: pt_types_of_heap_def default_object_def TCBObject opt_map_def) + note data_map_insert_def[simp del] + from gr show ?thesis + by (simp add: ghost_relation_of_heap, simp add: TCBObject update_gs_def) + next + case EndpointObject + from pn2 + have [simp]: "ups_of_heap ?ps = ups_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: ups_of_heap_def default_object_def data_map_insert_def EndpointObject) + from pn2 + have [simp]: "cns_of_heap ?ps = cns_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: cns_of_heap_def default_object_def data_map_insert_def EndpointObject) + from pn2 + have [simp]: "pt_types_of_heap ?ps = pt_types_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: pt_types_of_heap_def default_object_def data_map_insert_def EndpointObject + opt_map_def) + from gr show ?thesis + by (simp add: ghost_relation_of_heap, + simp add: EndpointObject update_gs_def) + next + note data_map_insert_def[simp] + case NotificationObject + from pn2 + have [simp]: "ups_of_heap ?ps = ups_of_heap (kheap s)" + by - (rule ext, induct (?al), simp_all add: ups_of_heap_def + default_object_def NotificationObject) + from pn2 + have [simp]: "cns_of_heap ?ps = cns_of_heap (kheap s)" + by - (rule ext, induct (?al), simp_all add: cns_of_heap_def + default_object_def NotificationObject) + from pn2 + have [simp]: "pt_types_of_heap ?ps = pt_types_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: pt_types_of_heap_def default_object_def NotificationObject opt_map_def) + note data_map_insert_def[simp del] + from gr show ?thesis + by (simp add: ghost_relation_of_heap, + simp add: NotificationObject update_gs_def) + next + case CapTableObject + note data_map_insert_def[simp] + from pn2 + have [simp]: "ups_of_heap ?ps = ups_of_heap (kheap s)" + by - (rule ext, induct (?al), simp_all add: ups_of_heap_def + default_object_def CapTableObject) + have [simp]: "cns_of_heap ?ps = (\x. 
if x \ set ?al then Some us + else cns_of_heap (kheap s) x)" + by (rule ext, induct (?al), + simp_all add: cns_of_heap_def wf_empty_bits wf_unique default_object_def CapTableObject) + from pn2 + have [simp]: "pt_types_of_heap ?ps = pt_types_of_heap (kheap s)" + by - (rule ext, induct (?al), + simp_all add: pt_types_of_heap_def default_object_def CapTableObject opt_map_def) + note data_map_insert_def[simp del] + from gr show ?thesis + by (simp add: ghost_relation_of_heap, + simp add: CapTableObject update_gs_def ext) + next + case (ArchObject ao) + from pn2 + have [simp]: "cns_of_heap ?ps = cns_of_heap (kheap s)" + by - (rule ext, induct (?al), simp_all add: cns_of_heap_def data_map_insert_def + default_object_def ArchObject) + from gr + have [simp]: "gsPTTypes (ksArchState s') = pt_types_of_heap (kheap s)" + by (clarsimp simp add: ghost_relation_of_heap) + from pn2 ArchObject + have [simp]: "pt_types_of_heap ?ps = gsPTTypes (ksArchState ?t')" + apply - + apply (rule ext) + apply (induct (?al)) + apply (simp add: update_gs_def ArchObject split: aobject_type.splits) + apply (cases ao; + simp add: data_map_insert_def pt_types_of_heap_def default_object_def + default_arch_object_def opt_map_def update_gs_def) + done + from pn2 gr show ?thesis + apply (clarsimp simp add: ghost_relation_of_heap) + apply (rule conjI[rotated]) + apply (simp add: ArchObject update_gs_def split: aobject_type.splits) + apply (thin_tac "cns_of_heap h = g" for h g) + apply (drule sym) + apply (rule ext) + apply (induct (?al)) + apply (simp add: update_gs_def ArchObject split: aobject_type.splits) + apply (simp add: update_gs_def ArchObject default_object_def + default_arch_object_def ups_of_heap_def + data_map_insert_def + split: aobject_type.splits) + done + qed + + have [simp]: "\s. gsPTTypes_update (\_. gsPTTypes s) s = s" + by (case_tac s, simp) + + show "\f' g' h' pt_fn'. ?t' = + s'\ksPSpace := f' (ksPSpace s'), gsUserPages := g' (gsUserPages s'), + gsCNodes := h' (gsCNodes s'), + ksArchState := (ksArchState s') \gsPTTypes := pt_fn' (gsPTTypes (ksArchState s'))\\" + apply (clarsimp simp: update_gs_def + split: Structures_A.apiobject_type.splits) + apply (intro conjI impI) + apply (subst ex_comm, rule_tac x=id in exI, + subst ex_comm, rule_tac x=id in exI, + subst ex_comm, rule_tac x=id in exI, fastforce)+ + apply (subst ex_comm, rule_tac x=id in exI) + apply (subst ex_comm) + apply (rule_tac x="\cns x. if x\set ?al then Some us else cns x" in exI, + simp) + apply (subst ex_comm, rule_tac x=id in exI) + apply (rule_tac x="\x. foldr (\addr. data_map_insert addr ko) + (new_cap_addrs m ptr ko) x" in exI, simp) + apply clarsimp + apply (rule_tac x="\x. foldr (\addr. data_map_insert addr ko) + (new_cap_addrs m ptr ko) x" in exI) + apply (subst ex_comm, rule_tac x=id in exI) + apply (simp split: aobject_type.splits) + apply (intro conjI impI) + apply (subst ex_comm, rule_tac x=id in exI) + apply (rule_tac x="\cns x. if x \ set ?al then Some ARMSmallPage + else cns x" in exI, simp) + apply (subst ex_comm, rule_tac x=id in exI) + apply (rule_tac x="\cns x. if x \ set ?al then Some ARMLargePage + else cns x" in exI, simp) + apply (subst ex_comm, rule_tac x=id in exI) + apply (rule_tac x="\cns x. if x \ set ?al then Some ARMHugePage + else cns x" in exI, simp) + apply (rule_tac x=id in exI) + apply (rule_tac x="\pt_types x. if x \ set ?al then Some NormalPT_T + else pt_types x" in exI) + apply (cases s', rename_tac arch machine, case_tac arch) + apply fastforce + apply (rule_tac x=id in exI) + apply (rule_tac x="\pt_types x. 
if x \ set ?al then Some VSRootPT_T
+                     else pt_types x" in exI)
+   apply (cases s', rename_tac arch machine, case_tac arch)
+   apply fastforce
+  apply (rule_tac x=id in exI, simp)+
+  done
+qed
+
+lemma new_cap_addrs_fold':
+  "1 \ n \
+   map (\n. ptr + (n << objBitsKO ko)) [0.e.n - 1] =
+   new_cap_addrs (unat n) ptr ko"
+  by (clarsimp simp:new_cap_addrs_def ptr_add_def upto_enum_red'
+                    shiftl_t2n power_add field_simps)
+
+lemma objBitsKO_gt_0: "0 < objBitsKO ko"
+  apply (case_tac ko)
+   apply (simp_all add:objBits_simps' pageBits_def)
+  apply (rename_tac arch_kernel_object)
+  apply (case_tac arch_kernel_object)
+  apply (simp_all add:archObjSize_def bit_simps)
+  done
+
+lemma kheap_ekheap_double_gets:
+  "(\rv erv rv'. \pspace_relation rv rv'; ekheap_relation erv rv'\
+     \ corres r (R rv erv) (R' rv') (b rv erv) (d rv')) \
+   corres r (\s. R (kheap s) (ekheap s) s) (\s. R' (ksPSpace s) s)
+     (do x \ gets kheap; xa \ gets ekheap; b x xa od) (gets ksPSpace >>= d)"
+  apply (rule corres_symb_exec_l)
+     apply (rule corres_guard_imp)
+       apply (rule_tac r'= "\erv rv'. ekheap_relation erv rv' \ pspace_relation x rv'"
+                    in corres_split)
+         apply (subst corres_gets[where P="\s. x = kheap s" and P'=\])
+         apply clarsimp
+        apply (simp add: state_relation_def)
+       apply clarsimp
+      apply assumption
+     apply (wp gets_exs_valid | simp)+
+  done
+
+(*
+
+Split out the extended operation that sets the etcb domains.
+
+This allows the existing corres proofs in this file to more-or-less go
+through as they stand.
+
+A more principled fix would be to change the abstract spec and
+generalise init_arch_objects to initialise other object types.
+
+*)
+
+definition retype_region2_ext :: "obj_ref list \ Structures_A.apiobject_type \ unit det_ext_monad" where
+  "retype_region2_ext ptrs type \ modify (\s. ekheap_update (foldr (\p ekh. (ekh(p := default_ext type default_domain))) ptrs) s)"
+
+crunch all_but_exst[wp]: retype_region2_ext "all_but_exst P"
+crunch (empty_fail) empty_fail[wp]: retype_region2_ext
+
+end
+
+interpretation retype_region2_ext_extended: is_extended "retype_region2_ext ptrs type"
+  by (unfold_locales; wp)
+
+context begin interpretation Arch . (*FIXME: arch_split*)
+
+definition
+  "retype_region2_extra_ext ptrs type \
+     when (type = Structures_A.TCBObject) (do
+       cdom \ gets cur_domain;
+       mapM_x (ethread_set (\tcb. tcb\tcb_domain := cdom\)) ptrs
+     od)"
+
+crunch all_but_exst[wp]: retype_region2_extra_ext "all_but_exst P" (wp: mapM_x_wp)
+crunch (empty_fail) empty_fail[wp]: retype_region2_extra_ext (wp: mapM_x_wp)
+
+end
+
+interpretation retype_region2_extra_ext_extended: is_extended "retype_region2_extra_ext ptrs type"
+  by (unfold_locales; wp)
+
+context begin interpretation Arch . (*FIXME: arch_split*)
+
+definition
+  retype_region2 :: "obj_ref \ nat \ nat \ Structures_A.apiobject_type \ bool \ (obj_ref list,'z::state_ext) s_monad"
+where
+  "retype_region2 ptr numObjects o_bits type dev \ do
+     obj_size \ return $ 2 ^ obj_bits_api type o_bits;
+     ptrs \ return $ map (\p. ptr_add ptr (p * obj_size)) [0..< numObjects];
+     when (type \ Structures_A.Untyped) (do
+       kh \ gets kheap;
+       kh' \ return $ foldr (\p kh. kh(p \ default_object type dev o_bits)) ptrs kh;
+       do_extended_op (retype_region2_ext ptrs type);
+       modify $ kheap_update (K kh')
+     od);
+     return $ ptrs
+   od"
+
+lemma retype_region_ext_modify_kheap_futz:
+  "(retype_region2_extra_ext ptrs type :: (unit, det_ext) s_monad) >>= (\_. modify (kheap_update f))
+   = (modify (kheap_update f) >>= (\_.
retype_region2_extra_ext ptrs type))" + apply (clarsimp simp: retype_region_ext_def retype_region2_ext_def retype_region2_extra_ext_def when_def bind_assoc) + apply (subst oblivious_modify_swap) + defer + apply (simp add: bind_assoc) + apply (rule oblivious_bind) + apply simp + apply (rule oblivious_mapM_x) + apply (clarsimp simp: ethread_set_def set_eobject_def) + apply (rule oblivious_bind) + apply (simp add: gets_the_def) + apply (rule oblivious_bind) + apply (clarsimp simp: get_etcb_def) + apply simp + apply (simp add: modify_def[symmetric]) +done + +lemmas retype_region_ext_modify_kheap_futz' = fun_cong[OF arg_cong[where f=Nondet_Monad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] + +lemma foldr_upd_app_if_eta_futz: + "foldr (\p ps. ps(p \ f p)) as = (\g x. if x \ set as then Some (f x) else g x)" +apply (rule ext) +apply (rule foldr_upd_app_if) +done + +lemma modify_ekheap_update_comp_futz: + "modify (ekheap_update (f \ g)) = modify (ekheap_update g) >>= (K (modify (ekheap_update f)))" +by (simp add: o_def modify_def bind_def gets_def get_def put_def) + +lemma mapM_x_modify_futz: + assumes "\ptr\set ptrs. ekheap s ptr \ None" + shows "mapM_x (ethread_set F) (rev ptrs) s + = modify (ekheap_update (foldr (\p ekh. ekh(p := Some (F (the (ekh p))))) ptrs)) s" (is "?lhs ptrs s = ?rhs ptrs s") +using assms +proof(induct ptrs arbitrary: s) + case Nil thus ?case by (simp add: mapM_x_Nil return_def simpler_modify_def) +next + case (Cons ptr ptrs s) + have "?rhs (ptr # ptrs) s + = (do modify (ekheap_update (foldr (\p ekh. ekh(p \ F (the (ekh p)))) ptrs)); + modify (ekheap_update (\ekh. ekh(ptr \ F (the (ekh ptr))))) + od) s" + by (simp only: foldr_Cons modify_ekheap_update_comp_futz) simp + also have "... = (do ?lhs ptrs; + modify (ekheap_update (\ekh. ekh(ptr \ F (the (ekh ptr))))) + od) s" + apply (rule monad_eq_split_tail) + apply simp + apply (rule Cons.hyps[symmetric]) + using Cons.prems + apply force + done + also have "... = ?lhs (ptr # ptrs) s" + apply (simp add: mapM_x_append mapM_x_singleton) + apply (rule monad_eq_split2[OF refl, where + P="\s. \ptr\set (ptr # ptrs). ekheap s ptr \ None" + and Q="\_ s. ekheap s ptr \ None"]) + apply (simp add: ethread_set_def + assert_opt_def get_etcb_def gets_the_def gets_def get_def modify_def put_def set_eobject_def + bind_def fail_def return_def split_def + split: option.splits) + apply ((wp mapM_x_wp[OF _ subset_refl] | simp add: ethread_set_def set_eobject_def)+)[1] + using Cons.prems + apply force + done + finally show ?case by (rule sym) +qed + +lemma awkward_fold_futz: + "fold (\p ekh. ekh(p \ the (ekh p)\tcb_domain := cur_domain s\)) ptrs ekh + = (\x. if x \ set ptrs then Some ((the (ekh x))\tcb_domain := cur_domain s\) else ekh x)" +by (induct ptrs arbitrary: ekh) (simp_all add: fun_eq_iff) + +lemma retype_region2_ext_retype_region_ext_futz: + "retype_region2_ext ptrs type >>= (\_. retype_region2_extra_ext ptrs type) + = retype_region_ext ptrs type" +proof(cases type) + case TCBObject + have complete_futz: + "\F x. modify (ekheap_update (\_. F (cur_domain x) (ekheap x))) x = modify (ekheap_update (\ekh. F (cur_domain x) ekh)) x" + by (simp add: modify_def get_def get_etcb_def put_def bind_def return_def) + have second_futz: + "\f G. + do modify (ekheap_update f); + cdom \ gets (\s. cur_domain s); + G cdom + od = + do cdom \ gets (\s. 
cur_domain s); + modify (ekheap_update f); + G cdom + od" + by (simp add: bind_def gets_def get_def return_def simpler_modify_def) + from TCBObject show ?thesis + apply (clarsimp simp: retype_region_ext_def retype_region2_ext_def retype_region2_extra_ext_def when_def bind_assoc) + apply (clarsimp simp: exec_gets fun_eq_iff) + apply (subst complete_futz) + apply (simp add: second_futz[simplified] exec_gets) + apply (simp add: default_ext_def exec_modify) + apply (subst mapM_x_modify_futz[where ptrs="rev ptrs", simplified]) + apply (simp add: foldr_upd_app_if_eta_futz) + apply (simp add: modify_def exec_get put_def o_def) + apply (simp add: foldr_upd_app_if_eta_futz foldr_conv_fold awkward_fold_futz) + apply (simp cong: if_cong) + done +qed (auto simp: fun_eq_iff retype_region_ext_def retype_region2_ext_def retype_region2_extra_ext_def + put_def gets_def get_def bind_def return_def mk_ef_def modify_def foldr_upd_app_if' when_def default_ext_def) + +lemma retype_region2_ext_retype_region: + "(retype_region ptr numObjects o_bits type dev :: (obj_ref list, det_ext) s_monad) + = (do ptrs \ retype_region2 ptr numObjects o_bits type dev; + retype_region2_extra_ext ptrs type; + return ptrs + od)" +apply (clarsimp simp: retype_region_def retype_region2_def when_def bind_assoc) + apply safe + defer + apply (simp add: retype_region2_extra_ext_def) +apply (subst retype_region_ext_modify_kheap_futz'[simplified bind_assoc]) +apply (subst retype_region2_ext_retype_region_ext_futz[symmetric]) +apply (simp add: bind_assoc) +done + +lemma getObject_tcb_gets: + "getObject addr >>= (\x::tcb. gets proj >>= (\y. G x y)) + = gets proj >>= (\y. getObject addr >>= (\x. G x y))" +by (auto simp: exec_gets fun_eq_iff intro: bind_apply_cong dest!: in_inv_by_hoareD[OF getObject_inv_tcb]) + +lemma setObject_tcb_gets_ksCurDomain: + "setObject addr (tcb::tcb) >>= (\_. gets ksCurDomain >>= G) + = gets ksCurDomain >>= (\x. setObject addr tcb >>= (\_. G x))" +apply (clarsimp simp: exec_gets fun_eq_iff) +apply (rule bind_apply_cong) + apply simp +apply (drule_tac P1="\cdom. cdom = ksCurDomain x" in use_valid[OF _ setObject_cd_inv]) +apply (simp_all add: exec_gets) +done + +lemma curDomain_mapM_x_futz: + "curDomain >>= (\cdom. mapM_x (threadSet (F cdom)) addrs) + = mapM_x (\addr. curDomain >>= (\cdom. threadSet (F cdom) addr)) addrs" +proof(induct addrs) + case Nil thus ?case + by (simp add: curDomain_def mapM_x_def sequence_x_def bind_def gets_def get_def return_def) +next + case (Cons addr addrs) + have H: "\G. do cdom \ curDomain; + _ \ threadSet (F cdom) addr; + G cdom + od + = do cdom \ curDomain; + threadSet (F cdom) addr; + cdom \ curDomain; + G cdom + od" + by (simp add: bind_assoc curDomain_def threadSet_def setObject_tcb_gets_ksCurDomain + getObject_tcb_gets double_gets_drop_regets) + from Cons.hyps show ?case + apply (simp add: mapM_x_def sequence_x_def) + apply (simp add: bind_assoc foldr_map o_def) + apply (subst H) + apply (simp add: mapM_x_def sequence_x_def) + done +qed + +(* + +The existing proof continues below. + +*) + +lemma modify_ekheap_update_ekheap: + "modify (\s. ekheap_update f s) = do s \ gets ekheap; modify (\s'. 
s'\ekheap := f s\) od" +by (simp add: modify_def gets_def get_def put_def bind_def return_def split_def fun_eq_iff) + +lemma corres_retype': + assumes not_zero: "n \ 0" + and aligned: "is_aligned ptr (objBitsKO ko + gbits)" + and obj_bits_api: "obj_bits_api (APIType_map2 ty) us = + objBitsKO ko + gbits" + and check: "(sz < obj_bits_api (APIType_map2 ty) us) + = (sz < objBitsKO ko + gbits)" + and usv: "APIType_map2 ty = Structures_A.CapTableObject \ 0 < us" + and ko: "makeObjectKO dev ty = Some ko" + and orr: "obj_bits_api (APIType_map2 ty) us \ sz \ + obj_relation_retype + (default_object (APIType_map2 ty) dev us) ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + shows "corres (\rv rv'. rv' = g rv) + (\s. valid_pspace s \ pspace_no_overlap_range_cover ptr sz s + \ valid_mdb s \ valid_etcbs s \ valid_list s) + (\s. pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s) + (retype_region2 ptr n us (APIType_map2 ty) dev) + (do addrs \ createObjects ptr n ko gbits; + _ \ modify (update_gs (APIType_map2 ty) us (set addrs)); + return (g addrs) od)" + (is "corres ?r ?P ?P' ?C ?A") +proof - + note data_map_insert_def[simp del] + have not_zero':"((of_nat n)::machine_word) \ 0" + by (rule range_cover_not_zero[OF not_zero cover]) + have shiftr_not_zero:" ((of_nat n)::machine_word) << gbits \ 0" + apply (rule range_cover_not_zero_shift[OF not_zero cover]) + apply (simp add:obj_bits_api) + done + have unat_of_nat_shift:"unat (((of_nat n)::machine_word) << gbits) = + (n * 2^ gbits)" + apply (rule range_cover.unat_of_nat_n_shift[OF cover]) + using obj_bits_api + apply simp + done + have unat_of_nat_shift': + "unat (((of_nat n)::machine_word) * 2^(gbits + objBitsKO ko)) = + n * 2^(gbits + objBitsKO ko)" + apply (subst mult.commute) + apply (simp add:shiftl_t2n[symmetric]) + apply (rule range_cover.unat_of_nat_n_shift[OF cover]) + using obj_bits_api + apply simp + done + have unat_of_nat_n': + "unat (((of_nat n)::machine_word) * 2 ^ (gbits + objBitsKO ko)) \ 0" + by (simp add:unat_of_nat_shift' not_zero) + have bound:"obj_bits_api (APIType_map2 ty) us \ sz" + using cover + by (simp add:range_cover_def) + have n_estimate: "n < 2 ^ (word_bits - (objBitsKO ko + gbits))" + apply (rule le_less_trans) + apply (rule range_cover.range_cover_n_le(2)[OF cover]) + apply (rule power_strict_increasing) + apply (simp add:obj_bits_api ko) + apply (rule diff_less_mono) + using cover obj_bits_api + apply (simp_all add:range_cover_def ko word_bits_def) + done + + have set_retype_addrs_fold: + "image (\n. ptr + 2 ^ obj_bits_api (APIType_map2 ty) us * n) + {x. x \ of_nat n - 1} = + set (retype_addrs ptr (APIType_map2 ty) n us)" + apply (clarsimp simp: retype_addrs_def image_def Bex_def ptr_add_def + Collect_eq) + apply (rule iffI) + apply (clarsimp simp: field_simps word_le_nat_alt) + apply (rule_tac x="unat x" in exI) + apply (simp add: unat_sub_if_size range_cover.unat_of_nat_n[OF cover] + not_le not_zero + split: if_split_asm) + apply (clarsimp simp: field_simps word_le_nat_alt) + apply (rule_tac x="of_nat x" in exI) + apply (simp add: unat_sub_if_size range_cover.unat_of_nat_n[OF cover]) + apply (rule nat_le_Suc_less_imp) + apply (metis le_unat_uoi nat_less_le not_le_imp_less) + done + + have new_caps_adds_fold: + "map (\n. 
ptr + 2 ^ objBitsKO ko * n) [0.e.2 ^ gbits * of_nat n - 1] = + new_cap_addrs (2 ^ gbits * n) ptr ko" + apply (simp add: new_cap_addrs_def shiftl_t2n) + apply (subgoal_tac "1 \ (2::machine_word) ^ gbits * of_nat n") + apply (simp add: upto_enum_red' o_def) + apply (rule arg_cong2[where f=map, OF refl]) + apply (rule arg_cong2[where f=upt, OF refl]) + apply (metis mult.commute shiftl_t2n unat_of_nat_shift) + using shiftr_not_zero + apply (simp add: shiftl_t2n) + apply (metis word_less_1 word_not_le) + done + + from aligned + have al': "is_aligned ptr (obj_bits_api (APIType_map2 ty) us)" + by (simp add: obj_bits_api ko) + show ?thesis + apply (simp add: when_def retype_region2_def createObjects'_def + createObjects_def aligned obj_bits_api[symmetric] + ko[symmetric] al' shiftl_t2n data_map_insert_def[symmetric] + is_aligned_mask[symmetric] split_def unless_def + lookupAround2_pspace_no check + split del: if_split) + apply (subst retype_addrs_fold)+ + apply (subst if_P) + using ko + apply (clarsimp simp: makeObjectKO_def) + apply (simp add: bind_assoc retype_region2_ext_def) + apply (rule corres_guard_imp) + apply (subst modify_ekheap_update_ekheap) + apply (simp only: bind_assoc) + apply (rule kheap_ekheap_double_gets) + apply (rule corres_symb_exec_r) + apply (simp add: not_less modify_modify bind_assoc[symmetric] + obj_bits_api[symmetric] shiftl_t2n upto_enum_red' + range_cover.unat_of_nat_n[OF cover]) + apply (rule corres_split_nor[OF _ corres_trivial]) + apply (rename_tac x eps ps) + apply (rule_tac P="\s. x = kheap s \ eps = ekheap (s) \ ?P s" and + P'="\s. ps = ksPSpace s \ ?P' s" in corres_modify) + apply (simp add: set_retype_addrs_fold new_caps_adds_fold) + apply (erule retype_state_relation[OF _ _ _ _ _ _ _ _ _ cover _ _ orr], + simp_all add: ko not_zero obj_bits_api + bound[simplified obj_bits_api ko])[1] + apply (clarsimp simp: retype_addrs_fold[symmetric] ptr_add_def upto_enum_red' not_zero' + range_cover.unat_of_nat_n[OF cover] word_le_sub1) + apply (rule_tac f=g in arg_cong) + apply clarsimp + apply wp+ + apply (clarsimp split: option.splits) + apply (intro conjI impI) + apply (clarsimp|wp)+ + apply (clarsimp split: option.splits) + apply wpsimp + apply (clarsimp split: option.splits) + apply (intro conjI impI) + apply wp + apply (clarsimp simp:lookupAround2_char1) + apply wp + apply (clarsimp simp: obj_bits_api ko) + apply (drule(1) pspace_no_overlap_disjoint') + apply (rule_tac x1 = a in ccontr[OF in_empty_interE]) + apply simp + apply (clarsimp simp: not_less shiftL_nat) + apply (erule order_trans) + apply (subst p_assoc_help) + apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz"]) + apply (subst add.commute) + apply (subst add.assoc) + apply (rule word_plus_mono_right) + using cover + apply - + apply (rule iffD2[OF word_le_nat_alt]) + apply (subst word_of_nat_minus) + using not_zero + apply simp + apply (rule le_trans[OF unat_plus_gt]) + apply simp + apply (subst unat_minus_one) + apply (subst mult.commute) + apply (rule word_power_nonzero_64) + apply (rule of_nat_less_pow_64[OF n_estimate]) + apply (simp add:word_bits_def objBitsKO_gt_0 ko) + apply (simp add:range_cover_def obj_bits_api ko word_bits_def) + apply (cut_tac not_zero',clarsimp simp:ko) + apply(clarsimp simp:field_simps ko) + apply (subst unat_sub[OF word_1_le_power]) + apply (simp add:range_cover_def) + apply (subst diff_add_assoc[symmetric]) + apply (cut_tac unat_of_nat_n',simp add:ko) + apply (clarsimp simp: obj_bits_api ko) + apply (rule diff_le_mono) + apply (frule 
range_cover.range_cover_compare_bound) + apply (cut_tac obj_bits_api unat_of_nat_shift') + apply (clarsimp simp:add.commute range_cover_def ko) + apply (rule is_aligned_no_wrap'[OF is_aligned_neg_mask,OF le_refl ]) + apply (simp add:range_cover_def domI)+ + done +qed + +lemma createObjects_corres': + "\corres r P P' f (createObjects a b ko d); ko = injectKO val\ + \ corres dc P P' f (createObjects' a b ko d)" + apply (clarsimp simp:corres_underlying_def createObjects_def return_def) + apply (rule conjI) + apply (clarsimp simp:bind_def split_def) + apply (drule(1) bspec) + apply (clarsimp simp:image_def) + apply (drule(1) bspec) + apply clarsimp + apply (erule bexI[rotated]) + apply simp + apply (clarsimp simp:bind_def split_def image_def) + apply (drule(1) bspec|clarsimp)+ + done + +lemmas retype_aligned_distinct'' = retype_aligned_distinct' + [unfolded foldr_upd_app_if[folded data_map_insert_def]] + +lemma retype_ko_wp_at': + assumes vs: "pspace_aligned' s" "pspace_distinct' s" + and pn: "pspace_no_overlap' ptr sz s" + and cover: "range_cover ptr sz (objBitsKO obj) n" + shows + "ko_wp_at' P p (s \ksPSpace := foldr (\addr. data_map_insert addr obj) + (new_cap_addrs n ptr obj) (ksPSpace s)\) + = (if p \ set (new_cap_addrs n ptr obj) then P obj + else ko_wp_at' P p s)" + apply (subst foldr_upd_app_if[folded data_map_insert_def]) + apply (rule foldr_update_ko_wp_at' [OF vs]) + apply (simp add: retype_aligned_distinct'' [OF vs pn cover])+ + apply (rule new_cap_addrs_aligned) + using cover + apply (simp add:range_cover_def cover) + done + +lemma retype_obj_at': + assumes vs: "pspace_aligned' s" "pspace_distinct' s" + and pn: "pspace_no_overlap' ptr sz s" + and cover: "range_cover ptr sz (objBitsKO obj) n" + shows + "obj_at' P p (s \ksPSpace := foldr (\addr. data_map_insert addr obj) + (new_cap_addrs n ptr obj) (ksPSpace s)\) + = (if p \ set (new_cap_addrs n ptr obj) then (\ko. projectKO_opt obj = Some ko \ P ko) + else obj_at' P p s)" + unfolding obj_at'_real_def + apply (rule retype_ko_wp_at'[OF vs pn cover]) +done + +lemma retype_obj_at_disj': + assumes vs: "pspace_aligned' s" "pspace_distinct' s" + and pn: "pspace_no_overlap' ptr sz s" + and cover: "range_cover ptr sz (objBitsKO obj) n" + shows + "obj_at' P p (s \ksPSpace := foldr (\addr. data_map_insert addr obj) + (new_cap_addrs n ptr obj) (ksPSpace s)\) + = (obj_at' P p s \ p \ set (new_cap_addrs n ptr obj) + \ (\ko. projectKO_opt obj = Some ko \ P ko))" + apply (simp add: retype_obj_at' [OF vs pn cover]) + apply (safe, simp_all) + apply (drule subsetD [OF new_cap_addrs_subset [OF cover]]) + apply (insert pspace_no_overlap_disjoint' [OF vs(1) pn ]) + apply (clarsimp simp: obj_at'_def) + apply (rule_tac x1 = p in ccontr[OF in_empty_interE]) + apply (simp add:ptr_add_def p_assoc_help domI)+ + done + +declare word_unat_power[symmetric,simp] + +lemma createObjects_ko_at_strg: + fixes ptr :: machine_word + assumes cover: "range_cover ptr sz ((objBitsKO ko) + gbits) n" + assumes not_0: "n\ 0" + assumes pi: "projectKO_opt ko = Some val" + shows "\\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ + createObjects ptr n ko gbits + \\r s. \x \ set r. \offs < 2 ^ gbits. 
ko_at' val (x + (offs << objBitsKO ko)) s\" +proof - + have shiftr_not_zero:" 1 \ ((of_nat n)::machine_word) << gbits" + using range_cover_not_zero_shift[OF not_0 cover,where gbits = gbits] + apply - + apply (simp add:word_le_sub1) + done + note unat_of_nat_shiftl = range_cover.unat_of_nat_n_shift[OF cover,where gbits = gbits,simplified] + have in_new:"\idx offs. \idx \ of_nat n - 1;offs<2 ^ gbits\ + \ ptr + (idx << objBitsKO ko + gbits) + (offs << objBitsKO ko) + \ set (new_cap_addrs (n * 2 ^ gbits) ptr ko)" + apply (insert range_cover_not_zero[OF not_0 cover] not_0) + apply (clarsimp simp:new_cap_addrs_def image_def) + apply (rule_tac x ="unat (2 ^ gbits * idx + offs)" in bexI) + apply (subst add.commute) + apply (simp add:shiftl_shiftl[symmetric]) + apply (simp add:shiftl_t2n distrib_left[symmetric]) + apply simp + apply (rule unat_less_helper) + apply (rule less_le_trans) + apply (erule word_plus_strict_mono_right) + apply (subst distrib_left[where c = "1 :: machine_word",symmetric,simplified]) + apply (subst mult.commute[where a = "2^gbits"])+ + apply (insert cover) + apply (rule word_mult_le_iff[THEN iffD2]) + apply (simp add:p2_gt_0) + apply (clarsimp simp:range_cover_def word_bits_def) + apply (drule range_cover_rel[where sbit' = "objBitsKO ko "]) + apply simp + apply simp + apply (rule less_le_trans) + apply (rule range_cover.range_cover_le_n_less) + apply simp + apply (subst unat_power_lower) + using cover + apply (clarsimp simp:range_cover_def) + apply (simp add:field_simps) + apply (rule unat_le_helper) + apply (erule order_trans[OF _ word_sub_1_le]) + apply (simp add:range_cover_not_zero[OF not_0 cover]) + apply (simp add:word_bits_def) + apply (drule range_cover_rel[where sbit' = "objBitsKO ko "]) + apply simp + apply simp + apply (erule less_le_trans[OF range_cover.range_cover_le_n_less(1)]) + apply (subst unat_power_lower) + using cover + apply (clarsimp simp:range_cover_def) + apply (simp add:field_simps) + apply (rule unat_le_helper[OF inc_le]) + apply (simp add:word_leq_minus_one_le) + apply (simp add:word_bits_def) + apply (rule no_plus_overflow_neg) + apply (rule less_le_trans[where y = "of_nat n"]) + apply unat_arith + using range_cover.range_cover_n_less[OF cover] + apply (simp add:word_bits_def) + apply (subst distrib_left[where c = "1 :: machine_word",symmetric,simplified]) + apply (subst mult.commute) + apply simp + apply (rule word_mult_le_iff[THEN iffD2]) + apply (simp add:p2_gt_0) + apply (simp add:range_cover_def word_bits_def) + apply (drule range_cover_rel[where sbit' = "objBitsKO ko "]) + apply simp + apply simp + apply (rule less_le_trans) + apply (rule range_cover.range_cover_le_n_less) + apply simp + apply (subst unat_power_lower) + using cover + apply (clarsimp simp:range_cover_def) + apply (simp add:field_simps) + apply (rule unat_le_helper) + apply unat_arith + apply (simp add:word_bits_def) + apply (drule range_cover_rel[where sbit' = "objBitsKO ko "]) + apply simp + apply simp + apply (rule less_le_trans) + apply (erule range_cover.range_cover_le_n_less) + apply (simp add:range_cover.unat_of_nat_n[OF cover]) + apply (simp add: unat_le_helper) + apply (simp add:word_bits_def) + apply unat_arith + done + show ?thesis + apply (simp add: split_def createObjects_def lookupAround2_pspace_no + alignError_def unless_def createObjects'_def) + apply (rule hoare_pre) + apply (wp|simp add:data_map_insert_def[symmetric] + cong: if_cong del: fun_upd_apply data_map_insert_def)+ + apply (wpc|wp|clarsimp simp del:fun_upd_apply)+ + apply (subst new_cap_addrs_fold'[OF 
shiftr_not_zero])+ + apply (subst data_map_insert_def[symmetric])+ + apply (subst retype_obj_at_disj') + apply (simp add:valid_pspace'_def unat_of_nat_shiftl)+ + apply (rule range_cover_rel[OF cover]) + apply simp+ + apply (subst retype_obj_at_disj') + apply (simp add:valid_pspace'_def unat_of_nat_shiftl)+ + apply (rule range_cover_rel[OF cover]) + apply simp+ + using range_cover.unat_of_nat_n_shift[OF cover,where gbits = gbits,simplified] pi + apply (simp add: in_new) + done +qed + +lemma createObjects_ko_at: + fixes ptr :: machine_word + assumes cover: "range_cover ptr sz ((objBitsKO ko) + gbits) n" + assumes not_0: "n\ 0" + assumes pi: "projectKO_opt ko = Some val" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s\ + createObjects ptr n ko gbits + \\r s. \x \ set r. \offs < 2 ^ gbits. ko_at' val (x + (offs << objBitsKO ko)) s\" + by (wp createObjects_ko_at_strg[OF cover not_0 pi],fastforce) + +lemma createObjects_obj_at: + fixes ptr :: machine_word and val :: "'a :: pspace_storable" + assumes cover:"range_cover ptr sz ((objBitsKO ko) + gbits) n" + and not_0:"n \ 0" + and pi: "\(val::'a). projectKO_opt ko = Some val" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s\ + createObjects ptr n ko gbits \\r s. \x \ set r. \offs < 2 ^ gbits. + obj_at' (\(x::'a). True) (x + (offs << objBitsKO ko)) s\" + apply (rule exE[OF pi]) + apply (erule_tac val1 = x in + hoare_post_imp [OF _ createObjects_ko_at [OF cover not_0 ],rotated]) + apply (intro allI ballI impI) + apply (drule(1) bspec) + apply (drule spec, drule(1) mp) + apply (clarsimp elim!: obj_at'_weakenE) + done + +(* until we figure out what we really need of page + mappings it's just alignment, which, fortunately, + is trivial *) +lemma createObjects_aligned: + assumes al: "is_aligned ptr (objBitsKO ko + gbits)" + and bound :"n < 2 ^ word_bits" "n\0" + and bound':"objBitsKO ko + gbits < word_bits" + shows "\\\ createObjects ptr n ko gbits + \\rv s. \x \ set rv. is_aligned x (objBitsKO ko + gbits)\" + apply (rule hoare_strengthen_post) + apply (rule createObjects_ret[OF bound]) + apply (clarsimp dest!: less_two_pow_divD) + apply (rule is_aligned_ptr_add_helper[OF al]) + apply (simp_all add:bound') + done + +lemma createObjects_aligned2: + "\\s. is_aligned ptr (objBitsKO ko + gbits) \ n < 2 ^ word_bits \ n \ 0 + \ aln < word_bits + \ aln = objBitsKO ko + gbits\ + createObjects ptr n ko gbits + \\rv s. \x \ set rv. is_aligned x aln\" + apply (rule hoare_name_pre_state) + apply simp + apply (rule hoare_pre, wp createObjects_aligned, simp_all) + done + +lemma range_cover_n_wb: + "range_cover (ptr :: obj_ref) sz us n \ n < 2 ^ word_bits" + apply (rule order_le_less_trans, erule range_cover.range_cover_n_le(2)) + apply (clarsimp simp: range_cover_def) + apply (simp add: word_bits_def) + done + +lemma createObjects_nonzero: + assumes not_0: "n \ 0" + assumes cover:"range_cover ptr sz ((objBitsKO ko) + bits) n" + shows "\\s. ptr \ 0\ + createObjects ptr n ko bits + \\rv s. \p \ set rv. 
p \ 0\" + apply (insert not_0) + apply (rule hoare_pre) + apply (rule hoare_gen_asm [where P = "ptr \ 0"]) + using cover + apply (clarsimp simp:range_cover_def) + apply (erule is_aligned_get_word_bits,simp_all) + apply (rule hoare_post_imp [OF _ createObjects_ret]) + apply (simp add: ptr_add_def) + apply (intro allI impI ballI) + apply (simp add:power_add[symmetric] mult.assoc) + apply (drule(1) range_cover_no_0[OF _ cover]) + apply (simp add: objBits_def) + apply (simp add: range_cover_n_wb[OF cover]) + apply simp + done + +lemma objBits_if_dev: + "objBitsKO (if dev then KOUserDataDevice else KOUserData) = pageBits" + by (simp add: objBitsKO_def) + +lemma cwo_ret: + assumes not_0: "n \ 0" + assumes cover: "range_cover ptr sz (pageBits + bs) n" + assumes sz: "bs = pageBitsForSize vmsz - pageBits" + shows "\pspace_no_overlap' ptr sz and valid_pspace'\ + createObjects ptr n (if dev then KOUserDataDevice else KOUserData) bs + \\rv s. \x\set rv. frame_at' x vmsz dev s\" +proof - + note create_objs_device = hoare_post_imp [OF _ hoare_conj [OF createObjects_ret + createObjects_ko_at[where val = UserDataDevice,simplified]]] + + note create_objs_normal = hoare_post_imp [OF _ hoare_conj [OF createObjects_ret + createObjects_ko_at[where val = UserData,simplified]]] + + show ?thesis + unfolding frame_at'_def + apply (cases dev) + apply (rule hoare_pre) + apply (rule create_objs_device) + apply (clarsimp simp add: sz pageBits_def) + apply (drule bspec, simp, drule spec, drule(1) mp) + apply (simp add: typ_at'_def obj_at'_real_def objBits_simps pageBits_def shiftl_t2n field_simps) + apply (erule ko_wp_at'_weakenE) + apply (clarsimp simp add: projectKO_opts_defs split: kernel_object.splits) + apply (rule le_less_trans[OF _ power_strict_increasing]) + apply (rule range_cover.range_cover_n_le(1)[OF cover]) + apply (simp add: word_bits_def pageBits_def not_0)+ + apply (rule range_cover_rel[OF cover]) + apply (simp add: objBitsKO_def pageBits_def not_0)+ + using not_0 + apply simp_all + apply (rule hoare_pre) + apply (rule create_objs_normal) + apply (clarsimp simp add: sz pageBits_def) + apply (drule bspec, simp, drule spec, drule(1) mp) + apply (simp add: typ_at'_def obj_at'_real_def objBits_simps pageBits_def shiftl_t2n field_simps) + apply (erule ko_wp_at'_weakenE) + apply (clarsimp simp add: projectKO_opts_defs split: kernel_object.splits) + apply (rule le_less_trans[OF _ power_strict_increasing]) + apply (rule range_cover.range_cover_n_le(1)[OF cover]) + apply (simp add: word_bits_def pageBits_def not_0)+ + apply (rule range_cover_rel[OF cover]) + apply (simp add: objBitsKO_def pageBits_def not_0)+ + done +qed + +lemmas capFreeIndex_update_valid_untyped' = + capFreeIndex_update_valid_cap'[unfolded valid_cap'_def,simplified,THEN conjunct2,THEN conjunct1] + +lemma createNewCaps_valid_cap: + fixes ptr :: machine_word + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n " + assumes not_0: "n \ 0" + assumes ct: "ty = APIObjectType ArchTypes_H.CapTableObject \ 0 < us" + "ty = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits" + assumes ptr: "ptr \ 0" + + assumes sz_constrained: "sz \ maxUntypedSizeBits" + + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s\ + createNewCaps ty ptr n us dev + \\r s. (\cap \ set r. 
s \' cap)\" +proof - + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + note if_split_def[split del] = if_split + note createObjects_nonzero' = createObjects_nonzero[OF not_0] + note cwo_ret' = cwo_ret[OF not_0] + show ?thesis + proof(cases "Types_H.toAPIType ty") + case None thus ?thesis + using not_0 + apply (clarsimp simp: createNewCaps_def Arch_createNewCaps_def) + using cover + apply (simp add: range_cover_def) + using cover + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def + split: AARCH64_H.object_type.splits) + + apply (in_case "HugePageObject") + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits ball_conj_distrib) + apply (wp createObjects_aligned2 createObjects_nonzero' + cwo_ret'[where bs="2 * ptTranslationBits NormalPT_T", simplified] + | simp add: objBits_if_dev pageBits_def ptr range_cover_n_wb add.commute)+ + apply (simp add:pageBits_def ptr word_bits_def) + + apply (in_case "VSpaceObject") + apply wp + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits) + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_aligned[OF _ range_cover.range_cover_n_less(1) + [where 'a=64, unfolded word_bits_len_of, OF cover] + not_0]; + simp add: objBits_simps word_bits_def add.commute) + apply (rule createObjects_obj_at[where 'a=pte, OF _ not_0]; + simp add: objBits_simps) + apply simp + apply (clarsimp simp: objBits_simps page_table_at'_def typ_at_to_obj_at_arches) + apply (drule (1) bspec)+ + apply (clarsimp simp: pt_bits_def) + apply (erule_tac x="ucast i" in allE) + apply (erule impE) + apply (simp add: mask_def bit_simps split: if_splits) + apply unat_arith + apply clarsimp + + apply (in_case "SmallPageObject") + apply wp + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits ball_conj_distrib) + apply (wp createObjects_aligned2 createObjects_nonzero' + cwo_ret'[where bs=0, simplified] + | simp add: objBits_if_dev pageBits_def ptr range_cover_n_wb)+ + apply (simp add:pageBits_def ptr word_bits_def) + + apply (in_case \LargePageObject\) + apply wp + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits ball_conj_distrib) + apply (wp createObjects_aligned2 createObjects_nonzero' + cwo_ret'[where bs="ptTranslationBits NormalPT_T", simplified] + | simp add: objBits_if_dev pageBits_def ptr range_cover_n_wb)+ + apply (simp add:pageBits_def ptr word_bits_def) + + apply (in_case \PageTableObject\) + apply wp + apply (simp add: valid_cap'_def capAligned_def n_less_word_bits) + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_aligned[OF _ range_cover.range_cover_n_less(1) + [where 'a=64, unfolded word_bits_len_of, OF cover] + not_0]; + simp add: objBits_simps bit_simps word_bits_def) + apply (rule createObjects_obj_at[where 'a=pte, OF _ not_0]; + simp add: objBits_simps bit_simps) + apply simp + apply (clarsimp simp: objBits_simps bit_simps page_table_at'_def typ_at_to_obj_at_arches) + apply (drule (1) bspec)+ + apply (erule_tac x="ucast i" in allE) + apply (erule impE) + apply (simp add: mask_def) + apply unat_arith + apply clarsimp + apply simp + + apply (in_case \VCPUObject\) + apply (wpsimp wp: hoare_vcg_const_Ball_lift simp: valid_cap'_def capAligned_def n_less_word_bits)+ + apply (simp only: imp_conv_disj typ_at_to_obj_at_arches pageBits_def) + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply 
(rule createObjects_aligned[OF _ range_cover.range_cover_n_less(1) + [where 'a=machine_word_len, unfolded word_bits_len_of, + OF cover] not_0]) + apply (simp add:objBits_simps)+ + apply (rule createObjects_obj_at [where 'a=vcpu, OF _ not_0]) + apply (simp add: objBits_simps) + apply simp + apply simp + apply simp + apply (clarsimp simp: objBits_simps) + apply simp + done + next + case (Some a) thus ?thesis + proof(cases a) + case Untyped with Some cover ct show ?thesis + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def fromIntegral_def + toInteger_nat fromInteger_nat APIType_capBits_def + split: AARCH64_H.object_type.splits) + apply wp + apply (intro ballI) + apply (clarsimp simp: image_def upto_enum_red' valid_cap'_def capAligned_def + split: capability.splits) + apply (drule word_leq_minus_one_le[rotated]) + apply (rule range_cover_not_zero[OF not_0 cover]) + apply (intro conjI) + apply (rule is_aligned_add_multI[OF _ le_refl refl]) + apply (fastforce simp:range_cover_def word_bits_def)+ + apply (clarsimp simp:valid_untyped'_def ko_wp_at'_def obj_range'_def) + apply (drule(1) pspace_no_overlapD'[rotated]) + apply (frule(1) range_cover_cell_subset) + apply (erule disjE) + apply (simp add: mask_def add_diff_eq) + apply (drule psubset_imp_subset) + apply (drule(1) disjoint_subset2[rotated]) + apply (drule(1) disjoint_subset) + apply (drule(1) range_cover_subset_not_empty) + apply clarsimp+ + apply (simp add: mask_def add_diff_eq) + apply blast + apply (drule(1) range_cover_no_0[OF ptr _ unat_less_helper]) + apply simp + done + next + case TCBObject with Some cover ct show ?thesis + including no_pre + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def + fromIntegral_def toInteger_nat fromInteger_nat APIType_capBits_def curDomain_def + split: AARCH64_H.object_type.splits) + apply (wp mapM_x_wp' hoare_vcg_const_Ball_lift)+ + apply (rule hoare_post_imp) + prefer 2 + apply (rule createObjects_obj_at [where 'a = "tcb",OF _ not_0]) + using cover + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def objBits_simps + split: AARCH64_H.object_type.splits) + apply simp + apply (clarsimp simp: valid_cap'_def objBits_simps) + apply (fastforce intro: capAligned_tcbI) + done + next + case EndpointObject with Some cover ct show ?thesis + including no_pre + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def + fromIntegral_def toInteger_nat fromInteger_nat APIType_capBits_def + split: AARCH64_H.object_type.splits) + apply wp + apply (rule hoare_post_imp) + prefer 2 + apply (rule createObjects_obj_at [where 'a=endpoint, OF _ not_0]) + using cover + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def objBits_simps + split: AARCH64_H.object_type.splits) + apply (simp) + apply (clarsimp simp: valid_cap'_def objBits_simps) + apply (fastforce intro: capAligned_epI) + done + next + case NotificationObject with Some cover ct show ?thesis + including no_pre + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def + fromIntegral_def toInteger_nat fromInteger_nat APIType_capBits_def + split: AARCH64_H.object_type.splits) + apply wp + apply (rule hoare_post_imp) + prefer 2 + apply (rule createObjects_obj_at [where 'a="notification", OF _ not_0]) + using cover + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def objBits_simps + split: 
AARCH64_H.object_type.splits) + apply (simp) + apply (clarsimp simp: valid_cap'_def objBits_simps) + apply (fastforce intro: capAligned_ntfnI) + done + next + case CapTableObject with Some cover ct show ?thesis + apply (clarsimp simp: Arch_createNewCaps_def createNewCaps_def) + apply (simp_all add: AARCH64_H.toAPIType_def + fromIntegral_def toInteger_nat fromInteger_nat APIType_capBits_def + split: AARCH64_H.object_type.splits) + apply wp + apply (clarsimp simp: AARCH64_H.toAPIType_def APIType_capBits_def objBits_simps + split: AARCH64_H.object_type.split object_type.splits) + apply (rule hoare_strengthen_post) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_aligned [OF _ _ not_0 ]) + apply ((clarsimp simp:objBits_simps range_cover_def range_cover.range_cover_n_less[where 'a=64, unfolded word_bits_len_of, OF cover])+)[3] + apply (simp add: word_bits_def) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_ret [OF range_cover.range_cover_n_less(1)[where 'a=64, unfolded word_bits_len_of, OF cover] not_0]) + apply (rule createObjects_obj_at [where 'a=cte, OF _ not_0]) + apply (simp add: objBits_simps APIType_capBits_def) + apply (simp) + apply simp + apply (clarsimp simp: valid_cap'_def capAligned_def objBits_simps + dest!: less_two_pow_divD) + apply (thin_tac "\x\S. is_aligned (p x) n" for S p n) + apply (intro conjI) + apply ((simp add:range_cover_def word_bits_def)+)[2] + apply (clarsimp simp: power_sub) + apply (drule bspec, simp) + apply (drule_tac x = "addr && mask us" in spec) + apply (drule mp) + apply simp + apply (rule and_mask_less') + apply (simp add: range_cover_def word_bits_def) + apply (clarsimp simp add: shiftl_t2n) + apply simp + done + qed + qed +qed + +lemma other_objs_default_relation: + "\ case ty of Structures_A.EndpointObject \ ko = injectKO (makeObject :: endpoint) + | Structures_A.NotificationObject \ ko = injectKO (makeObject :: Structures_H.notification) + | Structures_A.TCBObject \ ko = injectKO (makeObject :: tcb) + | _ \ False \ \ + obj_relation_retype (default_object ty dev n) ko" + apply (rule obj_relation_retype_other_obj) + apply (clarsimp simp: default_object_def + is_other_obj_relation_type_def + split: Structures_A.apiobject_type.split_asm) + apply (clarsimp simp: other_obj_relation_def default_object_def + ep_relation_def ntfn_relation_def + tcb_relation_def default_tcb_def makeObject_tcb + makeObject_cte new_context_def newContext_def + default_ep_def makeObject_endpoint default_notification_def + makeObject_notification default_ntfn_def + fault_rel_optionation_def + initContext_def newFPUState_def + arch_tcb_context_get_def atcbContextGet_def + default_arch_tcb_def newArchTCB_def + arch_tcb_relation_def + split: Structures_A.apiobject_type.split_asm) + done + +lemma captable_relation_retype: + "n < word_bits \ + obj_relation_retype (default_object Structures_A.CapTableObject dev n) (KOCTE makeObject)" + apply (clarsimp simp: obj_relation_retype_def default_object_def + wf_empty_bits objBits_simps' + dom_empty_cnode ex_with_length cte_level_bits_def) + apply (rule conjI) + defer + apply (clarsimp simp: cte_relation_def empty_cnode_def makeObject_cte) + apply (rule set_eqI, rule iffI) + apply (clarsimp simp: cte_map_def') + apply (rule_tac x="of_bl y" in exI) + apply (simp add: of_bl_length[where 'a=64, folded word_bits_def]) + apply (clarsimp simp: image_def cte_map_def') + apply (rule_tac x="drop (word_bits - n) (to_bl xa)" in exI) + apply (simp add: of_drop_to_bl word_bits_def word_size) + apply (simp add: less_mask_eq) + 
done + +lemma pagetable_relation_retype: + "obj_relation_retype (default_object (ArchObject PageTableObj) dev n) + (KOArch (KOPTE makeObject))" + apply (simp add: default_object_def default_arch_object_def + makeObject_pte obj_relation_retype_def + objBits_simps pte_relation_def table_size_def) + apply (clarsimp simp: range_composition[symmetric] shiftl_t2n field_simps) + apply (fastforce simp add: image_iff le_mask_iff_lt_2n[THEN iffD1]) + done + +lemma vsroot_relation_retype: + "obj_relation_retype (default_object (ArchObject VSpaceObj) dev n) + (KOArch (KOPTE makeObject))" + apply (simp add: default_object_def default_arch_object_def + makeObject_pte obj_relation_retype_def + objBits_simps pte_relation_def table_size_def) + apply (clarsimp simp: range_composition[symmetric] shiftl_t2n field_simps) + apply (fastforce simp add: image_iff le_mask_iff_lt_2n[THEN iffD1]) + done + +lemmas makeObjectKO_simps = makeObjectKO_def[split_simps AARCH64_H.object_type.split + apiobject_type.split sum.split kernel_object.split ] + +lemma corres_retype: + assumes not_zero: "n \ 0" + and aligned: "is_aligned ptr (objBitsKO ko + gbits)" + and obj_bits_api: "obj_bits_api (APIType_map2 ty) us = objBitsKO ko + gbits" + and tp: "APIType_map2 ty \ no_gs_types" + and ko: "makeObjectKO dev ty = Some ko" + and orr: "obj_bits_api (APIType_map2 ty) us \ sz \ + obj_relation_retype (default_object (APIType_map2 ty) dev us) ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + shows "corres (=) + (\s. valid_pspace s \ pspace_no_overlap_range_cover ptr sz s + \ valid_mdb s \ valid_etcbs s \ valid_list s) + (\s. pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s + \ (\val. ko = injectKO val)) + (retype_region2 ptr n us (APIType_map2 ty) dev) (createObjects ptr n ko gbits)" + apply (rule corres_guard_imp) + apply (rule_tac F = "(\val. ko = injectKO val)" in corres_gen_asm2) + apply (erule exE) + apply (rule corres_rel_imp) + apply (rule corres_retype'[where g=id and ty=ty and sz = sz,OF not_zero aligned _ _ _ ko + ,simplified update_gs_id[OF tp] modify_id_return,simplified]) + using assms + apply (simp_all add: objBits_def no_gs_types_def) + apply auto + done + +lemma init_arch_objects_APIType_map2: + "init_arch_objects (APIType_map2 (Inr ty)) ptr bits sz refs = + (case ty of APIObjectType _ \ return () + | _ \ init_arch_objects (APIType_map2 (Inr ty)) ptr bits sz refs)" + apply (clarsimp split: AARCH64_H.object_type.split) + apply (simp add: init_arch_objects_def APIType_map2_def + split: apiobject_type.split) + done + +lemmas object_splits = + apiobject_type.split_asm + AARCH64_H.object_type.split_asm + sum.split_asm kernel_object.split_asm + arch_kernel_object.split_asm + +declare hoare_in_monad_post[wp del] +declare univ_get_wp[wp del] + +lemma nullPointer_0_simp[simp]: + "(nullPointer = 0) = True" + by (simp add: nullPointer_def) + +lemma descendants_of_retype': + assumes P: "\p. P p \ m p = None" + shows "descendants_of' p (\p. 
if P p then Some makeObject else m p) = + descendants_of' p m" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (rule iffI) + apply (erule subtree.induct) + apply (rule direct_parent) + apply (clarsimp simp: mdb_next_unfold makeObject_cte split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def makeObject_cte split: if_split_asm) + apply (erule trans_parent) + apply (clarsimp simp: mdb_next_unfold makeObject_cte split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def makeObject_cte split: if_split_asm) + apply (erule subtree.induct) + apply (rule direct_parent) + apply (clarsimp simp: mdb_next_unfold dest!: P) + apply assumption + apply (fastforce simp: parentOf_def dest!: P) + apply (erule trans_parent) + apply (clarsimp simp: mdb_next_unfold dest!: P) + apply assumption + apply (fastforce simp: parentOf_def dest!: P) + done + +lemma capRange_Null [simp]: "capRange NullCap = {}" + by (simp add: capRange_def) + +end + +locale retype_mdb = vmdb + + fixes P n + assumes P: "\p. P p \ m p = None" + assumes 0: "\P 0" + defines "n \ \p. if P p then Some makeObject else m p" +begin + +interpretation Arch . (*FIXME: arch_split*) + +lemma no_0_n: "no_0 n" + using no_0 by (simp add: no_0_def n_def 0) + +lemma n_next: + "n \ c \ c' = (if P c then c' = 0 else m \ c \ c')" + by (simp add: mdb_next_unfold n_def makeObject_cte nullPointer_def) + +lemma n_prev: + "n \ c \ c' = (if P c' then c = 0 else m \ c \ c')" + by (simp add: mdb_prev_def n_def makeObject_cte nullPointer_def) + +lemma dlist_n: "valid_dlist n" + using dlist no_0 no_0_n + apply (simp add: valid_dlist_def2) + apply (clarsimp simp: n_prev n_next) + apply (rule conjI) + apply clarsimp + apply (erule allE, erule (1) impE) + apply (erule_tac x=c' in allE) + apply simp + apply (drule P) + apply (simp add: mdb_next_unfold) + apply clarsimp + apply (erule allE, erule (1) impE) + apply (erule_tac x=c' in allE) + apply simp + apply (drule P) + apply (simp add: mdb_prev_def) + done + +lemma n_next_trancl: + "n \ c \\<^sup>+ c' \ (if P c then c' = 0 else m \ c \\<^sup>+ c')" + apply (insert no_0_n chain) + apply (erule trancl_induct) + apply (fastforce simp: n_next) + apply (simp split: if_split_asm) + apply (clarsimp simp: mdb_next_unfold) + apply (simp add: n_next split: if_split_asm) + apply (simp add: mdb_chain_0_def) + apply (drule_tac x=c in bspec) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + apply assumption + done + +lemma next_not_P: + "m \ c \ c' \ \P c" + by (clarsimp simp: mdb_next_unfold dest!: P) + +lemma m_next_trancl: + "m \ c \\<^sup>+ c' \ n \ c \\<^sup>+ c'" + apply (erule trancl_induct) + apply (rule r_into_trancl) + apply (clarsimp simp: n_next) + apply (drule next_not_P) + apply simp + apply (erule trancl_trans) + apply (rule r_into_trancl) + apply (clarsimp simp: n_next) + apply (drule next_not_P) + apply simp + done + +lemma P_to_0: + "P c \ n \ c \\<^sup>+ 0" + by (rule r_into_trancl) (simp add: n_next) + +lemma n_trancl_eq: + "n \ c \\<^sup>+ c' = (if P c then c' = 0 else m \ c \\<^sup>+ c')" + by (auto dest: m_next_trancl n_next_trancl P_to_0) + +lemma n_rtrancl_eq: + "n \ c \\<^sup>* c' = (if P c then c' = 0 \ c = c' else m \ c \\<^sup>* c')" + by (auto simp: n_trancl_eq rtrancl_eq_or_trancl) + +lemma dom_n: + "dom n = dom m \ Collect P" + by (auto simp add: n_def) + +lemma mdb_chain_0_n: "mdb_chain_0 n" + using chain + by (auto simp: mdb_chain_0_def dom_n n_trancl_eq) + +lemma n_Some_eq: + "(n p = Some (CTE cap node)) = + (if P p then 
cap = NullCap \ node = nullMDBNode
+ else m p = Some (CTE cap node))"
+ by (auto simp: n_def makeObject_cte)
+
+lemma valid_badges_n: "valid_badges n"
+proof -
+ from valid
+ have "valid_badges m" ..
+ thus ?thesis
+ apply (clarsimp simp: valid_badges_def)
+ apply (simp add: n_Some_eq n_next split: if_split_asm)
+ apply fastforce
+ done
+qed
+
+lemma caps_contained_n: "caps_contained' n"
+proof -
+ from valid
+ have "caps_contained' m" ..
+ thus ?thesis
+ apply (clarsimp simp: caps_contained'_def)
+ apply (simp add: n_Some_eq split: if_split_asm)
+ apply fastforce
+ done
+qed
+
+lemma mdb_chunked_n: "mdb_chunked n"
+proof -
+ from valid
+ have "mdb_chunked m" ..
+ thus ?thesis
+ apply (clarsimp simp: mdb_chunked_def)
+ apply (simp add: n_Some_eq split: if_split_asm)
+ apply (simp add: n_Some_eq n_trancl_eq n_rtrancl_eq is_chunk_def)
+ apply fastforce
+ done
+qed
+
+lemma descendants [simp]:
+ "descendants_of' p n = descendants_of' p m"
+ apply (unfold n_def)
+ apply (subst descendants_of_retype')
+ apply (erule P)
+ apply (rule refl)
+ done
+
+lemma untyped_mdb_n: "untyped_mdb' n"
+proof -
+ from valid
+ have "untyped_mdb' m" ..
+ thus ?thesis
+ apply (clarsimp simp: untyped_mdb'_def)
+ apply (simp add: n_Some_eq split: if_split_asm)
+ done
+qed
+
+lemma untyped_inc_n: "untyped_inc' n"
+proof -
+ from valid
+ have "untyped_inc' m" ..
+ thus ?thesis
+ apply (clarsimp simp: untyped_inc'_def)
+ apply (simp add: n_Some_eq split: if_split_asm)
+ apply blast
+ done
+qed
+
+lemma valid_nullcaps_n: "valid_nullcaps n"
+proof -
+ from valid
+ have "valid_nullcaps m" ..
+ thus ?thesis
+ apply (clarsimp simp: valid_nullcaps_def)
+ apply (simp add: n_Some_eq split: if_split_asm)
+ done
+qed
+
+lemma ut_rev_n: "ut_revocable' n"
+proof -
+ from valid
+ have "ut_revocable' m" ..
+ thus ?thesis
+ apply (clarsimp simp: ut_revocable'_def)
+ apply (simp add: n_Some_eq split: if_split_asm)
+ done
+qed
+
+lemma class_links_m:
+ "class_links m"
+ using valid by (simp add: valid_mdb_ctes_def)
+
+lemma next_not_P2:
+ "\ m \ p \ p'; p' \ nullPointer \ \ \ P p'"
+ using dlist
+ apply (clarsimp simp: mdb_next_unfold)
+ apply (erule(1) valid_dlistE)
+ apply clarsimp
+ apply (clarsimp dest!: P)
+ done
+
+lemma class_links_n:
+ "class_links n"
+ using class_links_m
+ apply (simp add: class_links_def)
+ apply (elim allEI)
+ apply clarsimp
+ apply (subgoal_tac "p' \ nullPointer")
+ apply (simp add: n_next split: if_split_asm)
+ apply (case_tac cte, case_tac cte')
+ apply (clarsimp simp add: n_Some_eq split: if_split_asm)
+ apply (drule(1) next_not_P2)
+ apply simp
+ apply (clarsimp simp: no_0_n nullPointer_def)
+ done
+
+lemma irq_control_n:
+ "irq_control n"
+ apply (clarsimp simp add: irq_control_def)
+ apply (simp add: n_Some_eq split: if_split_asm)
+ apply (frule irq_revocable, rule irq_control)
+ apply clarsimp
+ apply (erule (1) irq_controlD, rule irq_control)
+ done
+
+lemma dist_z_m: "distinct_zombies m"
+ using valid by auto
+
+lemma dist_z_n: "distinct_zombies n"
+ using dist_z_m
+ apply (simp add: n_def distinct_zombies_def
+ distinct_zombie_caps_def
+ split del: if_split)
+ apply (erule allEI, erule allEI)
+ apply (clarsimp split del: if_split)
+ apply (clarsimp split: if_split_asm simp: makeObject_cte)
+ apply (clarsimp simp: isCap_simps)
+ done
+
+lemma reply_masters_rvk_fb_m: "reply_masters_rvk_fb m"
+ using valid by auto
+
+lemma reply_masters_rvk_fb_n: "reply_masters_rvk_fb n"
+ using reply_masters_rvk_fb_m
+ by (simp add: n_def reply_masters_rvk_fb_def
+ ball_ran_eq makeObject_cte isCap_simps)
+
+lemma valid_n:
+ "valid_mdb_ctes n"
+ by (simp add: valid_mdb_ctes_def dlist_n no_0_n mdb_chain_0_n
+ valid_badges_n caps_contained_n untyped_mdb_n
+ untyped_inc_n mdb_chunked_n valid_nullcaps_n ut_rev_n
+ class_links_n irq_control_n dist_z_n
+ reply_masters_rvk_fb_n)
+
+end
+
+definition
+ caps_no_overlap'' :: "machine_word \ nat \ kernel_state \ bool"
+where
+ "caps_no_overlap'' ptr sz s \ \cte \ ran (ctes_of s).
+ untypedRange (cteCap cte) \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ {}
+ \ {ptr .. 
(ptr && ~~ mask sz) + 2 ^ sz - 1} \ untypedRange (cteCap cte)" + +lemma obj_range'_subset: + "\range_cover ptr sz (objBitsKO val) n; ptr' \ set (new_cap_addrs n ptr val)\ + \ obj_range' ptr' val \ {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1}" + unfolding obj_range'_def + by (rule new_range_subset, auto) + +lemma obj_range'_subset_strong: + assumes "range_cover ptr sz (objBitsKO val) n" + and "ptr' \ set (new_cap_addrs n ptr val)" + shows "obj_range' ptr' val \ {ptr..ptr + (of_nat n * 2 ^ objBitsKO val) - 1}" +proof - + { + assume cover: "range_cover ptr sz (objBitsKO val) n" + and mem_p: "ptr' \ set (new_cap_addrs n ptr val)" + and not_0: "n\ 0" + note n_less = range_cover.range_cover_n_less[OF cover] + have unat_of_nat_m1: "unat (of_nat n - (1::machine_word)) < n" + using not_0 n_less by (simp add:unat_of_nat_minus_1) + have decomp: + "of_nat n * 2 ^ objBitsKO val = + of_nat (n - 1) * 2 ^ objBitsKO val + (2 :: machine_word) ^ objBitsKO val" + apply (simp add:distrib_right[where b = "1 :: machine_word",simplified,symmetric]) + using not_0 n_less + apply simp + done + have "ptr' + 2 ^ objBitsKO val - 1 \ ptr + of_nat n * 2 ^ objBitsKO val - 1" + using cover + apply (subst decomp) + apply (simp add:add.assoc[symmetric]) + apply (simp add:p_assoc_help) + apply (rule order_trans[OF word_plus_mono_left word_plus_mono_right]) + using mem_p not_0 + apply (clarsimp simp:new_cap_addrs_def shiftl_t2n) + apply (rule word_plus_mono_right) + apply (subst mult.commute) + apply (rule word_mult_le_mono1[OF word_of_nat_le]) + using n_less not_0 + apply (simp add:unat_of_nat_minus_1) + apply (rule p2_gt_0[THEN iffD2]) + apply (simp add:word_bits_def range_cover_def) + apply (simp only: word_bits_def[symmetric]) + apply (clarsimp simp: unat_of_nat_minus_1[OF n_less(1) not_0]) + apply (rule nat_less_power_trans2 + [OF range_cover.range_cover_le_n_less(2),OF cover, folded word_bits_def]) + apply (simp add:unat_of_nat_m1 less_imp_le) + apply (simp add:range_cover_def word_bits_def) + apply (rule machine_word_plus_mono_right_split[where sz = sz]) + using range_cover.range_cover_compare[OF cover,where p = "unat (of_nat n - (1::machine_word))"] + apply (clarsimp simp:unat_of_nat_m1) + apply (simp add:range_cover_def word_bits_def) + apply (rule olen_add_eqv[THEN iffD2]) + apply (subst add.commute[where a = "2^objBitsKO val - 1"]) + apply (subst p_assoc_help[symmetric]) + apply (rule is_aligned_no_overflow) + apply (clarsimp simp:range_cover_def word_bits_def) + apply (erule aligned_add_aligned[OF _ is_aligned_mult_triv2]; simp) + apply simp + by (meson assms(1) is_aligned_add is_aligned_mult_triv2 is_aligned_no_overflow' range_cover_def) + } + with assms show ?thesis + unfolding obj_range'_def + apply - + apply (frule(1) obj_range'_subset) + apply (simp add: obj_range'_def) + apply (cases "n = 0"; clarsimp simp:new_cap_addrs_def mask_def field_simps) + done +qed + + +lemma caps_no_overlapD'': + "\cte_wp_at' (\cap. cteCap cap = c) q s;caps_no_overlap'' ptr sz s\ + \ untypedRange c \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ {} \ + {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ untypedRange c" + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps caps_no_overlap''_def + simp del:atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (drule_tac x = cte in bspec) + apply fastforce + apply (erule(1) impE) + apply blast +done + +context begin interpretation Arch . 
(*FIXME: arch_split*) + +lemma valid_untyped'_helper: + assumes valid : "valid_cap' c s" + and cte_at : "cte_wp_at' (\cap. cteCap cap = c) q s" + and cover : "range_cover ptr sz (objBitsKO val) n" + and range : "caps_no_overlap'' ptr sz s" + and pres : "isUntypedCap c \ usableUntypedRange c \ {ptr..ptr + of_nat n * 2 ^ objBitsKO val - 1} = {}" + shows "\pspace_aligned' s; pspace_distinct' s; pspace_no_overlap' ptr sz s\ + \ valid_cap' c (s\ksPSpace := foldr (\addr. data_map_insert addr val) (new_cap_addrs n ptr val) (ksPSpace s)\)" + proof - + note blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff + assume pn : "pspace_aligned' s" "pspace_distinct' s" + and no_overlap: "pspace_no_overlap' ptr sz s" + show ?thesis + using pn pres no_overlap valid cover cte_wp_at_ctes_of[THEN iffD1,OF cte_at] + caps_no_overlapD''[OF cte_at range] + apply (clarsimp simp:valid_cap'_def retype_ko_wp_at') + apply (case_tac "cteCap cte"; + simp add: valid_cap'_def cte_wp_at_obj_cases' valid_pspace'_def retype_obj_at_disj' + split: zombie_type.split_asm) + apply (clarsimp simp: valid_arch_cap'_def valid_arch_cap_ref'_def retype_obj_at_disj' + typ_at_to_obj_at_arches frame_at'_def page_table_at'_def + split: if_split_asm arch_capability.splits) + unfolding valid_untyped'_def + apply (intro allI) + apply (rule ccontr) + apply clarify + using cover[unfolded range_cover_def] + apply (clarsimp simp:isCap_simps retype_ko_wp_at' split:if_split_asm) + apply (thin_tac "\x. Q x" for Q) + apply (frule aligned_untypedRange_non_empty) + apply (simp add:isCap_simps) + apply (elim disjE) + apply (frule(1) obj_range'_subset) + apply (erule impE) + apply (drule(1) psubset_subset_trans) + apply (drule Int_absorb1[OF psubset_imp_subset]) + apply (drule aligned_untypedRange_non_empty) + apply (simp add:isCap_simps) + apply (simp add:Int_ac add_mask_fold) + apply (drule(1) subset_trans) + apply (simp only: add_mask_fold) + apply (frule(1) obj_range'_subset_strong) + apply (drule(1) non_disjoing_subset) + apply blast + apply (thin_tac "\x. Q x" for Q) + apply (frule aligned_untypedRange_non_empty) + apply (simp add:isCap_simps) + apply (frule(1) obj_range'_subset) + apply (drule(1) subset_trans) + apply (erule impE) + apply (clarsimp simp: add_mask_fold) + apply blast + apply (simp only: add_mask_fold) + apply blast + done +qed + +definition caps_overlap_reserved' :: "machine_word set \ kernel_state \ bool" +where + "caps_overlap_reserved' S s \ \cte \ ran (ctes_of s). + (isUntypedCap (cteCap cte) \ usableUntypedRange (cteCap cte) \ S = {})" + +lemma createObjects_valid_pspace': + assumes mko: "makeObjectKO dev ty = Some val" + and not_0: "n \ 0" + and cover: "range_cover ptr sz (objBitsKO val + gbits) n" + and sz_limit: "sz \ maxUntypedSizeBits" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat (n * 2^gbits * 2 ^ objBitsKO val ) - 1} s + \ ptr \ 0\ + createObjects' ptr n val gbits \\r. 
valid_pspace'\" + apply (cut_tac not_0) + apply (simp add: split_def createObjects'_def + lookupAround2_pspace_no + alignError_def unless_def) + apply (rule hoare_pre) + apply (wp|simp cong: if_cong del: data_map_insert_def del:fun_upd_apply)+ + apply (wpc|wp)+ + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift[OF _ cover]) + apply simp+ + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift[OF _ cover]) + apply simp+ + apply (subst data_map_insert_def[symmetric])+ + apply (rule impI) + apply (clarsimp simp: new_cap_addrs_fold' + valid_pspace'_def linorder_not_less + objBits_def[symmetric]) + apply (simp only: imp_disjL[symmetric] imp_conjL[symmetric] imp_ex[symmetric] + range_cover.unat_of_nat_n_shift[OF cover,where gbits=gbits,simplified]) +proof (intro conjI impI) + + fix s + + assume pn: "pspace_no_overlap' ptr sz s" + and vo: "valid_objs' s" + and ad: "pspace_aligned' s" "pspace_distinct' s" + and pc: "caps_no_overlap'' ptr sz s" + and mdb: "valid_mdb' s" + and p_0: "ptr \ 0" + and reserved : "caps_overlap_reserved' {ptr..ptr + of_nat n *2 ^ gbits * 2 ^ objBitsKO val - 1} s" + and no_0_obj': "no_0_obj' s" + have obj': "objBitsKO val \ sz" + using cover + by (simp add:range_cover_def) + + let ?s' = "s\ksPSpace := foldr (\addr. data_map_insert addr val) (new_cap_addrs (n * 2 ^ gbits) ptr val) (ksPSpace s)\" + + note cover' = range_cover_rel[where sbit' = "objBitsKO val",OF cover _ refl,simplified] + + note ad' = retype_aligned_distinct'[OF ad pn cover'] + + note shift = range_cover.unat_of_nat_n_shift[OF cover,where gbits=gbits,simplified] + + have al: "is_aligned ptr (objBitsKO val)" + using cover' + by (simp add:range_cover_def) + + show pspace_aligned: "pspace_aligned' ?s'" + using ad' shift + by (simp add:field_simps) + + show "pspace_distinct' ?s'" + using ad' shift + by (simp add:field_simps) + + note obj_at_disj = retype_obj_at_disj' [OF ad pn cover'] + + note obj_at_disj' = obj_at_disj [unfolded foldr_upd_app_if[folded data_map_insert_def]] + + have obj_atC: "\P x. x \ set (new_cap_addrs (2 ^ gbits * n) ptr val) \ \ obj_at' P x s" + apply (clarsimp simp: obj_at'_def) + apply (drule subsetD [OF new_cap_addrs_subset [OF cover' ]]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (drule domI[where m = "ksPSpace s"]) + apply (drule(1) orthD2) + apply (clarsimp simp:ptr_add_def p_assoc_help) + done + + have valid_cap: "\cap q. \ s \' cap; cte_wp_at' (\cte. 
cteCap cte = cap) q s \ + \ ?s' \' cap" + apply (rule valid_untyped'_helper[OF _ _ _ pc _ ad pn ]) + apply simp+ + apply (subst mult.commute) + apply (rule cover') + using reserved + apply (clarsimp simp:caps_overlap_reserved'_def cte_wp_at_ctes_of) + apply (drule_tac x = cte in bspec) + apply fastforce + apply simp + done + + show valid_objs: "valid_objs' ?s'" using vo + apply (clarsimp simp: valid_objs'_def + foldr_upd_app_if[folded data_map_insert_def] + elim!: ranE + split: if_split_asm) + apply (insert sym[OF mko])[1] + apply (clarsimp simp: makeObjectKO_def + split: bool.split_asm sum.split_asm + AARCH64_H.object_type.split_asm + apiobject_type.split_asm + kernel_object.split_asm + arch_kernel_object.split_asm) + apply (drule bspec, erule ranI) + apply (subst mult.commute) + apply (case_tac obj; simp add: valid_obj'_def) + apply (rename_tac endpoint) + apply (case_tac endpoint; simp add: valid_ep'_def obj_at_disj') + apply (rename_tac notification) + apply (case_tac notification; simp add: valid_ntfn'_def valid_bound_tcb'_def obj_at_disj') + apply (rename_tac ntfn xa) + apply (case_tac ntfn, simp_all, (clarsimp simp: obj_at_disj' split:option.splits)+) + apply (rename_tac tcb) + apply (case_tac tcb, clarsimp simp add: valid_tcb'_def) + apply (frule pspace_alignedD' [OF _ ad(1)]) + apply (frule pspace_distinctD' [OF _ ad(2)]) + apply (simp add: objBits_simps) + apply (subst mult.commute) + apply (intro conjI ballI) + apply (clarsimp elim!: ranE) + apply (rule valid_cap[unfolded foldr_upd_app_if[folded data_map_insert_def]]) + apply (fastforce) + apply (rule_tac ptr="x + xa" in cte_wp_at_tcbI', assumption+) + apply fastforce + apply simp + apply (rename_tac thread_state mcp priority bool option nat cptr vptr bound user_context) + apply (case_tac thread_state, simp_all add: valid_tcb_state'_def + valid_bound_ntfn'_def obj_at_disj' + split: option.splits)[2] + apply (clarsimp simp add: valid_arch_tcb'_def typ_at_to_obj_at_arches obj_at_disj') + apply (simp add: valid_cte'_def) + apply (frule pspace_alignedD' [OF _ ad(1)]) + apply (frule pspace_distinctD' [OF _ ad(2)]) + apply (simp add: objBits_simps') + apply (subst mult.commute) + apply (erule valid_cap[unfolded foldr_upd_app_if[folded data_map_insert_def]]) + apply (erule(2) cte_wp_at_cteI'[unfolded cte_level_bits_def]) + apply simp + done + have not_0: "0 \ set (new_cap_addrs (2 ^ gbits * n) ptr val)" + using p_0 + apply clarsimp + apply (drule subsetD [OF new_cap_addrs_subset [OF cover'],rotated]) + apply (clarsimp simp:ptr_add_def) + done + show "valid_mdb' ?s'" + apply (simp add: valid_mdb'_def foldr_upd_app_if[folded data_map_insert_def]) + apply (subst mult.commute) + apply (subst ctes_of_retype [OF mko ad]) + apply (rule ad'[unfolded foldr_upd_app_if[folded data_map_insert_def]])+ + apply (simp add: objBits_def[symmetric] new_cap_addrs_aligned [OF al]) + apply (rule ballI, drule subsetD [OF new_cap_addrs_subset [OF cover']]) + apply (insert pspace_no_overlap_disjoint' [OF ad(1) pn]) + apply (drule_tac x = x in orthD1) + apply (simp add:ptr_add_def p_assoc_help) + apply fastforce + apply (fold makeObject_cte) + apply (rule retype_mdb.valid_n) + apply unfold_locales + apply (rule mdb[unfolded valid_mdb'_def]) + apply (rule iffD2 [OF None_ctes_of_cte_at[unfolded cte_wp_at_obj_cases'], THEN sym]) + apply (rule notI) + apply (elim disjE conjE, simp_all add: obj_atC)[1] + apply (thin_tac "S \ T = {}" for S T) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (drule pspace_no_overlapD' [OF _ pn]) + apply (drule subsetD [OF 
new_cap_addrs_subset[OF cover']]) + apply (frule_tac ptr'=p in mask_in_range) + apply (drule(1) tcb_cte_cases_aligned_helpers) + apply (drule_tac x = p in orthD1) + apply (clarsimp simp:objBits_simps) + apply (clarsimp simp:ptr_add_def p_assoc_help) + apply (frule new_range_subset[OF cover']) + apply (drule bspec [OF new_cap_addrs_aligned[OF al]]) + apply (drule(1) disjoint_subset[rotated]) + apply (drule_tac a=p in equals0D) + apply (frule_tac ptr'=p in mask_in_range) + apply (simp only: add_mask_fold) + apply (insert sym [OF mko], + clarsimp simp: objBits_simps makeObjectKO_def obj_at'_def)[1] + apply (insert sym[OF mko] cover', + clarsimp simp: obj_at'_def objBits_simps + makeObjectKO_def)[1] + apply (drule(1) tcb_cte_cases_aligned_helpers(2)) + apply clarsimp + apply (drule subsetD [OF new_cap_addrs_subset,rotated]) + apply (simp add:objBits_simps) + apply (drule orthD1) + apply (fastforce simp:p_assoc_help ptr_add_def) + apply fastforce + apply (simp add: not_0) + done + + have data_map_ext: "\x y. data_map_insert x y = (\m. m (x \ y))" + by (rule ext) simp + show no_0_obj: "no_0_obj' ?s'" + using not_0 no_0_obj' + by (simp add: no_0_obj'_def data_map_ext field_simps foldr_upd_app_other) + +qed + +abbreviation + "injectKOS \ (injectKO :: ('a :: pspace_storable) \ kernel_object)" + +lemma createObjects_valid_pspace_untyped': + assumes mko: "makeObjectKO dev ty = Some val" + and not_0: "n \ 0" + and cover: "range_cover ptr sz (objBitsKO val + gbits) n" + and sz_limit: "sz \ maxUntypedSizeBits" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s \ caps_no_overlap'' ptr sz s \ ptr \ 0 + \ caps_overlap_reserved' {ptr .. ptr + of_nat (n * 2^gbits * 2 ^ objBitsKO val ) - 1} s \ + createObjects' ptr n val gbits \\r. valid_pspace'\" + apply (wp createObjects_valid_pspace' [OF mko not_0 cover sz_limit]) + apply simp + done + +declare bleeding_obvious [simp] + +lemma range_cover_new_cap_addrs_compare: + assumes not_0: "n \ 0" + and cover: "range_cover ptr sz (objBitsKO val + gbits) n" + and ptr_in: "p \ set (new_cap_addrs (unat (((of_nat n)::machine_word) << gbits)) ptr val)" + shows "p \ ptr + of_nat (shiftL n (objBitsKO val + gbits) - Suc 0)" +proof - + note unat_of_nat_shift = range_cover.unat_of_nat_n_shift[OF cover,where gbits=gbits,simplified] + have cover' :"range_cover ptr sz (objBitsKO val) (n*2^gbits)" + by (rule range_cover_rel[OF cover],simp+) + have upbound:" unat ((((of_nat n)::machine_word) * 2 ^ gbits)) * unat ((2::machine_word) ^ objBitsKO val) < 2 ^ word_bits" + using range_cover.range_cover_le_n_less[OF cover' le_refl] cover' + apply - + apply (drule nat_less_power_trans) + apply (simp add:range_cover_def) + apply (fold word_bits_def) + using unat_of_nat_shift not_0 + apply (simp add:field_simps shiftl_t2n) + done + have not_0': "(2::machine_word) ^ (objBitsKO val + gbits) * of_nat n \ 0" + apply (rule range_cover_not_zero_shift[OF not_0,unfolded shiftl_t2n,OF _ le_refl]) + apply (rule range_cover_rel[OF cover]) + apply simp+ + done + have "gbits < word_bits" + using cover + by (simp add:range_cover_def word_bits_def) + thus ?thesis + apply - + apply (insert not_0 cover ptr_in) + apply (frule range_cover.range_cover_le_n_less[OF _ le_refl]) + apply (fold word_bits_def) + apply (simp add:shiftL_nat ) + apply (simp add:range_cover.unat_of_nat_n_shift) + apply (clarsimp simp:new_cap_addrs_def shiftl_t2n) + apply (rename_tac pa) + apply (rule word_plus_mono_right) + apply (rule order_trans) + apply (subst mult.commute) + apply (rule word_mult_le_iff[THEN iffD2]) + apply 
(clarsimp simp:p2_gt_0 range_cover_def word_bits_def) + apply (drule range_cover_rel[where sbit' = "0"]) + apply (simp+)[2] + apply (erule less_le_trans[OF range_cover.range_cover_le_n_less(2)]) + apply (clarsimp simp:field_simps power_add) + apply (rule unat_le_helper) + apply (rule of_nat_mono_maybe_le[THEN iffD1]) + using range_cover.range_cover_le_n_less[OF cover' le_refl] + apply (simp_all only:word_bits_def[symmetric]) + apply simp + apply (drule nat_less_power_trans) + apply (simp add:range_cover_def word_bits_def) + apply (rule less_le_trans[OF mult_less_mono1]) + apply (rule unat_mono) + apply (rule_tac y1= "pa" in of_nat_mono_maybe'[THEN iffD1,rotated -1]) + apply (assumption) + apply (simp add:word_bits_def) + apply (simp add:word_bits_def) + apply simp + using unat_of_nat_shift + apply (simp add:field_simps shiftl_t2n) + apply simp + apply (rule word_less_sub_1) + apply (simp add:power_add field_simps) + apply (subst mult.assoc[symmetric]) + apply (rule word_mult_less_mono1) + apply (rule word_of_nat_less) + using unat_of_nat_shift + apply (simp add:shiftl_t2n field_simps) + apply (meson less_exp objBitsKO_bounded2 of_nat_less_pow_64 word_gt_a_gt_0) + using upbound + apply (simp add:word_bits_def) + apply (rule machine_word_plus_mono_right_split[where sz = sz]) + apply (rule less_le_trans[rotated -1]) + apply (rule range_cover.range_cover_compare_bound[OF cover']) + apply (simp add: unat_minus_one[OF not_0']) + using range_cover.unat_of_nat_n_shift[OF cover le_refl] + apply (simp add:shiftl_t2n power_add field_simps) + apply (simp add:range_cover_def word_bits_def) + done +qed + +lemma createObjects_orig_ko_wp_at2': + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ P (ko_wp_at' P' p s) + \ (P' val \ P True) + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. P (ko_wp_at' P' p s)\" + apply (simp add: createObjects'_def lookupAround2_pspace_no + alignError_def unless_def split_def del:fun_upd_apply) + apply (rule hoare_grab_asm)+ + apply (subst new_cap_addrs_fold') + apply (drule range_cover_not_zero_shift[rotated]) + apply (rule le_add2) + apply (simp add:word_le_sub1 del:fun_upd_apply)+ + apply (rule hoare_pre) + apply (wp|simp cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (wpc|wp)+ + apply (clarsimp simp:valid_pspace'_def linorder_not_less simp del:fun_upd_apply) + apply (subgoal_tac " range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (subst data_map_insert_def[symmetric])+ + apply (subst retype_ko_wp_at',simp+)+ + apply clarsimp + apply (cases "P' val") + apply simp + apply clarsimp + apply (frule(1) subsetD [OF new_cap_addrs_subset]) + apply (drule(1) pspace_no_overlap_disjoint') + apply (simp add:lookupAround2_None1) + apply (intro conjI impI allI) + apply (drule_tac x = p in spec) + apply (erule impE) + apply (erule(1) range_cover_new_cap_addrs_compare[rotated]) + apply simp + apply (fastforce simp: ko_wp_at'_def) + apply (drule_tac x = p in orthD1) + apply (clarsimp simp:ptr_add_def p_assoc_help) + apply (simp add:dom_def) + apply (fastforce simp:ko_wp_at'_def) + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + + +lemma createObjects_orig_obj_at2': + "\\s. 
n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ pspace_aligned' s \ pspace_distinct' s + \ P (obj_at' P' p s) + \ \ (case_option False P' (projectKO_opt val)) + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. P (obj_at' P' p s)\" + unfolding obj_at'_real_def + by (wp createObjects_orig_ko_wp_at2') auto + +lemma createObjects_orig_cte_wp_at2': + "\\s. P (cte_wp_at' P' p s) + \ n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ pspace_aligned' s \ pspace_distinct' s + \ \ (case_option False P' (projectKO_opt val)) + \ (\(getF, setF) \ ran tcb_cte_cases. + \ (case_option False (P' \ getF) (projectKO_opt val))) + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. P (cte_wp_at' P' p s)\" + apply (simp add: cte_wp_at'_obj_at') + apply (rule handy_prop_divs) + apply (wp createObjects_orig_obj_at2'[where sz = sz], simp) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + including no_pre + apply (wp handy_prop_divs createObjects_orig_obj_at2'[where sz = sz] + | simp add: o_def cong: option.case_cong)+ + done + +lemma threadSet_cte_wp_at2'T: + assumes "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + shows "\\s. P (cte_wp_at' P' p s)\ threadSet F t \\rv s. P (cte_wp_at' P' p s)\" + using assms by (rule threadSet_cte_wp_at'T) + +lemmas threadSet_cte_wp_at2' = + threadSet_cte_wp_at2'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma createNewCaps_cte_wp_at2: + "\\s. P (cte_wp_at' P' p s) \ \ P' makeObject + \ n \ 0 + \ range_cover ptr sz (APIType_capBits ty objsz) n + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n objsz dev + \\rv s. P (cte_wp_at' P' p s)\" + including no_pre + apply (simp add: createNewCaps_def createObjects_def AARCH64_H.toAPIType_def + split del: if_split) + apply (case_tac ty; simp add: createNewCaps_def createObjects_def Arch_createNewCaps_def + split del: if_split cong: if_cong) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp add:createObjects_def) + apply ((wp createObjects_orig_cte_wp_at2'[where sz = sz] + mapM_x_wp' threadSet_cte_wp_at2')+ + | assumption + | clarsimp simp: APIType_capBits_def projectKO_opts_defs + makeObject_tcb tcb_cte_cases_def cteSizeBits_def + archObjSize_def + createObjects_def curDomain_def + objBits_if_dev + split del: if_split + | simp add: objBits_simps field_simps mult_2_right)+ + done + +lemma createObjects_orig_obj_at': + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ pspace_aligned' s \ pspace_distinct' s + \ obj_at' P p s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r. 
obj_at' P p\" + apply (rule hoare_grab_asm)+ + apply (clarsimp simp: createObjects'_def) + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply simp+ + apply (wp|simp add:split_def cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (wpc|wp)+ + apply (clarsimp simp del:fun_upd_apply) + apply (simp add:range_cover_def is_aligned_mask) + apply (subst data_map_insert_def[symmetric])+ + apply clarsimp + apply (subgoal_tac "range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (subst retype_obj_at',simp+)+ + apply (intro conjI impI allI) + apply (clarsimp simp:obj_at'_real_def ko_wp_at'_def) + apply (frule(1) subsetD [OF new_cap_addrs_subset]) + apply (drule(1) pspace_no_overlap_disjoint') + apply (simp add:lookupAround2_None1) + apply (drule_tac x = p in spec) + apply (erule impE) + apply (erule(1) range_cover_new_cap_addrs_compare[rotated]) + apply simp + apply simp + apply (frule(1) subsetD [OF new_cap_addrs_subset]) + apply (drule(1) pspace_no_overlap_disjoint') + apply (drule_tac x = p in orthD1) + apply (clarsimp simp:ptr_add_def p_assoc_help) + apply (simp add:dom_def obj_at'_real_def ko_wp_at'_def) + apply simp+ + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + +crunch ko_wp_at'[wp]: doMachineOp "\s. P (ko_wp_at' P' p s)" + +lemma createObjects_orig_cte_wp_at': + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ cte_wp_at' P p s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. cte_wp_at' P p s\" + apply (simp add: cte_wp_at'_obj_at' tcb_cte_cases_def cteSizeBits_def) + apply (rule hoare_pre, wp hoare_vcg_disj_lift createObjects_orig_obj_at'[where sz = sz]) + apply clarsimp + done + +lemma createNewCaps_cte_wp_at': + "\\s. cte_wp_at' P p s + \ range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n us dev + \\rv. cte_wp_at' P p\" + apply (simp add: createNewCaps_def AARCH64_H.toAPIType_def + split del: if_split) + apply (case_tac ty; simp add: Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (wp createObjects_orig_cte_wp_at'[where sz = sz] mapM_x_wp' + threadSet_cte_wp_at'T + | clarsimp simp: objBits_simps field_simps mult_2_right APIType_capBits_def + createObjects_def curDomain_def + | intro conjI impI + | force simp: tcb_cte_cases_def cteSizeBits_def)+ + done + +lemma createObjects_obj_at_other: + assumes cover: "range_cover ptr sz (objBitsKO val + gbits) n" + and not_0: "n\ 0" + shows "\\s. obj_at' P p s \ valid_pspace' s \ pspace_no_overlap' ptr sz s\ + createObjects ptr n val gbits \\_. obj_at' P p\" + apply (simp add: createObjects_def) + apply (wp createObjects_orig_obj_at'[where sz = sz]) + using cover not_0 + apply (clarsimp simp: cover not_0 valid_pspace'_def pspace_no_overlap'_def) + done + +lemma valid_cap'_range_no_overlap: + "\untypedRange c \ {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1} = {}; s \' c; + valid_pspace' s; pspace_no_overlap' ptr sz s; + range_cover ptr sz (objBitsKO val) n\ + \ s\ksPSpace := foldr (\addr. 
data_map_insert addr val) + (new_cap_addrs n ptr val) (ksPSpace s)\ \' c" + apply (cases c; simp add: valid_cap'_def valid_arch_cap'_def valid_arch_cap_ref'_def + cte_wp_at_obj_cases' valid_pspace'_def retype_obj_at_disj' + typ_at_to_obj_at_arches frame_at'_def page_table_at'_def + split: zombie_type.split_asm arch_capability.splits if_splits + del: Int_atLeastAtMost)[1] + apply (rename_tac word nat1 nat2) + apply (clarsimp simp:valid_untyped'_def retype_ko_wp_at' + simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (frule aligned_untypedRange_non_empty) + apply (simp add:isCap_simps) + apply (intro conjI impI) + apply (intro allI) + apply (drule_tac x = ptr' in spec) + apply (rule ccontr) + apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (erule disjE) + apply (drule(2) disjoint_subset2 [OF obj_range'_subset]) + apply (drule(1) disjoint_subset2[OF psubset_imp_subset]) + apply (simp add: Int_absorb ptr_add_def p_assoc_help mask_def + del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (drule(1) obj_range'_subset) + apply (drule_tac A'=" {word + of_nat nat2..word + 2 ^ nat1 - 1}" in disjoint_subset[rotated]) + apply clarsimp + apply (rule is_aligned_no_wrap') + apply (fastforce simp:capAligned_def) + apply (erule of_nat_less_pow_64) + apply (simp add:capAligned_def) + apply (drule(1) disjoint_subset2) + apply (simp add: add_mask_fold) + apply blast + apply (intro allI) + apply (drule_tac x = ptr' in spec) + apply (rule ccontr) + apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + apply (drule(2) disjoint_subset2 [OF obj_range'_subset]) + apply (drule(1) disjoint_subset2) + apply (simp add: Int_absorb ptr_add_def p_assoc_help mask_def + del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + done + +lemma createObjects_valid_cap': + "\valid_cap' c and valid_pspace' and pspace_no_overlap' ptr sz and + K (untypedRange c \ {ptr .. (ptr && ~~ mask sz) + 2^sz - 1} = {} \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0)\ + createObjects' ptr n val gbits + \\_. valid_cap' c\" + apply (rule hoare_gen_asm) + apply (simp add: createObjects'_def lookupAround2_pspace_no + alignError_def unless_def split_def) + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply fastforce+ + apply (rule hoare_pre) + apply (wp|simp cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (clarsimp simp: linorder_not_less valid_pspace'_def) + apply (wpc|wp)+ + apply (subst data_map_insert_def[symmetric])+ + apply clarsimp + apply (subgoal_tac " range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (subst range_cover.unat_of_nat_n_shift,simp+)+ + apply (subst (asm) range_cover.unat_of_nat_n_shift,simp+)+ + apply (intro conjI impI allI) + apply (erule(4) valid_cap'_range_no_overlap)+ + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + +lemma createObjects_cte_wp_at': + "\range_cover ptr sz (objBitsKO val + gbits) n; n \ 0\ + \\\s. cte_wp_at' P p s \ valid_pspace' s \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\_. 
cte_wp_at' P p\" + apply (clarsimp simp: valid_def cte_wp_at_obj_cases') + apply (erule disjE) + apply (erule use_valid[OF _ ]) + apply (rule createObjects_orig_obj_at') + apply fastforce + apply clarsimp + apply (drule_tac x = na in bspec) + apply clarsimp + apply clarsimp + apply (drule use_valid[OF _ createObjects_orig_obj_at']) + apply fastforce + apply simp + done + +lemma createNewCaps_cte_wp_at: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0 : "n \ 0" + shows "\\s. cte_wp_at' P p s \ valid_pspace' s \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n us dev + \\_. cte_wp_at' P p\" + apply (wp createNewCaps_cte_wp_at') + apply (auto simp: cover not_0) + done + +lemma createObjects_ret2: + "\(\s. P (map (\p. ptr_add y (p * 2 ^ (objBitsKO ko + gbits))) + [0.. n \ 0)\ + createObjects y n ko gbits \\rv s. P rv\" + apply (rule hoare_gen_asm) + apply (rule hoare_chain) + apply (rule hoare_vcg_conj_lift) + apply (rule createObjects_ret) + apply simp+ + apply (rule hoare_vcg_prop) + defer + apply (clarsimp simp: power_add mult.commute mult.left_commute | assumption)+ + done + +lemma state_refs_ko_wp_at_eq: + "state_refs_of' s = (\x. {r. ko_wp_at' (\ko. r \ refs_of' ko) x s})" + apply (rule ext) + apply (simp add: state_refs_of'_def ko_wp_at'_def + split: option.split) + done + +lemma state_hyp_refs_ko_wp_at_eq: + "state_hyp_refs_of' s = (\x. {r. ko_wp_at' (\ko. r \ hyp_refs_of' ko) x s})" + apply (rule ext) + apply (simp add: state_hyp_refs_of'_def ko_wp_at'_def + split: option.split) + done + +lemma createObjects_state_refs_of'': + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (state_refs_of' s) \ refs_of' val = {} + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\rv s. P (state_refs_of' s)\" + apply (clarsimp simp:valid_def lookupAround2_pspace_no state_refs_ko_wp_at_eq) + apply (erule ssubst[where P = P,rotated]) + apply (rule ext) + apply (rule set_eqI) + apply clarsimp + apply (intro iffI,rule ccontr) + apply (drule_tac P1="\x. \ x" in use_valid[OF _ createObjects_orig_ko_wp_at2'[where sz = sz]]) + apply simp + apply (intro conjI) + apply simp+ + apply (drule_tac P1="\x. x" in use_valid[OF _ createObjects_orig_ko_wp_at2'[where sz = sz]]) + apply simp+ + done + +lemma createObjects_state_hyp_refs_of'': + "\\s. n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ P (state_hyp_refs_of' s) \ hyp_refs_of' val = {} + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\rv s. P (state_hyp_refs_of' s)\" + apply (clarsimp simp:valid_def lookupAround2_pspace_no state_hyp_refs_ko_wp_at_eq) + apply (erule ssubst[where P = P,rotated]) + apply (rule ext) + apply (rule set_eqI) + apply clarsimp + apply (intro iffI,rule ccontr) + apply (drule_tac P1="\x. \ x" in use_valid[OF _ createObjects_orig_ko_wp_at2'[where sz = sz]]) + apply simp + apply (intro conjI) + apply simp+ + apply (drule_tac P1="\x. x" in use_valid[OF _ createObjects_orig_ko_wp_at2'[where sz = sz]]) + apply simp+ + done + +lemma createNewCaps_state_refs_of': + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (state_refs_of' s)\ + createNewCaps ty ptr n us dev + \\rv s. 
P (state_refs_of' s)\" + unfolding createNewCaps_def + apply (clarsimp simp: AARCH64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (insert cover not_0) + apply (wp mapM_x_wp' createObjects_state_refs_of'' threadSet_state_refs_of' + | simp add: not_0 pspace_no_overlap'_def objBitsKO_def APIType_capBits_def + valid_pspace'_def makeObject_tcb makeObject_endpoint objBits_def + makeObject_notification archObjSize_def createObjects_def + curDomain_def field_simps mult_2_right + | intro conjI impI)+ + done + + +lemma createNewCaps_state_hyp_refs_of': + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ P (state_hyp_refs_of' s)\ + createNewCaps ty ptr n us dev + \\rv s. P (state_hyp_refs_of' s)\" + unfolding createNewCaps_def + apply (clarsimp simp: AARCH64_H.toAPIType_def + split del: if_split) + apply (cases ty; simp add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp split del: if_split) + apply (rule hoare_pre, wp, simp) + apply (insert cover not_0) + apply (wp mapM_x_wp' createObjects_state_hyp_refs_of'' threadSet_state_hyp_refs_of' + | simp add: not_0 pspace_no_overlap'_def objBitsKO_def APIType_capBits_def + valid_pspace'_def makeObject_tcb makeObject_vcpu objBits_def + newArchTCB_def vcpu_tcb_refs'_def makeVCPUObject_def field_simps + archObjSize_def createObjects_def curDomain_def mult_2_right + | intro conjI impI)+ + done + +lemma createObjects_iflive': + "\\s. if_live_then_nonz_cap' s \ \ live' val + \ n \ 0 + \ range_cover ptr sz (objBitsKO val + gbits) n + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\rv s. if_live_then_nonz_cap' s\" + apply (rule hoare_pre) + apply (simp only: if_live_then_nonz_cap'_def + ex_nonz_cap_to'_def imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + hoare_vcg_ex_lift createObjects_orig_ko_wp_at2' + createObjects_orig_cte_wp_at') + apply clarsimp + apply (intro conjI allI impI) + apply simp_all + apply (rule ccontr) + apply clarsimp + apply (drule(1) if_live_then_nonz_capE') + apply (fastforce simp: ex_nonz_cap_to'_def) + done + +lemma atcbVCPUPtr_new[simp]: + "atcbVCPUPtr newArchTCB = None" + by (simp add: newArchTCB_def) + +lemma arch_live'_KOPTE[simp]: + "arch_live' (KOPTE makeObject) = False" + by (simp add: makeObject_pte arch_live'_def) + +lemma arch_live'_KOVCPU[simp]: + "arch_live' (KOVCPU makeObject) = False" + by (simp add: makeObject_vcpu makeVCPUObject_def arch_live'_def) + +lemma createNewCaps_iflive'[wp]: + assumes cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0: "n \ 0" + shows + "\\s. valid_pspace' s \ pspace_no_overlap' ptr sz s + \ if_live_then_nonz_cap' s\ + createNewCaps ty ptr n us dev + \\rv s. 
if_live_then_nonz_cap' s\" + unfolding createNewCaps_def + apply (insert cover) + apply (clarsimp simp: toAPIType_def) + apply (cases ty, simp_all add: createNewCaps_def Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split)[1] + apply (rule hoare_pre, wp, simp) + apply (wp mapM_x_wp' createObjects_iflive' threadSet_iflive' + | simp add: not_0 pspace_no_overlap'_def createObjects_def live'_def hyp_live'_def + valid_pspace'_def makeObject_tcb makeObject_endpoint + makeObject_notification objBitsKO_def + APIType_capBits_def objBits_def + archObjSize_def field_simps mult_2_right + curDomain_def + split del:if_split + | simp split: if_split + | fastforce)+ + done + +lemma createObjects_pspace_only: + "\ \f s. P (ksPSpace_update f s) = P s \ + \ \P\ createObjects' ptr n val gbits \\rv. P\" + apply (simp add: createObjects_def createObjects'_def unless_def alignError_def + split_def lookupAround2_pspace_no) + apply wpsimp + done + +lemma createObjects'_qs[wp]: + "\\s. P (ksReadyQueues s)\ createObjects' ptr n val gbits \\rv s. P (ksReadyQueues s)\" + by (rule createObjects_pspace_only, simp) + +lemma createObjects'_qsL1[wp]: + "\\s. P (ksReadyQueuesL1Bitmap s)\ createObjects' ptr n val gbits \\rv s. P (ksReadyQueuesL1Bitmap s)\" + by (rule createObjects_pspace_only, simp) + +lemma createObjects'_qsL2[wp]: + "\\s. P (ksReadyQueuesL2Bitmap s)\ createObjects' ptr n val gbits \\rv s. P (ksReadyQueuesL2Bitmap s)\" + by (rule createObjects_pspace_only, simp) + +(* FIXME move these 2 to TcbAcc_R *) +lemma threadSet_qsL1[wp]: + "\\s. P (ksReadyQueuesL1Bitmap s)\ threadSet f t \\rv s. P (ksReadyQueuesL1Bitmap s)\" + by (simp add: threadSet_def | wp updateObject_default_inv)+ + +lemma threadSet_qsL2[wp]: + "\\s. P (ksReadyQueuesL2Bitmap s)\ threadSet f t \\rv s. P (ksReadyQueuesL2Bitmap s)\" + by (simp add: threadSet_def | wp updateObject_default_inv)+ + +crunches createObjects, createNewCaps + for qs[wp]: "\s. P (ksReadyQueues s)" + and qsL1[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and qsL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: crunch_simps wp: crunch_wps) + +lemma sch_act_wf_lift_asm: + assumes tcb: "\P t. \st_tcb_at' P t and Q \ f \\rv. st_tcb_at' P t\" + assumes tcbDomain: "\P t. \obj_at' (\tcb. P (tcbDomain tcb)) t and Q\ f \\rv. obj_at' (\tcb. P (tcbDomain tcb)) t\" + assumes kCT: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes kCD: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes ksA: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + shows + "\\s. sch_act_wf (ksSchedulerAction s) s \ Q s\ + f + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (clarsimp simp: valid_def) + apply (rule use_valid [OF _ ksA], assumption) + apply (frule use_valid[OF _ kCT[of "(=) (ksCurThread s)" for s] refl]) + apply (frule use_valid[OF _ kCD[of "(=) (ksCurDomain s)" for s] refl]) + apply (case_tac "ksSchedulerAction s") + apply (simp add: ct_in_state'_def) + apply (drule use_valid [OF _ tcb]) + apply simp + apply simp + apply simp + apply (clarsimp simp: tcb_in_cur_domain'_def) + apply (frule use_valid [OF _ tcb], fastforce) + apply (frule use_valid [OF _ tcbDomain], fastforce) + apply auto + done + +lemma valid_queues_lift_asm': + assumes tat: "\d p t. \\s. \ obj_at' (inQ d p) t s \ Q d p s\ f \\_ s. \ obj_at' (inQ d p) t s\" + and prq: "\P. \\s. P (ksReadyQueues s)\ f \\_ s. P (ksReadyQueues s)\" + shows "\\s. valid_queues' s \ (\d p. Q d p s)\ f \\_. 
valid_queues'\" + apply (simp only: valid_queues'_def imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + tat prq) + apply simp + done + +lemma createObjects'_ct[wp]: + "\\s. P (ksCurThread s)\ createObjects' p n v us \\rv s. P (ksCurThread s)\" + by (rule createObjects_pspace_only, simp) + +crunches createObjects, doMachineOp, createNewCaps + for ct[wp]: "\s. P (ksCurThread s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + (simp: unless_def crunch_simps wp: crunch_wps) + +lemma threadSet_ko_wp_at2': + "\\s. P (ko_wp_at' P' p s) \ (\tcb_x :: tcb. P' (injectKO (F tcb_x)) = P' (injectKO tcb_x))\ + threadSet F ptr + \\_ s. P (ko_wp_at' P' p s)\" + apply (simp add: threadSet_def split del: if_split) + apply (wp setObject_ko_wp_at getObject_tcb_wp | simp add: objBits_simps')+ + apply (auto simp: ko_wp_at'_def obj_at'_def) + done + +lemma threadSet_ko_wp_at2'_futz: + "\\s. P (ko_wp_at' P' p s) \ obj_at' Q ptr s + \ (\tcb_x :: tcb. Q tcb_x \ P' (injectKO (F tcb_x)) = P' (injectKO tcb_x))\ + threadSet F ptr + \\_ s. P (ko_wp_at' P' p s)\" + apply (simp add: threadSet_def split del: if_split) + apply (wp setObject_ko_wp_at getObject_tcb_wp | simp add: objBits_simps')+ + apply (auto simp: ko_wp_at'_def obj_at'_def) + done + +lemma mapM_x_threadSet_createNewCaps_futz: + "\\s. P (ko_wp_at' P' p s) \ (\addr\set addrs. obj_at' (\tcb. \tcbQueued tcb \ tcbState tcb = Inactive) addr s) + \ (\tcb_x :: tcb. tcbQueued (F tcb_x) = tcbQueued tcb_x \ tcbState (F tcb_x) = tcbState tcb_x) + \ (\tcb_x :: tcb. \ tcbQueued tcb_x \ tcbState tcb_x = Inactive \ P' (injectKO (F tcb_x)) = P' (injectKO tcb_x))\ + mapM_x (threadSet F) addrs + \\_ s. P (ko_wp_at' P' p s)\" (is "\?PRE\ _ \\_. ?POST\") + apply (rule mapM_x_inv_wp[where P="?PRE"]) + apply simp + apply (rule hoare_pre) + apply (wp hoare_vcg_ball_lift threadSet_ko_wp_at2'[where P="id", simplified] + | wp (once) threadSet_ko_wp_at2'_futz[where Q="\tcb. \tcbQueued tcb \ tcbState tcb = Inactive"] + | simp)+ + done + +lemma createObjects_makeObject_not_tcbQueued: + assumes "range_cover ptr sz (objBitsKO tcb) n" + assumes "n \ 0" "tcb = injectKO (makeObject::tcb)" + shows "\\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ + createObjects ptr n tcb 0 + \\rv s. \addr\set rv. obj_at' (\tcb. \ tcbQueued tcb \ tcbState tcb = Structures_H.thread_state.Inactive) addr s\" + apply (rule hoare_strengthen_post[OF createObjects_ko_at_strg[where 'a=tcb]]) + using assms + apply (auto simp: obj_at'_def projectKO_opt_tcb objBitsKO_def objBits_def makeObject_tcb) + done + +lemma createObjects_ko_wp_at2: + "\\s. range_cover ptr sz (objBitsKO ko + gbits) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ P (ko_wp_at' P' p s) + \ (P' ko \ P True) + \ pspace_no_overlap' ptr sz s\ + createObjects ptr n ko gbits + \\_ s. P (ko_wp_at' P' p s)\" + apply (simp add: createObjects_def) + apply (wp createObjects_orig_ko_wp_at2') + apply auto + done + +lemma createNewCaps_ko_wp_atQ': + "\(\s. P (ko_wp_at' P' p s) + \ range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s) + and K (\d (tcb_x :: tcb). \tcbQueued tcb_x \ tcbState tcb_x = Inactive + \ P' (injectKO (tcb_x \ tcbDomain := d \)) = P' (injectKO tcb_x)) + and K (\v. makeObjectKO d (Inr ty) = Some v + \ P' v \ P True)\ + createNewCaps ty ptr n us d + \\rv s. 
P (ko_wp_at' P' p s)\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: createNewCaps_def AARCH64_H.toAPIType_def + split del: if_split) + apply (cases ty, simp_all add: Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split)[1] + apply (rule hoare_pre, wp, simp) + apply (wp mapM_x_threadSet_createNewCaps_futz + mapM_x_wp' + createObjects_obj_at + createObjects_ko_wp_at2 createObjects_makeObject_not_tcbQueued + | simp add: makeObjectKO_def objBitsKO_def archObjSize_def APIType_capBits_def + objBits_def curDomain_def field_simps mult_2_right + split del: if_split + | intro conjI impI | fastforce + | split if_split_asm)+ + done + +lemmas createNewCaps_ko_wp_at' + = createNewCaps_ko_wp_atQ'[simplified, unfolded fold_K] + +lemmas createNewCaps_obj_at2 = + createNewCaps_ko_wp_at' + [where P'="\ko. \obj :: ('a :: pspace_storable). + projectKO_opt ko = Some obj \ P' obj" for P', + folded obj_at'_real_def, + unfolded pred_conj_def, simplified] + +lemma createNewCaps_obj_at': + "\\s. obj_at' (P :: ('a :: pspace_storable) \ bool) p s + \ range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s + \ (\tcb d. \tcbQueued tcb \ tcbState tcb = Inactive \ + ((\obj :: 'a. injectKOS obj = KOTCB (tcb\tcbDomain := d\) \ P obj) \ + (\obj :: 'a. injectKOS obj = KOTCB tcb \ P obj)))\ + createNewCaps ty ptr n us d + \\rv s. obj_at' P p s\" + apply (simp add: obj_at'_real_def) + apply (wp createNewCaps_ko_wp_at') + apply (fastforce simp:project_inject) + done + +lemmas createNewCaps_pred_tcb_at' + = createNewCaps_obj_at'[where P="\ko. (Q :: 'a :: type \ bool) (proj (tcb_to_itcb' ko))" for Q proj, + folded pred_tcb_at'_def, simplified] + +lemma createNewCaps_cur: + "\range_cover ptr sz (APIType_capBits ty us) n ; n \ 0\ \ + \\s. valid_pspace' s \ + pspace_no_overlap' ptr sz s \ + cur_tcb' s\ + createNewCaps ty ptr n us d + \\rv. cur_tcb'\" + apply (rule hoare_post_imp [where Q="\rv s. \t. ksCurThread s = t \ tcb_at' t s"]) + apply (simp add: cur_tcb'_def) + apply (wp hoare_vcg_ex_lift createNewCaps_obj_at') + apply (clarsimp simp: pspace_no_overlap'_def cur_tcb'_def valid_pspace'_def) + apply auto + done + +crunch ksInterrupt[wp]: createNewCaps "\s. P (ksInterruptState s)" + (simp: crunch_simps unless_def + wp: setObject_ksInterrupt updateObject_default_inv crunch_wps) + +lemma createNewCaps_ifunsafe': + "\\s. valid_pspace' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 \ + if_unsafe_then_cap' s\ + createNewCaps ty ptr n us d + \\rv s. if_unsafe_then_cap' s\" + apply (simp only: if_unsafe_then_cap'_def ex_cte_cap_to'_def + imp_conv_disj) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF createNewCaps_ksInterrupt]) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + createNewCaps_cte_wp_at2 hoare_vcg_ex_lift) + apply (simp add: makeObject_cte pspace_no_overlap'_def + valid_pspace'_def) + apply auto + done + +lemma createObjects_nosch'[wp]: + "\\s. P (ksSchedulerAction s)\ + createObjects' ptr n val gbits + \\rv s. P (ksSchedulerAction s)\" + by (rule createObjects_pspace_only, simp) + +crunches createObjects, createNewCaps + for nosch[wp]: "\s. P (ksSchedulerAction s)" + and it[wp]: "\s. P (ksIdleThread s)" + (wp: setObject_ksPSpace_only updateObject_default_inv mapM_x_wp') + +lemma createObjects_idle': + "\valid_idle' and valid_pspace' and pspace_no_overlap' ptr sz + and (\s. 
\ case_option False (\cte. ksIdleThread s \ capRange (cteCap cte)) + (projectKO_opt val) + \ (\(getF, setF) \ ran tcb_cte_cases. + \ case_option False (\tcb. ksIdleThread s \ capRange (cteCap (getF tcb))) + (projectKO_opt val))) + and K (range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0)\ + createObjects' ptr n val gbits + \\rv. valid_idle'\" + apply (rule hoare_gen_asm) + apply (rule hoare_pre) + apply (clarsimp simp add: valid_idle'_def pred_tcb_at'_def) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_as_subst [OF createObjects'_it]) + apply (wp createObjects_orig_obj_at' + createObjects_orig_cte_wp_at2' + hoare_vcg_all_lift | simp)+ + apply (clarsimp simp: valid_idle'_def o_def pred_tcb_at'_def valid_pspace'_def + cong: option.case_cong) + apply auto + done + +lemma createNewCaps_idle'[wp]: + "\valid_idle' and valid_pspace' and pspace_no_overlap' ptr sz + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv. valid_idle'\" + apply (rule hoare_gen_asm) + apply (clarsimp simp: createNewCaps_def AARCH64_H.toAPIType_def + split del: if_split) + apply (cases ty, simp_all add: Arch_createNewCaps_def + split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split)[1] + apply (wp, simp) + including no_pre + apply (wp mapM_x_wp' + createObjects_idle' + threadSet_idle' + | simp add: projectKO_opt_tcb projectKO_opt_cte mult_2 + makeObject_cte makeObject_tcb archObjSize_def + tcb_cte_cases_def objBitsKO_def APIType_capBits_def + objBits_def createObjects_def cteSizeBits_def + | simp add: field_simps + | intro conjI impI + | fastforce simp: curDomain_def)+ + done + +crunches createNewCaps + for asid_table[wp]: "\s. P (armKSASIDTable (ksArchState s))" + and cur_vcpu[wp]: "\s. P (armHSCurVCPU (ksArchState s))" + and num_list_regs[wp]: "\s. P (armKSGICVCPUNumListRegs (ksArchState s))" + and global_ksArch[wp]: "\s. P (armKSGlobalUserVSpace (ksArchState s))" + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + (simp: crunch_simps wp: crunch_wps) + +lemma createNewCaps_global_refs': + "\\s. range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s \ valid_global_refs' s + \ 0 < gsMaxObjectSize s\ + createNewCaps ty ptr n us d + \\rv. valid_global_refs'\" + apply (simp add: valid_global_refs'_def valid_cap_sizes'_def valid_refs'_def) + apply (rule_tac Q="\rv s. \ptr. \ cte_wp_at' (\cte. (kernel_data_refs \ capRange (cteCap cte) \ {} + \ 2 ^ capBits (cteCap cte) > gsMaxObjectSize s)) ptr s \ global_refs' s \ kernel_data_refs" + in hoare_post_imp) + apply (auto simp: cte_wp_at_ctes_of linorder_not_less elim!: ranE)[1] + apply (rule hoare_pre) + apply (simp add: global_refs'_def) + apply (rule hoare_use_eq [where f="\s. armKSGlobalUserVSpace (ksArchState s)", + OF createNewCaps_global_ksArch]) + apply (rule hoare_use_eq [where f=ksIdleThread, OF createNewCaps_it]) + apply (rule hoare_use_eq [where f=irq_node', OF createNewCaps_ksInterrupt]) + apply (rule hoare_use_eq [where f=gsMaxObjectSize], wp) + apply (wp hoare_vcg_all_lift createNewCaps_cte_wp_at2[where sz=sz]) + apply (clarsimp simp: cte_wp_at_ctes_of global_refs'_def + makeObject_cte) + apply (auto simp: linorder_not_less ball_ran_eq) + done + +lemma koTypeOf_eq_UserDataT: + "(koTypeOf ko = UserDataT) = (ko = KOUserData)" + by (cases ko, simp_all) + +lemma createNewCaps_valid_arch_state: + "\(\s. 
valid_arch_state' s \ valid_pspace' s \ pspace_no_overlap' ptr sz s + \ (tp = APIObjectType ArchTypes_H.CapTableObject \ us > 0)) + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv. valid_arch_state'\" + unfolding valid_arch_state'_def valid_asid_table'_def vspace_table_at'_defs + apply (simp add: typ_at_to_obj_at_arches option_case_all_conv) + apply (wpsimp wp: hoare_vcg_const_Ball_lift createNewCaps_obj_at' + createNewCaps_ko_wp_at' hoare_vcg_all_lift + hoare_vcg_imp_lift') + apply (fastforce simp: pred_conj_def valid_pspace'_def o_def is_vcpu'_def) + done + +lemma valid_irq_handlers_cte_wp_at_form': + "valid_irq_handlers' = (\s. \irq. irq_issued' irq s \ + (\p. \ cte_wp_at' (\cte. cteCap cte = IRQHandlerCap irq) p s))" + by (auto simp: valid_irq_handlers'_def cteCaps_of_def cte_wp_at_ctes_of + fun_eq_iff ran_def) + +lemma createNewCaps_irq_handlers': + "\valid_irq_handlers' and pspace_no_overlap' ptr sz + and pspace_aligned' and pspace_distinct' + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv. valid_irq_handlers'\" + apply (simp add: valid_irq_handlers_cte_wp_at_form' irq_issued'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + createNewCaps_cte_wp_at2) + apply (clarsimp simp: makeObject_cte) + apply auto + done + +lemma createObjects'_irq_states' [wp]: + "\valid_irq_states'\ createObjects' a b c d \\_. valid_irq_states'\" + apply (simp add: createObjects'_def split_def) + apply (wp unless_wp|wpc|simp add: alignError_def)+ + apply fastforce + done + +crunch irq_states' [wp]: createNewCaps valid_irq_states' + (wp: crunch_wps no_irq no_irq_clearMemory simp: crunch_simps unless_def) + +crunch ksMachine[wp]: createObjects "\s. P (ksMachineState s)" + (simp: crunch_simps unless_def) + +lemma createNewCaps_valid_queues': + "\valid_queues' and pspace_no_overlap' ptr sz + and pspace_aligned' and pspace_distinct' + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv. valid_queues'\" + apply (wp valid_queues_lift_asm' [OF createNewCaps_obj_at2]) + apply (clarsimp) + apply (simp add: makeObjectKO_def + split: object_type.split_asm + apiobject_type.split_asm) + apply (clarsimp simp: inQ_def) + apply (auto simp: makeObject_tcb + split: object_type.splits apiobject_type.splits) + done + +lemma createNewCaps_valid_queues: + "\valid_queues and pspace_no_overlap' ptr sz + and pspace_aligned' and pspace_distinct' + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0)\ + createNewCaps ty ptr n us d + \\rv. valid_queues\" + apply (rule hoare_gen_asm) + apply (wp valid_queues_lift_asm createNewCaps_obj_at2[where sz=sz]) + apply (clarsimp simp: projectKO_opts_defs) + apply (simp add: inQ_def) + apply (wp createNewCaps_pred_tcb_at'[where sz=sz] | simp)+ + done + +lemma mapM_x_threadSet_valid_pspace: + "\valid_pspace' and K (curdom \ maxDomain)\ + mapM_x (threadSet (tcbDomain_update (\_. curdom))) addrs \\y. valid_pspace'\" + apply (rule hoare_gen_asm) + apply (wp mapM_x_wp' threadSet_valid_pspace') + apply simp_all + done + +lemma createNewCaps_valid_pspace: + assumes not_0: "n \ 0" + and cover: "range_cover ptr sz (APIType_capBits ty us) n" + and sz_limit: "sz \ maxUntypedSizeBits" + shows "\\s. pspace_no_overlap' ptr sz s \ valid_pspace' s + \ caps_no_overlap'' ptr sz s \ ptr \ 0 \ caps_overlap_reserved' {ptr..ptr + of_nat n * 2^(APIType_capBits ty us) - 1} s \ ksCurDomain s \ maxDomain\ + createNewCaps ty ptr n us dev \\r. 
valid_pspace'\" + unfolding createNewCaps_def Arch_createNewCaps_def + using valid_obj_makeObject_rules sz_limit + apply (clarsimp simp: AARCH64_H.toAPIType_def + split del: if_split cong: option.case_cong) + apply (cases ty, simp_all split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split) + apply (rule hoare_pre, wp, clarsimp) + apply (insert cover) + apply (wp createObjects_valid_pspace_untyped' [OF _ not_0 , where ty="Inr ty" and sz = sz] + mapM_x_threadSet_valid_pspace mapM_x_wp' + | simp add: makeObjectKO_def APIType_capBits_def + objBits_simps not_0 createObjects_def curDomain_def + | intro conjI impI + | simp add: power_add field_simps mult_2_right + | simp add: bit_simps)+ + done + +lemma doMachineOp_return_foo: + "doMachineOp (do x\a;return () od) = (do (doMachineOp a); return () od)" + apply (clarsimp simp: doMachineOp_def bind_def gets_def + get_def return_def select_f_def split_def simpler_modify_def) + apply (rule ext)+ + apply simp + apply (rule set_eqI) + apply clarsimp + done + +lemma createNewCaps_vms: + "\pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and + K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n) and + valid_machine_state'\ + createNewCaps ty ptr n us dev + \\archCaps. valid_machine_state'\" + apply (clarsimp simp: valid_machine_state'_def pointerInDeviceData_def + Arch_createNewCaps_def createNewCaps_def pointerInUserData_def + typ_at'_def createObjects_def doMachineOp_return_foo + split del: if_split) + apply (rule hoare_pre) + apply (wpc + | wp hoare_vcg_const_Ball_lift hoare_vcg_disj_lift + hoare_vcg_all_lift + doMachineOp_ko_wp_at' createObjects_orig_ko_wp_at2'[where sz = sz] + hoare_vcg_all_lift + dmo_lift' mapM_x_wp' threadSet_ko_wp_at2' + | clarsimp simp: createObjects_def Arch_createNewCaps_def curDomain_def Let_def + split del: if_split + | assumption)+ + apply (case_tac ty) + apply (auto simp: APIType_capBits_def objBits_simps toAPIType_def object_type.splits + field_simps mult_2_right) + done + +lemma createObjects_pspace_domain_valid': + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 + \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ kernel_data_refs = {} + \ pspace_domain_valid s\ + createObjects' ptr n val gbits + \\_. pspace_domain_valid\" + apply (simp add: createObjects'_def split_def unless_def) + apply (rule hoare_pre) + apply (wp | wpc | simp only: alignError_def haskell_assert_def)+ + apply (clarsimp simp: new_cap_addrs_fold' unat_1_0 unat_gt_0 + range_cover_not_zero_shift + caps_overlap_reserved'_def) + apply (simp add: pspace_domain_valid_def foldr_upd_app_if + fun_upd_def[symmetric]) + apply (subgoal_tac " \x \ set (new_cap_addrs (unat (of_nat n << gbits)) ptr val). + mask_range x (objBitsKO val) \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1}") + apply blast + + apply (rule ballI) + apply (rule new_range_subset) + apply (erule range_cover_rel, simp+) + apply (simp add: range_cover.unat_of_nat_n_shift field_simps) + done + +lemma createObjects_pspace_domain_valid: + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 + \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ kernel_data_refs = {} + \ pspace_domain_valid s\ + createObjects ptr n val gbits + \\_. pspace_domain_valid\" + apply (simp add: createObjects_def) + apply (wp createObjects_pspace_domain_valid'[where sz=sz]) + apply (simp add: objBits_def) + done + +lemma createNewCaps_pspace_domain_valid[wp]: + "\pspace_domain_valid and K ({ptr .. 
(ptr && ~~ mask sz) + 2 ^ sz - 1} + \ kernel_data_refs = {} + \ range_cover ptr sz (APIType_capBits ty us) n \ 0 < n)\ + createNewCaps ty ptr n us dev + \\rv. pspace_domain_valid\" + apply (simp add: createNewCaps_def) + apply (rule hoare_pre) + apply (wp createObjects_pspace_domain_valid[where sz=sz] + mapM_x_wp' + | wpc | simp add: Arch_createNewCaps_def curDomain_def Let_def + split del: if_split)+ + apply (simp add: AARCH64_H.toAPIType_def + split: object_type.splits) + apply (auto simp: objBits_simps APIType_capBits_def field_simps mult_2_right) + done + +(* FIXME: move *) +lemma ct_idle_or_in_cur_domain'_lift_futz: + assumes a: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes b: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + assumes c: "\P. \\s. P (ksIdleThread s)\ f \\_ s. P (ksIdleThread s)\" + assumes d: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes e: "\d t. \\s. obj_at' (\tcb. tcbState tcb \ Inactive \ d = tcbDomain tcb) t s \ Q s\ + f + \\_. obj_at' (\tcb. tcbState tcb \ Inactive \ d = tcbDomain tcb) t\" + shows "\ct_idle_or_in_cur_domain' and ct_active' and Q\ f \\_. ct_idle_or_in_cur_domain'\" +proof - + from e have e': + "\d t. \\s. obj_at' (\tcb. tcbState tcb \ Inactive \ d = tcbDomain tcb) t s \ Q s\ + f + \\_. obj_at' (\tcb. d = tcbDomain tcb) t\" + apply (rule hoare_strengthen_post) + apply (auto simp: obj_at'_def) + done + show ?thesis + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply (wps a b c d) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) + apply (auto simp: obj_at'_def ct_in_state'_def st_tcb_at'_def) + done +qed + +lemma createNewCaps_ct_idle_or_in_cur_domain': + "\ct_idle_or_in_cur_domain' and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and ct_active' and K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n) \ + createNewCaps ty ptr n us dev + \\rv. ct_idle_or_in_cur_domain'\" + apply (wp ct_idle_or_in_cur_domain'_lift_futz createNewCaps_obj_at'[where sz=sz] | simp)+ + done + +lemma sch_act_wf_lift_asm_futz: + assumes tcb: "\P t. \st_tcb_at' P t and Q \ f \\rv. st_tcb_at' P t\" + assumes tcbDomain: "\P t. \obj_at' (\tcb. runnable' (tcbState tcb) \ P (tcbDomain tcb)) t and Q\ f \\rv. obj_at' (\tcb. runnable' (tcbState tcb) \ P (tcbDomain tcb)) t\" + assumes kCT: "\P. \\s. P (ksCurThread s)\ f \\_ s. P (ksCurThread s)\" + assumes kCD: "\P. \\s. P (ksCurDomain s)\ f \\_ s. P (ksCurDomain s)\" + assumes ksA: "\P. \\s. P (ksSchedulerAction s)\ f \\_ s. P (ksSchedulerAction s)\" + shows + "\\s. sch_act_wf (ksSchedulerAction s) s \ Q s\ + f + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (clarsimp simp: valid_def) + apply (rule use_valid [OF _ ksA], assumption) + apply (frule use_valid [OF _ kCT[of "(=) (ksCurThread s)" for s] refl]) + apply (frule use_valid [OF _ kCD[of "(=) (ksCurDomain s)" for s] refl]) + apply (case_tac "ksSchedulerAction s") + apply (simp add: ct_in_state'_def) + apply (drule use_valid [OF _ tcb]) + apply simp + apply simp + apply simp + apply (clarsimp simp: tcb_in_cur_domain'_def) + apply (frule use_valid [OF _ tcb], fastforce) + apply simp + apply (rename_tac word) + apply (subgoal_tac "(obj_at' (\tcb. runnable' (tcbState tcb) \ ksCurDomain b = tcbDomain tcb) word and Q) s") + apply (drule use_valid [OF _ tcbDomain], fastforce) + apply (auto simp: st_tcb_at'_def o_def obj_at'_def ko_wp_at'_def) + done + +lemma createNewCaps_sch_act_wf: + "\(\s. 
sch_act_wf (ksSchedulerAction s) s) and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n)\ + createNewCaps ty ptr n us dev + \\_ s. sch_act_wf (ksSchedulerAction s) s\" + apply (wp sch_act_wf_lift_asm_futz + createNewCaps_pred_tcb_at'[where sz=sz] + createNewCaps_obj_at'[where sz=sz] + | simp)+ + done + +lemma createObjects'_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ createObjects' ptr numObjects val gSize \\_ s. P (ksDomSchedule s)\" + apply (simp add: createObjects'_def unless_def alignError_def) + apply (wp | wpc)+ + apply simp + done + +lemma createObjects'_ksDomScheduleIdx[wp]: + "\\s. P (ksDomScheduleIdx s)\ createObjects' ptr numObjects val gSize \\_ s. P (ksDomScheduleIdx s)\" + apply (simp add: createObjects'_def unless_def alignError_def) + apply (wp | wpc)+ + apply simp + done + +crunch ksDomSchedule[wp]: createNewCaps "\s. P (ksDomSchedule s)" + (wp: mapM_x_wp' simp: crunch_simps) + +crunch ksDomScheduleIdx[wp]: createNewCaps "\s. P (ksDomScheduleIdx s)" + (wp: mapM_x_wp' simp: crunch_simps) + +lemma createObjects_null_filter': + "\\s. P (null_filter' (ctes_of s)) \ makeObjectKO dev ty = Some val \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits + \\addrs a. P (null_filter' (ctes_of a))\" + apply (clarsimp simp: createObjects'_def split_def) + apply (wp unless_wp|wpc + | clarsimp simp: alignError_def split del: if_split simp del:fun_upd_apply)+ + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply fastforce+ + apply (subst new_cap_addrs_fold') + apply (simp add:unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply simp + apply assumption + apply simp + apply (subst data_map_insert_def[symmetric])+ + apply (frule(2) retype_aligned_distinct'[where ko = val]) + apply (erule range_cover_rel) + apply simp+ + apply (frule(2) retype_aligned_distinct'(2)[where ko = val]) + apply (erule range_cover_rel) + apply simp+ + apply (frule null_filter_ctes_retype + [where addrs = "(new_cap_addrs (unat (((of_nat n)::machine_word) << gbits)) ptr val)"]) + apply assumption+ + apply (clarsimp simp:field_simps foldr_upd_app_if[folded data_map_insert_def] shiftl_t2n range_cover.unat_of_nat_shift)+ + apply (rule new_cap_addrs_aligned[THEN bspec]) + apply (erule range_cover.aligned[OF range_cover_rel]) + apply simp+ + apply (clarsimp simp:shiftl_t2n field_simps range_cover.unat_of_nat_shift) + apply (drule subsetD[OF new_cap_addrs_subset,rotated]) + apply (erule range_cover_rel) + apply simp + apply simp + apply (rule ccontr) + apply clarify + apply (frule(1) pspace_no_overlapD') + apply (erule_tac B = "{x..x+2^objBitsKO y - 1}" in in_empty_interE[rotated]) + apply (drule(1) pspace_alignedD') + apply (clarsimp) + apply (erule is_aligned_no_overflow) + apply (simp del:atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff add:Int_ac ptr_add_def p_assoc_help) + apply (simp add:field_simps foldr_upd_app_if[folded data_map_insert_def] shiftl_t2n) + apply auto + done + +lemma createNewCaps_null_filter': + "\(\s. P (null_filter' (ctes_of s))) + and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0) \ + createNewCaps ty ptr n us dev + \\_ s. 
P (null_filter' (ctes_of s))\" + apply (rule hoare_gen_asm) + apply (simp add: createNewCaps_def toAPIType_def + Arch_createNewCaps_def + split del: if_split cong: option.case_cong) + apply (cases ty, simp_all split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split) + apply (rule hoare_pre, wp,simp) + apply (simp add: createObjects_def makeObjectKO_def APIType_capBits_def objBits_def + archObjSize_def curDomain_def objBits_if_dev bit_simps + split del: if_split + | wp createObjects_null_filter'[where ty = "Inr ty" and sz = sz and dev=dev] + threadSet_ctes_of mapM_x_wp' + | simp add: objBits_simps + | fastforce)+ + done + +crunch gsUntypedZeroRanges[wp]: createNewCaps "\s. P (gsUntypedZeroRanges s)" + (wp: mapM_x_wp' simp: crunch_simps) + +lemma untyped_ranges_zero_inv_null_filter: + "untyped_ranges_zero_inv (option_map cteCap o null_filter' ctes) + = untyped_ranges_zero_inv (option_map cteCap o ctes)" + apply (simp add: untyped_ranges_zero_inv_def fun_eq_iff null_filter'_def) + apply clarsimp + apply (rule_tac f="\caps. x = ran caps" for caps in arg_cong) + apply (clarsimp simp: fun_eq_iff map_comp_def untypedZeroRange_def) + done + +lemma untyped_ranges_zero_inv_null_filter_cteCaps_of: + "untyped_ranges_zero_inv (cteCaps_of s) + = untyped_ranges_zero_inv (option_map cteCap o null_filter' (ctes_of s))" + by (simp add: untyped_ranges_zero_inv_null_filter cteCaps_of_def) + +lemma createNewCaps_urz: + "\untyped_ranges_zero' + and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0) \ + createNewCaps ty ptr n us dev + \\archCaps. untyped_ranges_zero'\" + apply (simp add: untyped_ranges_zero_inv_null_filter_cteCaps_of) + apply (rule hoare_pre) + apply (rule untyped_ranges_zero_lift) + apply (wp createNewCaps_null_filter')+ + apply (auto simp: o_def) + done + +lemma createNewCaps_invs': + "\(\s. invs' s \ ct_active' s \ pspace_no_overlap' ptr sz s + \ caps_no_overlap'' ptr sz s \ ptr \ 0 + \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ kernel_data_refs = {} + \ caps_overlap_reserved' {ptr..ptr + of_nat n * 2^(APIType_capBits ty us) - 1} s + \ (ty = APIObjectType ArchTypes_H.CapTableObject \ us > 0) + \ gsMaxObjectSize s > 0) + and K (range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 + \ sz \ maxUntypedSizeBits)\ + createNewCaps ty ptr n us dev + \\rv. invs'\" + (is "\?P and K ?Q\ ?f \\rv. invs'\") +proof (rule hoare_gen_asm, elim conjE) + assume cover: "range_cover ptr sz (APIType_capBits ty us) n" + and not_0: "n \ 0" + and sz_limit: "sz \ maxUntypedSizeBits" + have cnc_ct_not_inQ: + "\ct_not_inQ and valid_pspace' and pspace_no_overlap' ptr sz\ + createNewCaps ty ptr n us dev \\_. ct_not_inQ\" + unfolding ct_not_inQ_def + apply (rule_tac Q="\s. ksSchedulerAction s = ResumeCurrentThread + \ (obj_at' (Not \ tcbQueued) (ksCurThread s) s + \ valid_pspace' s \ pspace_no_overlap' ptr sz s)" + in hoare_pre_imp, clarsimp) + apply (rule hoare_convert_imp [OF createNewCaps_nosch]) + apply (rule hoare_weaken_pre) + apply (wps createNewCaps_ct) + apply (wp createNewCaps_obj_at') + using cover not_0 + apply (fastforce simp: valid_pspace'_def) + done + show "\?P\ + createNewCaps ty ptr n us dev + \\rv. 
invs'\" + apply (simp add: invs'_def valid_state'_def + pointerInUserData_def typ_at'_def) + apply (rule hoare_pre) + apply (wp createNewCaps_valid_pspace [OF not_0 cover sz_limit] + createNewCaps_state_refs_of' [OF cover not_0 ] + createNewCaps_state_hyp_refs_of' [OF cover not_0 ] + createNewCaps_iflive' [OF cover not_0 ] + irqs_masked_lift + createNewCaps_ifunsafe' + createNewCaps_cur [OF cover not_0] + createNewCaps_global_refs' + createNewCaps_valid_arch_state + valid_irq_node_lift_asm [unfolded pred_conj_def, OF _ createNewCaps_obj_at'] + createNewCaps_irq_handlers' createNewCaps_vms + createNewCaps_valid_queues + createNewCaps_valid_queues' + createNewCaps_pred_tcb_at' cnc_ct_not_inQ + createNewCaps_ct_idle_or_in_cur_domain' + createNewCaps_sch_act_wf + createNewCaps_urz[where sz=sz] + | simp)+ + using not_0 + apply (clarsimp simp: valid_pspace'_def) + using cover + apply (intro conjI) + apply simp_all + done +qed + +lemma createObjects_obj_ranges': + "\\s. (\x ko. ksPSpace s x = Some ko \ (obj_range' x ko) \ S = {}) \ + pspace_no_overlap' ptr sz s \ + pspace_aligned' s \ pspace_distinct' s \ + S \ {ptr..(ptr &&~~ mask sz) + 2^sz - 1} = {} \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ + createObjects' ptr n val gbits + \\r s. (\x ko. ksPSpace s x = Some ko \ (obj_range' x ko) \ S = {})\" + apply (simp add: createObjects'_def lookupAround2_pspace_no + alignError_def unless_def split_def del: fun_upd_apply) + apply (rule hoare_pre) + apply (wp|simp cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (subst new_cap_addrs_fold') + apply (simp add: unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply fastforce+ + apply (clarsimp simp: foldr_fun_upd_value) + apply (subgoal_tac "range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (erule(1) disjoint_subset[OF obj_range'_subset]) + apply (simp add: Int_commute) + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + +lemma createObjects_pred_tcb_at': + "\pred_tcb_at' proj P t and K (range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0) + and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz\ + createObjects ptr n val gbits \\rv. pred_tcb_at' proj P t\" + apply (simp add: pred_tcb_at'_def createObjects_def) + apply (wp createObjects_orig_obj_at') + apply auto + done + +lemma createObjects_ex_cte_cap_to [wp]: + "\\s. range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ pspace_aligned' s \ + pspace_distinct' s \ ex_cte_cap_to' p s \ pspace_no_overlap' ptr sz s\ + createObjects ptr n val gbits \\r. ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_def createObjects_def) + apply (rule hoare_lift_Pf2 [where f="irq_node'"]) + apply (wp hoare_vcg_ex_lift createObjects_orig_cte_wp_at'[where sz = sz]) + apply simp + apply wp + done + +lemma createObjects_orig_obj_at3: + "\\s. obj_at' P p s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + pspace_aligned' s \ + pspace_distinct' s \ pspace_no_overlap' ptr sz s\ + createObjects ptr n val gbits \\r. obj_at' P p\" + by (wp createObjects_orig_obj_at'[where sz = sz] | simp add: createObjects_def)+ + +lemma createObjects_sch: + "\(\s. sch_act_wf (ksSchedulerAction s) s) and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz + and K (range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0)\ + createObjects ptr n val gbits + \\rv s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (rule hoare_gen_asm) + apply (wp sch_act_wf_lift_asm createObjects_pred_tcb_at' createObjects_orig_obj_at3 | force)+ + done + +lemma createObjects_queues: + "\\s. valid_queues s \ pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ + createObjects ptr n val gbits + \\rv. valid_queues\" + apply (wp valid_queues_lift_asm [unfolded pred_conj_def, OF createObjects_orig_obj_at3] + createObjects_pred_tcb_at' [unfolded pred_conj_def]) + apply fastforce + apply wp+ + apply fastforce + done + +lemma createObjects_queues': + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows + "\\s. valid_queues' s \ pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0\ + createObjects ptr n val gbits + \\rv. valid_queues'\" + apply (simp add: createObjects_def) + apply (wp valid_queues_lift_asm') + apply (wp createObjects_orig_obj_at2') + apply clarsimp + apply assumption + apply wp + using no_tcb + apply fastforce + done + +lemma createObjects_no_cte_ifunsafe': + assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows + "\\s. valid_pspace' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + if_unsafe_then_cap' s\ + createObjects ptr n val gbits + \\rv s. if_unsafe_then_cap' s\" + apply (simp only: if_unsafe_then_cap'_def ex_cte_cap_to'_def + imp_conv_disj) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF createObjects_ksInterrupt]) + apply (simp add: createObjects_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_imp_lift + createObjects_orig_cte_wp_at2' hoare_vcg_ex_lift) + apply (simp add: valid_pspace'_def disj_imp) + using no_cte no_tcb + apply fastforce + done + +lemma createObjects_no_cte_valid_global: + assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + valid_global_refs' s\ + createObjects ptr n val gbits + \\rv s. valid_global_refs' s\" + apply (simp add: valid_global_refs'_def valid_cap_sizes'_def valid_refs'_def) + apply (rule_tac Q="\rv s. \ptr. \ cte_wp_at' (\cte. (kernel_data_refs \ capRange (cteCap cte) \ {} + \ 2 ^ capBits (cteCap cte) > gsMaxObjectSize s)) ptr s \ global_refs' s \ kernel_data_refs" + in hoare_post_imp) + apply (auto simp: cte_wp_at_ctes_of linorder_not_less elim!: ranE)[1] + apply (rule hoare_pre) + apply (simp add: global_refs'_def) + apply (rule hoare_use_eq [where f="\s. armKSGlobalUserVSpace (ksArchState s)", + OF createObjects_global_ksArch]) + apply (rule hoare_use_eq [where f=ksIdleThread, OF createObjects_it]) + apply (rule hoare_use_eq [where f=irq_node', OF createObjects_ksInterrupt]) + apply (rule hoare_use_eq [where f=gsMaxObjectSize], wp) + apply (simp add: createObjects_def) + apply (wp hoare_vcg_all_lift createObjects_orig_cte_wp_at2') + using no_cte no_tcb + apply (simp add: split_def cte_wp_at_ctes_of split: option.splits) + apply (clarsimp simp: global_refs'_def) + apply (auto simp: ball_ran_eq linorder_not_less[symmetric]) + done + +lemma createObjects'_typ_at: + "\\s. 
n \ 0 \ + range_cover ptr sz (objBitsKO val + gbits) n \ + typ_at' T p s \ + pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s\ + createObjects' ptr n val gbits \\r s. typ_at' T p s\" + apply (rule hoare_grab_asm)+ + apply (simp add: createObjects'_def lookupAround2_pspace_no + alignError_def unless_def split_def typ_at'_def) + apply (subst new_cap_addrs_fold') + apply (simp add: unat_1_0 unat_gt_0) + apply (rule range_cover_not_zero_shift) + apply simp+ + apply (wp|wpc|simp cong: if_cong del: data_map_insert_def fun_upd_apply)+ + apply (subst data_map_insert_def[symmetric]) + apply clarsimp + apply (subgoal_tac "range_cover ptr sz (objBitsKO val) (unat (of_nat n << gbits))") + apply (subst data_map_insert_def[symmetric])+ + apply (subst retype_ko_wp_at',simp+)+ + apply clarsimp + apply (frule(1) subsetD [OF new_cap_addrs_subset]) + apply (drule(1) pspace_no_overlap_disjoint') + apply (simp add: lookupAround2_None1) + apply (intro conjI impI allI) + apply (drule_tac x = p in spec) + apply (erule impE) + apply (erule(1) range_cover_new_cap_addrs_compare[rotated]) + apply simp + apply (fastforce simp: ko_wp_at'_def) + apply (drule_tac x = p in orthD1) + apply (clarsimp simp: ptr_add_def p_assoc_help) + apply (simp add: dom_def) + apply (fastforce simp: ko_wp_at'_def) + apply (rule range_cover_rel) + apply (simp)+ + apply (subst mult.commute) + apply (erule range_cover.unat_of_nat_n_shift) + apply simp + done + +lemma createObjects_valid_arch: + "\\s. valid_arch_state' s \ pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + createObjects ptr n val gbits + \\rv s. valid_arch_state' s\" + unfolding valid_arch_state'_def valid_asid_table'_def vspace_table_at'_defs createObjects_def + apply (simp add: typ_at_to_obj_at_arches option_case_all_conv) + apply (wpsimp wp: hoare_vcg_const_Ball_lift createNewCaps_obj_at' createObjects_orig_ko_wp_at2' + createNewCaps_ko_wp_at' hoare_vcg_all_lift + hoare_vcg_imp_lift') + apply (fastforce simp: pred_conj_def valid_pspace'_def o_def is_vcpu'_def) + done + +lemma createObjects_irq_state: + "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + valid_irq_node' (irq_node' s) s\ + createObjects ptr n val gbits + \\rv s. valid_irq_node' (irq_node' s) s\" + apply (wp valid_irq_node_lift_asm [unfolded pred_conj_def, OF _ createObjects_orig_obj_at3]) + apply auto + done + +lemma createObjects_no_cte_irq_handlers: + assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows + "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ + range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + valid_irq_handlers' s\ + createObjects ptr n val gbits + \\rv s. valid_irq_handlers' s\" + apply (simp add: valid_irq_handlers_cte_wp_at_form' createObjects_def irq_issued'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift + createObjects_orig_cte_wp_at2') + using no_cte no_tcb by (auto simp: split_def split: option.splits) + +lemma createObjects_cur': + "\\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ range_cover ptr sz (objBitsKO val + gbits) n \ n \ 0 \ + cur_tcb' s\ + createObjects ptr n val gbits + \\rv s. cur_tcb' s\" + apply (rule hoare_post_imp [where Q="\rv s. \t. 
ksCurThread s = t \ tcb_at' t s"]) + apply (simp add: cur_tcb'_def) + apply (wp hoare_vcg_ex_lift createObjects_orig_obj_at3) + apply (clarsimp simp: cur_tcb'_def) + apply auto + done + +lemma createObjects_vms'[wp]: + "\(\_. (range_cover ptr sz (objBitsKO val + gbits) n \ 0 < n)) and pspace_aligned' and + pspace_distinct' and pspace_no_overlap' ptr sz and valid_machine_state'\ + createObjects ptr n val gbits + \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + typ_at'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift createObjects_orig_ko_wp_at2' + | simp add: createObjects_def)+ + apply auto + done + +lemma createObjects_ct_idle_or_in_cur_domain': + "\ct_active' and valid_pspace' and pspace_no_overlap' ptr sz + and ct_idle_or_in_cur_domain' + and K (range_cover ptr sz (objBitsKO val + gSize) n \ n \ 0)\ + createObjects ptr n val gSize + \\_. ct_idle_or_in_cur_domain'\" + apply (rule hoare_gen_asm) + apply (wp ct_idle_or_in_cur_domain'_lift_futz createObjects_obj_at_other[where sz=sz]) + apply simp_all + done + +lemma untyped_zero_ranges_cte_def: + "untyped_ranges_zero_inv (cteCaps_of s) rs + = (\r. (\p. cte_wp_at' (\cte. untypedZeroRange (cteCap cte) = Some r) p s) + = (r \ rs))" + apply (clarsimp simp: untyped_ranges_zero_inv_def cte_wp_at_ctes_of + cteCaps_of_def set_eq_iff ran_def map_comp_Some_iff) + apply (safe, metis+) + done + +lemma createObjects_untyped_ranges_zero': + assumes moKO: "makeObjectKO dev ty = Some val" + shows + "\ct_active' and valid_pspace' and pspace_no_overlap' ptr sz + and untyped_ranges_zero' + and K (range_cover ptr sz (objBitsKO val + gSize) n \ n \ 0)\ + createObjects ptr n val gSize + \\_. untyped_ranges_zero'\" + apply (rule hoare_gen_asm) + apply (simp add: untyped_zero_ranges_cte_def iff_conv_conj_imp + createObjects_def) + apply (simp only: imp_conv_disj not_all not_ex) + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_conj_lift + hoare_vcg_disj_lift createObjects_orig_cte_wp_at2'[where sz=sz]) + apply (clarsimp simp: valid_pspace'_def) + apply (cut_tac moKO[symmetric]) + apply (simp add: makeObjectKO_def projectKO_opt_tcb projectKO_opt_cte + split: sum.split_asm kernel_object.split_asm + arch_kernel_object.split_asm + object_type.split_asm apiobject_type.split_asm) + apply (simp add: makeObject_tcb tcb_cte_cases_def cteSizeBits_def makeObject_cte + untypedZeroRange_def) + apply (simp add: makeObject_cte untypedZeroRange_def) + done + +lemma createObjects_no_cte_invs: + assumes moKO: "makeObjectKO dev ty = Some val" + assumes no_cte: "\c. projectKO_opt val \ Some (c::cte)" + assumes no_tcb: "\t. projectKO_opt val \ Some (t::tcb)" + shows + "\\s. range_cover ptr sz ((objBitsKO val) + gbits) n \ n \ 0 + \ sz \ maxUntypedSizeBits + \ invs' s \ ct_active' s + \ pspace_no_overlap' ptr sz s \ ptr \ 0 + \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1} \ kernel_data_refs = {} + \ caps_overlap_reserved' {ptr..ptr + of_nat (n * 2 ^ gbits * 2 ^ objBitsKO val) - 1} s + \ caps_no_overlap'' ptr sz s \ refs_of' val = {} \ hyp_refs_of' val = {} \ \ live' val\ + createObjects ptr n val gbits + \\rv. invs'\" +proof - + have co_ct_not_inQ: + "\range_cover ptr sz ((objBitsKO val) + gbits) n; n \ 0\ \ + \\s. ct_not_inQ s \ pspace_no_overlap' ptr sz s \ valid_pspace' s\ + createObjects ptr n val gbits \\_. ct_not_inQ\" + (is "\ _; _ \ \ \\s. ct_not_inQ s \ ?REST s\ _ \_\") + apply (simp add: ct_not_inQ_def) + apply (rule_tac Q="\s. 
(ksSchedulerAction s = ResumeCurrentThread) \ + (obj_at' (Not \ tcbQueued) (ksCurThread s) s \ ?REST s)" + in hoare_pre_imp, clarsimp) + apply (rule hoare_convert_imp [OF createObjects_nosch]) + apply (rule hoare_weaken_pre) + apply (wps createObjects_ct) + apply (wp createObjects_obj_at_other) + apply (simp)+ + done + show ?thesis + apply (rule hoare_grab_asm)+ + apply (clarsimp simp: invs'_def valid_state'_def) + apply wp + apply (rule hoare_pre) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def,wp createObjects_valid_pspace_untyped') + apply (wp assms | simp add: objBits_def)+ + apply (wp createObjects_sch createObjects_queues) + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_state_refs_of'') + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_state_hyp_refs_of'') + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_iflive') + apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift + createObjects_idle' createObjects_no_cte_valid_global + createObjects_valid_arch createObjects_irq_state + createObjects_no_cte_irq_handlers createObjects_cur' + createObjects_queues' [OF no_tcb] + assms | simp add: objBits_def )+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: createObjects_def) + apply (wp createObjects_idle') + apply (wp createObjects_no_cte_ifunsafe' irqs_masked_lift + createObjects_idle' createObjects_no_cte_valid_global + createObjects_valid_arch createObjects_irq_state + createObjects_no_cte_irq_handlers createObjects_cur' + createObjects_queues' [OF no_tcb] assms + createObjects_pspace_domain_valid co_ct_not_inQ + createObjects_ct_idle_or_in_cur_domain' + createObjects_untyped_ranges_zero'[OF moKO] + | simp)+ + apply clarsimp + using no_cte no_tcb + apply ((intro conjI; assumption?); simp add: valid_pspace'_def objBits_def) + apply (fastforce simp add: split_def split: option.splits) + apply (clarsimp simp: invs'_def no_tcb valid_state'_def no_cte split: option.splits) + done +qed + +lemma corres_retype_update_gsI: + assumes not_zero: "n \ 0" + and aligned: "is_aligned ptr (objBitsKO ko + gbits)" + and obj_bits_api: "obj_bits_api (APIType_map2 ty) us = + objBitsKO ko + gbits" + and check: "sz < obj_bits_api (APIType_map2 ty) us \ + sz < objBitsKO ko + gbits" + and usv: "APIType_map2 ty = Structures_A.CapTableObject \ 0 < us" + and ko: "makeObjectKO dev ty = Some ko" + and orr: "obj_bits_api (APIType_map2 ty) us \ sz \ + obj_relation_retype + (default_object (APIType_map2 ty) dev us) ko" + and cover: "range_cover ptr sz (obj_bits_api (APIType_map2 ty) us) n" + and f: "f = update_gs (APIType_map2 ty) us" + shows "corres (\rv rv'. rv' = g rv) + (\s. valid_pspace s \ pspace_no_overlap_range_cover ptr sz s + \ valid_mdb s \ valid_etcbs s \ valid_list s) + (\s. pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s) + (retype_region2 ptr n us (APIType_map2 ty) dev) + (do addrs \ createObjects ptr n ko gbits; + _ \ modify (f (set addrs)); + return (g addrs) + od)" + using corres_retype' [OF not_zero aligned obj_bits_api check usv ko orr cover] + by (simp add: f) + +lemma gcd_corres: "corres (=) \ \ (gets cur_domain) curDomain" + by (simp add: curDomain_def state_relation_def) + +lemma retype_region2_extra_ext_mapM_x_corres: + shows "corres dc + (valid_etcbs and (\s. \addr\set addrs. tcb_at addr s)) + (\s. \addr\set addrs. 
tcb_at' addr s) + (retype_region2_extra_ext addrs Structures_A.apiobject_type.TCBObject) + (mapM_x (\addr. do cdom \ curDomain; + threadSet (tcbDomain_update (\_. cdom)) addr + od) + addrs)" + apply (rule corres_guard_imp) + apply (simp add: retype_region2_extra_ext_def curDomain_mapM_x_futz[symmetric] when_def) + apply (rule corres_split_eqr[OF gcd_corres]) + apply (rule_tac S="Id \ {(x, y). x \ set addrs}" + and P="\s. (\t \ set addrs. tcb_at t s) \ valid_etcbs s" + and P'="\s. \t \ set addrs. tcb_at' t s" + in corres_mapM_x) + apply simp + apply (rule corres_guard_imp) + apply (rule ethread_set_corres, simp_all add: etcb_relation_def non_exst_same_def)[1] + apply (case_tac tcb') + apply simp + apply fastforce + apply fastforce + apply (wp hoare_vcg_ball_lift | simp)+ + apply auto[1] + apply (wp | simp add: curDomain_def)+ + done + +lemma retype_region2_extra_ext_trivial: + "ty \ APIType_map2 (Inr (APIObjectType apiobject_type.TCBObject)) + \ retype_region2_extra_ext ptrs ty = return ()" +by (simp add: retype_region2_extra_ext_def when_def APIType_map2_def) + +lemma retype_region2_retype_region_PageTableObject: + "retype_region ptr n us (APIType_map2 (Inr PageTableObject)) dev = + (retype_region2 ptr n us (APIType_map2 (Inr PageTableObject)) dev :: obj_ref list det_ext_monad)" + by (simp add: retype_region2_ext_retype_region retype_region2_extra_ext_def when_def + APIType_map2_def) + +lemma retype_region2_valid_etcbs[wp]:"\valid_etcbs\ retype_region2 a b c d dev \\_. valid_etcbs\" + apply (simp add: retype_region2_def) + apply (simp add: retype_region2_ext_def bind_assoc) + apply wp + apply (clarsimp simp del: fun_upd_apply) + apply (blast intro: valid_etcb_fold_update) + done + +lemma retype_region2_obj_at: + assumes tytcb: "ty = Structures_A.apiobject_type.TCBObject" + shows "\\\ retype_region2 ptr n us ty dev \\rv s. \x \ set rv. tcb_at x s\" + using tytcb unfolding retype_region2_def + apply (simp only: return_bind bind_return foldr_upd_app_if fun_app_def K_bind_def) + apply (wp dxo_wp_weak | simp)+ + apply (auto simp: obj_at_def default_object_def is_tcb_def) + done + +lemma createObjects_tcb_at': + "\range_cover ptr sz (objBitsKO (injectKOS (makeObject::tcb))) n; n \ 0\ \ + \\s. pspace_no_overlap' ptr sz s \ pspace_aligned' s \ pspace_distinct' s\ + createObjects ptr n (KOTCB makeObject) 0 \\ptrs s. \addr\set ptrs. tcb_at' addr s\" + apply (rule hoare_strengthen_post[OF createObjects_ko_at_strg[where val = "(makeObject :: tcb)"]]) + apply (auto simp: obj_at'_def project_inject objBitsKO_def objBits_def makeObject_tcb) + done + +lemma init_arch_objects_APIType_map2_noop: + "init_arch_objects (APIType_map2 tp) ptr n m addrs = return ()" + apply (simp add: init_arch_objects_def APIType_map2_def) + done + +lemma data_page_relation_retype: + "obj_relation_retype (ArchObj (DataPage False pgsz)) KOUserData" + "obj_relation_retype (ArchObj (DataPage True pgsz)) KOUserDataDevice" + apply (simp_all add: obj_relation_retype_def shiftl_t2n mult_ac + objBits_simps pbfs_atleast_pageBits) + apply (clarsimp simp: image_def)+ + done + +lemma corres_retype_region_createNewCaps: + "corres ((\r r'. length r = length r' \ list_all2 cap_relation r r') + \ map (\ref. default_cap (APIType_map2 (Inr ty)) ref us dev)) + (\s. valid_pspace s \ valid_mdb s \ valid_etcbs s \ valid_list s \ valid_arch_state s + \ caps_no_overlap y sz s \ pspace_no_overlap_range_cover y sz s + \ caps_overlap_reserved {y..y + of_nat n * 2 ^ (obj_bits_api (APIType_map2 (Inr ty)) us) - 1} s + \ (\slot. cte_wp_at (\c. 
up_aligned_area y sz \ cap_range c \ cap_is_device c = dev) slot s) + \ (APIType_map2 (Inr ty) = Structures_A.CapTableObject \ 0 < us)) + (\s. pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' y sz s + \ valid_pspace' s \ valid_arch_state' s + \ range_cover y sz (obj_bits_api (APIType_map2 (Inr ty)) us) n \ n\ 0) + (do x \ retype_region y n us (APIType_map2 (Inr ty)) dev :: obj_ref list det_ext_monad; + init_arch_objects (APIType_map2 (Inr ty)) y n us x; + return x od) + (createNewCaps ty y n us dev)" + apply (rule_tac F="range_cover y sz (obj_bits_api (APIType_map2 (Inr ty)) us) n + \ n \ 0 \ (APIType_map2 (Inr ty) = Structures_A.CapTableObject \ 0 < us)" + in corres_req, simp) + apply (clarsimp simp add: createNewCaps_def toAPIType_def split del: if_split cong: if_cong) + apply (subst init_arch_objects_APIType_map2) + apply (cases ty, simp_all add: Arch_createNewCaps_def split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split) + \ \Untyped\ + apply (simp add: retype_region_def obj_bits_api_def APIType_map2_def + split del: if_split cong: if_cong) + apply (subst upto_enum_red') + apply (drule range_cover_not_zero[rotated]) + apply simp + apply unat_arith + apply (clarsimp simp: list_all2_same enum_word_def range_cover.unat_of_nat_n + list_all2_map1 list_all2_map2 ptr_add_def fromIntegral_def + toInteger_nat fromInteger_nat) + apply (subst unat_of_nat_minus_1) + apply (rule le_less_trans[OF range_cover.range_cover_n_le(2) power_strict_increasing]) + apply simp + apply (clarsimp simp: range_cover_def) + apply (arith+)[4] + \ \TCB, EP, NTFN\ + apply (simp_all add: retype_region2_ext_retype_region + bind_cong[OF curDomain_mapM_x_futz refl, unfolded bind_assoc] + split del: if_split)[8] + apply (rule corres_guard_imp) + apply (rule corres_split_eqr) + apply (rule corres_retype[where 'a = tcb], + simp_all add: obj_bits_api_def objBits_simps' pageBits_def + APIType_map2_def makeObjectKO_def + other_objs_default_relation)[1] + apply (fastforce simp: range_cover_def) + apply (rule corres_split_nor) + apply (simp add: APIType_map2_def) + apply (rule retype_region2_extra_ext_mapM_x_corres) + apply (rule corres_trivial, simp) + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 + objBits_simps APIType_map2_def) + apply wp + apply wp + apply ((wp retype_region2_obj_at | simp add: APIType_map2_def)+)[1] + apply ((wp createObjects_tcb_at'[where sz=sz] + | simp add: APIType_map2_def objBits_simps' obj_bits_api_def)+)[1] + apply simp + apply simp + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: liftM_def[symmetric] split del: if_split) + apply (rule corres_rel_imp) + apply (rule corres_guard_imp) + apply (rule corres_retype[where 'a = endpoint], + simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def + makeObjectKO_def other_objs_default_relation)[1] + apply (fastforce simp: range_cover_def) + apply simp + apply simp + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 objBits_simps + APIType_map2_def) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: liftM_def[symmetric] split del: if_split) + apply (rule corres_rel_imp) + apply (rule corres_guard_imp) + apply (rule corres_retype[where 'a = notification], + simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def + makeObjectKO_def other_objs_default_relation)[1] + apply (fastforce simp: 
range_cover_def) + apply simp + apply simp + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 objBits_simps + APIType_map2_def) + \ \CapTable\ + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (subst bind_assoc_return_reverse[of "createObjects y n (KOCTE makeObject) us"]) + apply (subst liftM_def[of "map (\addr. capability.CNodeCap addr us 0 0)", symmetric]) + apply simp + apply (rule corres_rel_imp) + apply (rule corres_guard_imp) + apply (rule corres_retype_update_gsI, + simp_all add: obj_bits_api_def objBits_simps' pageBits_def APIType_map2_def + makeObjectKO_def slot_bits_def field_simps ext)[1] + apply (simp add: range_cover_def) + apply (rule captable_relation_retype,simp add: range_cover_def word_bits_def) + apply simp + apply simp + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 objBits_simps + allRights_def APIType_map2_def + split del: if_split) + apply (in_case \HugePageObject\) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def] split del: if_split) + apply (rule corres_rel_imp) + apply (simp add: init_arch_objects_APIType_map2_noop split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_retype_update_gsI; + clarsimp simp: obj_bits_api_def3 APIType_map2_def objBits_simps ext + default_object_def default_arch_object_def makeObjectKO_def + data_page_relation_retype bit_simps + elim!: range_cover.aligned; + assumption) + apply fastforce+ + apply (simp add: APIType_map2_def arch_default_cap_def vm_read_write_def vmrights_map_def + list_all2_map1 list_all2_map2 list_all2_same) + apply (in_case \VSpaceObject\) + apply (subst retype_region2_extra_ext_trivial, simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def]) + apply (rule corres_guard_imp) + apply (simp add: init_arch_objects_APIType_map2_noop) + apply (rule corres_rel_imp) + apply (rule corres_retype_update_gsI; + (simp add: APIType_map2_def objBits_simps makeObjectKO_def obj_bits_api_def + range_cover.aligned default_arch_object_def pt_bits_def)?) 
+ apply (rule vsroot_relation_retype) + apply (rule ext)+ + apply (rename_tac s, case_tac s, rename_tac arch machine, case_tac arch) + apply (fastforce simp: update_gs_def) + apply (clarsimp simp: list_all2_map1 list_all2_map2 list_all2_same + APIType_map2_def arch_default_cap_def) + apply fastforce+ + apply (in_case \SmallPageObject\) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def] split del: if_split) + apply (rule corres_rel_imp) + apply (simp add: init_arch_objects_APIType_map2_noop split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_retype_update_gsI; + clarsimp simp: obj_bits_api_def3 APIType_map2_def objBits_simps ext + default_object_def default_arch_object_def makeObjectKO_def + data_page_relation_retype + elim!: range_cover.aligned; + assumption) + apply fastforce+ + apply (simp add: APIType_map2_def arch_default_cap_def vm_read_write_def vmrights_map_def + list_all2_map1 list_all2_map2 list_all2_same) + apply (in_case \LargePageObject\) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def] split del: if_split) + apply (rule corres_rel_imp) + apply (simp add: init_arch_objects_APIType_map2_noop split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_retype_update_gsI; + clarsimp simp: obj_bits_api_def3 APIType_map2_def objBits_simps ext + default_object_def default_arch_object_def makeObjectKO_def + data_page_relation_retype + elim!: range_cover.aligned; + assumption) + apply fastforce+ + apply (simp add: APIType_map2_def arch_default_cap_def vm_read_write_def vmrights_map_def + list_all2_map1 list_all2_map2 list_all2_same) + apply (in_case \PageTableObject\) + apply (subst retype_region2_ext_retype_region) + apply (subst retype_region2_extra_ext_trivial, simp add: APIType_map2_def) + apply (simp_all add: corres_liftM2_simp[unfolded liftM_def]) + apply (rule corres_guard_imp) + apply (simp add: init_arch_objects_APIType_map2_noop) + apply (rule corres_rel_imp) + apply (rule corres_retype_update_gsI; + (simp add: APIType_map2_def objBits_simps makeObjectKO_def obj_bits_api_def + range_cover.aligned default_arch_object_def pt_bits_def)?) 
+ apply (rule pagetable_relation_retype) + apply (rule ext)+ + apply (rename_tac s, case_tac s, rename_tac arch machine, case_tac arch) + apply (fastforce simp: update_gs_def) + apply (clarsimp simp: list_all2_map1 list_all2_map2 list_all2_same + APIType_map2_def arch_default_cap_def) + apply fastforce+ + apply (in_case \VCPUObject\) + apply (subst retype_region2_ext_retype_region) + apply (subst retype_region2_extra_ext_trivial) + apply (simp add: APIType_map2_def) + apply (simp add: corres_liftM2_simp[unfolded liftM_def] split del: if_split) + apply (rule corres_rel_imp) + apply (simp add: init_arch_objects_APIType_map2_noop split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_retype[where 'a = vcpu], + simp_all add: obj_bits_api_def objBits_simps pageBits_def default_arch_object_def + APIType_map2_def makeObjectKO_def other_objs_default_relation)[1] + apply (fastforce simp: range_cover_def) + apply (simp add: no_gs_types_def) + apply (auto simp add: obj_relation_retype_def range_cover_def objBitsKO_def arch_kobj_size_def default_object_def + archObjSize_def pageBits_def obj_bits_def cte_level_bits_def default_arch_object_def + other_obj_relation_def vcpu_relation_def default_vcpu_def makeObject_vcpu + makeVCPUObject_def default_gic_vcpu_interface_def vgic_map_def)[1] + apply simp+ + apply (clarsimp simp: list_all2_same list_all2_map1 list_all2_map2 + objBits_simps APIType_map2_def arch_default_cap_def) + done + +end +end diff --git a/proof/refine/AARCH64/Schedule_R.thy b/proof/refine/AARCH64/Schedule_R.thy new file mode 100644 index 0000000000..ec6dffbdf6 --- /dev/null +++ b/proof/refine/AARCH64/Schedule_R.thy @@ -0,0 +1,2449 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Schedule_R +imports VSpace_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare hoare_weak_lift_imp[wp_split del] + +(* Levity: added (20090713 10:04:12) *) +declare sts_rel_idle [simp] + +lemma invs_no_cicd'_queues: + "invs_no_cicd' s \ valid_queues s" + unfolding invs_no_cicd'_def + by simp + +lemma corres_if2: + "\ G = G'; G \ corres r P P' a c; \ G' \ corres r Q Q' b d \ + \ corres r (if G then P else Q) (if G' then P' else Q') (if G then a else b) (if G' then c else d)" + by simp + +lemma findM_awesome': + assumes x: "\x xs. suffix (x # xs) xs' \ + corres (\a b. if b then (\a'. a = Some a' \ r a' (Some x)) else a = None) + P (P' (x # xs)) + ((f >>= (\x. return (Some x))) \ (return None)) (g x)" + assumes y: "corres r P (P' []) f (return None)" + assumes z: "\x xs. suffix (x # xs) xs' \ + \P' (x # xs)\ g x \\rv s. \ rv \ P' xs s\" + assumes p: "suffix xs xs'" + shows "corres r P (P' xs) f (findM g xs)" +proof - + have P: "f = do x \ (do x \ f; return (Some x) od) \ return None; if x \ None then return (the x) else f od" + apply (rule ext) + apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) + done + have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" + by (wp | simp)+ + show ?thesis using p + apply (induct xs) + apply (simp add: y del: dc_simp) + apply (simp only: findM.simps) + apply (subst P) + apply (rule corres_guard_imp) + apply (rule corres_split[OF x]) + apply assumption + apply (rule corres_if2) + apply (case_tac ra, clarsimp+)[1] + apply (rule corres_trivial, clarsimp) + apply (case_tac ra, simp_all)[1] + apply (erule(1) meta_mp [OF _ suffix_ConsD]) + apply (rule Q) + apply (rule hoare_post_imp [OF _ z]) + apply simp+ + done +qed + +lemmas findM_awesome = findM_awesome' [OF _ _ _ suffix_order.refl] + +(* Levity: added (20090721 10:56:29) *) +declare objBitsT_koTypeOf [simp] + +lemma vs_lookup_pages_vcpu_update: + "typ_at (AArch AVCPU) vcpuPtr s \ + vs_lookup_target level asid vref (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = + vs_lookup_target level asid vref s" + unfolding vs_lookup_target_def vs_lookup_slot_def vs_lookup_table_def + apply (prop_tac "asid_pools_of s vcpuPtr = None", clarsimp simp: opt_map_def obj_at_def) + apply (prop_tac "pts_of s vcpuPtr = None", clarsimp simp: opt_map_def obj_at_def) + apply (fastforce simp: obind_assoc intro!: obind_eqI) + done + +lemma valid_vs_lookup_vcpu_update: + "typ_at (AArch AVCPU) vcpuPtr s \ + valid_vs_lookup (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = valid_vs_lookup s" + by (clarsimp simp: valid_vs_lookup_def caps_of_state_VCPU_update vs_lookup_pages_vcpu_update) + +lemma set_vpcu_valid_vs_lookup[wp]: + "set_vcpu vcpuPtr vcpu \\s. P (valid_vs_lookup s)\" + by (wpsimp wp: set_vcpu_wp simp: valid_vs_lookup_vcpu_update) + +lemma set_vcpu_vmid_inv[wp]: + "set_vcpu vcpuPtr vcpu \\s. P (vmid_inv s)\" + unfolding vmid_inv_def + by (wp_pre, wps, wpsimp, simp) + +lemma vmid_inv_cur_vcpu[simp]: + "vmid_inv (s\arch_state := arch_state s\arm_current_vcpu := x\\) = vmid_inv s" + by (simp add: vmid_inv_def) + +lemma set_vcpu_valid_asid_table[wp]: + "set_vcpu ptr vcpu \valid_asid_table\" + apply (wpsimp wp: set_vcpu_wp) + apply (prop_tac "asid_pools_of s ptr = None") + apply (clarsimp simp: obj_at_def opt_map_def) + apply simp + done + +crunches vcpu_switch + for valid_vs_lookup[wp]: "\s. P (valid_vs_lookup s)" + and vmid_inv[wp]: vmid_inv + and valid_vmid_table[wp]: valid_vmid_table + and valid_asid_table[wp]: valid_asid_table + and global_pt[wp]: "\s. P (global_pt s)" + (simp: crunch_simps wp: crunch_wps) + +lemma vcpu_switch_valid_global_arch_objs[wp]: + "vcpu_switch v \valid_global_arch_objs\" + by (wp valid_global_arch_objs_lift) + +crunches set_vm_root + for pspace_distinct[wp]: pspace_distinct + (simp: crunch_simps) + +(* FIXME AARCH64: move to TcbAcc_R *) +lemma ko_tcb_cross: + "\ ko_at (TCB tcb) t s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ \tcb'. ko_at' tcb' t s' \ tcb_relation tcb tcb'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply (prop_tac "tcb_at t s", clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (drule (2) tcb_at_cross, fastforce simp: state_relation_def) + apply normalise_obj_at' + apply (clarsimp simp: state_relation_def pspace_relation_def obj_at_def) + apply (drule bspec, fastforce) + apply (clarsimp simp: other_obj_relation_def obj_at'_def) + done + +(* FIXME AARCH64: move *) +lemma ko_vcpu_cross: + "\ ko_at (ArchObj (VCPU vcpu)) p s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ \vcpu'. 
ko_at' vcpu' p s' \ vcpu_relation vcpu vcpu'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply (clarsimp simp: obj_at_def) + apply (clarsimp simp: state_relation_def pspace_relation_def obj_at_def) + apply (drule bspec, fastforce) + apply (clarsimp simp: other_obj_relation_def + split: kernel_object.splits arch_kernel_object.splits) + apply (prop_tac "ksPSpace s' p \ None") + apply (prop_tac "p \ pspace_dom (kheap s)") + apply (fastforce intro!: set_mp[OF pspace_dom_dom]) + apply fastforce + apply (fastforce simp: obj_at'_def objBits_simps dest: pspace_alignedD pspace_distinctD') + done + +(* FIXME AARCH64: move *) +lemma vcpu_at_cross: + "\ vcpu_at p s; pspace_aligned s; pspace_distinct s; (s, s') \ state_relation \ + \ vcpu_at' p s'" + apply (drule vcpu_at_ko, clarsimp) + apply (drule (3) ko_vcpu_cross) + apply (clarsimp simp: typ_at'_def obj_at'_def ko_wp_at'_def) + done + +lemma arch_switchToThread_corres: + "corres dc (valid_arch_state and valid_objs and pspace_aligned and pspace_distinct + and valid_vspace_objs and tcb_at t) + (no_0_obj') + (arch_switch_to_thread t) (Arch.switchToThread t)" + unfolding arch_switch_to_thread_def AARCH64_H.switchToThread_def + apply (corres corres: getObject_TCB_corres vcpuSwitch_corres + term_simp: tcb_relation_def arch_tcb_relation_def) + apply (wpsimp wp: vcpu_switch_pred_tcb_at getObject_tcb_wp simp: tcb_at_st_tcb_at)+ + apply (clarsimp simp: valid_arch_state_def st_tcb_at_def obj_at_def get_tcb_def) + apply (rule conjI) + apply clarsimp + apply (erule (1) valid_objsE) + apply (clarsimp simp: valid_obj_def valid_tcb_def valid_arch_tcb_def obj_at_def) + apply (clarsimp simp: cur_vcpu_def in_omonad) + apply normalise_obj_at' + apply (clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (frule (2) ko_tcb_cross[rotated], simp add: obj_at_def) + apply normalise_obj_at' + apply (rule conjI; clarsimp) + apply (rule vcpu_at_cross; assumption?) + apply (erule (1) valid_objsE) + apply (clarsimp simp: valid_obj_def valid_tcb_def valid_arch_tcb_def tcb_relation_def + arch_tcb_relation_def) + apply (rule vcpu_at_cross; assumption?) + apply (prop_tac "cur_vcpu s", clarsimp simp: valid_arch_state_def) + apply (clarsimp simp: state_relation_def arch_state_relation_def cur_vcpu_def in_omonad obj_at_def) + done + +lemma schedule_choose_new_thread_sched_act_rct[wp]: + "\\\ schedule_choose_new_thread \\rs s. scheduler_action s = resume_cur_thread\" + unfolding schedule_choose_new_thread_def + by wp + +lemma tcbSchedAppend_corres: + notes trans_state_update'[symmetric, simp del] + shows + "corres dc (is_etcb_at t and tcb_at t and pspace_aligned and pspace_distinct) + (Invariants_H.valid_queues and valid_queues') + (tcb_sched_action (tcb_sched_append) t) (tcbSchedAppend t)" + apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (simp only: tcbSchedAppend_def tcb_sched_action_def) + apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t"]) + defer + apply (wp threadGet_obj_at', simp, simp) + apply (rule no_fail_pre, wp, simp) + apply (case_tac queued) + apply (simp add: unless_def when_def) + apply (rule corres_no_failI) + apply wp+ + apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc + assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def + set_tcb_queue_def simpler_modify_def) + + apply (subgoal_tac "tcb_sched_append t (ready_queues a (tcb_domain y) (tcb_priority y)) + = (ready_queues a (tcb_domain y) (tcb_priority y))") + apply (simp add: state_relation_def ready_queues_relation_def) + apply (clarsimp simp: tcb_sched_append_def state_relation_def + valid_queues'_def ready_queues_relation_def + ekheap_relation_def etcb_relation_def + obj_at'_def inQ_def project_inject) + apply (drule_tac x=t in bspec,clarsimp) + apply clarsimp + apply (clarsimp simp: unless_def when_def cong: if_cong) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split[where r'="(=)"]) + apply (rule ethreadget_corres) + apply (simp add: etcb_relation_def) + apply (rule corres_split[where r'="(=)"]) + apply (rule ethreadget_corres) + apply (simp add: etcb_relation_def) + apply (rule corres_split[where r'="(=)"]) + apply simp + apply (rule getQueue_corres) + apply (rule corres_split_noop_rhs2) + apply (simp add: tcb_sched_append_def) + apply (intro conjI impI) + apply (rule corres_guard_imp) + apply (rule setQueue_corres) + prefer 3 + apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) + apply simp + apply simp + apply simp + apply (rule corres_split_noop_rhs2) + apply (rule addToBitmap_if_null_noop_corres) + apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] + apply wp+ + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def + project_inject) + done + + +crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue + for valid_pspace'[wp]: valid_pspace' + and valid_arch_state'[wp]: valid_arch_state' + (simp: unless_def) + +crunches tcbSchedAppend, tcbSchedDequeue + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def) + +lemma removeFromBitmap_valid_queues_no_bitmap_except[wp]: + "\ valid_queues_no_bitmap_except t \ + removeFromBitmap d p + \\_. valid_queues_no_bitmap_except t \" + unfolding bitmapQ_defs valid_queues_no_bitmap_except_def + by (wp| clarsimp simp: bitmap_fun_defs)+ + +lemma removeFromBitmap_bitmapQ: + "\ \s. True \ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" + unfolding bitmapQ_defs bitmap_fun_defs + by (wp| clarsimp simp: bitmap_fun_defs)+ + +lemma removeFromBitmap_valid_bitmapQ[wp]: +" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + (\s. ksReadyQueues s (d,p) = []) \ + removeFromBitmap d p + \\_. valid_bitmapQ \" +proof - + have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + (\s. ksReadyQueues s (d,p) = []) \ + removeFromBitmap d p + \\_. valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + (\s. 
\ bitmapQ d p s \ ksReadyQueues s (d,p) = []) \" + by (rule hoare_pre) + (wp removeFromBitmap_valid_queues_no_bitmap_except removeFromBitmap_valid_bitmapQ_except + removeFromBitmap_bitmapQ, simp) + thus ?thesis + by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) +qed + +(* this should be the actual weakest precondition to establish valid_queues + under tagging a thread as not queued *) +lemma threadSet_valid_queues_dequeue_wp: + "\ valid_queues_no_bitmap_except t and + valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + (\s. \d p. t \ set (ksReadyQueues s (d,p))) \ + threadSet (tcbQueued_update (\_. False)) t + \\rv. valid_queues \" + unfolding threadSet_def + apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) + apply (rule hoare_pre) + apply (simp add: valid_queues_def valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def) + apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift + setObject_tcb_strongest) + apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def valid_queues_no_bitmap_def) + done + +(* FIXME move *) +lemmas obj_at'_conjI = obj_at_conj' + +lemma setQueue_valid_queues_no_bitmap_except_dequeue_wp: + "\d p ts t. + \ \s. valid_queues_no_bitmap_except t s \ + (\t' \ set ts. obj_at' (inQ d p and runnable' \ tcbState) t' s) \ + t \ set ts \ distinct ts \ p \ maxPriority \ d \ maxDomain \ + setQueue d p ts + \\rv. valid_queues_no_bitmap_except t \" + unfolding setQueue_def valid_queues_no_bitmap_except_def null_def + by wp force + +definition (* if t is in a queue, it should be tagged with right priority and domain *) + "correct_queue t s \ \d p. t \ set(ksReadyQueues s (d, p)) \ + (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s)" + +lemma valid_queues_no_bitmap_correct_queueI[intro]: + "valid_queues_no_bitmap s \ correct_queue t s" + unfolding correct_queue_def valid_queues_no_bitmap_def + by (fastforce simp: obj_at'_def inQ_def) + + +lemma tcbSchedDequeue_valid_queues_weak: + "\ valid_queues_no_bitmap_except t and valid_bitmapQ and + bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + correct_queue t and + obj_at' (\tcb. tcbDomain tcb \ maxDomain \ tcbPriority tcb \ maxPriority) t \ + tcbSchedDequeue t + \\_. Invariants_H.valid_queues\" +proof - + show ?thesis + unfolding tcbSchedDequeue_def null_def valid_queues_def + apply wp (* stops on threadSet *) + apply (rule hoare_post_eq[OF _ threadSet_valid_queues_dequeue_wp], + simp add: valid_queues_def) + apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift)+ + apply (wp hoare_vcg_imp_lift setQueue_valid_queues_no_bitmap_except_dequeue_wp + setQueue_valid_bitmapQ threadGet_const_tcb_at hoare_vcg_if_lift)+ + (* wp done *) + apply (normalise_obj_at') + apply (clarsimp simp: correct_queue_def) + apply (normalise_obj_at') + apply (fastforce simp add: valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def elim: obj_at'_weaken)+ + done +qed + +lemma tcbSchedDequeue_valid_queues: + "\Invariants_H.valid_queues + and obj_at' (\tcb. tcbDomain tcb \ maxDomain) t + and obj_at' (\tcb. tcbPriority tcb \ maxPriority) t\ + tcbSchedDequeue t + \\_. 
Invariants_H.valid_queues\" + apply (rule hoare_pre, rule tcbSchedDequeue_valid_queues_weak) + apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def) + done + +lemma tcbSchedAppend_valid_queues'[wp]: + (* most of this is identical to tcbSchedEnqueue_valid_queues' in TcbAcc_R *) + "\valid_queues' and tcb_at' t\ tcbSchedAppend t \\_. valid_queues'\" + apply (simp add: tcbSchedAppend_def) + apply (rule hoare_pre) + apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" + in hoare_seq_ext) + apply (rename_tac queued) + apply (case_tac queued; simp_all add: unless_def when_def) + apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ + apply (subst conj_commute, wp) + apply (rule hoare_pre_post, assumption) + apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) + apply wp + apply fastforce + apply wp + apply (subst conj_commute) + apply clarsimp + apply (rule_tac Q="\rv. valid_queues' + and obj_at' (\obj. \ tcbQueued obj) t + and obj_at' (\obj. tcbPriority obj = prio) t + and obj_at' (\obj. tcbDomain obj = tdom) t + and (\s. t \ set (ksReadyQueues s (tdom, prio)))" + in hoare_post_imp) + apply (clarsimp simp: valid_queues'_def obj_at'_def inQ_def) + apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (clarsimp simp: obj_at'_def inQ_def valid_queues'_def) + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (clarsimp simp: obj_at'_def) + done + +lemma threadSet_valid_queues'_dequeue: (* threadSet_valid_queues' is too weak for dequeue *) + "\\s. (\d p t'. obj_at' (inQ d p) t' s \ t' \ t \ t' \ set (ksReadyQueues s (d, p))) \ + obj_at' (inQ d p) t s \ + threadSet (tcbQueued_update (\_. False)) t + \\rv. valid_queues' \" + unfolding valid_queues'_def + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift) + apply (simp only: imp_conv_disj not_obj_at') + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (simp add: not_obj_at') + apply (clarsimp simp: typ_at_tcb') + apply normalise_obj_at' + apply (fastforce elim: obj_at'_weaken simp: inQ_def) + done + +lemma setQueue_ksReadyQueues_lift: + "\ \s. P (s\ksReadyQueues := (ksReadyQueues s)((d, p) := ts)\) ts \ + setQueue d p ts + \ \_ s. P s (ksReadyQueues s (d,p))\" + unfolding setQueue_def + by (wp, clarsimp simp: fun_upd_def cong: if_cong) + +lemma tcbSchedDequeue_valid_queues'[wp]: + "\valid_queues' and tcb_at' t\ + tcbSchedDequeue t \\_. valid_queues'\" + unfolding tcbSchedDequeue_def + apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" + in hoare_seq_ext) + prefer 2 + apply (wp threadGet_const_tcb_at) + apply (fastforce simp: obj_at'_def) + apply clarsimp + apply (rename_tac queued) + apply (case_tac queued, simp_all) + apply wp + apply (rule_tac d=tdom and p=prio in threadSet_valid_queues'_dequeue) + apply (rule hoare_pre_post, assumption) + apply (wp | clarsimp simp: bitmap_fun_defs)+ + apply (wp hoare_vcg_all_lift setQueue_ksReadyQueues_lift) + apply clarsimp + apply (wp threadGet_obj_at' threadGet_const_tcb_at)+ + apply clarsimp + apply (rule context_conjI, clarsimp simp: obj_at'_def) + apply (clarsimp simp: valid_queues'_def obj_at'_def inQ_def|wp)+ + done + +lemma tcbSchedAppend_iflive'[wp]: + "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ + tcbSchedAppend tcb \\_. 
if_live_then_nonz_cap'\" + apply (simp add: tcbSchedAppend_def unless_def) + apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ + done + +lemma tcbSchedDequeue_iflive'[wp]: + "\if_live_then_nonz_cap'\ tcbSchedDequeue tcb \\_. if_live_then_nonz_cap'\" + apply (simp add: tcbSchedDequeue_def) + apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ + apply ((wp | clarsimp simp: bitmap_fun_defs)+)[1] (* deal with removeFromBitmap *) + apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ + apply (rule_tac Q="\rv. \" in hoare_post_imp, fastforce) + apply (wp | simp add: crunch_simps)+ + done + +crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue + for typ_at'[wp]: "\s. P (typ_at' T p s)" + and tcb_at'[wp]: "tcb_at' t" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksInterrupt[wp]: "\s. P (ksInterruptState s)" + and irq_states[wp]: valid_irq_states' + and irq_node'[wp]: "\s. P (irq_node' s)" + and ct'[wp]: "\s. P (ksCurThread s)" + and global_refs'[wp]: valid_global_refs' + and ifunsafe'[wp]: if_unsafe_then_cap' + and cap_to'[wp]: "ex_nonz_cap_to' p" + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + and idle'[wp]: valid_idle' + (simp: unless_def crunch_simps) + +lemma tcbSchedEnqueue_vms'[wp]: + "\valid_machine_state'\ tcbSchedEnqueue t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedEnqueue_ksMachine) + done + +lemma tcbSchedEnqueue_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedEnqueue t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp + apply (clarsimp simp: tcbSchedEnqueue_def) + apply (wpsimp simp: unless_def)+ + done + +lemma ct_idle_or_in_cur_domain'_lift2: + "\ \t. \tcb_in_cur_domain' t\ f \\_. tcb_in_cur_domain' t\; + \P. \\s. P (ksCurThread s) \ f \\_ s. P (ksCurThread s) \; + \P. \\s. P (ksIdleThread s) \ f \\_ s. P (ksIdleThread s) \; + \P. \\s. P (ksSchedulerAction s) \ f \\_ s. P (ksSchedulerAction s) \\ + \ \ ct_idle_or_in_cur_domain'\ f \\_. ct_idle_or_in_cur_domain' \" + apply (unfold ct_idle_or_in_cur_domain'_def) + apply (rule hoare_lift_Pf2[where f=ksCurThread]) + apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) + including no_pre + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift) + apply simp+ + done + +lemma tcbSchedEnqueue_invs'[wp]: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp tcbSchedEnqueue_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def + | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + done + +crunch ksMachine[wp]: tcbSchedAppend "\s. P (ksMachineState s)" + (simp: unless_def) + +lemma tcbSchedAppend_vms'[wp]: + "\valid_machine_state'\ tcbSchedAppend t \\_. 
valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedAppend_ksMachine) + done + +crunch pspace_domain_valid[wp]: tcbSchedAppend "pspace_domain_valid" + (simp: unless_def) + +crunch ksCurDomain[wp]: tcbSchedAppend "\s. P (ksCurDomain s)" +(simp: unless_def) + +crunch ksIdleThread[wp]: tcbSchedAppend "\s. P (ksIdleThread s)" +(simp: unless_def) + +crunch ksDomSchedule[wp]: tcbSchedAppend "\s. P (ksDomSchedule s)" +(simp: unless_def) + +lemma tcbSchedAppend_tcbDomain[wp]: + "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ + tcbSchedAppend t + \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + apply (clarsimp simp: tcbSchedAppend_def) + apply (wpsimp simp: unless_def)+ + done + +lemma tcbSchedAppend_tcbPriority[wp]: + "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ + tcbSchedAppend t + \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + apply (clarsimp simp: tcbSchedAppend_def) + apply (wpsimp simp: unless_def)+ + done + +lemma tcbSchedAppend_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedAppend t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp+ + done + +crunch ksDomScheduleIdx[wp]: tcbSchedAppend "\s. P (ksDomScheduleIdx s)" + (simp: unless_def) + +crunches tcbSchedAppend, tcbSchedDequeue + for gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + (simp: unless_def) + +crunches tcbSchedDequeue, tcbSchedAppend + for arch'[wp]: "\s. P (ksArchState s)" + +lemma tcbSchedAppend_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedAppend thread + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add:tcbSchedAppend_def bitmap_fun_defs) + apply (wp unless_wp setQueue_sch_act threadGet_wp|simp)+ + apply (fastforce simp:typ_at'_def obj_at'_def) + done + +lemma tcbSchedAppend_invs'[wp]: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp tcbSchedAppend_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def + | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority + split: thread_state.split_asm + simp: valid_pspace'_def)+ + done + +lemma tcbSchedEnqueue_invs'_not_ResumeCurrentThread: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s \ ResumeCurrentThread)\ + tcbSchedEnqueue t + \\_. invs'\" + by wpsimp + +lemma tcbSchedAppend_invs'_not_ResumeCurrentThread: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s \ ResumeCurrentThread)\ + tcbSchedAppend t + \\_. invs'\" + by wpsimp + +lemma tcb_at'_has_tcbDomain: + "tcb_at' t s \ \p. obj_at' (\tcb. tcbDomain tcb = p) t s" + by (clarsimp simp add: obj_at'_def) + +crunch ksMachine[wp]: tcbSchedDequeue "\s. P (ksMachineState s)" + (simp: unless_def) + +lemma tcbSchedDequeue_vms'[wp]: + "\valid_machine_state'\ tcbSchedDequeue t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedDequeue_ksMachine) + done + +crunch pspace_domain_valid[wp]: tcbSchedDequeue "pspace_domain_valid" + +crunch ksCurDomain[wp]: tcbSchedDequeue "\s. 
P (ksCurDomain s)" +(simp: unless_def) + +crunch ksIdleThread[wp]: tcbSchedDequeue "\s. P (ksIdleThread s)" +(simp: unless_def) + +crunch ksDomSchedule[wp]: tcbSchedDequeue "\s. P (ksDomSchedule s)" +(simp: unless_def) + +lemma tcbSchedDequeue_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedDequeue t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp + apply (clarsimp simp: tcbSchedDequeue_def) + apply (wp hoare_when_weak_wp | simp)+ + done + +lemma tcbSchedDequeue_tcbDomain[wp]: + "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ + tcbSchedDequeue t + \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + apply (clarsimp simp: tcbSchedDequeue_def) + apply (wp hoare_when_weak_wp | simp)+ + done + +lemma tcbSchedDequeue_tcbPriority[wp]: + "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ + tcbSchedDequeue t + \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + apply (clarsimp simp: tcbSchedDequeue_def) + apply (wp hoare_when_weak_wp | simp)+ + done + +crunch ksDomScheduleIdx[wp]: tcbSchedDequeue "\s. P (ksDomScheduleIdx s)" + (simp: unless_def) + +lemma tcbSchedDequeue_invs'[wp]: + "\invs' and tcb_at' t\ + tcbSchedDequeue t + \\_. invs'\" + unfolding invs'_def valid_state'_def + apply (rule hoare_pre) + apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + tcbSchedDequeue_valid_queues + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def)+ + apply (fastforce elim: valid_objs'_maxDomain valid_objs'_maxPriority simp: valid_pspace'_def)+ + done + +lemma setCurThread_corres: + "corres dc \ \ (modify (cur_thread_update (\_. t))) (setCurThread t)" + apply (unfold setCurThread_def) + apply (rule corres_modify) + apply (simp add: state_relation_def swp_def) + done + +crunches vcpuEnable, vcpuDisable, vcpuSave, vcpuRestore + for typ_at' [wp]: "\s. P (typ_at' T p s)" + (simp: crunch_simps + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma vcpuSwitch_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ vcpuSwitch param_a \\_ s. P (typ_at' T p s) \" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma arch_switch_thread_tcb_at'[wp]: + "\tcb_at' t\ Arch.switchToThread t \\_. tcb_at' t\" + by (unfold AARCH64_H.switchToThread_def, wp typ_at_lift_tcb') + +lemma updateASIDPoolEntry_pred_tcb_at'[wp]: + "updateASIDPoolEntry f asid \pred_tcb_at' proj P t'\" + unfolding updateASIDPoolEntry_def getPoolPtr_def + by (wpsimp wp: setASIDPool_pred_tcb_at' getASID_wp) + +lemma updateASIDPoolEntry_valid_queues[wp]: + "updateASIDPoolEntry g asid \Invariants_H.valid_queues\" + unfolding updateASIDPoolEntry_def getPoolPtr_def + by (wpsimp wp: getASID_wp) + +crunches setVMRoot + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t'" + (simp: crunch_simps wp: crunch_wps) + +crunches vcpuSwitch + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t'" + (simp: crunch_simps wp: crunch_wps) + +crunches Arch.switchToThread + for typ_at'[wp]: "\s. P (typ_at' T p s)" + +lemma Arch_switchToThread_pred_tcb'[wp]: + "Arch.switchToThread t \\s. P (pred_tcb_at' proj P' t' s)\" +proof - + have pos: "\P t t'. Arch.switchToThread t \pred_tcb_at' proj P t'\" + by (wpsimp simp: AARCH64_H.switchToThread_def) + show ?thesis + apply (rule P_bool_lift [OF pos]) + by (rule lift_neg_pred_tcb_at' [OF ArchThreadDecls_H_AARCH64_H_switchToThread_typ_at' pos]) +qed + +crunches storeWordUser, setVMRoot, asUser, storeWordUser, Arch.switchToThread + for ksQ[wp]: "\s. 
P (ksReadyQueues s p)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and valid_queues[wp]: "Invariants_H.valid_queues" + (wp: crunch_wps simp: crunch_simps) + +crunches arch_switch_to_thread + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +lemma switchToThread_corres: + "corres dc (valid_arch_state and valid_objs + and valid_vspace_objs and pspace_aligned and pspace_distinct + and valid_vs_lookup and valid_global_objs + and unique_table_refs + and st_tcb_at runnable t and valid_etcbs and (\s. sym_refs (state_hyp_refs_of s))) + (no_0_obj' and Invariants_H.valid_queues) + (switch_to_thread t) (switchToThread t)" + (is "corres _ ?PA ?PH _ _") +proof - + have mainpart: "corres dc (?PA) (?PH) + (do y \ arch_switch_to_thread t; + y \ (tcb_sched_action tcb_sched_dequeue t); + modify (cur_thread_update (\_. t)) + od) + (do y \ Arch.switchToThread t; + y \ tcbSchedDequeue t; + setCurThread t + od)" + apply (rule corres_guard_imp) + apply (rule corres_split[OF arch_switchToThread_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres setCurThread_corres]) + apply (wp|clarsimp simp: tcb_at_is_etcb_at st_tcb_at_tcb_at)+ + done + + show ?thesis + apply - + apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) + apply (rule corres_symb_exec_l [where Q = "\ s rv. (?PA and (=) rv) s", + OF corres_symb_exec_l [OF mainpart]]) + apply (auto intro: no_fail_pre [OF no_fail_assert] + no_fail_pre [OF no_fail_get] + dest: st_tcb_at_tcb_at [THEN get_tcb_at] | + simp add: assert_def | wp)+ + done +qed + +lemma tcb_at_idle_thread_lift: + assumes T: "\T' t. \typ_at T' t\ f \\rv. typ_at T' t\" + assumes I: "\P. \\s. P (idle_thread s)\ f \\rv s. P (idle_thread s)\" + shows "\\s. tcb_at (idle_thread s) s \ f \\rv s. tcb_at (idle_thread s) s\" + apply (simp add: tcb_at_typ) + apply (rule hoare_lift_Pf[where f=idle_thread]) + by (wpsimp wp: T I)+ + +lemma tcb_at'_ksIdleThread_lift: + assumes T: "\T' t. \typ_at' T' t\ f \\rv. typ_at' T' t\" + assumes I: "\P. \\s. P (ksIdleThread s)\ f \\rv s. P (ksIdleThread s)\" + shows "\\s. tcb_at' (ksIdleThread s) s \ f \\rv s. tcb_at' (ksIdleThread s) s\" + apply (simp add: tcb_at_typ_at') + apply (rule hoare_lift_Pf[where f=ksIdleThread]) + by (wpsimp wp: T I)+ + +crunches vcpu_update, vgic_update, vcpu_disable, vcpu_restore, vcpu_enable + for valid_asid_map[wp]: valid_asid_map + (simp: crunch_simps wp: crunch_wps) + +lemma setGlobalUserVSpace_corres[corres]: + "corres dc valid_global_arch_objs \ set_global_user_vspace setGlobalUserVSpace" + unfolding set_global_user_vspace_def setGlobalUserVSpace_def + apply (subst o_def) (* unfold fun_comp on abstract side only to get global_pt abbrev *) + apply corres + done + +lemma arch_switchToIdleThread_corres: + "corres dc + (valid_arch_state and pspace_aligned and pspace_distinct) + (no_0_obj') + arch_switch_to_idle_thread Arch.switchToIdleThread" + unfolding arch_switch_to_idle_thread_def switchToIdleThread_def + apply (corres corres: vcpuSwitch_corres) + apply (clarsimp simp: valid_arch_state_def cur_vcpu_def in_omonad obj_at_def) + apply clarsimp + apply (rule vcpu_at_cross; assumption?) 
+ apply (clarsimp simp: valid_arch_state_def cur_vcpu_def in_omonad obj_at_def state_relation_def + arch_state_relation_def) + done + +lemma switchToIdleThread_corres: + "corres dc invs invs_no_cicd' switch_to_idle_thread switchToIdleThread" + apply (simp add: switch_to_idle_thread_def Thread_H.switchToIdleThread_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIdleThread_corres]) + apply (rule corres_split[OF arch_switchToIdleThread_corres]) + apply (unfold setCurThread_def) + apply (rule corres_trivial, rule corres_modify) + apply (simp add: state_relation_def cdt_relation_def) + apply (wp+, simp+) + apply (simp add: invs_unique_refs invs_valid_vs_lookup invs_valid_objs invs_valid_asid_map + invs_arch_state invs_valid_global_objs invs_psp_aligned invs_distinct + invs_valid_idle invs_vspace_objs) + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def) + done + +lemma gq_sp: "\P\ getQueue d p \\rv. P and (\s. ksReadyQueues s (d, p) = rv)\" + by (unfold getQueue_def, rule gets_sp) + +lemma sch_act_wf: + "sch_act_wf sa s = ((\t. sa = SwitchToThread t \ st_tcb_at' runnable' t s \ + tcb_in_cur_domain' t s) \ + (sa = ResumeCurrentThread \ ct_in_state' activatable' s))" + by (case_tac sa, simp_all add: ) + +declare gq_wp[wp] +declare setQueue_obj_at[wp] + +lemma threadSet_timeslice_invs: + "\invs' and tcb_at' t\ threadSet (tcbTimeSlice_update b) t \\rv. invs'\" + by (wp threadSet_invs_trivial, simp_all add: inQ_def cong: conj_cong) + +lemma setCurThread_invs_no_cicd': + "\invs_no_cicd' and st_tcb_at' activatable' t and obj_at' (\x. \ tcbQueued x) t and tcb_in_cur_domain' t\ + setCurThread t + \\rv. invs'\" +proof - + have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" + apply (simp add: ct_not_inQ_def o_def) + done + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp add: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def + valid_state'_def Invariants_H.valid_queues_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def valid_queues'_def ct_not_inQ_ct + ct_idle_or_in_cur_domain'_def + bitmapQ_defs valid_queues_no_bitmap_def + cong: option.case_cong) + done +qed + +(* Don't use this rule when considering the idle thread. The invariant ct_idle_or_in_cur_domain' + says that either "tcb_in_cur_domain' t" or "t = ksIdleThread s". + Use setCurThread_invs_idle_thread instead. *) +lemma setCurThread_invs: + "\invs' and st_tcb_at' activatable' t and obj_at' (\x. \ tcbQueued x) t and + tcb_in_cur_domain' t\ setCurThread t \\rv. invs'\" + by (rule hoare_pre, rule setCurThread_invs_no_cicd') + (simp add: invs'_to_invs_no_cicd'_def) + +lemma valid_queues_not_runnable_not_queued: + fixes s + assumes vq: "Invariants_H.valid_queues s" + and vq': "valid_queues' s" + and st: "st_tcb_at' (Not \ runnable') t s" + shows "obj_at' (Not \ tcbQueued) t s" +proof (rule ccontr) + assume "\ obj_at' (Not \ tcbQueued) t s" + moreover from st have "typ_at' TCBT t s" + by (rule pred_tcb_at' [THEN tcb_at_typ_at' [THEN iffD1]]) + ultimately have "obj_at' tcbQueued t s" + by (clarsimp simp: not_obj_at' comp_def) + + moreover + from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbPriority] + obtain p where tp: "obj_at' (\tcb. tcbPriority tcb = p) t s" + by clarsimp + + moreover + from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbDomain] + obtain d where td: "obj_at' (\tcb. 
tcbDomain tcb = d) t s" + by clarsimp + + ultimately + have "t \ set (ksReadyQueues s (d, p))" using vq' + unfolding valid_queues'_def + apply - + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (drule_tac x=t in spec) + apply (erule impE) + apply (fastforce simp add: inQ_def obj_at'_def) + apply (assumption) + done + + with vq have "st_tcb_at' runnable' t s" + unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def + apply - + apply clarsimp + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp add: st_tcb_at'_def) + apply (drule(1) bspec) + apply (erule obj_at'_weakenE) + apply (clarsimp) + done + + with st show False + apply - + apply (drule(1) pred_tcb_at_conj') + apply (clarsimp) + done +qed + +(* + * The idle thread is not part of any ready queues. + *) +lemma idle'_not_tcbQueued': + assumes vq: "Invariants_H.valid_queues s" + and vq': "valid_queues' s" + and idle: "valid_idle' s" + shows "obj_at' (Not \ tcbQueued) (ksIdleThread s) s" +proof - + from idle have stidle: "st_tcb_at' (Not \ runnable') (ksIdleThread s) s" + by (clarsimp simp add: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + with vq vq' show ?thesis + by (rule valid_queues_not_runnable_not_queued) +qed + +lemma setCurThread_invs_no_cicd'_idle_thread: + "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" +proof - + have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" + apply (simp add: ct_not_inQ_def o_def) + done + have idle'_activatable': "\ s t. st_tcb_at' idle' t s \ st_tcb_at' activatable' t s" + apply (clarsimp simp: st_tcb_at'_def o_def obj_at'_def) + done + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) + apply (frule (2) idle'_not_tcbQueued'[simplified o_def]) + apply (clarsimp simp add: ct_not_inQ_ct idle'_activatable' + invs'_def cur_tcb'_def valid_state'_def valid_idle'_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_queues_def bitmapQ_defs valid_queues_no_bitmap_def valid_queues'_def + pred_tcb_at'_def + cong: option.case_cong) + apply (clarsimp simp: obj_at'_def idle_tcb'_def ) + done +qed + +lemma setCurThread_invs_idle_thread: + "\invs' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" + by (rule hoare_pre, rule setCurThread_invs_no_cicd'_idle_thread) + (clarsimp simp: invs'_to_invs_no_cicd'_def all_invs_but_ct_idle_or_in_cur_domain'_def) + +lemma Arch_switchToThread_invs[wp]: + "\invs' and tcb_at' t\ Arch.switchToThread t \\rv. invs'\" + unfolding AARCH64_H.switchToThread_def by (wpsimp wp: getObject_tcb_hyp_sym_refs) + +crunch ksCurDomain[wp]: "Arch.switchToThread" "\s. P (ksCurDomain s)" + (simp: crunch_simps wp: crunch_wps getASID_wp) + +crunches Arch.switchToThread + for obj_at_tcb'[wp]: "obj_at' (\tcb::tcb. P tcb) t" + (wp: crunch_wps getASID_wp simp: crunch_simps) + +lemma Arch_switchToThread_tcb_in_cur_domain'[wp]: + "Arch.switchToThread t \tcb_in_cur_domain' t'\" + by (wp tcb_in_cur_domain'_lift) + +lemma tcbSchedDequeue_not_tcbQueued: + "\ tcb_at' t \ tcbSchedDequeue t \ \_. obj_at' (\x. \ tcbQueued x) t \" + apply (simp add: tcbSchedDequeue_def) + apply (wp|clarsimp)+ + apply (rule_tac Q="\queued. obj_at' (\x. 
tcbQueued x = queued) t" in hoare_post_imp) + apply (clarsimp simp: obj_at'_def) + apply (wp threadGet_obj_at') + apply (simp) + done + +lemma asUser_obj_at[wp]: + "asUser t' f \obj_at' (P \ tcbState) t\" + apply (wpsimp simp: asUser_def threadGet_stateAssert_gets_asUser) + apply (simp add: asUser_fetch_def obj_at'_def) + done + +declare doMachineOp_obj_at[wp] + +crunch valid_arch_state'[wp]: asUser "valid_arch_state'" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_irq_states'[wp]: asUser "valid_irq_states'" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_machine_state'[wp]: asUser "valid_machine_state'" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_queues'[wp]: asUser "valid_queues'" +(wp: crunch_wps simp: crunch_simps) + + +lemma asUser_valid_irq_node'[wp]: + "asUser t (setRegister f r) \\s. valid_irq_node' (irq_node' s) s\" + apply (rule_tac valid_irq_node_lift) + apply (simp add: asUser_def) + apply (wpsimp wp: crunch_wps)+ + done + +crunch irq_masked'_helper: asUser "\s. P (intStateIRQTable (ksInterruptState s))" +(wp: crunch_wps simp: crunch_simps) + +lemma asUser_irq_masked'[wp]: + "\irqs_masked'\ asUser t (setRegister f r) + \\_ . irqs_masked'\" + apply (rule irqs_masked_lift) + apply (rule asUser_irq_masked'_helper) + done + +lemma asUser_ct_not_inQ[wp]: + "\ct_not_inQ\ asUser t (setRegister f r) + \\_ . ct_not_inQ\" + apply (clarsimp simp: submonad_asUser.fn_is_sm submonad_fn_def) + apply (rule hoare_seq_ext)+ + prefer 4 + apply (rule stateAssert_sp) + prefer 3 + apply (rule gets_inv) + defer + apply (rule select_f_inv) + apply (case_tac x; simp) + apply (clarsimp simp: asUser_replace_def obj_at'_def fun_upd_def + split: option.split kernel_object.split) + apply wp + apply (clarsimp simp: ct_not_inQ_def obj_at'_def objBitsKO_def ps_clear_def dom_def) + apply (rule conjI; clarsimp; blast) + done + +crunch pspace_domain_valid[wp]: asUser "pspace_domain_valid" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_dom_schedule'[wp]: asUser "valid_dom_schedule'" +(wp: crunch_wps simp: crunch_simps) + +crunch gsUntypedZeroRanges[wp]: asUser "\s. P (gsUntypedZeroRanges s)" + (wp: crunch_wps simp: unless_def) + +crunch ctes_of[wp]: asUser "\s. P (ctes_of s)" + (wp: crunch_wps simp: unless_def) + +lemmas asUser_cteCaps_of[wp] = cteCaps_of_ctes_of_lift[OF asUser_ctes_of] + +lemma asUser_utr[wp]: + "\untyped_ranges_zero'\ asUser f t \\_. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + done + +lemma threadSet_invs_no_cicd'_trivialT: + assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" + assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes v: "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows + "\\s. invs_no_cicd' s \ + (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) \ + (\ko d p. 
ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) \ + ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) \ + (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ + threadSet F t + \\rv. invs_no_cicd'\" +proof - + from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast + note threadSet_sch_actT_P[where P=False, simplified] + have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ + valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" + by (auto simp: z) + show ?thesis + apply (simp add: invs_no_cicd'_def valid_state'_def split del: if_split) + apply (rule hoare_pre) + apply (wp x w v u b + threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_valid_queues + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_valid_queues' + threadSet_cur + untyped_ranges_zero_lift + |clarsimp simp: y z a v domains cteCaps_of_def valid_arch_tcb'_def |rule refl)+ + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) + by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) +qed + +lemmas threadSet_invs_no_cicd'_trivial = + threadSet_invs_no_cicd'_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma asUser_invs_no_cicd'[wp]: + "\invs_no_cicd'\ asUser t m \\rv. invs_no_cicd'\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + apply (wp threadSet_invs_no_cicd'_trivial hoare_drop_imps | simp)+ + done + +lemma Arch_switchToThread_invs_no_cicd': + "\invs_no_cicd'\ Arch.switchToThread t \\rv. invs_no_cicd'\" + apply (wpsimp wp: getObject_tcb_hyp_sym_refs setVMRoot_invs_no_cicd' + simp: AARCH64_H.switchToThread_def) + by (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) + +lemma tcbSchedDequeue_invs_no_cicd'[wp]: + "\invs_no_cicd' and tcb_at' t\ + tcbSchedDequeue t + \\_. invs_no_cicd'\" + unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + tcbSchedDequeue_valid_queues_weak + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def)+ + apply clarsimp + apply (fastforce simp: valid_pspace'_def valid_queues_def + elim: valid_objs'_maxDomain valid_objs'_maxPriority intro: obj_at'_conjI) + done + +lemma switchToThread_invs_no_cicd': + "\invs_no_cicd' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" + apply (simp add: Thread_H.switchToThread_def) + apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued + Arch_switchToThread_invs_no_cicd' Arch_switchToThread_pred_tcb') + apply (auto elim!: pred_tcb'_weakenE) + done + +lemma switchToThread_invs[wp]: + "\invs' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ switchToThread t \\rv. 
invs' \" + apply (simp add: Thread_H.switchToThread_def ) + apply (wp threadSet_timeslice_invs setCurThread_invs + Arch_switchToThread_invs dmo_invs' + doMachineOp_obj_at tcbSchedDequeue_not_tcbQueued) + by (auto elim!: pred_tcb'_weakenE) + +lemma setCurThread_ct_in_state: + "\obj_at' (P \ tcbState) t\ setCurThread t \\rv. ct_in_state' P\" +proof - + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (simp add: ct_in_state'_def pred_tcb_at'_def o_def) + done +qed + +lemma switchToThread_ct_in_state[wp]: + "\obj_at' (P \ tcbState) t\ switchToThread t \\rv. ct_in_state' P\" +proof - + show ?thesis + apply (simp add: Thread_H.switchToThread_def tcbSchedEnqueue_def unless_def) + apply (wp setCurThread_ct_in_state + | simp add: o_def cong: if_cong)+ + done +qed + +lemma setCurThread_obj_at[wp]: + "\obj_at' P addr\ setCurThread t \\rv. obj_at' P addr\" + apply (simp add: setCurThread_def) + apply wp + apply (fastforce intro: obj_at'_pspaceI) + done + +lemma dmo_cap_to'[wp]: + "\ex_nonz_cap_to' p\ + doMachineOp m + \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma sct_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setCurThread t \\rv. ex_nonz_cap_to' p\" + apply (simp add: setCurThread_def) + apply (wp ex_nonz_cap_to_pres') + apply (clarsimp elim!: cte_wp_at'_pspaceI)+ + done + +lemma setVCPU_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setObject p' (v::vcpu) \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +crunches + vcpuDisable, vcpuRestore, vcpuEnable, vcpuSaveRegRange, vgicUpdateLR, vcpuSave, vcpuSwitch + for cap_to'[wp]: "ex_nonz_cap_to' p" + (ignore: doMachineOp wp: crunch_wps) + +crunches updateASIDPoolEntry + for cap_to'[wp]: "ex_nonz_cap_to' p" + (wp: crunch_wps ex_nonz_cap_to_pres' getASID_wp) + +crunch cap_to'[wp]: "Arch.switchToThread" "ex_nonz_cap_to' p" + (simp: crunch_simps wp: crunch_wps) + +crunch cap_to'[wp]: switchToThread "ex_nonz_cap_to' p" + (simp: crunch_simps) + +lemma no_longer_inQ[simp]: + "\ inQ d p (tcbQueued_update (\x. False) tcb)" + by (simp add: inQ_def) + +lemma iflive_inQ_nonz_cap_strg: + "if_live_then_nonz_cap' s \ obj_at' (inQ d prio) t s + \ ex_nonz_cap_to' t s" + by (clarsimp simp: obj_at'_real_def inQ_def live'_def + elim!: if_live_then_nonz_capE' ko_wp_at'_weakenE) + +lemmas iflive_inQ_nonz_cap[elim] + = mp [OF iflive_inQ_nonz_cap_strg, OF conjI[rotated]] + +declare Cons_eq_tails[simp] + +crunch ksCurDomain[wp]: "ThreadDecls_H.switchToThread" "\s. P (ksCurDomain s)" + +(* FIXME move *) +lemma obj_tcb_at': + "obj_at' (\tcb::tcb. P tcb) t s \ tcb_at' t s" + by (clarsimp simp: obj_at'_def) + +lemma invs'_not_runnable_not_queued: + fixes s + assumes inv: "invs' s" + and st: "st_tcb_at' (Not \ runnable') t s" + shows "obj_at' (Not \ tcbQueued) t s" + apply (insert assms) + apply (rule valid_queues_not_runnable_not_queued) + apply (clarsimp simp add: invs'_def valid_state'_def)+ + done + +lemma valid_queues_not_tcbQueued_not_ksQ: + fixes s + assumes vq: "Invariants_H.valid_queues s" + and notq: "obj_at' (Not \ tcbQueued) t s" + shows "\d p. 
t \ set (ksReadyQueues s (d, p))" +proof (rule ccontr, simp , erule exE, erule exE) + fix d p + assume "t \ set (ksReadyQueues s (d, p))" + with vq have "obj_at' (inQ d p) t s" + unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def + apply clarify + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp) + apply (drule(1) bspec) + apply (erule obj_at'_weakenE) + apply (simp) + done + hence "obj_at' tcbQueued t s" + apply (rule obj_at'_weakenE) + apply (simp only: inQ_def) + done + with notq show "False" + by (clarsimp simp: obj_at'_def) +qed + +lemma not_tcbQueued_not_ksQ: + fixes s + assumes "invs' s" + and "obj_at' (Not \ tcbQueued) t s" + shows "\d p. t \ set (ksReadyQueues s (d, p))" + apply (insert assms) + apply (clarsimp simp add: invs'_def valid_state'_def) + apply (drule(1) valid_queues_not_tcbQueued_not_ksQ) + apply (clarsimp) + done + +lemma ct_not_ksQ: + "\ invs' s; ksSchedulerAction s = ResumeCurrentThread \ + \ \p. ksCurThread s \ set (ksReadyQueues s p)" + apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def) + apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) + apply (fastforce) + done + +lemma setThreadState_rct: + "\\s. (runnable' st \ ksCurThread s \ t) + \ ksSchedulerAction s = ResumeCurrentThread\ + setThreadState st t + \\_ s. ksSchedulerAction s = ResumeCurrentThread\" + apply (simp add: setThreadState_def) + apply (rule hoare_pre_disj') + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] + threadSet_nosch]]) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) + apply (clarsimp simp: when_def) + apply (case_tac x) + apply (clarsimp, wp)[1] + apply (clarsimp) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF threadSet_ct threadSet_nosch]]) + apply (rule hoare_seq_ext [OF _ isRunnable_inv]) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF gct_wp gct_wp]]) + apply (rename_tac ct) + apply (case_tac "ct\t") + apply (clarsimp simp: when_def) + apply (wp)[1] + apply (clarsimp) + done + +lemma bitmapQ_lookupBitmapPriority_simp: (* neater unfold, actual unfold is really ugly *) + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ \ + bitmapQ d (lookupBitmapPriority d s) s = + (ksReadyQueuesL1Bitmap s d !! word_log2 (ksReadyQueuesL1Bitmap s d) \ + ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d))) !! 
+ word_log2 (ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d)))))" + unfolding bitmapQ_def lookupBitmapPriority_def + apply (drule word_log2_nth_same, clarsimp) + apply (drule (1) bitmapQ_no_L1_orphansD, clarsimp) + apply (drule word_log2_nth_same, clarsimp) + apply (frule test_bit_size[where n="word_log2 (ksReadyQueuesL2Bitmap _ _)"]) + apply (clarsimp simp: numPriorities_def wordBits_def word_size) + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (fastforce intro: unat_less_helper word_log2_max[THEN order_less_le_trans] + simp: wordRadix_def word_size l2BitmapSize_def')+ + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (fastforce intro: unat_less_helper word_log2_max of_nat_mono_maybe + simp: wordRadix_def word_size l2BitmapSize_def')+ + apply (simp add: word_ao_dist) + apply (subst less_mask_eq) + apply (rule word_of_nat_less) + apply (fastforce intro: word_of_nat_less simp: wordRadix_def' unat_of_nat word_size)+ + done + +lemma bitmapQ_from_bitmap_lookup: + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + valid_bitmapQ s ; bitmapQ_no_L1_orphans s + \ + \ bitmapQ d (lookupBitmapPriority d s) s" + apply (simp add: bitmapQ_lookupBitmapPriority_simp) + apply (drule word_log2_nth_same) + apply (drule (1) bitmapQ_no_L1_orphansD) + apply (fastforce dest!: word_log2_nth_same + simp: word_ao_dist lookupBitmapPriority_def word_size numPriorities_def + wordBits_def) + done + +lemma lookupBitmapPriority_obj_at': + "\ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0; valid_queues_no_bitmap s; valid_bitmapQ s; + bitmapQ_no_L1_orphans s\ + \ obj_at' (inQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) and runnable' \ tcbState) + (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" + apply (drule (2) bitmapQ_from_bitmap_lookup) + apply (simp add: valid_bitmapQ_bitmapQ_simp) + apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)", simp) + apply (clarsimp, rename_tac t ts) + apply (drule cons_set_intro) + apply (drule (2) valid_queues_no_bitmap_objD) + done + +lemma bitmapL1_zero_ksReadyQueues: + "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ + \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. 
ksReadyQueues s (d,p) = [])" + apply (cases "ksReadyQueuesL1Bitmap s d = 0") + apply (force simp add: bitmapQ_def valid_bitmapQ_def) + apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) + done + +lemma prioToL1Index_le_mask: + "\ prioToL1Index p = prioToL1Index p' ; p && mask wordRadix \ p' && mask wordRadix \ + \ p \ p'" + unfolding prioToL1Index_def + apply (simp add: wordRadix_def word_le_nat_alt[symmetric]) + apply (drule shiftr_eq_neg_mask_eq) + apply (metis add.commute word_and_le2 word_plus_and_or_coroll2 word_plus_mono_left) + done + +lemma prioToL1Index_le_index: + "\ prioToL1Index p \ prioToL1Index p' ; prioToL1Index p \ prioToL1Index p' \ + \ p \ p'" + unfolding prioToL1Index_def + apply (simp add: wordRadix_def word_le_nat_alt[symmetric]) + apply (erule (1) le_shiftr') + done + +lemma bitmapL1_highest_lookup: + "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; + bitmapQ d p' s \ + \ p' \ lookupBitmapPriority d s" + apply (subgoal_tac "ksReadyQueuesL1Bitmap s d \ 0") + prefer 2 + apply (clarsimp simp add: bitmapQ_def) + apply (case_tac "prioToL1Index (lookupBitmapPriority d s) = prioToL1Index p'") + apply (rule prioToL1Index_le_mask, simp) + apply (frule (2) bitmapQ_from_bitmap_lookup) + apply (clarsimp simp: bitmapQ_lookupBitmapPriority_simp) + apply (clarsimp simp: bitmapQ_def lookupBitmapPriority_def) + apply (subst mask_or_not_mask[where n=wordRadix and x=p', symmetric]) + apply (subst word_bw_comms(2)) (* || commute *) + apply (simp add: word_ao_dist mask_AND_NOT_mask mask_twice) + apply (subst less_mask_eq[where x="of_nat _"]) + apply (subst word_less_nat_alt) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max]) + apply (simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max]) + apply (simp add: word_size wordRadix_def') + apply (subst word_le_nat_alt) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule word_log2_highest) + apply (subst (asm) prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size wordRadix_def') + apply (simp add: word_size wordRadix_def') + apply (drule (1) bitmapQ_no_L1_orphansD[where d=d and i="word_log2 _"]) + apply (simp add: l2BitmapSize_def') + apply simp + apply (rule prioToL1Index_le_index[rotated], simp) + apply (frule (2) bitmapQ_from_bitmap_lookup) + apply (clarsimp simp: bitmapQ_lookupBitmapPriority_simp) + apply (clarsimp simp: bitmapQ_def lookupBitmapPriority_def) + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size wordRadix_def') + apply (fastforce dest: bitmapQ_no_L1_orphansD + simp: wordBits_def numPriorities_def word_size wordRadix_def' l2BitmapSize_def') + apply (erule word_log2_highest) + done + +lemma bitmapQ_ksReadyQueuesI: + "\ bitmapQ d p s ; valid_bitmapQ s \ \ ksReadyQueues s (d, p) \ []" + unfolding valid_bitmapQ_def by simp + +lemma getReadyQueuesL2Bitmap_inv[wp]: + "\ P \ getReadyQueuesL2Bitmap d i \\_. P\" + unfolding getReadyQueuesL2Bitmap_def by wp + +lemma switchToThread_lookupBitmapPriority_wp: + "\\s. 
invs_no_cicd' s \ bitmapQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) s \ + t = hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)) \ + ThreadDecls_H.switchToThread t + \\rv. invs'\" +proof - + have switchToThread_pre: + "\s p t.\ valid_queues s ; bitmapQ (ksCurDomain s) p s ; t = hd (ksReadyQueues s (ksCurDomain s,p)) \ + \ st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s" + unfolding valid_queues_def + apply (clarsimp dest!: bitmapQ_ksReadyQueuesI) + apply (case_tac "ksReadyQueues s (ksCurDomain s, p)", simp) + apply (rename_tac t ts) + apply (drule_tac t=t and p=p and d="ksCurDomain s" in valid_queues_no_bitmap_objD) + apply simp + apply (fastforce elim: obj_at'_weaken simp: inQ_def tcb_in_cur_domain'_def st_tcb_at'_def) + done + thus ?thesis + by (wp switchToThread_invs_no_cicd') (fastforce dest: invs_no_cicd'_queues) +qed + +lemma switchToIdleThread_invs_no_cicd': + "\invs_no_cicd'\ switchToIdleThread \\rv. invs'\" + apply (clarsimp simp: Thread_H.switchToIdleThread_def AARCH64_H.switchToIdleThread_def) + apply (wp setCurThread_invs_no_cicd'_idle_thread setVMRoot_invs_no_cicd' vcpuSwitch_it') + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_idle'_def) + done + +crunch obj_at'[wp]: "Arch.switchToIdleThread" "obj_at' (P :: ('a :: no_vcpu) \ bool) t" + + +declare hoare_weak_lift_imp_conj[wp_split del] + +lemma setCurThread_const: + "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" + by (simp add: setCurThread_def | wp)+ + + + +crunch it[wp]: switchToIdleThread "\s. P (ksIdleThread s)" +crunch it[wp]: switchToThread "\s. P (ksIdleThread s)" + +lemma switchToIdleThread_curr_is_idle: + "\\\ switchToIdleThread \\rv s. ksCurThread s = ksIdleThread s\" + apply (rule hoare_weaken_pre) + apply (wps switchToIdleThread_it) + apply (simp add: Thread_H.switchToIdleThread_def) + apply (wp setCurThread_const) + apply (simp) + done + +lemma chooseThread_it[wp]: + "\\s. P (ksIdleThread s)\ chooseThread \\_ s. P (ksIdleThread s)\" + supply if_split[split del] + by (wpsimp simp: chooseThread_def curDomain_def bitmap_fun_defs) + +lemma threadGet_inv [wp]: "\P\ threadGet f t \\rv. P\" + apply (simp add: threadGet_def) + apply (wp | simp)+ + done + +lemma corres_split_sched_act: + "\sched_act_relation act act'; + corres r P P' f1 g1; + \t. corres r (Q t) (Q' t) (f2 t) (g2 t); + corres r R R' f3 g3\ + \ corres r (case act of resume_cur_thread \ P + | switch_thread t \ Q t + | choose_new_thread \ R) + (case act' of ResumeCurrentThread \ P' + | SwitchToThread t \ Q' t + | ChooseThread \ R') + (case act of resume_cur_thread \ f1 + | switch_thread t \ f2 t + | choose_new_thread \ f3) + (case act' of ResumeCurrentThread \ g1 + | ChooseNewThread \ g3 + | SwitchToThread t \ g2 t)" + apply (cases act) + apply (rule corres_guard_imp, force+)+ + done + +lemma corres_assert_ret: + "corres dc (\s. P) \ (assert P) (return ())" + apply (rule corres_no_failI) + apply simp + apply (simp add: assert_def return_def fail_def) + done + +lemma corres_assert_assume_l: + "corres dc P Q (f ()) g + \ corres dc (P and (\s. P')) Q (assert P' >>= f) g" + by (force simp: corres_underlying_def assert_def return_def bind_def fail_def) + +lemma corres_assert_assume_r: + "corres dc P Q f (g ()) + \ corres dc P (Q and (\s. Q')) f (assert Q' >>= g)" + by (force simp: corres_underlying_def assert_def return_def bind_def fail_def) + +crunch cur[wp]: tcbSchedEnqueue cur_tcb' + (simp: unless_def) + +lemma thread_get_exs_valid[wp]: + "tcb_at t s \ \(=) s\ thread_get f t \\\r. 
(=) s\" + apply (clarsimp simp: get_thread_state_def assert_opt_def fail_def + thread_get_def gets_the_def exs_valid_def gets_def + get_def bind_def return_def split: option.splits) + apply (erule get_tcb_at) + done + +lemma gts_exs_valid[wp]: + "tcb_at t s \ \(=) s\ get_thread_state t \\\r. (=) s\" + apply (clarsimp simp: get_thread_state_def assert_opt_def fail_def + thread_get_def gets_the_def exs_valid_def gets_def + get_def bind_def return_def split: option.splits) + apply (erule get_tcb_at) + done + +lemma guarded_switch_to_corres: + "corres dc (valid_arch_state and valid_objs + and valid_vspace_objs and pspace_aligned and pspace_distinct + and valid_vs_lookup and valid_global_objs + and unique_table_refs + and st_tcb_at runnable t and valid_etcbs and (\s. sym_refs (state_hyp_refs_of s))) + (no_0_obj' and Invariants_H.valid_queues) + (guarded_switch_to t) (switchToThread t)" + apply (simp add: guarded_switch_to_def) + apply (rule corres_guard_imp) + apply (rule corres_symb_exec_l'[OF _ gts_exs_valid]) + apply (rule corres_assert_assume_l) + apply (rule switchToThread_corres) + apply (force simp: st_tcb_at_tcb_at) + apply (wp gts_st_tcb_at) + apply (force simp: st_tcb_at_tcb_at)+ + done + +abbreviation "enumPrio \ [0.e.maxPriority]" + +lemma enumPrio_word_div: + fixes v :: "8 word" + assumes vlt: "unat v \ unat maxPriority" + shows "\xs ys. enumPrio = xs @ [v] @ ys \ (\x\set xs. x < v) \ (\y\set ys. v < y)" + apply (subst upto_enum_word) + apply (subst upt_add_eq_append'[where j="unat v"]) + apply simp + apply (rule le_SucI) + apply (rule vlt) + apply (simp only: upt_conv_Cons vlt[simplified less_Suc_eq_le[symmetric]]) + apply (intro exI conjI) + apply fastforce + apply clarsimp + apply (drule of_nat_mono_maybe[rotated, where 'a="8"]) + apply (fastforce simp: vlt) + apply simp + apply (clarsimp simp: Suc_le_eq) + apply (erule disjE) + apply (drule of_nat_mono_maybe[rotated, where 'a="8"]) + apply (simp add: maxPriority_def numPriorities_def) + apply (clarsimp simp: unat_of_nat_eq) + apply (erule conjE) + apply (drule_tac y="unat v" and x="x" in of_nat_mono_maybe[rotated, where 'a="8"]) + apply (simp add: maxPriority_def numPriorities_def)+ + done + +lemma curDomain_corres: "corres (=) \ \ (gets cur_domain) (curDomain)" + by (simp add: curDomain_def state_relation_def) + +lemma curDomain_corres': + "corres (=) \ (\s. ksCurDomain s \ maxDomain) + (gets cur_domain) (if 1 < numDomains then curDomain else return 0)" + apply (case_tac "1 < numDomains"; simp) + apply (rule corres_guard_imp[OF curDomain_corres]; solves simp) + (* if we have only one domain, then we are in it *) + apply (clarsimp simp: return_def simpler_gets_def bind_def maxDomain_def + state_relation_def corres_underlying_def) + done + +lemma lookupBitmapPriority_Max_eqI: + "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d \ 0 \ + \ lookupBitmapPriority d s = (Max {prio. ksReadyQueues s (d, prio) \ []})" + apply (rule Max_eqI[simplified eq_commute]; simp) + apply (fastforce simp: bitmapL1_highest_lookup valid_bitmapQ_bitmapQ_simp) + apply (metis valid_bitmapQ_bitmapQ_simp bitmapQ_from_bitmap_lookup) + done + +lemma corres_gets_queues_getReadyQueuesL1Bitmap: + "corres (\qs l1. ((l1 = 0) = (\p. qs p = []))) \ valid_queues + (gets (\s. 
ready_queues s d)) (getReadyQueuesL1Bitmap d)" + unfolding state_relation_def valid_queues_def getReadyQueuesL1Bitmap_def + by (clarsimp simp: bitmapL1_zero_ksReadyQueues ready_queues_relation_def) + +lemma guarded_switch_to_chooseThread_fragment_corres: + "corres dc + (P and st_tcb_at runnable t and invs and valid_sched) + (P' and st_tcb_at' runnable' t and invs_no_cicd') + (guarded_switch_to t) + (do runnable \ isRunnable t; + y \ assert runnable; + ThreadDecls_H.switchToThread t + od)" + unfolding guarded_switch_to_def isRunnable_def + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (rule corres_assert_assume_l) + apply (rule corres_assert_assume_r) + apply (rule switchToThread_corres) + apply (wp gts_st_tcb_at)+ + apply (clarsimp simp: st_tcb_at_tcb_at invs_def valid_state_def valid_pspace_def valid_sched_def + invs_valid_vs_lookup invs_unique_refs) + apply (auto elim!: pred_tcb'_weakenE split: thread_state.splits + simp: pred_tcb_at' runnable'_def all_invs_but_ct_idle_or_in_cur_domain'_def) + done + +lemma bitmap_lookup_queue_is_max_non_empty: + "\ valid_queues s'; (s, s') \ state_relation; invs s; + ksReadyQueuesL1Bitmap s' (ksCurDomain s') \ 0 \ + \ ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s') = + max_non_empty_queue (ready_queues s (cur_domain s))" + unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_queues_def + by (clarsimp simp add: max_non_empty_queue_def lookupBitmapPriority_Max_eqI + state_relation_def ready_queues_relation_def) + +lemma ksReadyQueuesL1Bitmap_return_wp: + "\\s. P (ksReadyQueuesL1Bitmap s d) s \ getReadyQueuesL1Bitmap d \\rv s. P rv s\" + unfolding getReadyQueuesL1Bitmap_def + by wp + +lemma ksReadyQueuesL1Bitmap_st_tcb_at': + "\ ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0 ; valid_queues s \ + \ st_tcb_at' runnable' (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" + apply (drule bitmapQ_from_bitmap_lookup; clarsimp simp: valid_queues_def) + apply (clarsimp simp add: valid_bitmapQ_bitmapQ_simp) + apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)") + apply simp + apply (simp add: valid_queues_no_bitmap_def) + apply (erule_tac x="ksCurDomain s" in allE) + apply (erule_tac x="lookupBitmapPriority (ksCurDomain s) s" in allE) + apply (clarsimp simp: st_tcb_at'_def) + apply (erule obj_at'_weaken) + apply simp + done + +lemma curDomain_or_return_0: + "\ \P\ curDomain \\rv s. Q rv s \; \s. P s \ ksCurDomain s \ maxDomain \ + \ \P\ if 1 < numDomains then curDomain else return 0 \\rv s. 
Q rv s \" + apply (case_tac "1 < numDomains"; simp) + apply (simp add: valid_def curDomain_def simpler_gets_def return_def maxDomain_def) + done + +lemma invs_no_cicd_ksCurDomain_maxDomain': + "invs_no_cicd' s \ ksCurDomain s \ maxDomain" + unfolding invs_no_cicd'_def by simp + +lemma chooseThread_corres: + "corres dc (invs and valid_sched) (invs_no_cicd') + choose_thread chooseThread" (is "corres _ ?PREI ?PREH _ _") +proof - + show ?thesis + unfolding choose_thread_def chooseThread_def + apply (simp only: return_bind Let_def) + apply (subst if_swap[where P="_ \ 0"]) (* put switchToIdleThread on first branch*) + apply (rule corres_guard_imp) + apply (rule corres_split[OF curDomain_corres']) + apply clarsimp + apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) + apply (erule corres_if2[OF sym]) + apply (rule switchToIdleThread_corres) + apply (rule corres_symb_exec_r) + apply (rule corres_symb_exec_r) + apply (rule_tac + P="\s. ?PREI s \ queues = ready_queues s (cur_domain s) \ + st_tcb_at runnable (hd (max_non_empty_queue queues)) s" and + P'="\s. (?PREH s \ st_tcb_at' runnable' (hd queue) s) \ + l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) \ + l1 \ 0 \ + queue = ksReadyQueues s (ksCurDomain s, + lookupBitmapPriority (ksCurDomain s) s)" and + F="hd queue = hd (max_non_empty_queue queues)" in corres_req) + apply (fastforce dest!: invs_no_cicd'_queues simp: bitmap_lookup_queue_is_max_non_empty) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule_tac P=\ and P'=\ in guarded_switch_to_chooseThread_fragment_corres) + apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+ + apply (clarsimp simp: if_apply_def2) + apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp) + apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+ + apply (fastforce simp: invs_no_cicd'_def) + apply (clarsimp simp: valid_sched_def DetSchedInvs_AI.valid_queues_def max_non_empty_queue_def) + apply (erule_tac x="cur_domain s" in allE) + apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \ []}" in allE) + apply (case_tac "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \ []})") + apply (clarsimp) + apply (subgoal_tac + "ready_queues s (cur_domain s) (Max {prio. 
ready_queues s (cur_domain s) prio \ []}) \ []") + apply (fastforce elim!: setcomp_Max_has_prop)+ + apply (simp add: invs_no_cicd_ksCurDomain_maxDomain') + apply (clarsimp dest!: invs_no_cicd'_queues) + apply (fastforce intro: ksReadyQueuesL1Bitmap_st_tcb_at') + done +qed + +lemma thread_get_comm: "do x \ thread_get f p; y \ gets g; k x y od = + do y \ gets g; x \ thread_get f p; k x y od" + apply (rule ext) + apply (clarsimp simp add: gets_the_def assert_opt_def + bind_def gets_def get_def return_def + thread_get_def + fail_def split: option.splits) + done + +lemma schact_bind_inside: "do x \ f; (case act of resume_cur_thread \ f1 x + | switch_thread t \ f2 t x + | choose_new_thread \ f3 x) od + = (case act of resume_cur_thread \ (do x \ f; f1 x od) + | switch_thread t \ (do x \ f; f2 t x od) + | choose_new_thread \ (do x \ f; f3 x od))" + apply (case_tac act,simp_all) + done + +interpretation tcb_sched_action_extended: is_extended' "tcb_sched_action f a" + by (unfold_locales) + +lemma getDomainTime_corres: + "corres (=) \ \ (gets domain_time) getDomainTime" + by (simp add: getDomainTime_def state_relation_def) + +lemma nextDomain_corres: + "corres dc \ \ next_domain nextDomain" + apply (simp add: next_domain_def nextDomain_def) + apply (rule corres_modify) + apply (simp add: state_relation_def Let_def dschLength_def dschDomain_def) + done + +lemma next_domain_valid_sched[wp]: + "\ valid_sched and (\s. scheduler_action s = choose_new_thread)\ next_domain \ \_. valid_sched \" + apply (simp add: next_domain_def Let_def) + apply (wp, simp add: valid_sched_def valid_sched_action_2_def ct_not_in_q_2_def) + apply (simp add:valid_blocked_2_def) + done + +lemma nextDomain_invs_no_cicd': + "\ invs' and (\s. ksSchedulerAction s = ChooseNewThread)\ nextDomain \ \_. invs_no_cicd' \" + apply (simp add: nextDomain_def Let_def dschLength_def dschDomain_def) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def valid_machine_state'_def + ct_not_inQ_def cur_tcb'_def ct_idle_or_in_cur_domain'_def dschDomain_def + all_invs_but_ct_idle_or_in_cur_domain'_def) + done + +lemma scheduleChooseNewThread_fragment_corres: + "corres dc (invs and valid_sched and (\s. scheduler_action s = choose_new_thread)) (invs' and (\s. ksSchedulerAction s = ChooseNewThread)) + (do _ \ when (domainTime = 0) next_domain; + choose_thread + od) + (do _ \ when (domainTime = 0) nextDomain; + chooseThread + od)" + apply (subst bind_dummy_ret_val) + apply (subst bind_dummy_ret_val) + apply (rule corres_guard_imp) + apply (rule corres_split) + apply (rule corres_when, simp) + apply (rule nextDomain_corres) + apply simp + apply (rule chooseThread_corres) + apply (wp nextDomain_invs_no_cicd')+ + apply (clarsimp simp: valid_sched_def invs'_def valid_state'_def all_invs_but_ct_idle_or_in_cur_domain'_def)+ + done + +lemma scheduleSwitchThreadFastfail_corres: + "\ ct \ it \ (tp = tp' \ cp = cp') ; ct = ct' ; it = it' \ \ + corres ((=)) (is_etcb_at ct) (tcb_at' ct) + (schedule_switch_thread_fastfail ct it cp tp) + (scheduleSwitchThreadFastfail ct' it' cp' tp')" + by (clarsimp simp: schedule_switch_thread_fastfail_def scheduleSwitchThreadFastfail_def) + +lemma gets_is_highest_prio_expand: + "gets (is_highest_prio d p) \ do + q \ gets (\s. ready_queues s d); + return ((\p. q p = []) \ Max {prio. 
q prio \ []} \ p) + od" + by (clarsimp simp: is_highest_prio_def gets_def) + +lemma isHighestPrio_corres: + assumes "d' = d" + assumes "p' = p" + shows + "corres ((=)) \ valid_queues + (gets (is_highest_prio d p)) + (isHighestPrio d' p')" + using assms + apply (clarsimp simp: gets_is_highest_prio_expand isHighestPrio_def) + apply (subst getHighestPrio_def') + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap]) + apply (rule corres_if_r'[where P'="\_. True",rotated]) + apply (rule_tac corres_symb_exec_r) + apply (rule_tac + P="\s. q = ready_queues s d + " and + P'="\s. valid_queues s \ + l1 = ksReadyQueuesL1Bitmap s d \ + l1 \ 0 \ hprio = lookupBitmapPriority d s" and + F="hprio = Max {prio. q prio \ []}" in corres_req) + apply (elim conjE) + apply (clarsimp simp: valid_queues_def) + apply (subst lookupBitmapPriority_Max_eqI; blast?) + apply (fastforce simp: ready_queues_relation_def dest!: state_relationD) + apply fastforce + apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imps ksReadyQueuesL1Bitmap_return_wp)+ + done + +crunch valid_idle_etcb[wp]: set_scheduler_action valid_idle_etcb + +crunch inv[wp]: isHighestPrio P +crunch inv[wp]: curDomain P +crunch inv[wp]: scheduleSwitchThreadFastfail P + +lemma setSchedulerAction_invs': (* not in wp set, clobbered by ssa_wp *) + "\\s. invs' s \ setSchedulerAction ChooseNewThread \\_. invs' \" + by (wpsimp simp: invs'_def cur_tcb'_def valid_state'_def valid_irq_node'_def ct_not_inQ_def + valid_queues_def valid_queues_no_bitmap_def valid_queues'_def + ct_idle_or_in_cur_domain'_def) + +lemma scheduleChooseNewThread_corres: + "corres dc + (\s. invs s \ valid_sched s \ scheduler_action s = choose_new_thread) + (\s. invs' s \ ksSchedulerAction s = ChooseNewThread) + schedule_choose_new_thread scheduleChooseNewThread" + unfolding schedule_choose_new_thread_def scheduleChooseNewThread_def + apply (rule corres_guard_imp) + apply (rule corres_split[OF getDomainTime_corres], clarsimp) + apply (rule corres_split[OF scheduleChooseNewThread_fragment_corres, simplified bind_assoc]) + apply (rule setSchedulerAction_corres) + apply (wp | simp)+ + apply (wp | simp add: getDomainTime_def)+ + apply auto + done + +lemma ethread_get_when_corres: + assumes x: "\etcb tcb'. etcb_relation etcb tcb' \ r (f etcb) (f' tcb')" + shows "corres (\rv rv'. b \ r rv rv') (is_etcb_at t) (tcb_at' t) + (ethread_get_when b f t) (threadGet f' t)" + apply (clarsimp simp: ethread_get_when_def) + apply (rule conjI; clarsimp) + apply (rule corres_guard_imp, rule ethreadget_corres; simp add: x) + apply (clarsimp simp: threadGet_def) + apply (rule corres_noop) + apply wpsimp+ + done + +lemma schedule_corres: + "corres dc (invs and valid_sched and valid_list) invs' (Schedule_A.schedule) ThreadDecls_H.schedule" + supply ethread_get_wp[wp del] + supply ssa_wp[wp del] + supply tcbSchedEnqueue_invs'[wp del] + supply tcbSchedEnqueue_invs'_not_ResumeCurrentThread[wp del] + supply setSchedulerAction_direct[wp] + supply if_split[split del] + + apply (clarsimp simp: Schedule_A.schedule_def Thread_H.schedule_def) + apply (subst thread_get_test) + apply (subst thread_get_comm) + apply (subst schact_bind_inside) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres[THEN corres_rel_imp[where r="\x y. 
y = x"],simplified]]) + apply (rule corres_split[OF getSchedulerAction_corres]) + apply (rule corres_split_sched_act,assumption) + apply (rule_tac P="tcb_at ct" in corres_symb_exec_l') + apply (rule_tac corres_symb_exec_l) + apply simp + apply (rule corres_assert_ret) + apply ((wpsimp wp: thread_get_wp' gets_exs_valid)+) + prefer 2 + (* choose thread *) + apply clarsimp + apply (rule corres_split[OF thread_get_isRunnable_corres]) + apply (rule corres_split) + apply (rule corres_when, simp) + apply (rule tcbSchedEnqueue_corres) + apply (rule scheduleChooseNewThread_corres, simp) + apply (wp thread_get_wp' tcbSchedEnqueue_invs' hoare_vcg_conj_lift hoare_drop_imps + | clarsimp)+ + (* switch to thread *) + apply (rule corres_split[OF thread_get_isRunnable_corres], + rename_tac was_running wasRunning) + apply (rule corres_split) + apply (rule corres_when, simp) + apply (rule tcbSchedEnqueue_corres) + apply (rule corres_split[OF getIdleThread_corres], rename_tac it it') + apply (rule_tac F="was_running \ ct \ it" in corres_gen_asm) + apply (rule corres_split) + apply (rule ethreadget_corres[where r="(=)"]) + apply (clarsimp simp: etcb_relation_def) + apply (rename_tac tp tp') + apply (rule corres_split) + apply (rule ethread_get_when_corres[where r="(=)"]) + apply (clarsimp simp: etcb_relation_def) + apply (rename_tac cp cp') + apply (rule corres_split) + apply (rule scheduleSwitchThreadFastfail_corres; simp) + apply (rule corres_split[OF curDomain_corres]) + apply (rule corres_split[OF isHighestPrio_corres]; simp only:) + apply (rule corres_if, simp) + apply (rule corres_split[OF tcbSchedEnqueue_corres]) + apply (simp, fold dc_def) + apply (rule corres_split) + apply (rule setSchedulerAction_corres; simp) + apply (rule scheduleChooseNewThread_corres) + apply (wp | simp)+ + apply (simp add: valid_sched_def) + apply wp + apply (rule hoare_vcg_conj_lift) + apply (rule_tac t=t in set_scheduler_action_cnt_valid_blocked') + apply (wpsimp wp: setSchedulerAction_invs')+ + apply (wp tcb_sched_action_enqueue_valid_blocked hoare_vcg_all_lift enqueue_thread_queued) + apply (wp tcbSchedEnqueue_invs'_not_ResumeCurrentThread) + apply (rule corres_if, fastforce) + apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (simp, fold dc_def) + apply (rule corres_split) + apply (rule setSchedulerAction_corres; simp) + apply (rule scheduleChooseNewThread_corres) + apply (wp | simp)+ + apply (simp add: valid_sched_def) + apply wp + apply (rule hoare_vcg_conj_lift) + apply (rule_tac t=t in set_scheduler_action_cnt_valid_blocked') + apply (wpsimp wp: setSchedulerAction_invs')+ + apply (wp tcb_sched_action_append_valid_blocked hoare_vcg_all_lift append_thread_queued) + apply (wp tcbSchedAppend_invs'_not_ResumeCurrentThread) + + apply (rule corres_split[OF guarded_switch_to_corres], simp) + apply (rule setSchedulerAction_corres[simplified dc_def]) + apply (wp | simp)+ + + (* isHighestPrio *) + apply (clarsimp simp: if_apply_def2) + apply ((wp (once) hoare_drop_imp)+)[1] + + apply (simp add: if_apply_def2) + apply ((wp (once) hoare_drop_imp)+)[1] + apply wpsimp+ + + apply (clarsimp simp: conj_ac cong: conj_cong) + apply wp + apply (rule_tac Q="\_ s. 
valid_blocked_except t s \ scheduler_action s = switch_thread t" + in hoare_post_imp, fastforce) + apply (wp add: tcb_sched_action_enqueue_valid_blocked_except + tcbSchedEnqueue_invs'_not_ResumeCurrentThread thread_get_wp + del: gets_wp)+ + apply (clarsimp simp: conj_ac if_apply_def2 cong: imp_cong conj_cong del: hoare_gets) + apply (wp gets_wp)+ + + (* abstract final subgoal *) + apply clarsimp + + subgoal for s + apply (clarsimp split: Deterministic_A.scheduler_action.splits + simp: invs_psp_aligned invs_distinct invs_valid_objs invs_arch_state + invs_vspace_objs[simplified] tcb_at_invs) + apply (rule conjI, clarsimp) + apply (fastforce simp: invs_def + valid_sched_def valid_sched_action_def is_activatable_def + st_tcb_at_def obj_at_def valid_state_def only_idle_def + ) + apply (rule conjI, clarsimp) + subgoal for candidate + apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def + valid_arch_caps_def valid_sched_action_def + weak_valid_sched_action_def tcb_at_is_etcb_at + tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]] + valid_blocked_except_def valid_blocked_def invs_hyp_sym_refs) + apply (clarsimp simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def) + done + (* choose new thread case *) + apply (intro impI conjI allI tcb_at_invs + | fastforce simp: invs_def cur_tcb_def valid_etcbs_def + valid_sched_def st_tcb_at_def obj_at_def valid_state_def + weak_valid_sched_action_def not_cur_thread_def)+ + apply (simp add: valid_sched_def valid_blocked_def valid_blocked_except_def) + done + + (* haskell final subgoal *) + apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def + cong: imp_cong split: scheduler_action.splits) + apply (fastforce simp: cur_tcb'_def valid_pspace'_def) + done + +lemma ssa_all_invs_but_ct_not_inQ': + "\all_invs_but_ct_not_inQ' and sch_act_wf sa and + (\s. sa = ResumeCurrentThread \ ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s)\ + setSchedulerAction sa \\rv. all_invs_but_ct_not_inQ'\" +proof - + show ?thesis + apply (simp add: setSchedulerAction_def) + apply wp + apply (clarsimp simp add: invs'_def valid_state'_def cur_tcb'_def + Invariants_H.valid_queues_def + state_refs_of'_def ps_clear_def + valid_irq_node'_def valid_queues'_def + tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def + bitmapQ_defs valid_queues_no_bitmap_def + cong: option.case_cong) + done +qed + +lemma ssa_ct_not_inQ: + "\\s. sa = ResumeCurrentThread \ obj_at' (Not \ tcbQueued) (ksCurThread s) s\ + setSchedulerAction sa \\rv. ct_not_inQ\" + by (simp add: setSchedulerAction_def ct_not_inQ_def, wp, clarsimp) + +lemma ssa_all_invs_but_ct_not_inQ''[simplified]: + "\\s. (all_invs_but_ct_not_inQ' s \ sch_act_wf sa s) + \ (sa = ResumeCurrentThread \ ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s) + \ (sa = ResumeCurrentThread \ obj_at' (Not \ tcbQueued) (ksCurThread s) s)\ + setSchedulerAction sa \\rv. invs'\" + apply (simp only: all_invs_but_not_ct_inQ_check' [symmetric]) + apply (rule hoare_elim_pred_conj) + apply (wp hoare_vcg_conj_lift [OF ssa_all_invs_but_ct_not_inQ' ssa_ct_not_inQ]) + apply (clarsimp) + done + +lemma ssa_invs': + "\invs' and sch_act_wf sa and + (\s. sa = ResumeCurrentThread \ ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s) and + (\s. sa = ResumeCurrentThread \ obj_at' (Not \ tcbQueued) (ksCurThread s) s)\ + setSchedulerAction sa \\rv. 
invs'\" + apply (wp ssa_all_invs_but_ct_not_inQ'') + apply (clarsimp simp add: invs'_def valid_state'_def) + done + +lemma getDomainTime_wp[wp]: "\\s. P (ksDomainTime s) s \ getDomainTime \ P \" + unfolding getDomainTime_def + by wp + +lemma switchToThread_ct_not_queued_2: + "\invs_no_cicd' and tcb_at' t\ switchToThread t \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" + (is "\_\ _ \\_. ?POST\") + apply (simp add: Thread_H.switchToThread_def) + apply (wp) + apply (simp add: AARCH64_H.switchToThread_def setCurThread_def) + apply (wp tcbSchedDequeue_not_tcbQueued | simp )+ + done + +lemma setCurThread_obj_at': + "\ obj_at' P t \ setCurThread t \\rv s. obj_at' P (ksCurThread s) s \" +proof - + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (simp add: ct_in_state'_def st_tcb_at'_def) + done +qed + +lemma switchToIdleThread_ct_not_queued_no_cicd': + "\ invs_no_cicd' \ switchToIdleThread \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \" + apply (simp add: Thread_H.switchToIdleThread_def) + apply (wp setCurThread_obj_at') + apply (rule idle'_not_tcbQueued') + apply (simp add: invs_no_cicd'_def)+ + done + +lemma switchToIdleThread_activatable_2[wp]: + "\invs_no_cicd'\ switchToIdleThread \\rv. ct_in_state' activatable'\" + apply (simp add: Thread_H.switchToIdleThread_def + AARCH64_H.switchToIdleThread_def) + apply (wp setCurThread_ct_in_state) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_idle'_def + pred_tcb_at'_def obj_at'_def idle_tcb'_def) + done + +lemma switchToThread_tcb_in_cur_domain': + "\tcb_in_cur_domain' thread\ + ThreadDecls_H.switchToThread thread + \\y s. tcb_in_cur_domain' (ksCurThread s) s\" + apply (simp add: Thread_H.switchToThread_def setCurThread_def) + apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued) + done + +lemma chooseThread_invs_no_cicd'_posts: (* generic version *) + "\ invs_no_cicd' \ chooseThread + \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s \ + ct_in_state' activatable' s \ + (ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s) \" + (is "\_\ _ \\_. ?POST\") +proof - + note switchToThread_invs[wp del] + note switchToThread_invs_no_cicd'[wp del] + note switchToThread_lookupBitmapPriority_wp[wp] + note assert_wp[wp del] + note if_split[split del] + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. \ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + + show ?thesis + unfolding chooseThread_def Let_def curDomain_def + apply (simp only: return_bind, simp) + apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) + apply (rule_tac B="\rv s. 
invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rename_tac l1) + apply (case_tac "l1 = 0") + (* switch to idle thread *) + apply simp + apply (rule hoare_pre) + apply (wp (once) switchToIdleThread_ct_not_queued_no_cicd') + apply (wp (once)) + apply ((wp hoare_disjI1 switchToIdleThread_curr_is_idle)+)[1] + apply simp + (* we have a thread to switch to *) + apply (clarsimp simp: bitmap_fun_defs) + apply (wp assert_inv switchToThread_ct_not_queued_2 assert_inv hoare_disjI2 + switchToThread_tcb_in_cur_domain') + apply clarsimp + apply (clarsimp dest!: invs_no_cicd'_queues + simp: valid_queues_def lookupBitmapPriority_def[symmetric]) + apply (drule (3) lookupBitmapPriority_obj_at') + apply normalise_obj_at' + apply (fastforce simp: tcb_in_cur_domain'_def inQ_def elim: obj_at'_weaken) + apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ + done +qed + +lemma chooseThread_activatable_2: + "\invs_no_cicd'\ chooseThread \\rv. ct_in_state' activatable'\" + apply (rule hoare_pre, rule hoare_strengthen_post) + apply (rule chooseThread_invs_no_cicd'_posts) + apply simp+ + done + +lemma chooseThread_ct_not_queued_2: + "\ invs_no_cicd'\ chooseThread \\rv s. obj_at' (Not \ tcbQueued) (ksCurThread s) s\" + (is "\_\ _ \\_. ?POST\") + apply (rule hoare_pre, rule hoare_strengthen_post) + apply (rule chooseThread_invs_no_cicd'_posts) + apply simp+ + done + +lemma chooseThread_invs_no_cicd': + "\ invs_no_cicd' \ chooseThread \\rv. invs' \" +proof - + note switchToThread_invs[wp del] + note switchToThread_invs_no_cicd'[wp del] + note switchToThread_lookupBitmapPriority_wp[wp] + note assert_wp[wp del] + note if_split[split del] + + (* if we only have one domain, we are in it *) + have one_domain_case: + "\s. \ invs_no_cicd' s; numDomains \ 1 \ \ ksCurDomain s = 0" + by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def) + + (* NOTE: do *not* unfold numDomains in the rest of the proof, + it should work for any number *) + + (* FIXME this is almost identical to the chooseThread_invs_no_cicd'_posts proof, can generalise? *) + show ?thesis + unfolding chooseThread_def Let_def curDomain_def + apply (simp only: return_bind, simp) + apply (rule hoare_seq_ext[where B="\rv s. invs_no_cicd' s \ rv = ksCurDomain s"]) + apply (rule_tac B="\rv s. invs_no_cicd' s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rename_tac l1) + apply (case_tac "l1 = 0") + (* switch to idle thread *) + apply (simp, wp (once) switchToIdleThread_invs_no_cicd', simp) + (* we have a thread to switch to *) + apply (clarsimp simp: bitmap_fun_defs) + apply (wp assert_inv) + apply (clarsimp dest!: invs_no_cicd'_queues simp: valid_queues_def) + apply (fastforce elim: bitmapQ_from_bitmap_lookup simp: lookupBitmapPriority_def) + apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+ + done +qed + +lemma chooseThread_in_cur_domain': + "\ invs_no_cicd' \ chooseThread \\rv s. ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s\" + apply (rule hoare_pre, rule hoare_strengthen_post) + apply (rule chooseThread_invs_no_cicd'_posts, simp_all) + done + +lemma scheduleChooseNewThread_invs': + "\ invs' and (\s. ksSchedulerAction s = ChooseNewThread) \ + scheduleChooseNewThread + \ \_ s. 
invs' s \" + unfolding scheduleChooseNewThread_def + apply (wpsimp wp: ssa_invs' chooseThread_invs_no_cicd' chooseThread_ct_not_queued_2 + chooseThread_activatable_2 chooseThread_invs_no_cicd' + chooseThread_in_cur_domain' nextDomain_invs_no_cicd' chooseThread_ct_not_queued_2) + apply (clarsimp simp: invs'_to_invs_no_cicd'_def) + done + +lemma schedule_invs': + "\invs'\ ThreadDecls_H.schedule \\rv. invs'\" + supply ssa_wp[wp del] + apply (simp add: schedule_def) + apply (rule_tac hoare_seq_ext, rename_tac t) + apply (wp, wpc) + \ \action = ResumeCurrentThread\ + apply (wp)[1] + \ \action = ChooseNewThread\ + apply (wp scheduleChooseNewThread_invs') + \ \action = SwitchToThread candidate\ + apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs' + chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct + switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2 + | wp hoare_disjI2[where R="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] + | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] + | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] + | strengthen invs'_invs_no_cicd + | wp hoare_vcg_imp_lift)+ + apply (frule invs_sch_act_wf') + apply (auto simp: invs_sch_act_wf' obj_at'_activatable_st_tcb_at' + st_tcb_at'_runnable_is_activatable) + done + +lemma setCurThread_nosch: + "\\s. P (ksSchedulerAction s)\ + setCurThread t + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setCurThread_def) + apply wp + apply simp + done + +lemma stt_nosch: + "\\s. P (ksSchedulerAction s)\ + switchToThread t + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: Thread_H.switchToThread_def AARCH64_H.switchToThread_def storeWordUser_def) + apply (wp setCurThread_nosch hoare_drop_imp |simp)+ + done + +lemma stit_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ + switchToIdleThread + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: Thread_H.switchToIdleThread_def + AARCH64_H.switchToIdleThread_def storeWordUser_def) + apply (wp setCurThread_nosch | simp add: getIdleThread_def)+ + done + +lemma schedule_sch: + "\\\ schedule \\rv s. ksSchedulerAction s = ResumeCurrentThread\" + by (wp setSchedulerAction_direct | wpc| simp add: schedule_def scheduleChooseNewThread_def)+ + +lemma schedule_sch_act_simple: + "\\\ schedule \\rv. sch_act_simple\" + apply (rule hoare_strengthen_post [OF schedule_sch]) + apply (simp add: sch_act_simple_def) + done + +lemma ssa_ct: + "\ct_in_state' P\ setSchedulerAction sa \\rv. ct_in_state' P\" +proof - + show ?thesis + apply (unfold setSchedulerAction_def) + apply wp + apply (clarsimp simp add: ct_in_state'_def pred_tcb_at'_def) + done +qed + +lemma scheduleChooseNewThread_ct_activatable'[wp]: + "\ invs' and (\s. ksSchedulerAction s = ChooseNewThread) \ + scheduleChooseNewThread + \\_. ct_in_state' activatable'\" + unfolding scheduleChooseNewThread_def + by (wpsimp simp: ct_in_state'_def + wp: ssa_invs' nextDomain_invs_no_cicd' + chooseThread_activatable_2[simplified ct_in_state'_def] + | (rule hoare_lift_Pf[where f=ksCurThread], solves wp) + | strengthen invs'_invs_no_cicd)+ + +lemma schedule_ct_activatable'[wp]: + "\invs'\ ThreadDecls_H.schedule \\_. 
ct_in_state' activatable'\" + supply ssa_wp[wp del] + apply (simp add: schedule_def) + apply (rule_tac hoare_seq_ext, rename_tac t) + apply (wp, wpc) + \ \action = ResumeCurrentThread\ + apply (wp)[1] + \ \action = ChooseNewThread\ + apply wpsimp + \ \action = SwitchToThread\ + apply (wpsimp wp: ssa_invs' setSchedulerAction_direct ssa_ct + | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] + | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] + | strengthen invs'_invs_no_cicd + | wp hoare_vcg_imp_lift)+ + apply (fastforce dest: invs_sch_act_wf' elim: pred_tcb'_weakenE + simp: sch_act_wf obj_at'_activatable_st_tcb_at') + done + +lemma threadSet_sch_act_sane: + "\sch_act_sane\ threadSet f t \\_. sch_act_sane\" + by (wp sch_act_sane_lift) + +lemma rescheduleRequired_sch_act_sane[wp]: + "\\\ rescheduleRequired \\rv. sch_act_sane\" + apply (simp add: rescheduleRequired_def sch_act_sane_def + setSchedulerAction_def) + by (wp | wpc | clarsimp)+ + +lemma sts_sch_act_sane: + "\sch_act_sane\ setThreadState st t \\_. sch_act_sane\" + apply (simp add: setThreadState_def) + including no_pre + apply (wp hoare_drop_imps + | simp add: threadSet_sch_act_sane)+ + done + +lemma sbn_sch_act_sane: + "\sch_act_sane\ setBoundNotification ntfn t \\_. sch_act_sane\" + apply (simp add: setBoundNotification_def) + apply (wp | simp add: threadSet_sch_act_sane)+ + done + +lemma possibleSwitchTo_corres: + "corres dc + (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t + and pspace_aligned and pspace_distinct) + (valid_queues and valid_queues' and (\s. weak_sch_act_wf (ksSchedulerAction s) s) + and valid_objs') + (possible_switch_to t) + (possibleSwitchTo t)" + supply ethread_get_wp[wp del] + apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) + apply (clarsimp simp: state_relation_def) + apply (rule tcb_at_cross, erule st_tcb_at_tcb_at; assumption) + apply (simp add: possible_switch_to_def possibleSwitchTo_def cong: if_cong) + apply (rule corres_guard_imp) + apply (rule corres_split[OF curDomain_corres], simp) + apply (rule corres_split) + apply (rule ethreadget_corres[where r="(=)"]) + apply (clarsimp simp: etcb_relation_def) + apply (rule corres_split[OF getSchedulerAction_corres]) + apply (rule corres_if, simp) + apply (rule tcbSchedEnqueue_corres) + apply (rule corres_if, simp) + apply (case_tac action; simp) + apply (rule corres_split[OF rescheduleRequired_corres]) + apply (rule tcbSchedEnqueue_corres) + apply (wp rescheduleRequired_valid_queues'_weak)+ + apply (rule setSchedulerAction_corres, simp) + apply (wpsimp simp: if_apply_def2 + wp: hoare_drop_imp[where f="ethread_get a b" for a b])+ + apply (wp hoare_drop_imps)[1] + apply wp+ + apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def st_tcb_at_tcb_at + valid_sched_action_def weak_valid_sched_action_def + tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]]) + apply (simp add: tcb_at_is_etcb_at) + done + +end +end diff --git a/proof/refine/AARCH64/StateRelation.thy b/proof/refine/AARCH64/StateRelation.thy new file mode 100644 index 0000000000..43a6fce143 --- /dev/null +++ b/proof/refine/AARCH64/StateRelation.thy @@ -0,0 +1,598 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + The refinement relation between abstract and concrete states +*) + +theory StateRelation +imports InvariantUpdates_H +begin + +context begin interpretation Arch . 
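+ (* Illustrative example (not used by the proofs) for the cte_map definition below:
+    a slot's CTE address is obtained by shifting the slot index into the CNode object.
+    For instance, with cref = [True, False, True] we have of_bl cref = 5, so
+    cte_map (oref, cref) = oref + (5 << cte_level_bits),
+    i.e. the CTE sits 5 * 2 ^ cte_level_bits bytes past the start of the CNode at oref. *)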
+ +definition cte_map :: "cslot_ptr \ machine_word" where + "cte_map \ \(oref, cref). oref + (of_bl cref << cte_level_bits)" + +lemmas cte_map_def' = cte_map_def[simplified cte_level_bits_def shiftl_t2n mult_ac, simplified] + +definition lookup_failure_map :: "ExceptionTypes_A.lookup_failure \ Fault_H.lookup_failure" where + "lookup_failure_map \ \lf. case lf of + ExceptionTypes_A.InvalidRoot \ Fault_H.InvalidRoot + | ExceptionTypes_A.MissingCapability n \ Fault_H.MissingCapability n + | ExceptionTypes_A.DepthMismatch n m \ Fault_H.DepthMismatch n m + | ExceptionTypes_A.GuardMismatch n g \ Fault_H.GuardMismatch n (of_bl g) (length g)" + +primrec arch_fault_map :: "Machine_A.AARCH64_A.arch_fault \ arch_fault" where + "arch_fault_map (Machine_A.AARCH64_A.VMFault ptr msg) = VMFault ptr msg" +| "arch_fault_map (Machine_A.AARCH64_A.VGICMaintenance datalist) = VGICMaintenance datalist " +| "arch_fault_map (Machine_A.AARCH64_A.VPPIEvent irq) = VPPIEvent irq" +| "arch_fault_map (Machine_A.AARCH64_A.VCPUFault data) = VCPUFault data" + +primrec fault_map :: "ExceptionTypes_A.fault \ Fault_H.fault" where + "fault_map (ExceptionTypes_A.CapFault ref bool failure) = + Fault_H.CapFault ref bool (lookup_failure_map failure)" +| "fault_map (ExceptionTypes_A.ArchFault arch_fault) = + Fault_H.ArchFault (arch_fault_map arch_fault)" +| "fault_map (ExceptionTypes_A.UnknownSyscallException n) = + Fault_H.UnknownSyscallException n" +| "fault_map (ExceptionTypes_A.UserException x y) = + Fault_H.UserException x y" + +type_synonym obj_relation_cut = "Structures_A.kernel_object \ Structures_H.kernel_object \ bool" +type_synonym obj_relation_cuts = "(machine_word \ obj_relation_cut) set" + +definition vmrights_map :: "rights set \ vmrights" where + "vmrights_map S \ if AllowRead \ S + then (if AllowWrite \ S then VMReadWrite else VMReadOnly) + else VMKernelOnly" + +definition zbits_map :: "nat option \ zombie_type" where + "zbits_map N \ case N of Some n \ ZombieCNode n | None \ ZombieTCB" + +definition mdata_map :: + "(Machine_A.AARCH64_A.asid \ vspace_ref) option \ (asid \ vspace_ref) option" where + "mdata_map = map_option (\(asid, ref). 
(ucast asid, ref))" + +primrec acap_relation :: "arch_cap \ arch_capability \ bool" where + "acap_relation (arch_cap.ASIDPoolCap p asid) c = + (c = ASIDPoolCap p (ucast asid))" +| "acap_relation (arch_cap.ASIDControlCap) c = + (c = ASIDControlCap)" +| "acap_relation (arch_cap.FrameCap p rghts sz dev data) c = + (c = FrameCap p (vmrights_map rghts) sz dev (mdata_map data))" +| "acap_relation (arch_cap.PageTableCap p pt_t data) c = + (c = PageTableCap p pt_t (mdata_map data))" +| "acap_relation (arch_cap.VCPUCap vcpu) c = (c = + arch_capability.VCPUCap vcpu)" + +primrec cap_relation :: "cap \ capability \ bool" where + "cap_relation Structures_A.NullCap c = + (c = Structures_H.NullCap)" +| "cap_relation Structures_A.DomainCap c = + (c = Structures_H.DomainCap)" +| "cap_relation (Structures_A.UntypedCap dev ref n f) c = + (c = Structures_H.UntypedCap dev ref n f)" +| "cap_relation (Structures_A.EndpointCap ref b r) c = + (c = Structures_H.EndpointCap ref b (AllowSend \ r) (AllowRecv \ r) (AllowGrant \ r) + (AllowGrantReply \ r))" +| "cap_relation (Structures_A.NotificationCap ref b r) c = + (c = Structures_H.NotificationCap ref b (AllowSend \ r) (AllowRecv \ r))" +| "cap_relation (Structures_A.CNodeCap ref n L) c = + (c = Structures_H.CNodeCap ref n (of_bl L) (length L))" +| "cap_relation (Structures_A.ThreadCap ref) c = + (c = Structures_H.ThreadCap ref)" +| "cap_relation (Structures_A.ReplyCap ref master r) c = + (c = Structures_H.ReplyCap ref master (AllowGrant \ r))" +| "cap_relation (Structures_A.IRQControlCap) c = + (c = Structures_H.IRQControlCap)" +| "cap_relation (Structures_A.IRQHandlerCap irq) c = + (c = Structures_H.IRQHandlerCap irq)" +| "cap_relation (Structures_A.ArchObjectCap a) c = + (\a'. acap_relation a a' \ c = Structures_H.ArchObjectCap a')" +| "cap_relation (Structures_A.Zombie p b n) c = + (c = Structures_H.Zombie p (zbits_map b) n)" + + +definition cte_relation :: "cap_ref \ obj_relation_cut" where + "cte_relation y \ \ko ko'. \sz cs cte cap. ko = CNode sz cs \ ko' = KOCTE cte + \ cs y = Some cap \ cap_relation cap (cteCap cte)" + +definition abs_asid_entry :: "asidpool_entry \ asid_pool_entry" where + "abs_asid_entry ap = AARCH64_A.ASIDPoolVSpace (apVMID ap) (apVSpace ap)" + +definition asid_pool_relation :: "asid_pool \ asidpool \ bool" where + "asid_pool_relation \ \p p'. p = map_option abs_asid_entry \ inv ASIDPool p' \ ucast" + +lemma inj_ASIDPool[simp]: + "inj ASIDPool" + by (auto intro: injI) + +lemma asid_pool_relation_def': + "asid_pool_relation ap (ASIDPool ap') = + (\asid_low. ap asid_low = map_option abs_asid_entry (ap' (ucast asid_low)))" + by (auto simp add: asid_pool_relation_def) + +definition vgic_map :: "gic_vcpu_interface \ gicvcpuinterface" where + "vgic_map \ \v. VGICInterface (vgic_hcr v) (vgic_vmcr v) (vgic_apr v) (vgic_lr v)" + +definition vcpu_relation :: "AARCH64_A.vcpu \ vcpu \ bool" where + "vcpu_relation \ \v v'. vcpu_tcb v = vcpuTCBPtr v' \ + vgic_map (vcpu_vgic v) = vcpuVGIC v' \ + vcpu_regs v = vcpuRegs v' \ + vcpu_vppi_masked v = vcpuVPPIMasked v' \ + vcpu_vtimer v = vcpuVTimer v'" + +definition ntfn_relation :: "Structures_A.notification \ Structures_H.notification \ bool" where + "ntfn_relation \ \ntfn ntfn'. 
+ (case ntfn_obj ntfn of + Structures_A.IdleNtfn \ ntfnObj ntfn' = Structures_H.IdleNtfn + | Structures_A.WaitingNtfn q \ ntfnObj ntfn' = Structures_H.WaitingNtfn q + | Structures_A.ActiveNtfn b \ ntfnObj ntfn' = Structures_H.ActiveNtfn b) + \ ntfn_bound_tcb ntfn = ntfnBoundTCB ntfn'" + +definition ep_relation :: "Structures_A.endpoint \ Structures_H.endpoint \ bool" where + "ep_relation \ \ep ep'. case ep of + Structures_A.IdleEP \ ep' = Structures_H.IdleEP + | Structures_A.RecvEP q \ ep' = Structures_H.RecvEP q + | Structures_A.SendEP q \ ep' = Structures_H.SendEP q" + +definition fault_rel_optionation :: "ExceptionTypes_A.fault option \ Fault_H.fault option \ bool" + where + "fault_rel_optionation \ \f f'. f' = map_option fault_map f" + +primrec thread_state_relation :: "Structures_A.thread_state \ Structures_H.thread_state \ bool" + where + "thread_state_relation (Structures_A.Running) ts' + = (ts' = Structures_H.Running)" +| "thread_state_relation (Structures_A.Restart) ts' + = (ts' = Structures_H.Restart)" +| "thread_state_relation (Structures_A.Inactive) ts' + = (ts' = Structures_H.Inactive)" +| "thread_state_relation (Structures_A.IdleThreadState) ts' + = (ts' = Structures_H.IdleThreadState)" +| "thread_state_relation (Structures_A.BlockedOnReply) ts' + = (ts' = Structures_H.BlockedOnReply)" +| "thread_state_relation (Structures_A.BlockedOnReceive oref sp) ts' + = (ts' = Structures_H.BlockedOnReceive oref (receiver_can_grant sp))" +| "thread_state_relation (Structures_A.BlockedOnSend oref sp) ts' + = (ts' = Structures_H.BlockedOnSend oref (sender_badge sp) + (sender_can_grant sp) (sender_can_grant_reply sp) (sender_is_call sp))" +| "thread_state_relation (Structures_A.BlockedOnNotification oref) ts' + = (ts' = Structures_H.BlockedOnNotification oref)" + +definition arch_tcb_relation :: "Structures_A.arch_tcb \ Structures_H.arch_tcb \ bool" where + "arch_tcb_relation \ + \atcb atcb'. tcb_context atcb = atcbContext atcb' \ tcb_vcpu atcb = atcbVCPUPtr atcb'" + +definition tcb_relation :: "Structures_A.tcb \ Structures_H.tcb \ bool" where + "tcb_relation \ \tcb tcb'. 
+ tcb_fault_handler tcb = to_bl (tcbFaultHandler tcb') + \ tcb_ipc_buffer tcb = tcbIPCBuffer tcb' + \ arch_tcb_relation (tcb_arch tcb) (tcbArch tcb') + \ thread_state_relation (tcb_state tcb) (tcbState tcb') + \ fault_rel_optionation (tcb_fault tcb) (tcbFault tcb') + \ cap_relation (tcb_ctable tcb) (cteCap (tcbCTable tcb')) + \ cap_relation (tcb_vtable tcb) (cteCap (tcbVTable tcb')) + \ cap_relation (tcb_reply tcb) (cteCap (tcbReply tcb')) + \ cap_relation (tcb_caller tcb) (cteCap (tcbCaller tcb')) + \ cap_relation (tcb_ipcframe tcb) (cteCap (tcbIPCBufferFrame tcb')) + \ tcb_bound_notification tcb = tcbBoundNotification tcb' + \ tcb_mcpriority tcb = tcbMCP tcb'" + +definition + other_obj_relation :: "Structures_A.kernel_object \ Structures_H.kernel_object \ bool" +where + "other_obj_relation obj obj' \ + (case (obj, obj') of + (TCB tcb, KOTCB tcb') \ tcb_relation tcb tcb' + | (Endpoint ep, KOEndpoint ep') \ ep_relation ep ep' + | (Notification ntfn, KONotification ntfn') \ ntfn_relation ntfn ntfn' + | (ArchObj (AARCH64_A.ASIDPool ap), KOArch (KOASIDPool ap')) \ asid_pool_relation ap ap' + | (ArchObj (AARCH64_A.VCPU vcpu), KOArch (KOVCPU vcpu')) \ vcpu_relation vcpu vcpu' + | _ \ False)" + + +primrec pte_relation' :: "AARCH64_A.pte \ AARCH64_H.pte \ bool" where + "pte_relation' AARCH64_A.InvalidPTE x = + (x = AARCH64_H.InvalidPTE)" +| "pte_relation' (AARCH64_A.PageTablePTE ppn) x = + (x = AARCH64_H.PageTablePTE (ucast ppn))" +| "pte_relation' (AARCH64_A.PagePTE page_addr is_small attrs rights) x = + (x = AARCH64_H.PagePTE page_addr is_small (Global \ attrs) (Execute \ attrs) (Device \ attrs) + (vmrights_map rights))" + +definition pte_relation :: "machine_word \ Structures_A.kernel_object \ kernel_object \ bool" where + "pte_relation y \ \ko ko'. \pt pte. ko = ArchObj (PageTable pt) \ ko' = KOArch (KOPTE pte) + \ pte_relation' (pt_apply pt y) pte" + +primrec aobj_relation_cuts :: "AARCH64_A.arch_kernel_obj \ machine_word \ obj_relation_cuts" where + "aobj_relation_cuts (DataPage dev sz) x = + { (x + (n << pageBits), \_ obj. obj = (if dev then KOUserDataDevice else KOUserData)) + | n. n < 2 ^ (pageBitsForSize sz - pageBits) }" +| "aobj_relation_cuts (AARCH64_A.ASIDPool pool) x = + {(x, other_obj_relation)}" +| "aobj_relation_cuts (PageTable pt) x = + (\y. (x + (y << pteBits), pte_relation y)) ` {0..mask (ptTranslationBits (pt_type pt))}" +| "aobj_relation_cuts (AARCH64_A.VCPU v) x = + {(x, other_obj_relation)}" + +primrec obj_relation_cuts :: "Structures_A.kernel_object \ machine_word \ obj_relation_cuts" where + "obj_relation_cuts (CNode sz cs) x = + (if well_formed_cnode_n sz cs + then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} + else {(x, \\)})" +| "obj_relation_cuts (TCB tcb) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (Endpoint ep) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (Notification ntfn) x = {(x, other_obj_relation)}" +| "obj_relation_cuts (ArchObj ao) x = aobj_relation_cuts ao x" + + +lemma obj_relation_cuts_def2: + "obj_relation_cuts ko x = + (case ko of CNode sz cs \ if well_formed_cnode_n sz cs + then {(cte_map (x, y), cte_relation y) | y. y \ dom cs} + else {(x, \\)} + | ArchObj (PageTable pt) \ (\y. (x + (y << pteBits), pte_relation y)) ` + {0..mask (ptTranslationBits (pt_type pt))} + | ArchObj (DataPage dev sz) \ + {(x + (n << pageBits), \_ obj. obj =(if dev then KOUserDataDevice else KOUserData)) + | n . 
n < 2 ^ (pageBitsForSize sz - pageBits) } + | _ \ {(x, other_obj_relation)})" + by (simp split: Structures_A.kernel_object.split + AARCH64_A.arch_kernel_obj.split) + +lemma obj_relation_cuts_def3: + "obj_relation_cuts ko x = + (case a_type ko of + ACapTable n \ {(cte_map (x, y), cte_relation y) | y. length y = n} + | AArch (APageTable pt_t) \ (\y. (x + (y << pteBits), pte_relation y)) ` + {0..mask (ptTranslationBits pt_t)} + | AArch (AUserData sz) \ {(x + (n << pageBits), \_ obj. obj = KOUserData) + | n . n < 2 ^ (pageBitsForSize sz - pageBits) } + | AArch (ADeviceData sz) \ {(x + (n << pageBits), \_ obj. obj = KOUserDataDevice ) + | n . n < 2 ^ (pageBitsForSize sz - pageBits) } + | AGarbage _ \ {(x, \\)} + | _ \ {(x, other_obj_relation)})" + by (simp add: obj_relation_cuts_def2 a_type_def well_formed_cnode_n_def length_set_helper + split: Structures_A.kernel_object.split AARCH64_A.arch_kernel_obj.split) + +definition is_other_obj_relation_type :: "a_type \ bool" where + "is_other_obj_relation_type tp \ + case tp of + ACapTable n \ False + | AArch (APageTable _) \ False + | AArch (AUserData _) \ False + | AArch (ADeviceData _) \ False + | AGarbage _ \ False + | _ \ True" + +lemma is_other_obj_relation_type_CapTable: + "\ is_other_obj_relation_type (ACapTable n)" + by (simp add: is_other_obj_relation_type_def) + +lemma is_other_obj_relation_type_PageTable: + "\ is_other_obj_relation_type (AArch (APageTable pt_t))" + unfolding is_other_obj_relation_type_def by simp + +lemma is_other_obj_relation_type_UserData: + "\ is_other_obj_relation_type (AArch (AUserData sz))" + unfolding is_other_obj_relation_type_def by simp + +lemma is_other_obj_relation_type_DeviceData: + "\ is_other_obj_relation_type (AArch (ADeviceData sz))" + unfolding is_other_obj_relation_type_def by simp + +lemma is_other_obj_relation_type: + "is_other_obj_relation_type (a_type ko) \ obj_relation_cuts ko x = {(x, other_obj_relation)}" + by (simp add: obj_relation_cuts_def3 is_other_obj_relation_type_def + split: a_type.splits aa_type.splits) + +definition pspace_dom :: "Structures_A.kheap \ machine_word set" where + "pspace_dom ps \ \x\dom ps. fst ` (obj_relation_cuts (the (ps x)) x)" + +definition pspace_relation :: + "Structures_A.kheap \ (machine_word \ Structures_H.kernel_object) \ bool" where + "pspace_relation ab con \ + (pspace_dom ab = dom con) \ + (\x \ dom ab. \(y, P) \ obj_relation_cuts (the (ab x)) x. P (the (ab x)) (the (con y)))" + +definition etcb_relation :: "etcb \ Structures_H.tcb \ bool" where + "etcb_relation \ \etcb tcb'. + tcb_priority etcb = tcbPriority tcb' + \ tcb_time_slice etcb = tcbTimeSlice tcb' + \ tcb_domain etcb = tcbDomain tcb'" + +definition ekheap_relation :: + "(obj_ref \ etcb option) \ (machine_word \ Structures_H.kernel_object) \ bool" where + "ekheap_relation ab con \ + \x \ dom ab. \tcb'. con x = Some (KOTCB tcb') \ etcb_relation (the (ab x)) tcb'" + +primrec sched_act_relation :: "Deterministic_A.scheduler_action \ scheduler_action \ bool" + where + "sched_act_relation resume_cur_thread a' = (a' = ResumeCurrentThread)" | + "sched_act_relation choose_new_thread a' = (a' = ChooseNewThread)" | + "sched_act_relation (switch_thread x) a' = (a' = SwitchToThread x)" + +definition ready_queues_relation :: + "(Deterministic_A.domain \ Structures_A.priority \ Deterministic_A.ready_queue) \ + (domain \ priority \ KernelStateData_H.ready_queue) \ bool" where + "ready_queues_relation qs qs' \ \d p. 
(qs d p = qs' (d, p))" + +definition ghost_relation :: + "Structures_A.kheap \ (machine_word \ vmpage_size) \ (machine_word \ nat) \ (machine_word \ pt_type) \ bool" where + "ghost_relation h ups cns pt_types \ + (\a sz. (\dev. h a = Some (ArchObj (DataPage dev sz))) \ ups a = Some sz) \ + (\a n. (\cs. h a = Some (CNode n cs) \ well_formed_cnode_n n cs) \ cns a = Some n) \ + (\a pt_t. (\pt. h a = Some (ArchObj (PageTable pt)) \ pt_t = pt_type pt) \ pt_types a = Some pt_t)" + +definition cdt_relation :: "(cslot_ptr \ bool) \ cdt \ cte_heap \ bool" where + "cdt_relation \ \cte_at m m'. + \c. cte_at c \ cte_map ` descendants_of c m = descendants_of' (cte_map c) m'" + +definition cdt_list_relation :: "cdt_list \ cdt \ cte_heap \ bool" where + "cdt_list_relation \ \t m m'. + \c cap node. m' (cte_map c) = Some (CTE cap node) + \ (case next_slot c t m of None \ True + | Some next \ mdbNext node = cte_map next)" + +definition revokable_relation :: + "(cslot_ptr \ bool) \ (cslot_ptr \ cap option) \ cte_heap \ bool" where + "revokable_relation revo cs m' \ + \c cap node. cs c \ None \ + m' (cte_map c) = Some (CTE cap node) \ + revo c = mdbRevocable node" + +definition irq_state_relation :: "irq_state \ irqstate \ bool" where + "irq_state_relation irq irq' \ case (irq, irq') of + (irq_state.IRQInactive, irqstate.IRQInactive) \ True + | (irq_state.IRQSignal, irqstate.IRQSignal) \ True + | (irq_state.IRQTimer, irqstate.IRQTimer) \ True + | _ \ False" + +definition interrupt_state_relation :: + "(irq \ obj_ref) \ (irq \ irq_state) \ interrupt_state \ bool" where + "interrupt_state_relation node_map irqs is \ + (\node irqs'. is = InterruptState node irqs' + \ (\irq. node_map irq = node + (ucast irq << cte_level_bits)) + \ (\irq. irq_state_relation (irqs irq) (irqs' irq)))" + +definition arch_state_relation :: "(arch_state \ AARCH64_H.kernel_state) set" where + "arch_state_relation \ {(s, s') . + arm_asid_table s = armKSASIDTable s' \ ucast + \ arm_us_global_vspace s = armKSGlobalUserVSpace s' + \ arm_next_vmid s = armKSNextVMID s' + \ map_option ucast \ arm_vmid_table s = armKSVMIDTable s' + \ arm_kernel_vspace s = armKSKernelVSpace s' + \ arm_current_vcpu s = armHSCurVCPU s' + \ arm_gicvcpu_numlistregs s = armKSGICVCPUNumListRegs s'}" + +definition rights_mask_map :: "rights set \ Types_H.cap_rights" where + "rights_mask_map \ + \rs. CapRights (AllowWrite \ rs) (AllowRead \ rs) (AllowGrant \ rs) (AllowGrantReply \ rs)" + + +lemma obj_relation_cutsE: + "\ (y, P) \ obj_relation_cuts ko x; P ko ko'; + \sz cs z cap cte. \ ko = CNode sz cs; well_formed_cnode_n sz cs; y = cte_map (x, z); + ko' = KOCTE cte; cs z = Some cap; cap_relation cap (cteCap cte) \ + \ R; + \pt z pte'. \ ko = ArchObj (PageTable pt); y = x + (z << pteBits); + z \ mask (ptTranslationBits (pt_type pt)); ko' = KOArch (KOPTE pte'); + pte_relation' (pt_apply pt z) pte' \ + \ R; + \sz dev n. 
\ ko = ArchObj (DataPage dev sz); + ko' = (if dev then KOUserDataDevice else KOUserData); + y = x + (n << pageBits); n < 2 ^ (pageBitsForSize sz - pageBits) \ \ R; + \ y = x; other_obj_relation ko ko'; is_other_obj_relation_type (a_type ko) \ \ R + \ \ R" + by (force simp: obj_relation_cuts_def2 is_other_obj_relation_type_def a_type_def + cte_relation_def pte_relation_def + split: Structures_A.kernel_object.splits if_splits AARCH64_A.arch_kernel_obj.splits) + +lemma eq_trans_helper: + "\ x = y; P y = Q \ \ P x = Q" + by simp + +lemma cap_relation_case': + "cap_relation cap cap' = (case cap of + cap.ArchObjectCap arch_cap.ASIDControlCap \ cap_relation cap cap' + | _ \ cap_relation cap cap')" + by (simp split: cap.split arch_cap.split) + +schematic_goal cap_relation_case: + "cap_relation cap cap' = ?P" + apply (subst cap_relation_case') + apply (clarsimp cong: cap.case_cong arch_cap.case_cong) + apply (rule refl) + done + +lemmas cap_relation_split = + eq_trans_helper [where P=P, OF cap_relation_case cap.split[where P=P]] for P +lemmas cap_relation_split_asm = + eq_trans_helper [where P=P, OF cap_relation_case cap.split_asm[where P=P]] for P + + + +text \ + Relations on other data types that aren't stored but used as intermediate values + in the specs. +\ +primrec message_info_map :: "Structures_A.message_info \ Types_H.message_info" where + "message_info_map (Structures_A.MI a b c d) = (Types_H.MI a b c d)" + +lemma mi_map_label[simp]: "msgLabel (message_info_map mi) = mi_label mi" + by (cases mi, simp) + +primrec syscall_error_map :: "ExceptionTypes_A.syscall_error \ Fault_H.syscall_error" where + "syscall_error_map (ExceptionTypes_A.InvalidArgument n) = Fault_H.InvalidArgument n" +| "syscall_error_map (ExceptionTypes_A.InvalidCapability n) = (Fault_H.InvalidCapability n)" +| "syscall_error_map ExceptionTypes_A.IllegalOperation = Fault_H.IllegalOperation" +| "syscall_error_map (ExceptionTypes_A.RangeError n m) = Fault_H.RangeError n m" +| "syscall_error_map ExceptionTypes_A.AlignmentError = Fault_H.AlignmentError" +| "syscall_error_map (ExceptionTypes_A.FailedLookup b lf) = Fault_H.FailedLookup b (lookup_failure_map lf)" +| "syscall_error_map ExceptionTypes_A.TruncatedMessage = Fault_H.TruncatedMessage" +| "syscall_error_map ExceptionTypes_A.DeleteFirst = Fault_H.DeleteFirst" +| "syscall_error_map ExceptionTypes_A.RevokeFirst = Fault_H.RevokeFirst" +| "syscall_error_map (ExceptionTypes_A.NotEnoughMemory n) = Fault_H.syscall_error.NotEnoughMemory n" + +definition APIType_map :: "Structures_A.apiobject_type \ AARCH64_H.object_type" where + "APIType_map ty \ + case ty of + Structures_A.Untyped \ APIObjectType ArchTypes_H.Untyped + | Structures_A.TCBObject \ APIObjectType ArchTypes_H.TCBObject + | Structures_A.EndpointObject \ APIObjectType ArchTypes_H.EndpointObject + | Structures_A.NotificationObject \ APIObjectType ArchTypes_H.NotificationObject + | Structures_A.CapTableObject \ APIObjectType ArchTypes_H.CapTableObject + | ArchObject ao \ (case ao of + SmallPageObj \ SmallPageObject + | LargePageObj \ LargePageObject + | HugePageObj \ HugePageObject + | PageTableObj \ PageTableObject + | AARCH64_A.VCPUObj \ VCPUObject)" + +definition state_relation :: "(det_state \ kernel_state) set" where + "state_relation \ {(s, s'). 
+ pspace_relation (kheap s) (ksPSpace s') + \ ekheap_relation (ekheap s) (ksPSpace s') + \ sched_act_relation (scheduler_action s) (ksSchedulerAction s') + \ ready_queues_relation (ready_queues s) (ksReadyQueues s') + \ ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s')) + \ cdt_relation (swp cte_at s) (cdt s) (ctes_of s') + \ cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') + \ revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s') + \ (arch_state s, ksArchState s') \ arch_state_relation + \ interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s') + \ (cur_thread s = ksCurThread s') + \ (idle_thread s = ksIdleThread s') + \ (machine_state s = ksMachineState s') + \ (work_units_completed s = ksWorkUnitsCompleted s') + \ (domain_index s = ksDomScheduleIdx s') + \ (domain_list s = ksDomSchedule s') + \ (cur_domain s = ksCurDomain s') + \ (domain_time s = ksDomainTime s')}" + +text \Rules for using states in the relation.\ + +lemma curthread_relation: + "(a, b) \ state_relation \ ksCurThread b = cur_thread a" + by (simp add: state_relation_def) + +lemma state_relation_pspace_relation[elim!]: + "(s,s') \ state_relation \ pspace_relation (kheap s) (ksPSpace s')" + by (simp add: state_relation_def) + +lemma state_relation_ekheap_relation[elim!]: + "(s,s') \ state_relation \ ekheap_relation (ekheap s) (ksPSpace s')" + by (simp add: state_relation_def) + +lemma state_relationD: + "(s, s') \ state_relation \ + pspace_relation (kheap s) (ksPSpace s') \ + ekheap_relation (ekheap s) (ksPSpace s') \ + sched_act_relation (scheduler_action s) (ksSchedulerAction s') \ + ready_queues_relation (ready_queues s) (ksReadyQueues s') \ + ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s')) \ + cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ + cdt_list_relation (cdt_list s) (cdt s) (ctes_of s') \ + revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s') \ + (arch_state s, ksArchState s') \ arch_state_relation \ + interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s') \ + cur_thread s = ksCurThread s' \ + idle_thread s = ksIdleThread s' \ + machine_state s = ksMachineState s' \ + work_units_completed s = ksWorkUnitsCompleted s' \ + domain_index s = ksDomScheduleIdx s' \ + domain_list s = ksDomSchedule s' \ + cur_domain s = ksCurDomain s' \ + domain_time s = ksDomainTime s'" + unfolding state_relation_def by simp + +lemma state_relationE [elim?]: + assumes sr: "(s, s') \ state_relation" + and rl: "\ pspace_relation (kheap s) (ksPSpace s'); + ekheap_relation (ekheap s) (ksPSpace s'); + sched_act_relation (scheduler_action s) (ksSchedulerAction s'); + ready_queues_relation (ready_queues s) (ksReadyQueues s'); + ghost_relation (kheap s) (gsUserPages s') (gsCNodes s') (gsPTTypes (ksArchState s')); + cdt_relation (swp cte_at s) (cdt s) (ctes_of s') \ + revokable_relation (is_original_cap s) (null_filter (caps_of_state s)) (ctes_of s'); + cdt_list_relation (cdt_list s) (cdt s) (ctes_of s'); + (arch_state s, ksArchState s') \ arch_state_relation; + interrupt_state_relation (interrupt_irq_node s) (interrupt_states s) (ksInterruptState s'); + cur_thread s = ksCurThread s'; + idle_thread s = ksIdleThread s'; + machine_state s = ksMachineState s'; + work_units_completed s = ksWorkUnitsCompleted s'; + domain_index s = ksDomScheduleIdx s'; + domain_list s = ksDomSchedule s'; + cur_domain s = ksCurDomain s'; + domain_time s = 
ksDomainTime s' \ \ R" + shows "R" + using sr by (blast intro!: rl dest: state_relationD) + +lemmas isCap_defs = + isZombie_def isArchObjectCap_def + isThreadCap_def isCNodeCap_def isNotificationCap_def + isEndpointCap_def isUntypedCap_def isNullCap_def + isIRQHandlerCap_def isIRQControlCap_def isReplyCap_def + isFrameCap_def isPageTableCap_def + isASIDControlCap_def isASIDPoolCap_def + isDomainCap_def isArchFrameCap_def isVCPUCap_def + +lemma isCNodeCap_cap_map[simp]: + "cap_relation c c' \ isCNodeCap c' = is_cnode_cap c" + by (cases c) (auto simp: isCap_defs split: sum.splits) + +lemma sts_rel_idle : + "thread_state_relation st IdleThreadState = (st = Structures_A.IdleThreadState)" + by (cases st, auto) + +lemma pspace_relation_absD: + "\ ab x = Some y; pspace_relation ab con \ + \ \(x', P) \ obj_relation_cuts y x. \z. con x' = Some z \ P y z" + apply (clarsimp simp: pspace_relation_def) + apply (drule bspec, erule domI) + apply simp + apply (drule(1) bspec) + apply (subgoal_tac "a \ pspace_dom ab", clarsimp) + apply (simp (no_asm) add: pspace_dom_def) + apply (fastforce simp: image_def intro: rev_bexI) + done + +lemma ekheap_relation_absD: + "\ ab x = Some y; ekheap_relation ab con \ \ + \tcb'. con x = Some (KOTCB tcb') \ etcb_relation y tcb'" + by (force simp add: ekheap_relation_def) + +lemma in_related_pspace_dom: + "\ s' x = Some y; pspace_relation s s' \ \ x \ pspace_dom s" + by (clarsimp simp add: pspace_relation_def) + +lemma pspace_dom_revE: + "\ x \ pspace_dom ps; \ko y P. \ ps y = Some ko; (x, P) \ obj_relation_cuts ko y \ \ R \ \ R" + by (clarsimp simp add: pspace_dom_def) + +lemma pspace_dom_relatedE: + "\ s' x = Some ko'; pspace_relation s s'; + \y ko P. \ s y = Some ko; (x, P) \ obj_relation_cuts ko y; P ko ko' \ \ R \ \ R" + apply (rule pspace_dom_revE [OF in_related_pspace_dom]; assumption?) + apply (fastforce dest: pspace_relation_absD) + done + +lemma ghost_relation_typ_at: + "ghost_relation (kheap s) ups cns pt_types \ + (\a sz. data_at sz a s = (ups a = Some sz)) \ + (\a n. typ_at (ACapTable n) a s = (cns a = Some n)) \ + (\a pt_t. pt_at pt_t a s = (pt_types a = Some pt_t))" + apply (rule eq_reflection) + apply (clarsimp simp: ghost_relation_def typ_at_eq_kheap_obj data_at_def) + by (intro conjI impI iffI allI; force) + +end + +end diff --git a/proof/refine/AARCH64/SubMonad_R.thy b/proof/refine/AARCH64/SubMonad_R.thy new file mode 100644 index 0000000000..de45a90d91 --- /dev/null +++ b/proof/refine/AARCH64/SubMonad_R.thy @@ -0,0 +1,137 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory SubMonad_R +imports KHeap_R EmptyFail +begin + +(* SubMonadLib *) +lemma submonad_doMachineOp: + "submonad ksMachineState (ksMachineState_update \ K) \ doMachineOp" + apply (unfold_locales) + apply (clarsimp simp: ext stateAssert_def doMachineOp_def o_def gets_def + get_def bind_def return_def submonad_fn_def)+ + done + +interpretation submonad_doMachineOp: + submonad ksMachineState "(ksMachineState_update \ K)" \ doMachineOp + by (rule submonad_doMachineOp) + +lemma corres_machine_op: + assumes P: "corres_underlying Id False True r \ \ x x'" + shows "corres r \ \ (do_machine_op x) (doMachineOp x')" + apply (rule corres_submonad [OF submonad_do_machine_op submonad_doMachineOp _ _ P]) + apply (simp_all add: state_relation_def swp_def) + done + +lemma doMachineOp_mapM: + assumes "\x. 
empty_fail (m x)" + shows "doMachineOp (mapM m l) = mapM (doMachineOp \ m) l" + apply (rule submonad_mapM [OF submonad_doMachineOp submonad_doMachineOp, + simplified]) + apply (rule assms) + done + +lemma doMachineOp_mapM_x: + assumes "\x. empty_fail (m x)" + shows "doMachineOp (mapM_x m l) = mapM_x (doMachineOp \ m) l" + apply (rule submonad_mapM_x [OF submonad_doMachineOp submonad_doMachineOp, + simplified]) + apply (rule assms) + done + + +context begin interpretation Arch . (*FIXME: arch_split*) +definition + "asUser_fetch \ \t s. case (ksPSpace s t) of + Some (KOTCB tcb) \ (atcbContextGet o tcbArch) tcb + | None \ undefined" + +definition + "asUser_replace \ \t uc s. + let obj = case (ksPSpace s t) of + Some (KOTCB tcb) \ Some (KOTCB (tcb \tcbArch := atcbContextSet uc (tcbArch tcb)\)) + | obj \ obj + in s \ ksPSpace := (ksPSpace s) (t := obj) \" + + +lemma threadGet_stateAssert_gets_asUser: + "threadGet (atcbContextGet o tcbArch) t = do stateAssert (tcb_at' t) []; gets (asUser_fetch t) od" + apply (rule is_stateAssert_gets [OF _ _ empty_fail_threadGet no_fail_threadGet]) + apply (clarsimp simp: threadGet_def liftM_def, wp) + apply (simp add: threadGet_def liftM_def, wp getObject_tcb_at') + apply (simp add: threadGet_def liftM_def, wp) + apply (rule hoare_strengthen_post, rule getObject_obj_at') + apply (simp add: objBits_simps')+ + apply (clarsimp simp: obj_at'_def asUser_fetch_def atcbContextGet_def)+ + done + +lemma threadSet_modify_asUser: + "tcb_at' t st \ + threadSet (\tcb. tcb\ tcbArch := atcbContextSet uc (tcbArch tcb)\) t st = modify (asUser_replace t uc) st" + apply (rule is_modify [OF _ empty_fail_threadSet no_fail_threadSet]) + apply (clarsimp simp: threadSet_def setObject_def split_def + updateObject_default_def) + apply wp + apply (rule_tac Q="\rv. 
obj_at' ((=) rv) t and ((=) st)" in hoare_post_imp) + apply (clarsimp simp: asUser_replace_def Let_def obj_at'_def fun_upd_def + split: option.split kernel_object.split) + apply (wp getObject_obj_at' | clarsimp simp: objBits_simps' atcbContextSet_def)+ + done + +lemma atcbContext_get_eq[simp] : "atcbContextGet (atcbContextSet x atcb) = x" + by(simp add: atcbContextGet_def atcbContextSet_def) + +lemma atcbContext_set_eq[simp] : "atcbContextSet (atcbContextGet t) t = t" + by (cases t, simp add: atcbContextGet_def atcbContextSet_def) + + +lemma atcbContext_set_set[simp] : "atcbContextSet x (atcbContextSet y atcb) = atcbContextSet x atcb" + by (cases atcb ,simp add: atcbContextSet_def) + +lemma submonad_asUser: + "submonad (asUser_fetch t) (asUser_replace t) (tcb_at' t) (asUser t)" + apply (unfold_locales) + apply (clarsimp simp: asUser_fetch_def asUser_replace_def + Let_def obj_at'_def + split: kernel_object.split option.split) + apply (clarsimp simp: asUser_replace_def Let_def + split: kernel_object.split option.split) + apply (rename_tac tcb) + apply (case_tac tcb, simp) + apply (clarsimp simp: asUser_fetch_def asUser_replace_def Let_def + fun_upd_idem + split: kernel_object.splits option.splits) + apply (rename_tac tcb) + apply (case_tac tcb, simp add: map_upd_triv atcbContextSet_def) + apply (clarsimp simp: obj_at'_def asUser_replace_def + Let_def atcbContextSet_def + split: kernel_object.splits option.splits) + apply (rename_tac tcb) + apply (case_tac tcb, simp add: objBitsKO_def ps_clear_def) + apply (rule ext) + apply (clarsimp simp: submonad_fn_def asUser_def bind_assoc split_def) + apply (subst threadGet_stateAssert_gets_asUser, simp add: bind_assoc, rule ext) + apply (rule bind_apply_cong [OF refl])+ + apply (rule bind_apply_cong [OF threadSet_modify_asUser]) + apply (clarsimp simp: in_monad stateAssert_def select_f_def) + apply (rule refl) + done + +end + +global_interpretation submonad_asUser: + submonad "asUser_fetch t" "asUser_replace t" "tcb_at' t" "asUser t" + by (rule submonad_asUser) + +lemma doMachineOp_nosch [wp]: + "\\s. P (ksSchedulerAction s)\ doMachineOp m \\rv s. P (ksSchedulerAction s)\" + apply (simp add: doMachineOp_def split_def) + apply (wp select_f_wp) + apply simp + done + +end diff --git a/proof/refine/AARCH64/Syscall_R.thy b/proof/refine/AARCH64/Syscall_R.thy new file mode 100644 index 0000000000..bbfb69b6bf --- /dev/null +++ b/proof/refine/AARCH64/Syscall_R.thy @@ -0,0 +1,2237 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + Refinement for handleEvent and syscalls +*) + +theory Syscall_R +imports Tcb_R Arch_R Interrupt_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +(* +syscall has 5 sections: m_fault h_fault m_error h_error m_finalise + +run m_fault (faultable code) \ r_fault + failure, i.e. Inr somefault \ \somefault. h_fault; done + +success, i.e. Inl a + \ run \a. m_error a (errable code) \ r_error + failure, i.e. Inr someerror \ \someerror. h_error e; done + success, i.e. Inl b \ \b. m_finalise b + +One can clearly see this is simulating some kind of monadic Maybe sequence +trying to identify all possible errors before actually performing the syscall. +*) + +lemma syscall_corres: + assumes corres: + "corres (fr \ r_flt_rel) P P' m_flt m_flt'" + "\flt flt'. flt' = fault_map flt \ + corres r (P_flt flt) (P'_flt flt') (h_flt flt) (h_flt' flt')" + "\rv rv'. 
r_flt_rel rv rv' \ + corres (ser \ r_err_rel rv rv') + (P_no_flt rv) (P'_no_flt rv') + (m_err rv) (m_err' rv')" + "\rv rv' err err'. \r_flt_rel rv rv'; err' = syscall_error_map err \ + \ corres r (P_err rv err) + (P'_err rv' err') (h_err err) (h_err' err')" + "\rvf rvf' rve rve'. \r_flt_rel rvf rvf'; r_err_rel rvf rvf' rve rve'\ + \ corres (dc \ r) + (P_no_err rvf rve) (P'_no_err rvf' rve') + (m_fin rve) (m_fin' rve')" + + assumes wp: + "\rv. \Q_no_flt rv\ m_err rv \P_no_err rv\, \P_err rv\" + "\rv'. \Q'_no_flt rv'\ m_err' rv' \P'_no_err rv'\,\P'_err rv'\" + "\Q\ m_flt \\rv. P_no_flt rv and Q_no_flt rv\, \P_flt\" + "\Q'\ m_flt' \\rv. P'_no_flt rv and Q'_no_flt rv\, \P'_flt\" + + shows "corres (dc \ r) (P and Q) (P' and Q') + (Syscall_A.syscall m_flt h_flt m_err h_err m_fin) + (Syscall_H.syscall m_flt' h_flt' m_err' h_err' m_fin')" + apply (simp add: Syscall_A.syscall_def Syscall_H.syscall_def liftE_bindE) + apply (rule corres_split_bind_case_sum) + apply (rule corres_split_bind_case_sum | rule corres | rule wp | simp add: liftE_bindE)+ + done + +lemma syscall_valid': + assumes x: + "\ft. \P_flt ft\ h_flt ft \Q\" + "\err. \P_err err\ h_err err \Q\" + "\rv. \P_no_err rv\ m_fin rv \Q\,\E\" + "\rv. \P_no_flt rv\ m_err rv \P_no_err\, \P_err\" + "\P\ m_flt \P_no_flt\, \P_flt\" + shows "\P\ Syscall_H.syscall m_flt h_flt m_err h_err m_fin \Q\, \E\" + apply (simp add: Syscall_H.syscall_def liftE_bindE + cong: sum.case_cong) + apply (rule hoare_split_bind_case_sumE) + apply (wp x)[1] + apply (rule hoare_split_bind_case_sumE) + apply (wp x|simp)+ + done + + +text \Completing the relationship between abstract/haskell invocations\ + +primrec + inv_relation :: "Invocations_A.invocation \ Invocations_H.invocation \ bool" +where + "inv_relation (Invocations_A.InvokeUntyped i) x = + (\i'. untypinv_relation i i' \ x = InvokeUntyped i')" +| "inv_relation (Invocations_A.InvokeEndpoint w w2 b c) x = + (x = InvokeEndpoint w w2 b c)" +| "inv_relation (Invocations_A.InvokeNotification w w2) x = + (x = InvokeNotification w w2)" +| "inv_relation (Invocations_A.InvokeReply w ptr grant) x = + (x = InvokeReply w (cte_map ptr) grant)" +| "inv_relation (Invocations_A.InvokeTCB i) x = + (\i'. tcbinv_relation i i' \ x = InvokeTCB i')" +| "inv_relation (Invocations_A.InvokeDomain tptr domain) x = + (x = InvokeDomain tptr domain)" +| "inv_relation (Invocations_A.InvokeIRQControl i) x = + (\i'. irq_control_inv_relation i i' \ x = InvokeIRQControl i')" +| "inv_relation (Invocations_A.InvokeIRQHandler i) x = + (\i'. irq_handler_inv_relation i i' \ x = InvokeIRQHandler i')" +| "inv_relation (Invocations_A.InvokeCNode i) x = + (\i'. cnodeinv_relation i i' \ x = InvokeCNode i')" +| "inv_relation (Invocations_A.InvokeArchObject i) x = + (\i'. archinv_relation i i' \ x = InvokeArchObject i')" + +(* In order to assert conditions that must hold for the appropriate + handleInvocation and handle_invocation calls to succeed, we must have + some notion of what a valid invocation is. + This function defines that. + For example, a InvokeEndpoint requires an endpoint at its first + constructor argument. 
*) + +primrec + valid_invocation' :: "Invocations_H.invocation \ kernel_state \ bool" +where + "valid_invocation' (Invocations_H.InvokeUntyped i) = valid_untyped_inv' i" +| "valid_invocation' (Invocations_H.InvokeEndpoint w w2 b c) = (ep_at' w and ex_nonz_cap_to' w)" +| "valid_invocation' (Invocations_H.InvokeNotification w w2) = (ntfn_at' w and ex_nonz_cap_to' w)" +| "valid_invocation' (Invocations_H.InvokeTCB i) = tcb_inv_wf' i" +| "valid_invocation' (Invocations_H.InvokeDomain thread domain) = + (tcb_at' thread and K (domain \ maxDomain))" +| "valid_invocation' (Invocations_H.InvokeReply thread slot grant) = + (tcb_at' thread and cte_wp_at' (\cte. \gr. cteCap cte = ReplyCap thread False gr) slot)" +| "valid_invocation' (Invocations_H.InvokeIRQControl i) = irq_control_inv_valid' i" +| "valid_invocation' (Invocations_H.InvokeIRQHandler i) = irq_handler_inv_valid' i" +| "valid_invocation' (Invocations_H.InvokeCNode i) = valid_cnode_inv' i" +| "valid_invocation' (Invocations_H.InvokeArchObject i) = valid_arch_inv' i" + + +(* FIXME: move *) +lemma decodeDomainInvocation_corres: + shows "\ list_all2 cap_relation (map fst cs) (map fst cs'); + list_all2 (\p pa. snd pa = cte_map (snd p)) cs cs' \ \ + corres (ser \ ((\x. inv_relation x \ uncurry Invocations_H.invocation.InvokeDomain) \ (\(x,y). Invocations_A.invocation.InvokeDomain x y))) \ \ + (decode_domain_invocation label args cs) + (decodeDomainInvocation label args cs')" + apply (simp add: decode_domain_invocation_def decodeDomainInvocation_def) + apply (rule whenE_throwError_corres_initial) + apply (simp+)[2] + apply (case_tac "args", simp_all) + apply (rule corres_guard_imp) + apply (rule_tac r'="\domain domain'. domain = domain'" and R="\_. \" and R'="\_. \" + in corres_splitEE) apply (rule whenE_throwError_corres) + apply (simp+)[2] + apply (rule corres_returnOkTT) + apply simp + apply (rule whenE_throwError_corres_initial) + apply simp + apply (case_tac "cs") + apply ((case_tac "cs'", ((simp add: null_def)+)[2])+)[2] + apply (subgoal_tac "cap_relation (fst (hd cs)) (fst (hd cs'))") + apply (case_tac "fst (hd cs)") + apply (case_tac "fst (hd cs')", simp+, rule corres_returnOkTT) + apply (simp add: inv_relation_def o_def uncurry_def) + apply (case_tac "fst (hd cs')", fastforce+) + apply (case_tac "cs") + apply (case_tac "cs'", ((simp add: list_all2_map2 list_all2_map1)+)[2]) + apply (case_tac "cs'", ((simp add: list_all2_map2 list_all2_map1)+)[2]) + apply (wp | simp)+ + done + +lemma decodeInvocation_corres: + "\cptr = to_bl cptr'; mi' = message_info_map mi; + slot' = cte_map slot; cap_relation cap cap'; + args = args'; list_all2 cap_relation (map fst excaps) (map fst excaps'); + list_all2 (\p pa. snd pa = cte_map (snd p)) excaps excaps' \ + \ + corres (ser \ inv_relation) + (invs and valid_sched and valid_list + and valid_cap cap and cte_at slot and cte_wp_at ((=) cap) slot + and (\s. \x\set excaps. s \ fst x \ cte_at (snd x) s) + and (\s. length args < 2 ^ word_bits)) + (invs' and valid_cap' cap' and cte_at' slot' + and (\s. \x\set excaps'. 
s \' fst x \ cte_at' (snd x) s)) + (decode_invocation (mi_label mi) args cptr slot cap excaps) + (RetypeDecls_H.decodeInvocation (mi_label mi) args' cptr' slot' cap' excaps')" + apply (rule corres_gen_asm) + apply (unfold decode_invocation_def decodeInvocation_def) + apply (case_tac cap, simp_all only: cap.simps) + \ \dammit, simp_all messes things up, must handle cases manually\ + \ \Null\ + apply (simp add: isCap_defs) + \ \Untyped\ + apply (simp add: isCap_defs Let_def o_def split del: if_split) + apply (rule corres_guard_imp, rule decodeUntypedInvocation_corres) + apply ((clarsimp simp:cte_wp_at_caps_of_state)+)[3] + \ \(Async)Endpoint\ + apply (simp add: isCap_defs returnOk_def) + apply (simp add: isCap_defs) + apply (clarsimp simp: returnOk_def neq_Nil_conv) + \ \ReplyCap\ + apply (simp add: isCap_defs Let_def returnOk_def) + \ \CNodeCap\ + apply (rename_tac word nat list) + apply (simp add: isCap_defs Let_def CanModify_def + split del: if_split cong: if_cong) + apply (clarsimp simp add: o_def) + apply (rule corres_guard_imp) + apply (rule_tac F="length list \ 64" in corres_gen_asm) + apply (rule decodeCNodeInvocation_corres, simp+) + apply (simp add: valid_cap_def word_bits_def) + apply simp + \ \ThreadCap\ + apply (simp add: isCap_defs Let_def CanModify_def + split del: if_split cong: if_cong) + apply (clarsimp simp add: o_def) + apply (rule corres_guard_imp) + apply (rule decodeTCBInvocation_corres, rule refl, + simp_all add: valid_cap_def valid_cap'_def)[3] + apply (simp add: split_def) + apply (rule list_all2_conj) + apply (simp add: list_all2_map2 list_all2_map1) + apply assumption + \ \DomainCap\ + apply (simp add: isCap_defs) + apply (rule corres_guard_imp) + apply (rule decodeDomainInvocation_corres) + apply (simp+)[4] + \ \IRQControl\ + apply (simp add: isCap_defs o_def) + apply (rule corres_guard_imp, rule decodeIRQControlInvocation_corres, simp+)[1] + \ \IRQHandler\ + apply (simp add: isCap_defs o_def) + apply (rule corres_guard_imp, rule decodeIRQHandlerInvocation_corres, simp+)[1] + \ \Zombie\ + apply (simp add: isCap_defs) + \ \Arch\ + apply (clarsimp simp only: cap_relation.simps) + apply (clarsimp simp add: isCap_defs Let_def o_def) + apply (rule corres_guard_imp [OF arch_decodeInvocation_corres]) + apply (simp_all add: list_all2_map2 list_all2_map1)+ + done + +declare mapME_Nil [simp] + +lemma hinv_corres_assist: + "\ info' = message_info_map info \ + \ corres (fr \ (\(p, cap, extracaps, buffer) (p', capa, extracapsa, buffera). + p' = cte_map p \ cap_relation cap capa \ buffer = buffera \ + list_all2 + (\x y. cap_relation (fst x) (fst y) \ snd y = cte_map (snd x)) + extracaps extracapsa)) + + (invs and tcb_at thread and (\_. 
valid_message_info info)) + (invs' and tcb_at' thread) + (doE (cap, slot) \ + cap_fault_on_failure cptr' False + (lookup_cap_and_slot thread (to_bl cptr')); + do + buffer \ lookup_ipc_buffer False thread; + doE extracaps \ lookup_extra_caps thread buffer info; + returnOk (slot, cap, extracaps, buffer) + odE + od + odE) + (doE (cap, slot) \ capFaultOnFailure cptr' False (lookupCapAndSlot thread cptr'); + do buffer \ VSpace_H.lookupIPCBuffer False thread; + doE extracaps \ lookupExtraCaps thread buffer info'; + returnOk (slot, cap, extracaps, buffer) + odE + od + odE)" + apply (clarsimp simp add: split_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF corres_cap_fault]) + \ \switched over to argument of corres_cap_fault\ + apply (rule lookupCapAndSlot_corres, simp) + apply (rule corres_split[OF lookupIPCBuffer_corres]) + apply (rule corres_splitEE) + apply (rule lookupExtraCaps_corres; simp) + apply (rule corres_returnOkTT) + apply (wp | simp)+ + apply auto + done + +lemma msg_from_syserr_map[simp]: + "msgFromSyscallError (syscall_error_map err) = msg_from_syscall_error err" + apply (simp add: msgFromSyscallError_def) + apply (case_tac err,clarsimp+) + done + +lemma threadSet_tcbDomain_update_ct_idle_or_in_cur_domain': + "\ct_idle_or_in_cur_domain' and (\s. ksSchedulerAction s \ ResumeCurrentThread) \ + threadSet (tcbDomain_update (\_. domain)) t + \\_. ct_idle_or_in_cur_domain'\" + apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (wp hoare_vcg_disj_lift hoare_vcg_imp_lift) + apply (wp | wps)+ + apply (auto simp: obj_at'_def) + done + +lemma threadSet_tcbDomain_update_ct_not_inQ: + "\ct_not_inQ \ threadSet (tcbDomain_update (\_. domain)) t \\_. ct_not_inQ\" + apply (simp add: threadSet_def ct_not_inQ_def) + apply (wp) + apply (rule hoare_convert_imp [OF setObject_nosch]) + apply (rule updateObject_tcb_inv) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest getObject_tcb_wp)+ + apply (case_tac "t = ksCurThread s") + apply (clarsimp simp: obj_at'_def)+ + done + +(* FIXME: move *) +lemma setObject_F_ct_activatable': + "\\tcb f. tcbState (F f tcb) = tcbState tcb \ \ \ct_in_state' activatable' and obj_at' ((=) tcb) t\ + setObject t (F f tcb) + \\_. ct_in_state' activatable'\" + apply (clarsimp simp: ct_in_state'_def st_tcb_at'_def) + apply (rule hoare_pre) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest) + apply (clarsimp simp: obj_at'_def) + done + +lemmas setObject_tcbDomain_update_ct_activatable'[wp] = setObject_F_ct_activatable'[where F="tcbDomain_update", simplified] + +(* FIXME: move *) +lemma setObject_F_st_tcb_at': + "\\tcb f. tcbState (F f tcb) = tcbState tcb \ \ \st_tcb_at' P t' and obj_at' ((=) tcb) t\ + setObject t (F f tcb) + \\_. st_tcb_at' P t'\" + apply (simp add: st_tcb_at'_def) + apply (rule hoare_pre) + apply (wp setObject_tcb_strongest) + apply (clarsimp simp: obj_at'_def) + done + +lemmas setObject_tcbDomain_update_st_tcb_at'[wp] = setObject_F_st_tcb_at'[where F="tcbDomain_update", simplified] + +lemma threadSet_tcbDomain_update_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s \ sch_act_not t s\ + threadSet (tcbDomain_update (\_. domain)) t + \\_ s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: sch_act_wf_cases split: scheduler_action.split) + apply (wp hoare_vcg_conj_lift) + apply (simp add: threadSet_def) + apply wp + apply (wps setObject_sa_unchanged) + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (rename_tac word) + apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ + st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" + in hoare_strengthen_post) + apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_vcg_imp_lift)+ + apply (simp add: threadSet_def) + apply (wp getObject_tcb_wp threadSet_tcbDomain_triv')+ + apply (auto simp: obj_at'_def) + done + +lemma setDomain_corres: + "corres dc + (valid_etcbs and valid_sched and tcb_at tptr and pspace_aligned and pspace_distinct) + (invs' and sch_act_simple + and tcb_at' tptr and (\s. new_dom \ maxDomain)) + (set_domain tptr new_dom) + (setDomain tptr new_dom)" + apply (rule corres_gen_asm2) + apply (simp add: set_domain_def setDomain_def thread_set_domain_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split) + apply (rule ethread_set_corres; simp) + apply (clarsimp simp: etcb_relation_def) + apply (rule corres_split[OF isRunnable_corres]) + apply simp + apply (rule corres_split) + apply clarsimp + apply (rule corres_when[OF refl]) + apply (rule tcbSchedEnqueue_corres) + apply (rule corres_when[OF refl]) + apply (rule rescheduleRequired_corres) + apply ((wp hoare_drop_imps hoare_vcg_conj_lift | clarsimp| assumption)+)[5] + apply clarsimp + apply (rule_tac Q="\_. valid_objs' and valid_queues' and valid_queues and + (\s. sch_act_wf (ksSchedulerAction s) s) and tcb_at' tptr" + in hoare_strengthen_post[rotated]) + apply (auto simp: invs'_def valid_state'_def sch_act_wf_weak st_tcb_at'_def o_def)[1] + apply (wp threadSet_valid_objs' threadSet_valid_queues'_no_state + threadSet_valid_queues_no_state + threadSet_pred_tcb_no_state | simp)+ + apply (rule_tac Q = "\r s. invs' s \ (\p. tptr \ set (ksReadyQueues s p)) \ sch_act_simple s + \ tcb_at' tptr s" in hoare_strengthen_post[rotated]) + apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def sch_act_simple_def) + apply (clarsimp simp:valid_tcb'_def) + apply (drule(1) bspec) + apply (clarsimp simp:tcb_cte_cases_def cteSizeBits_def) + apply fastforce + apply (wp hoare_vcg_all_lift Tcb_R.tcbSchedDequeue_not_in_queue)+ + apply clarsimp + apply (frule tcb_at_is_etcb_at) + apply simp+ + apply (auto elim: tcb_at_is_etcb_at valid_objs'_maxDomain valid_objs'_maxPriority pred_tcb'_weakenE + simp: valid_sched_def valid_sched_action_def) + done + + +lemma performInvocation_corres: + "\ inv_relation i i'; call \ block \ \ + corres (dc \ (=)) + (einvs and valid_invocation i + and simple_sched_action + and ct_active + and (\s. (\w w2 b c. 
i = Invocations_A.InvokeEndpoint w w2 b c) \ st_tcb_at simple (cur_thread s) s)) + (invs' and sch_act_simple and valid_invocation' i' and ct_active') + (perform_invocation block call i) (performInvocation block call i')" + apply (simp add: performInvocation_def) + apply (case_tac i) + apply (clarsimp simp: o_def liftE_bindE) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule corres_rel_imp, rule inv_untyped_corres) + apply simp + apply (case_tac x, simp_all)[1] + apply (rule corres_returnOkTT, simp) + apply wp+ + apply simp+ + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply simp + apply (rule corres_split[OF sendIPC_corres]) + apply simp + apply (rule corres_trivial) + apply simp + apply wp+ + apply (clarsimp simp: ct_in_state_def) + apply (fastforce elim: st_tcb_ex_cap) + apply (clarsimp simp: pred_conj_def invs'_def cur_tcb'_def simple_sane_strg + sch_act_simple_def) + apply (rule corres_guard_imp) + apply (simp add: liftE_bindE) + apply (rule corres_split[OF sendSignal_corres]) + apply (rule corres_trivial) + apply (simp add: returnOk_def) + apply wp+ + apply (simp+)[2] + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split_nor[OF doReplyTransfer_corres']) + apply (rule corres_trivial, simp) + apply wp+ + apply (clarsimp simp: tcb_at_invs) + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def) + apply (erule cte_wp_at_weakenE, fastforce simp: is_reply_cap_to_def) + apply (clarsimp simp: tcb_at_invs') + apply (fastforce elim!: cte_wp_at_weakenE') + apply (clarsimp simp: liftME_def) + apply (rule corres_guard_imp) + apply (erule invokeTCB_corres) + apply (simp)+ + \ \domain cap\ + apply (clarsimp simp: invoke_domain_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF setDomain_corres]) + apply (rule corres_trivial, simp) + apply (wp)+ + apply ((clarsimp simp: invs_psp_aligned invs_distinct)+)[2] + \ \CNodes\ + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF invokeCNode_corres]) + apply assumption + apply (rule corres_trivial, simp add: returnOk_def) + apply wp+ + apply (clarsimp+)[2] + apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) + apply (rule corres_guard_imp, rule performIRQControl_corres, simp+) + apply (clarsimp simp: liftME_def[symmetric] o_def dc_def[symmetric]) + apply (rule corres_guard_imp, rule invokeIRQHandler_corres, simp+) + apply clarsimp + apply (rule corres_guard_imp) + apply (rule arch_performInvocation_corres, assumption) + apply (clarsimp+)[2] + done + +lemma sendSignal_tcb_at'[wp]: + "\tcb_at' t\ + sendSignal ntfnptr bdg + \\rv. tcb_at' t\" + apply (simp add: sendSignal_def + cong: list.case_cong Structures_H.notification.case_cong) + apply (wp ntfn'_cases_weak_wp list_cases_weak_wp hoare_drop_imps | wpc | simp)+ + done + +lemmas checkCap_inv_typ_at' + = checkCap_inv[where P="\s. P (typ_at' T p s)" for P T p] + +crunches restart, bindNotification, performTransfer + for typ_at'[wp]: "\s. P (typ_at' T p s)" + +lemma invokeTCB_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ + invokeTCB tinv + \\rv s. 
P (typ_at' T p s)\" + apply (cases tinv, + simp_all add: invokeTCB_def + getThreadBufferSlot_def locateSlot_conv + split del: if_split) + apply (simp only: cases_simp if_cancel simp_thms conj_comms pred_conj_def + Let_def split_def getThreadVSpaceRoot + | (simp split del: if_split cong: if_cong) + | (wp mapM_x_wp[where S=UNIV, simplified] + checkCap_inv_typ_at' + case_options_weak_wp)[1] + | wpcw)+ + done + +lemmas invokeTCB_typ_ats[wp] = typ_at_lifts [OF invokeTCB_typ_at'] + +crunch typ_at'[wp]: doReplyTransfer "\s. P (typ_at' T p s)" + (wp: hoare_drop_imps) + +lemmas doReplyTransfer_typ_ats[wp] = typ_at_lifts [OF doReplyTransfer_typ_at'] + +crunch typ_at'[wp]: "performIRQControl" "\s. P (typ_at' T p s)" + +lemmas invokeIRQControl_typ_ats[wp] = + typ_at_lifts [OF performIRQControl_typ_at'] + +crunch typ_at'[wp]: InterruptDecls_H.invokeIRQHandler "\s. P (typ_at' T p s)" + +lemmas invokeIRQHandler_typ_ats[wp] = + typ_at_lifts [OF InterruptDecls_H_invokeIRQHandler_typ_at'] + +crunch tcb_at'[wp]: setDomain "tcb_at' tptr" + (simp: crunch_simps) + +lemma pinv_tcb'[wp]: + "\invs' and st_tcb_at' active' tptr + and valid_invocation' i and ct_active'\ + RetypeDecls_H.performInvocation block call i + \\rv. tcb_at' tptr\" + apply (simp add: performInvocation_def) + apply (case_tac i, simp_all) + apply (wp invokeArch_tcb_at' | clarsimp simp: pred_tcb_at')+ + done + +lemma sts_cte_at[wp]: + "\cte_at' p\ setThreadState st t \\rv. cte_at' p\" + apply (simp add: setThreadState_def) + apply (wp|simp)+ + done + +crunch obj_at_ntfn[wp]: setThreadState "obj_at' (\ntfn. P (ntfnBoundTCB ntfn) (ntfnObj ntfn)) ntfnptr" + (wp: obj_at_setObject2 crunch_wps + simp: crunch_simps updateObject_default_def in_monad) + +lemma sts_mcpriority_tcb_at'[wp]: + "\mcpriority_tcb_at' P t\ + setThreadState st t' + \\_. mcpriority_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setThreadState_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +lemma sts_valid_inv'[wp]: + "\valid_invocation' i\ setThreadState st t \\rv. 
valid_invocation' i\" + apply (case_tac i, simp_all add: sts_valid_untyped_inv' sts_valid_arch_inv') + apply (wp | simp)+ + defer + apply (rename_tac cnode_invocation) + apply (case_tac cnode_invocation, simp_all add: cte_wp_at_ctes_of) + apply (wp | simp)+ + apply (rename_tac irqcontrol_invocation) + apply (case_tac irqcontrol_invocation, simp_all add: arch_irq_control_inv_valid'_def) + apply (rename_tac archirq_inv) + apply (case_tac archirq_inv; simp) + apply (wp | simp add: irq_issued'_def)+ + apply (rename_tac irqhandler_invocation) + apply (case_tac irqhandler_invocation, simp_all) + apply (wp hoare_vcg_ex_lift ex_cte_cap_to'_pres | simp)+ + apply (rename_tac tcbinvocation) + apply (case_tac tcbinvocation, + simp_all add: setThreadState_tcb', + auto intro!: hoare_vcg_conj_lift hoare_vcg_disj_lift + simp only: imp_conv_disj simp_thms pred_conj_def, + auto intro!: hoare_vcg_prop + sts_cap_to' sts_cte_cap_to' + setThreadState_typ_ats + split: option.splits)[1] + apply (wp sts_bound_tcb_at' hoare_vcg_all_lift hoare_vcg_const_imp_lift)+ + done + +(* FIXME: move to TCB *) +crunch inv[wp]: decodeDomainInvocation P + (wp: crunch_wps simp: crunch_simps) + +lemma arch_cap_exhausted: + "\\ isFrameCap cap; \ isPageTableCap cap; \ isASIDControlCap cap; \ isASIDPoolCap cap; \ isVCPUCap cap\ + \ undefined \P\" + by (cases cap; simp add: isCap_simps) + +crunch inv[wp]: decodeInvocation P + (simp: crunch_simps wp: crunch_wps arch_cap_exhausted mapME_x_inv_wp getASID_wp) + +(* FIXME: move to TCB *) +lemma dec_dom_inv_wf[wp]: + "\invs' and (\s. \x \ set excaps. s \' fst x)\ + decodeDomainInvocation label args excaps + \\x s. tcb_at' (fst x) s \ snd x \ maxDomain\, -" + apply (simp add:decodeDomainInvocation_def) + apply (wp whenE_throwError_wp | wpc |simp)+ + apply clarsimp + apply (drule_tac x = "hd excaps" in bspec) + apply (rule hd_in_set) + apply (simp add:null_def) + apply (simp add:valid_cap'_def) + apply (simp add:not_le) + apply (simp del: Word.of_nat_unat flip: ucast_nat_def) + apply (rule word_of_nat_le) + apply (simp add: le_maxDomain_eq_less_numDomains) + done + +lemma decode_inv_wf'[wp]: + "\valid_cap' cap and invs' and sch_act_simple + and cte_wp_at' ((=) cap \ cteCap) slot and real_cte_at' slot + and (\s. \r\zobj_refs' cap. ex_nonz_cap_to' r s) + and (\s. \r\cte_refs' cap (irq_node' s). ex_cte_cap_to' r s) + and (\s. \cap \ set excaps. \r\cte_refs' (fst cap) (irq_node' s). ex_cte_cap_to' r s) + and (\s. \cap \ set excaps. \r\zobj_refs' (fst cap). ex_nonz_cap_to' r s) + and (\s. \x \ set excaps. cte_wp_at' ((=) (fst x) o cteCap) (snd x) s) + and (\s. \x \ set excaps. s \' fst x) + and (\s. \x \ set excaps. real_cte_at' (snd x) s) + and (\s. \x \ set excaps. ex_cte_cap_wp_to' isCNodeCap (snd x) s) + and (\s. \x \ set excaps. 
cte_wp_at' (badge_derived' (fst x) o cteCap) (snd x) s)\ + decodeInvocation label args cap_index slot cap excaps + \valid_invocation'\,-" + apply (case_tac cap, simp_all add: decodeInvocation_def Let_def isCap_defs uncurry_def split_def + split del: if_split + cong: if_cong) + apply (rule hoare_pre, + ((wp decodeTCBInv_wf | simp add: o_def)+)[1], + clarsimp simp: valid_cap'_def cte_wp_at_ctes_of + | (rule exI, rule exI, erule (1) conjI) + | drule_tac t="cteCap cte" in sym, simp)+ + done + +lemma ct_active_imp_simple'[elim!]: + "ct_active' s \ st_tcb_at' simple' (ksCurThread s) s" + by (clarsimp simp: ct_in_state'_def + elim!: pred_tcb'_weakenE) + +lemma ct_running_imp_simple'[elim!]: + "ct_running' s \ st_tcb_at' simple' (ksCurThread s) s" + by (clarsimp simp: ct_in_state'_def + elim!: pred_tcb'_weakenE) + +lemma active_ex_cap'[elim]: + "\ ct_active' s; if_live_then_nonz_cap' s \ + \ ex_nonz_cap_to' (ksCurThread s) s" + by (fastforce simp: ct_in_state'_def elim!: st_tcb_ex_cap'') + +crunch it[wp]: handleFaultReply "\s. P (ksIdleThread s)" + +lemma handleFaultReply_invs[wp]: + "\invs' and tcb_at' t\ handleFaultReply x t label msg \\rv. invs'\" + apply (simp add: handleFaultReply_def) + apply (case_tac x; wpsimp simp: handleArchFaultReply_def) + done + +crunch sch_act_simple[wp]: handleFaultReply sch_act_simple + (wp: crunch_wps) + +lemma transferCaps_non_null_cte_wp_at': + assumes PUC: "\cap. P cap \ \ isUntypedCap cap" + shows "\cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\ + transferCaps info caps ep rcvr rcvBuf + \\_. cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\" +proof - + have CTEF: "\P p s. \ cte_wp_at' P p s; \cte. P cte \ False \ \ False" + by (erule cte_wp_atE', auto) + show ?thesis + unfolding transferCaps_def + apply (wp | wpc)+ + apply (rule transferCapsToSlots_pres2) + apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) + apply (rule PUC,simp) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ + done +qed + +crunch cte_wp_at' [wp]: setMessageInfo "cte_wp_at' P p" + +lemma copyMRs_cte_wp_at'[wp]: + "\cte_wp_at' P ptr\ copyMRs sender sendBuf receiver recvBuf n \\_. cte_wp_at' P ptr\" + unfolding copyMRs_def + apply (wp mapM_wp | wpc | simp add: split_def | rule equalityD1)+ + done + +lemma doNormalTransfer_non_null_cte_wp_at': + assumes PUC: "\cap. P cap \ \ isUntypedCap cap" + shows + "\cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\ + doNormalTransfer st send_buffer ep b gr rt recv_buffer + \\_. cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\" + unfolding doNormalTransfer_def + apply (wp transferCaps_non_null_cte_wp_at' | simp add:PUC)+ + done + +lemma setMRs_cte_wp_at'[wp]: + "\cte_wp_at' P ptr\ setMRs thread buffer messageData \\_. cte_wp_at' P ptr\" + by (simp add: setMRs_def zipWithM_x_mapM split_def, wp crunch_wps) + +lemma doFaultTransfer_cte_wp_at'[wp]: + "\cte_wp_at' P ptr\ + doFaultTransfer badge sender receiver receiverIPCBuffer + \\_. cte_wp_at' P ptr\" + unfolding doFaultTransfer_def + apply (wp | wpc | simp add: split_def)+ + done + +lemma doIPCTransfer_non_null_cte_wp_at': + assumes PUC: "\cap. P cap \ \ isUntypedCap cap" + shows + "\cte_wp_at' (\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\ + doIPCTransfer sender endpoint badge grant receiver + \\_. cte_wp_at' (\cte. 
P (cteCap cte) \ cteCap cte \ capability.NullCap) ptr\" + unfolding doIPCTransfer_def + apply (wp doNormalTransfer_non_null_cte_wp_at' hoare_drop_imp hoare_allI | wpc | clarsimp simp:PUC)+ + done + +lemma doIPCTransfer_non_null_cte_wp_at2': + fixes P + assumes PNN: "\cte. P (cteCap cte) \ cteCap cte \ capability.NullCap" + and PUC: "\cap. P cap \ \ isUntypedCap cap" + shows "\cte_wp_at' (\cte. P (cteCap cte)) ptr\ + doIPCTransfer sender endpoint badge grant receiver + \\_. cte_wp_at' (\cte. P (cteCap cte)) ptr\" + proof - + have PimpQ: "\P Q ptr s. \ cte_wp_at' (\cte. P (cteCap cte)) ptr s; + \cte. P (cteCap cte) \ Q (cteCap cte) \ + \ cte_wp_at' (\cte. P (cteCap cte) \ Q (cteCap cte)) ptr s" + by (erule cte_wp_at_weakenE', clarsimp) + show ?thesis + apply (rule hoare_chain [OF doIPCTransfer_non_null_cte_wp_at']) + apply (erule PUC) + apply (erule PimpQ) + apply (drule PNN, clarsimp) + apply (erule cte_wp_at_weakenE') + apply (clarsimp) + done + qed + +lemma st_tcb_at'_eqD: + "\ st_tcb_at' (\s. s = st) t s; st_tcb_at' (\s. s = st') t s \ \ st = st'" + by (clarsimp simp add: pred_tcb_at'_def obj_at'_def) + +lemma isReply_awaiting_reply': + "isReply st = awaiting_reply' st" + by (case_tac st, (clarsimp simp add: isReply_def)+) + +lemma doReply_invs[wp]: + "\tcb_at' t and tcb_at' t' and + cte_wp_at' (\cte. \grant. cteCap cte = ReplyCap t False grant) slot and + invs' and sch_act_simple\ + doReplyTransfer t' t slot grant + \\rv. invs'\" + apply (simp add: doReplyTransfer_def liftM_def) + apply (rule hoare_seq_ext [OF _ gts_sp']) + apply (rule hoare_seq_ext [OF _ assert_sp]) + apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (wp, wpc) + apply (wp) + apply (wp (once) sts_invs_minor'') + apply (simp) + apply (wp (once) sts_st_tcb') + apply (wp)[1] + apply (rule_tac Q="\rv s. invs' s + \ t \ ksIdleThread s + \ st_tcb_at' awaiting_reply' t s" + in hoare_post_imp) + apply (clarsimp) + apply (frule_tac t=t in invs'_not_runnable_not_queued) + apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) + apply (clarsimp | drule(1) obj_at_conj')+ + apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) + apply (drule(1) pred_tcb_at_conj') + apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") + apply (clarsimp) + apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" + in pred_tcb'_weakenE) + apply (case_tac st, clarsimp+) + apply (wp cteDeleteOne_reply_pred_tcb_at)+ + apply (clarsimp) + apply (rule_tac Q="\_. (\s. t \ ksIdleThread s) + and cte_wp_at' (\cte. \grant. cteCap cte = capability.ReplyCap t False grant) slot" + in hoare_strengthen_post [rotated]) + apply (fastforce simp: cte_wp_at'_def) + apply (wp) + apply (rule hoare_strengthen_post [OF doIPCTransfer_non_null_cte_wp_at']) + apply (erule conjE) + apply assumption + apply (erule cte_wp_at_weakenE') + apply (fastforce) + apply (wp sts_invs_minor'' sts_st_tcb' hoare_weak_lift_imp) + apply (rule_tac Q="\rv s. 
invs' s \ sch_act_simple s + \ st_tcb_at' awaiting_reply' t s + \ t \ ksIdleThread s" + in hoare_post_imp) + apply (clarsimp) + apply (frule_tac t=t in invs'_not_runnable_not_queued) + apply (erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (rule conjI, erule pred_tcb'_weakenE, case_tac st, clarsimp+) + apply (rule conjI, rule impI, erule pred_tcb'_weakenE, case_tac st) + apply (clarsimp | drule(1) obj_at_conj')+ + apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def) + apply (drule(1) pred_tcb_at_conj') + apply (subgoal_tac "st_tcb_at' (\_. False) (ksCurThread s) s") + apply (clarsimp) + apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" + in pred_tcb'_weakenE) + apply (case_tac st, clarsimp+) + apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 hoare_weak_lift_imp + | clarsimp simp add: inQ_def)+ + apply (rule_tac Q="\_. invs' and tcb_at' t + and sch_act_simple and st_tcb_at' awaiting_reply' t" + in hoare_strengthen_post [rotated]) + apply (clarsimp) + apply (rule conjI) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: obj_at'_def idle_tcb'_def pred_tcb_at'_def) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: invs'_def valid_state'_def valid_idle'_def) + apply (erule pred_tcb'_weakenE, clarsimp) + apply (rule conjI) + apply (clarsimp simp : invs'_def valid_state'_def valid_idle'_def pred_tcb_at'_def + obj_at'_def idle_tcb'_def) + apply (rule conjI) + apply clarsimp + apply (frule invs'_not_runnable_not_queued) + apply (erule pred_tcb'_weakenE, clarsimp) + apply (frule (1) not_tcbQueued_not_ksQ) + apply simp + apply clarsimp + apply (wp cteDeleteOne_reply_pred_tcb_at hoare_drop_imp hoare_allI)+ + apply (clarsimp simp add: isReply_awaiting_reply' cte_wp_at_ctes_of) + apply (auto dest!: st_tcb_idle'[rotated] simp:isCap_simps) + done + +lemma ct_active_runnable' [simp]: + "ct_active' s \ ct_in_state' runnable' s" + by (fastforce simp: ct_in_state'_def elim!: pred_tcb'_weakenE) + +lemma valid_irq_node_tcbSchedEnqueue[wp]: + "\\s. valid_irq_node' (irq_node' s) s \ tcbSchedEnqueue ptr + \\rv s'. valid_irq_node' (irq_node' s') s'\" + apply (rule hoare_pre) + apply (simp add:valid_irq_node'_def ) + apply (wp unless_wp hoare_vcg_all_lift | wps)+ + apply (simp add:tcbSchedEnqueue_def) + apply (wp unless_wp| simp)+ + apply (simp add:valid_irq_node'_def) + done + +lemma rescheduleRequired_valid_queues_but_ct_domain: + "\\s. Invariants_H.valid_queues s \ valid_objs' s + \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) \ + rescheduleRequired + \\_. Invariants_H.valid_queues\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp)+ + done + +lemma rescheduleRequired_valid_queues'_but_ct_domain: + "\\s. valid_queues' s + \ (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s) + \ + rescheduleRequired + \\_. valid_queues'\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp | fastforce simp: valid_queues'_def)+ + done + +lemma tcbSchedEnqueue_valid_action: + "\\s. \x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s\ + tcbSchedEnqueue ptr + \\rv s. \x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s\" + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift) + apply clarsimp + done + +abbreviation (input) "all_invs_but_sch_extra \ + \s. 
valid_pspace' s \ Invariants_H.valid_queues s \ + sym_refs (state_refs_of' s) \ + sym_refs (state_hyp_refs_of' s) \ + if_live_then_nonz_cap' s \ + if_unsafe_then_cap' s \ + valid_idle' s \ + valid_global_refs' s \ + valid_arch_state' s \ + valid_irq_node' (irq_node' s) s \ + valid_irq_handlers' s \ + valid_irq_states' s \ + irqs_masked' s \ + valid_machine_state' s \ + cur_tcb' s \ + untyped_ranges_zero' s \ + valid_queues' s \ pspace_domain_valid s \ + ksCurDomain s \ maxDomain \ valid_dom_schedule' s \ + (\x. ksSchedulerAction s = SwitchToThread x \ st_tcb_at' runnable' x s)" + + +lemma rescheduleRequired_all_invs_but_extra: + "\\s. all_invs_but_sch_extra s\ + rescheduleRequired \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (wp add: rescheduleRequired_ct_not_inQ + rescheduleRequired_sch_act' + rescheduleRequired_valid_queues_but_ct_domain + rescheduleRequired_valid_queues'_but_ct_domain + valid_irq_node_lift valid_irq_handlers_lift'' + irqs_masked_lift cur_tcb_lift) + apply auto + done + +lemma threadSet_all_invs_but_sch_extra: + shows "\ tcb_at' t and (\s. (\p. t \ set (ksReadyQueues s p))) and + all_invs_but_sch_extra and sch_act_simple and + K (ds \ maxDomain) \ + threadSet (tcbDomain_update (\_. ds)) t + \\rv. all_invs_but_sch_extra \" + apply (rule hoare_gen_asm) + apply (rule hoare_pre) + apply (wp threadSet_valid_pspace'T_P[where P = False and Q = \ and Q' = \]) + apply (simp add:tcb_cte_cases_def cteSizeBits_def)+ + apply (wp + threadSet_valid_pspace'T_P + threadSet_state_refs_of'T_P[where f'=id and P'=False and Q=\ and g'=id and Q'=\] + threadSet_state_hyp_refs_of' + threadSet_idle'T + threadSet_global_refsT + threadSet_cur + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_valid_queues'_no_state + threadSet_tcbDomain_update_ct_idle_or_in_cur_domain' + threadSet_valid_queues + threadSet_valid_dom_schedule' + threadSet_iflive'T + threadSet_ifunsafe'T + untyped_ranges_zero_lift + | simp add:tcb_cte_cases_def cteSizeBits_def cteCaps_of_def o_def)+ + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift threadSet_pred_tcb_no_state | simp)+ + apply (clarsimp simp:sch_act_simple_def o_def cteCaps_of_def) + apply (intro conjI) + apply fastforce+ + done + +lemma threadSet_not_curthread_ct_domain: + "\\s. ptr \ ksCurThread s \ ct_idle_or_in_cur_domain' s\ threadSet f ptr \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add:ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + apply (wp hoare_vcg_imp_lift hoare_vcg_disj_lift | wps)+ + apply clarsimp + done + +lemma setDomain_invs': + "\invs' and sch_act_simple and ct_active' and + (tcb_at' ptr and + (\s. sch_act_not ptr s) and + (\y. domain \ maxDomain))\ + setDomain ptr domain \\y. invs'\" + apply (simp add:setDomain_def ) + apply (wp add: when_wp hoare_weak_lift_imp hoare_weak_lift_imp_conj rescheduleRequired_all_invs_but_extra + tcbSchedEnqueue_valid_action hoare_vcg_if_lift2) + apply (rule_tac Q = "\r s. 
all_invs_but_sch_extra s \ curThread = ksCurThread s + \ (ptr \ curThread \ ct_not_inQ s \ sch_act_wf (ksSchedulerAction s) s \ ct_idle_or_in_cur_domain' s)" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp:invs'_def valid_state'_def st_tcb_at'_def[symmetric] valid_pspace'_def) + apply (erule st_tcb_ex_cap'') + apply simp + apply (case_tac st,simp_all)[1] + apply (rule hoare_strengthen_post[OF hoare_vcg_conj_lift]) + apply (rule threadSet_all_invs_but_sch_extra) + prefer 2 + apply clarsimp + apply assumption + apply (wp hoare_weak_lift_imp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain + threadSet_tcbDomain_update_ct_not_inQ | simp)+ + apply (rule_tac Q = "\r s. invs' s \ curThread = ksCurThread s \ sch_act_simple s + \ domain \ maxDomain + \ (ptr \ curThread \ ct_not_inQ s \ sch_act_not ptr s)" + in hoare_strengthen_post[rotated]) + apply (clarsimp simp:invs'_def valid_state'_def) + apply (wp hoare_vcg_imp_lift)+ + apply (clarsimp simp:invs'_def valid_pspace'_def valid_state'_def)+ + done + +lemma performInv_invs'[wp]: + "\invs' and sch_act_simple + and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) + and ct_active' and valid_invocation' i\ + RetypeDecls_H.performInvocation block call i \\rv. invs'\" + unfolding performInvocation_def + apply (cases i) + apply ((clarsimp simp: simple_sane_strg sch_act_simple_def + ct_not_ksQ sch_act_sane_def + | wp tcbinv_invs' arch_performInvocation_invs' + setDomain_invs' + | rule conjI | erule active_ex_cap')+) + done + +lemma getSlotCap_to_refs[wp]: + "\\\ getSlotCap ref \\rv s. \r\zobj_refs' rv. ex_nonz_cap_to' r s\" + by (simp add: getSlotCap_def | wp)+ + +lemma lcs_valid' [wp]: + "\invs'\ lookupCapAndSlot t xs \\x s. s \' fst x\, -" + unfolding lookupCapAndSlot_def + apply (rule hoare_pre) + apply (wp|clarsimp simp: split_def)+ + done + +lemma lcs_ex_cap_to' [wp]: + "\invs'\ lookupCapAndSlot t xs \\x s. \r\cte_refs' (fst x) (irq_node' s). ex_cte_cap_to' r s\, -" + unfolding lookupCapAndSlot_def + apply (rule hoare_pre) + apply (wp | simp add: split_def)+ + done + +lemma lcs_ex_nonz_cap_to' [wp]: + "\invs'\ lookupCapAndSlot t xs \\x s. \r\zobj_refs' (fst x). ex_nonz_cap_to' r s\, -" + unfolding lookupCapAndSlot_def + apply (rule hoare_pre) + apply (wp | simp add: split_def)+ + done + +lemma lcs_cte_at' [wp]: + "\valid_objs'\ lookupCapAndSlot t xs \\rv s. cte_at' (snd rv) s\,-" + unfolding lookupCapAndSlot_def + apply (rule hoare_pre) + apply (wp|simp)+ + done + +lemma lec_ex_cap_to' [wp]: + "\invs'\ + lookupExtraCaps t xa mi + \\rv s. (\cap \ set rv. \r\cte_refs' (fst cap) (irq_node' s). ex_cte_cap_to' r s)\, -" + unfolding lookupExtraCaps_def + apply (cases "msgExtraCaps mi = 0") + apply simp + apply (wp mapME_set | simp)+ + done + +lemma lec_ex_nonz_cap_to' [wp]: + "\invs'\ + lookupExtraCaps t xa mi + \\rv s. (\cap \ set rv. \r\zobj_refs' (fst cap). ex_nonz_cap_to' r s)\, -" + unfolding lookupExtraCaps_def + apply (cases "msgExtraCaps mi = 0") + apply simp + apply (wp mapME_set | simp)+ + done + +(* FIXME: move *) +lemma getSlotCap_eq [wp]: + "\\\ getSlotCap slot + \\cap. cte_wp_at' ((=) cap \ cteCap) slot\" + by (wpsimp wp: getCTE_wp' simp: getSlotCap_def cte_wp_at_ctes_of) + +lemma lcs_eq [wp]: + "\\\ lookupCapAndSlot t cptr \\rv. cte_wp_at' ((=) (fst rv) o cteCap) (snd rv)\,-" + by (wpsimp simp: lookupCapAndSlot_def) + +lemma lec_eq[wp]: + "\\\ + lookupExtraCaps t buffer info + \\rv s. (\x\set rv. 
cte_wp_at' ((=) (fst x) o cteCap) (snd x) s)\,-" + by (wpsimp wp: mapME_set simp: lookupExtraCaps_def) + +lemma lookupExtras_real_ctes[wp]: + "\valid_objs'\ lookupExtraCaps t xs info \\rv s. \x \ set rv. real_cte_at' (snd x) s\,-" + apply (simp add: lookupExtraCaps_def Let_def split del: if_split cong: if_cong) + apply (rule hoare_pre) + apply (wp mapME_set) + apply (simp add: lookupCapAndSlot_def split_def) + apply (wp case_options_weak_wp mapM_wp' lsft_real_cte | simp)+ + done + +lemma lookupExtras_ctes[wp]: + "\valid_objs'\ lookupExtraCaps t xs info \\rv s. \x \ set rv. cte_at' (snd x) s\,-" + apply (rule hoare_post_imp_R) + apply (rule lookupExtras_real_ctes) + apply (simp add: real_cte_at') + done + +lemma lsft_ex_cte_cap_to': + "\invs' and K (\cap. isCNodeCap cap \ P cap)\ + lookupSlotForThread t cref + \\rv s. ex_cte_cap_wp_to' P rv s\,-" + apply (simp add: lookupSlotForThread_def split_def) + apply (wp rab_cte_cap_to' getSlotCap_cap_to2 | simp)+ + done + +lemma lec_caps_to'[wp]: + "\invs' and K (\cap. isCNodeCap cap \ P cap)\ + lookupExtraCaps t buffer info + \\rv s. (\x\set rv. ex_cte_cap_wp_to' P (snd x) s)\,-" + apply (simp add: lookupExtraCaps_def split del: if_split) + apply (rule hoare_pre) + apply (wp mapME_set) + apply (simp add: lookupCapAndSlot_def split_def) + apply (wp lsft_ex_cte_cap_to' mapM_wp' + | simp | wpc)+ + done + +lemma getSlotCap_badge_derived[wp]: + "\\\ getSlotCap p \\cap. cte_wp_at' (badge_derived' cap \ cteCap) p\" + apply (simp add: getSlotCap_def) + apply (wp getCTE_wp) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma lec_derived'[wp]: + "\invs'\ + lookupExtraCaps t buffer info + \\rv s. (\x\set rv. cte_wp_at' (badge_derived' (fst x) o cteCap) (snd x) s)\,-" + apply (simp add: lookupExtraCaps_def split del: if_split) + apply (rule hoare_pre) + apply (wp mapME_set) + apply (simp add: lookupCapAndSlot_def split_def) + apply (wp | simp)+ + done + +lemma get_mrs_length_rv[wp]: + "\\s. \n. n \ msg_max_length \ P n\ get_mrs thread buf mi \\rv s. P (length rv)\" + supply if_split[split del] + apply (simp add: get_mrs_def) + apply (wp mapM_length | wpc | simp del: upt.simps)+ + apply (clarsimp simp: msgRegisters_unfold msg_max_length_def) + done + +lemma st_tcb_at_idle_thread': + "\ st_tcb_at' P (ksIdleThread s) s; valid_idle' s \ + \ P IdleThreadState" + by (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + +crunch tcb_at'[wp]: replyFromKernel "tcb_at' t" + +lemma invs_weak_sch_act_wf_strg: + "invs' s \ weak_sch_act_wf (ksSchedulerAction s) s" + by clarsimp + +(* FIXME: move *) +lemma rct_sch_act_simple[simp]: + "ksSchedulerAction s = ResumeCurrentThread \ sch_act_simple s" + by (simp add: sch_act_simple_def) + +(* FIXME: move *) +lemma rct_sch_act_sane[simp]: + "ksSchedulerAction s = ResumeCurrentThread \ sch_act_sane s" + by (simp add: sch_act_sane_def) + +lemma lookupCapAndSlot_real_cte_at'[wp]: + "\valid_objs'\ lookupCapAndSlot thread ptr \\rv. real_cte_at' (snd rv)\, -" +apply (simp add: lookupCapAndSlot_def lookupSlotForThread_def) +apply (wp resolveAddressBits_real_cte_at' | simp add: split_def)+ +done + +lemmas set_thread_state_active_valid_sched = + set_thread_state_runnable_valid_sched[simplified runnable_eq_active] + +crunches reply_from_kernel + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +lemma handleInvocation_corres: + "c \ b \ + corres (dc \ dc) + (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (invs' and + (\s. 
ksSchedulerAction s = ResumeCurrentThread) and ct_active') + (handle_invocation c b) + (handleInvocation c b)" + apply (simp add: handle_invocation_def handleInvocation_def liftE_bindE) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split[OF getMessageInfo_corres]) + apply clarsimp + apply (simp add: liftM_def cap_register_def capRegister_def) + apply (rule corres_split_eqr[OF asUser_getRegister_corres]) + apply (rule syscall_corres) + apply (rule hinv_corres_assist, simp) + apply (clarsimp simp add: when_def) + apply (rule handleFault_corres) + apply simp + apply (simp add: split_def) + apply (rule corres_split[OF getMRs_corres]) + apply (rule decodeInvocation_corres, simp_all)[1] + apply (fastforce simp: list_all2_map2 list_all2_map1 elim: list_all2_mono) + apply (fastforce simp: list_all2_map2 list_all2_map1 elim: list_all2_mono) + apply wp[1] + apply (drule sym[OF conjunct1]) + apply simp + apply wp[1] + apply (clarsimp simp: when_def) + apply (rule replyFromKernel_corres) + apply (rule corres_split[OF setThreadState_corres], simp) + apply (rule corres_splitEE[OF performInvocation_corres]) + apply simp+ + apply (rule corres_split[OF getThreadState_corres]) + apply (rename_tac state state') + apply (case_tac state, simp_all)[1] + apply (fold dc_def)[1] + apply (rule corres_split) + apply (rule corres_when [OF refl replyFromKernel_corres]) + apply (rule setThreadState_corres) + apply simp + apply (simp add: when_def) + apply (rule conjI, rule impI) + apply (wp reply_from_kernel_tcb_at) + apply (rule impI, wp+) + apply (wpsimp wp: hoare_drop_imps|strengthen invs_distinct invs_psp_aligned)+ + apply (rule_tac Q="\rv. einvs and simple_sched_action and valid_invocation rve + and (\s. thread = cur_thread s) + and st_tcb_at active thread" + in hoare_post_imp) + apply (clarsimp simp: simple_from_active ct_in_state_def + elim!: st_tcb_weakenE) + apply (wp sts_st_tcb_at' set_thread_state_simple_sched_action + set_thread_state_active_valid_sched) + apply (rule_tac Q="\rv. invs' and valid_invocation' rve' + and (\s. thread = ksCurThread s) + and st_tcb_at' active' thread + and (\s. ksSchedulerAction s = ResumeCurrentThread)" + in hoare_post_imp) + apply (clarsimp simp: ct_in_state'_def) + apply (frule(1) ct_not_ksQ) + apply (clarsimp) + apply (wp setThreadState_nonqueued_state_update + setThreadState_st_tcb setThreadState_rct)[1] + apply (wp lec_caps_to lsft_ex_cte_cap_to + | simp add: split_def liftE_bindE[symmetric] + ct_in_state'_def ball_conj_distrib + | rule hoare_vcg_E_elim)+ + apply (clarsimp simp: tcb_at_invs invs_valid_objs + valid_tcb_state_def ct_in_state_def + simple_from_active invs_mdb + invs_distinct invs_psp_aligned) + apply (clarsimp simp: msg_max_length_def word_bits_def) + apply (erule st_tcb_ex_cap, clarsimp+) + apply fastforce + apply (clarsimp) + apply (frule tcb_at_invs') + apply (clarsimp simp: invs'_def valid_state'_def + ct_in_state'_def ct_not_inQ_def) + apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) + apply (frule pred_tcb'_weakenE [where P=active' and P'=simple'], clarsimp) + apply (frule(1) st_tcb_ex_cap'', fastforce) + apply (clarsimp simp: valid_pspace'_def) + apply (frule(1) st_tcb_at_idle_thread') + apply (simp) + done + +lemma ts_Restart_case_helper': + "(case ts of Structures_H.Restart \ A | _ \ B) + = (if ts = Structures_H.Restart then A else B)" + by (cases ts, simp_all) + +lemma gts_imp': + "\Q\ getThreadState t \R\ \ + \\s. st_tcb_at' P t s \ Q s\ getThreadState t \\rv s. 
P rv \ R rv s\" + apply (simp only: imp_conv_disj) + apply (erule hoare_vcg_disj_lift[rotated]) + apply (rule hoare_strengthen_post [OF gts_sp']) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + done + +crunch st_tcb_at'[wp]: replyFromKernel "st_tcb_at' P t" +crunch cap_to'[wp]: replyFromKernel "ex_nonz_cap_to' p" +crunch it'[wp]: replyFromKernel "\s. P (ksIdleThread s)" +crunch sch_act_simple[wp]: replyFromKernel sch_act_simple + (rule: sch_act_simple_lift) + +lemma rfk_ksQ[wp]: + "\\s. P (ksReadyQueues s p)\ replyFromKernel t x1 \\_ s. P (ksReadyQueues s p)\" + apply (case_tac x1) + apply (simp add: replyFromKernel_def) + apply (wp) + done + +lemma hinv_invs'[wp]: + "\invs' and ct_active' and + (\s. ksSchedulerAction s = ResumeCurrentThread)\ + handleInvocation calling blocking + \\rv. invs'\" + apply (simp add: handleInvocation_def split_def + ts_Restart_case_helper') + apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' + hoare_vcg_all_lift hoare_weak_lift_imp) + apply simp + apply (intro conjI impI) + apply (wp gts_imp' | simp)+ + apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R[rotated]) + apply clarsimp + apply (subgoal_tac "thread \ ksIdleThread s", simp_all)[1] + apply (fastforce elim!: pred_tcb'_weakenE st_tcb_ex_cap'') + apply (clarsimp simp: valid_idle'_def valid_state'_def + invs'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + apply wp+ + apply (rule_tac Q="\rv'. invs' and valid_invocation' rv + and (\s. ksSchedulerAction s = ResumeCurrentThread) + and (\s. ksCurThread s = thread) + and st_tcb_at' active' thread" + in hoare_post_imp) + apply (clarsimp simp: ct_in_state'_def) + apply (frule(1) ct_not_ksQ) + apply (clarsimp) + apply (wp sts_invs_minor' setThreadState_st_tcb setThreadState_rct | simp)+ + apply (clarsimp) + apply (frule(1) ct_not_ksQ) + apply (fastforce simp add: tcb_at_invs' ct_in_state'_def + simple_sane_strg + sch_act_simple_def + elim!: pred_tcb'_weakenE st_tcb_ex_cap'' + dest: st_tcb_at_idle_thread')+ + done + +crunch typ_at'[wp]: handleFault "\s. P (typ_at' T p s)" + +lemmas handleFault_typ_ats[wp] = typ_at_lifts [OF handleFault_typ_at'] + +lemma handleSend_corres: + "corres (dc \ dc) + (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_active') + (handle_send blocking) (handleSend blocking)" + by (simp add: handle_send_def handleSend_def handleInvocation_corres) + +lemma hs_invs'[wp]: + "\invs' and ct_active' and + (\s. ksSchedulerAction s = ResumeCurrentThread)\ + handleSend blocking \\r. 
invs'\" + apply (rule validE_valid) + apply (simp add: handleSend_def) + apply (wp | simp)+ + done + +lemma getThreadCallerSlot_map: + "getThreadCallerSlot t = return (cte_map (t, tcb_cnode_index 3))" + by (simp add: getThreadCallerSlot_def locateSlot_conv + cte_map_def tcb_cnode_index_def tcbCallerSlot_def + cte_level_bits_def) + +lemma tcb_at_cte_at_map: + "\ tcb_at' t s; offs \ dom tcb_cap_cases \ \ cte_at' (cte_map (t, offs)) s" + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (drule tcb_cases_related) + apply (auto elim: cte_wp_at_tcbI') + done + +lemma deleteCallerCap_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + (delete_caller_cap t) + (deleteCallerCap t)" + apply (simp add: delete_caller_cap_def deleteCallerCap_def + getThreadCallerSlot_map) + apply (rule corres_guard_imp) + apply (rule_tac P'="cte_at' (cte_map (t, tcb_cnode_index 3))" in corres_symb_exec_r_conj) + apply (rule_tac F="isReplyCap rv \ rv = capability.NullCap" + and P="cte_wp_at (\cap. is_reply_cap cap \ cap = cap.NullCap) (t, tcb_cnode_index 3) + and einvs" + and P'="invs' and cte_wp_at' (\cte. cteCap cte = rv) + (cte_map (t, tcb_cnode_index 3))" in corres_req) + apply (clarsimp simp: cte_wp_at_caps_of_state state_relation_def) + apply (drule caps_of_state_cteD) + apply (drule(1) pspace_relation_cte_wp_at, clarsimp+) + apply (clarsimp simp: cte_wp_at_ctes_of is_reply_cap_relation cap_relation_NullCapI) + apply simp + apply (rule corres_guard_imp, rule cap_delete_one_corres) + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps) + apply (auto simp: can_fast_finalise_def)[1] + apply (clarsimp simp: cte_wp_at_ctes_of) + apply ((wp getCTE_wp')+ | simp add: getSlotCap_def)+ + apply clarsimp + apply (frule tcb_at_cte_at[where ref="tcb_cnode_index 3"]) + apply clarsimp + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (frule tcb_cap_valid_caps_of_stateD, clarsimp) + apply (drule(1) tcb_cnode_index_3_reply_or_null) + apply (auto simp: can_fast_finalise_def is_cap_simps + intro: tcb_at_cte_at_map tcb_at_cte_at)[1] + apply clarsimp + apply (frule_tac offs="tcb_cnode_index 3" in tcb_at_cte_at_map) + apply (simp add: tcb_cap_cases_def) + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma deleteCallerCap_invs[wp]: + "\invs'\ deleteCallerCap t \\rv. invs'\" + apply (simp add: deleteCallerCap_def getThreadCallerSlot_def + locateSlot_conv) + apply (wp cteDeleteOne_invs hoare_drop_imps) + done + +lemma deleteCallerCap_simple[wp]: + "\st_tcb_at' simple' t\ deleteCallerCap t' \\rv. st_tcb_at' simple' t\" + apply (simp add: deleteCallerCap_def getThreadCallerSlot_def + locateSlot_conv) + apply (wp cteDeleteOne_st_tcb_at hoare_drop_imps | simp)+ + done + +lemma cteDeleteOne_reply_cap_to''[wp]: + "\ex_nonz_cap_to' p and + cte_wp_at' (\c. isReplyCap (cteCap c) \ isNullCap (cteCap c)) slot\ + cteDeleteOne slot + \\rv. ex_nonz_cap_to' p\" + apply (simp add: cteDeleteOne_def ex_nonz_cap_to'_def unless_def) + apply (rule hoare_seq_ext [OF _ getCTE_sp]) + apply (rule hoare_assume_pre) + apply (subgoal_tac "isReplyCap (cteCap cte) \ isNullCap (cteCap cte)") + apply (wp hoare_vcg_ex_lift emptySlot_cte_wp_cap_other isFinalCapability_inv + | clarsimp simp: finaliseCap_def isCap_simps | simp + | wp (once) hoare_drop_imps)+ + apply (fastforce simp: cte_wp_at_ctes_of) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + done + +lemma deleteCallerCap_nonz_cap: + "\ex_nonz_cap_to' p and tcb_at' t and valid_objs'\ + deleteCallerCap t + \\rv. 
ex_nonz_cap_to' p\" + apply (simp add: deleteCallerCap_def getSlotCap_def getThreadCallerSlot_map + locateSlot_conv ) + apply (rule hoare_pre) + apply (wp cteDeleteOne_reply_cap_to'' getCTE_wp') + apply clarsimp + apply (frule_tac offs="tcb_cnode_index 3" in tcb_at_cte_at_map) + apply (clarsimp simp: tcb_cap_cases_def) + apply (auto simp: ex_nonz_cap_to'_def isCap_simps cte_wp_at_ctes_of) + done + +crunch sch_act_sane[wp]: cteDeleteOne sch_act_sane + (wp: crunch_wps loadObject_default_inv getObject_inv + simp: crunch_simps unless_def + rule: sch_act_sane_lift) + +crunch sch_act_sane[wp]: deleteCallerCap sch_act_sane + (wp: crunch_wps) + +lemma delete_caller_cap_valid_ep_cap: + "\valid_cap (cap.EndpointCap r a b)\ delete_caller_cap thread \\rv. valid_cap (cap.EndpointCap r a b)\" + apply (clarsimp simp: delete_caller_cap_def cap_delete_one_def valid_cap_def) + apply (rule hoare_pre) + by (wp get_cap_wp fast_finalise_typ_at abs_typ_at_lifts(1) + | simp add: unless_def valid_cap_def)+ + +lemma handleRecv_isBlocking_corres': + "corres dc (einvs and ct_in_state active + and (\s. ex_nonz_cap_to (cur_thread s) s)) + (invs' and ct_in_state' simple' + and sch_act_sane + and (\s. \p. ksCurThread s \ set (ksReadyQueues s p)) + and (\s. ex_nonz_cap_to' (ksCurThread s) s)) + (handle_recv isBlocking) (handleRecv isBlocking)" + (is "corres dc (?pre1) (?pre2) (handle_recv _) (handleRecv _)") + apply (simp add: handle_recv_def handleRecv_def liftM_bind Let_def + cap_register_def capRegister_def + cong: if_cong cap.case_cong capability.case_cong bool.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split_eqr[OF asUser_getRegister_corres]) + apply (rule corres_split_catch) + apply (rule corres_cap_fault) + apply (rule corres_splitEE[OF lookupCap_corres]) + apply (rule_tac P="?pre1 and tcb_at thread + and (\s. 
(cur_thread s) = thread ) + and valid_cap rv" + and P'="?pre2 and tcb_at' thread and valid_cap' rv'" in corres_inst) + apply (clarsimp split: cap_relation_split_asm arch_cap.split_asm split del: if_split + simp: lookup_failure_map_def whenE_def) + apply (rule corres_guard_imp) + apply (rename_tac rights) + apply (case_tac "AllowRead \ rights"; simp) + apply (rule corres_split_nor[OF deleteCallerCap_corres]) + apply (rule receiveIPC_corres) + apply (clarsimp)+ + apply (wp delete_caller_cap_nonz_cap delete_caller_cap_valid_ep_cap)+ + apply (clarsimp)+ + apply (clarsimp simp: lookup_failure_map_def)+ + apply (clarsimp simp: valid_cap'_def capAligned_def) + apply (rule corres_guard_imp) + apply (rename_tac rights) + apply (case_tac "AllowRead \ rights"; simp) + apply (rule_tac r'=ntfn_relation in corres_splitEE) + apply clarsimp + apply (rule getNotification_corres) + apply (rule corres_if) + apply (clarsimp simp: ntfn_relation_def) + apply (clarsimp, rule receiveSignal_corres) + prefer 3 + apply (rule corres_trivial) + apply (clarsimp simp: lookup_failure_map_def)+ + apply (wp get_simple_ko_wp getNotification_wp | wpcw | simp)+ + apply (clarsimp simp: lookup_failure_map_def) + apply (clarsimp simp: valid_cap_def ct_in_state_def) + apply (clarsimp simp: valid_cap'_def capAligned_def) + apply wp+ + apply (rule handleFault_corres) + apply simp + apply (wp get_simple_ko_wp | wpcw | simp)+ + apply (rule hoare_vcg_E_elim) + apply (simp add: lookup_cap_def lookup_slot_for_thread_def) + apply wp + apply (simp add: split_def) + apply (wp resolve_address_bits_valid_fault2)+ + apply (wp getNotification_wp | wpcw | simp add: valid_fault_def whenE_def split del: if_split)+ + apply (clarsimp simp add: ct_in_state_def ct_in_state'_def conj_comms invs_valid_tcb_ctable + invs_valid_objs tcb_at_invs invs_psp_aligned invs_cur) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + ct_in_state'_def sch_act_sane_not) + done + +lemma handleRecv_isBlocking_corres: + "corres dc (einvs and ct_active) + (invs' and ct_active' and sch_act_sane and + (\s. \p. ksCurThread s \ set (ksReadyQueues s p))) + (handle_recv isBlocking) (handleRecv isBlocking)" + apply (rule corres_guard_imp) + apply (rule handleRecv_isBlocking_corres') + apply (clarsimp simp: ct_in_state_def) + apply (fastforce elim!: st_tcb_weakenE st_tcb_ex_cap) + apply (clarsimp simp: ct_in_state'_def invs'_def valid_state'_def) + apply (frule(1) st_tcb_ex_cap'') + apply (auto elim: pred_tcb'_weakenE) + done + +lemma lookupCap_refs[wp]: + "\invs'\ lookupCap t ref \\rv s. \r\zobj_refs' rv. ex_nonz_cap_to' r s\,-" + by (simp add: lookupCap_def split_def | wp | simp add: o_def)+ + +lemma deleteCallerCap_ksQ_ct': + "\invs' and ct_in_state' simple' and sch_act_sane and + (\s. ksCurThread s \ set (ksReadyQueues s p) \ thread = ksCurThread s)\ + deleteCallerCap thread + \\rv s. thread \ set (ksReadyQueues s p)\" + apply (rule_tac Q="\rv s. thread = ksCurThread s \ ksCurThread s \ set (ksReadyQueues s p)" + in hoare_strengthen_post) + apply (wp deleteCallerCap_ct_not_ksQ) + apply auto + done + +lemma hw_invs'[wp]: + "\invs' and ct_in_state' simple' and sch_act_sane + and (\s. ex_nonz_cap_to' (ksCurThread s) s) + and (\s. ksCurThread s \ ksIdleThread s) + and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ + handleRecv isBlocking \\r. 
invs'\" + apply (simp add: handleRecv_def cong: if_cong) + apply (rule hoare_pre) + apply ((wp getNotification_wp | wpc | simp)+)[1] + apply (clarsimp simp: ct_in_state'_def) + apply ((wp deleteCallerCap_nonz_cap hoare_vcg_all_lift + deleteCallerCap_ksQ_ct' + hoare_lift_Pf2[OF deleteCallerCap_simple + deleteCallerCap_ct'] + | wpc | simp)+)[1] + apply simp + apply (wp deleteCallerCap_nonz_cap hoare_vcg_all_lift + deleteCallerCap_ksQ_ct' + hoare_lift_Pf2[OF deleteCallerCap_simple + deleteCallerCap_ct'] + | wpc | simp add: ct_in_state'_def whenE_def split del: if_split)+ + apply (rule validE_validE_R) + apply (rule_tac Q="\rv s. invs' s + \ sch_act_sane s + \ (\p. ksCurThread s \ set (ksReadyQueues s p)) + \ thread = ksCurThread s + \ ct_in_state' simple' s + \ ex_nonz_cap_to' thread s + \ thread \ ksIdleThread s + \ (\x \ zobj_refs' rv. ex_nonz_cap_to' x s)" + and E="\_ _. True" + in hoare_post_impErr[rotated]) + apply (clarsimp simp: isCap_simps ct_in_state'_def pred_tcb_at' invs_valid_objs' + sch_act_sane_not obj_at'_def pred_tcb_at'_def) + apply (assumption) + apply (wp)+ + apply (clarsimp) + apply (auto elim: st_tcb_ex_cap'' pred_tcb'_weakenE + dest!: st_tcb_at_idle_thread' + simp: ct_in_state'_def sch_act_sane_def) + done + +lemma setSchedulerAction_obj_at'[wp]: + "\obj_at' P p\ setSchedulerAction sa \\rv. obj_at' P p\" + unfolding setSchedulerAction_def + by (wp, clarsimp elim!: obj_at'_pspaceI) + +lemma handleYield_corres: + "corres dc einvs (invs' and ct_active' and (\s. ksSchedulerAction s = ResumeCurrentThread)) handle_yield handleYield" + apply (clarsimp simp: handle_yield_def handleYield_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply simp + apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF tcbSchedAppend_corres]) + apply (rule rescheduleRequired_corres) + apply (wp weak_sch_act_wf_lift_linear tcbSchedDequeue_valid_queues | simp add: )+ + apply (simp add: invs_def valid_sched_def valid_sched_action_def cur_tcb_def + tcb_at_is_etcb_at valid_state_def valid_pspace_def) + apply clarsimp + apply (frule ct_active_runnable') + apply (clarsimp simp: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def + valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def) + apply (erule(1) valid_objs_valid_tcbE[OF valid_pspace_valid_objs']) + apply (simp add:valid_tcb'_def) + done + +lemma hy_invs': + "\invs' and ct_active'\ handleYield \\r. 
invs' and ct_active'\" + apply (simp add: handleYield_def) + apply (wp ct_in_state_thread_state_lift' + rescheduleRequired_all_invs_but_ct_not_inQ + tcbSchedAppend_invs_but_ct_not_inQ' | simp)+ + apply (clarsimp simp add: invs'_def valid_state'_def ct_in_state'_def sch_act_wf_weak cur_tcb'_def + valid_pspace_valid_objs' valid_objs'_maxDomain tcb_in_cur_domain'_def + ) + apply (simp add:ct_active_runnable'[unfolded ct_in_state'_def]) + done + + +lemma dmo_addressTranslateS1_invs'[wp]: + "doMachineOp (addressTranslateS1 addr) \ invs' \" + unfolding addressTranslateS1_def + by (wpsimp wp: dmo_machine_op_lift_invs' dmo'_gets_wp simp: doMachineOp_bind) + +lemma curVCPUActive_invs'[wp]: + "curVCPUActive \invs'\" + unfolding curVCPUActive_def + by wpsimp + +lemma getHSR_invs'[wp]: + "doMachineOp getHSR \invs'\" + by (simp add: getHSR_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma getDFSR_invs'[wp]: + "doMachineOp getDFSR \invs'\" + by (simp add: getDFSR_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma getFAR_invs'[wp]: + "doMachineOp getFAR \invs'\" + by (simp add: getFAR_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma getIFSR_invs'[wp]: + "doMachineOp getIFSR \invs'\" + by (simp add: getIFSR_def doMachineOp_def split_def select_f_returns | wp)+ + +lemma hv_invs'[wp]: "\invs' and tcb_at' t'\ handleVMFault t' vptr \\r. invs'\" + apply (simp add: AARCH64_H.handleVMFault_def + cong: vmfault_type.case_cong) + apply (rule hoare_pre) + apply (wp | wpcw | simp)+ + done + +crunch nosch[wp]: handleVMFault "\s. P (ksSchedulerAction s)" + +lemma active_from_running': + "ct_running' s' \ ct_active' s'" + by (clarsimp elim!: pred_tcb'_weakenE + simp: ct_in_state'_def)+ + +lemma simple_from_running': + "ct_running' s' \ ct_in_state' simple' s'" + by (clarsimp elim!: pred_tcb'_weakenE + simp: ct_in_state'_def)+ + +lemma handleReply_corres: + "corres dc (einvs and ct_running) (invs' and ct_running') + handle_reply handleReply" + apply (simp add: handle_reply_def handleReply_def + getThreadCallerSlot_map + getSlotCap_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split[OF get_cap_corres]) + apply (rule_tac P="einvs and cte_wp_at ((=) caller_cap) (thread, tcb_cnode_index 3) + and K (is_reply_cap caller_cap \ caller_cap = cap.NullCap) + and tcb_at thread and st_tcb_at active thread + and valid_cap caller_cap" + and P'="invs' and tcb_at' thread + and valid_cap' (cteCap rv') + and cte_at' (cte_map (thread, tcb_cnode_index 3))" + in corres_inst) + apply (auto split: cap_relation_split_asm arch_cap.split_asm bool.split + intro!: corres_guard_imp [OF deleteCallerCap_corres] + corres_guard_imp [OF doReplyTransfer_corres] + corres_fail + simp: valid_cap_def valid_cap'_def is_cap_simps assert_def is_reply_cap_to_def)[1] + apply (fastforce simp: invs_def valid_state_def + cte_wp_at_caps_of_state st_tcb_def2 + dest: valid_reply_caps_of_stateD) + apply (wp get_cap_cte_wp_at get_cap_wp | simp add: cte_wp_at_eq_simp)+ + apply (intro conjI impI allI, + (fastforce simp: invs_def valid_state_def + intro: tcb_at_cte_at)+) + apply (clarsimp, frule tcb_at_invs) + apply (fastforce dest: tcb_caller_cap simp: cte_wp_at_def) + apply clarsimp + apply (clarsimp simp: ct_in_state_def elim!: st_tcb_weakenE) + apply (fastforce intro: cte_wp_valid_cap elim: cte_wp_at_weakenE) + apply (fastforce intro: tcb_at_cte_at_map) + done + +lemma hr_invs'[wp]: + "\invs' and sch_act_simple\ handleReply \\rv. 
invs'\" + apply (simp add: handleReply_def getSlotCap_def + getThreadCallerSlot_map getCurThread_def) + apply (wp getCTE_wp | wpc | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule ctes_of_valid', clarsimp+) + apply (simp add: valid_cap'_def) + apply (simp add: invs'_def cur_tcb'_def) + done + +crunch ksCurThread[wp]: handleReply "\s. P (ksCurThread s)" + (wp: crunch_wps transferCapsToSlots_pres1 setObject_ep_ct + setObject_ntfn_ct + simp: unless_def crunch_simps + ignore: transferCapsToSlots) + +lemmas cteDeleteOne_st_tcb_at_simple'[wp] = + cteDeleteOne_st_tcb_at[where P=simple', simplified] + +crunch st_tcb_at_simple'[wp]: handleReply "st_tcb_at' simple' t'" + (wp: hoare_post_taut crunch_wps sts_st_tcb_at'_cases + threadSet_pred_tcb_no_state + ignore: setThreadState) + +lemmas handleReply_ct_in_state_simple[wp] = + ct_in_state_thread_state_lift' [OF handleReply_ksCurThread + handleReply_st_tcb_at_simple'] + + +(* FIXME: move *) +lemma doReplyTransfer_st_tcb_at_active: + "\st_tcb_at' active' t and tcb_at' t' and K (t \ t') and + cte_wp_at' (\cte. cteCap cte = (capability.ReplyCap t' False g)) sl\ + doReplyTransfer t t' sl g + \\rv. st_tcb_at' active' t\" + apply (simp add: doReplyTransfer_def liftM_def) + apply (wp setThreadState_st_tcb sts_pred_tcb_neq' cteDeleteOne_reply_pred_tcb_at + hoare_drop_imps threadSet_pred_tcb_no_state hoare_exI + doIPCTransfer_non_null_cte_wp_at2' | wpc | clarsimp simp:isCap_simps)+ + apply (fastforce) + done + +lemma hr_ct_active'[wp]: + "\invs' and ct_active'\ handleReply \\rv. ct_active'\" + apply (simp add: handleReply_def getSlotCap_def getCurThread_def + getThreadCallerSlot_def locateSlot_conv) + apply (rule hoare_seq_ext) + apply (rule ct_in_state'_decomp) + apply ((wp hoare_drop_imps | wpc | simp)+)[1] + apply (subst haskell_assert_def) + apply (wp hoare_vcg_all_lift getCTE_wp doReplyTransfer_st_tcb_at_active + | wpc | simp)+ + apply (fastforce simp: ct_in_state'_def cte_wp_at_ctes_of valid_cap'_def + dest: ctes_of_valid') + done + +lemma handleCall_corres: + "corres (dc \ dc) (einvs and (\s. scheduler_action s = resume_cur_thread) and ct_active) + (invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and + ct_active') + handle_call handleCall" + by (simp add: handle_call_def handleCall_def liftE_bindE handleInvocation_corres) + +lemma hc_invs'[wp]: + "\invs' and + (\s. ksSchedulerAction s = ResumeCurrentThread) and + ct_active'\ + handleCall + \\rv. invs'\" + apply (simp add: handleCall_def) + apply (wp) + apply (clarsimp) + done + +lemma cteInsert_sane[wp]: + "\sch_act_sane\ cteInsert newCap srcSlot destSlot \\_. sch_act_sane\" + apply (simp add: sch_act_sane_def) + apply (wp hoare_vcg_all_lift + hoare_convert_imp [OF cteInsert_nosch cteInsert_ct]) + done + +crunch sane [wp]: setExtraBadge sch_act_sane + +crunch sane [wp]: transferCaps "sch_act_sane" + (wp: transferCapsToSlots_pres1 crunch_wps + simp: crunch_simps + ignore: transferCapsToSlots) + +lemma possibleSwitchTo_sane: + "\\s. sch_act_sane s \ t \ ksCurThread s\ possibleSwitchTo t \\_. 
sch_act_sane\" + apply (simp add: possibleSwitchTo_def setSchedulerAction_def curDomain_def + cong: if_cong) + apply (wp hoare_drop_imps | wpc)+ + apply (simp add: sch_act_sane_def) + done + +crunch sane [wp]: handleFaultReply sch_act_sane + ( wp: threadGet_inv hoare_drop_imps crunch_wps + simp: crunch_simps + ignore: setSchedulerAction) + +crunch sane [wp]: doIPCTransfer sch_act_sane + ( wp: threadGet_inv hoare_drop_imps crunch_wps + simp: crunch_simps + ignore: setSchedulerAction) + +lemma doReplyTransfer_sane: + "\\s. sch_act_sane s \ t' \ ksCurThread s\ + doReplyTransfer t t' callerSlot g \\rv. sch_act_sane\" + apply (simp add: doReplyTransfer_def liftM_def) + apply (wp possibleSwitchTo_sane hoare_drop_imps hoare_vcg_all_lift|wpc)+ + apply simp + done + +lemma handleReply_sane: + "\sch_act_sane\ handleReply \\rv. sch_act_sane\" + apply (simp add: handleReply_def getSlotCap_def getThreadCallerSlot_def locateSlot_conv) + apply (rule hoare_pre) + apply (wp haskell_assert_wp doReplyTransfer_sane getCTE_wp'| wpc)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma handleReply_nonz_cap_to_ct: + "\ct_active' and invs' and sch_act_simple\ + handleReply + \\rv s. ex_nonz_cap_to' (ksCurThread s) s\" + apply (rule_tac Q="\rv. ct_active' and invs'" + in hoare_post_imp) + apply (auto simp: ct_in_state'_def elim: st_tcb_ex_cap'')[1] + apply (wp | simp)+ + done + +crunch ksQ[wp]: handleFaultReply "\s. P (ksReadyQueues s p)" + +lemma doReplyTransfer_ct_not_ksQ: + "\ invs' and sch_act_simple + and tcb_at' thread and tcb_at' word + and ct_in_state' simple' + and (\s. ksCurThread s \ word) + and (\s. \p. ksCurThread s \ set(ksReadyQueues s p))\ + doReplyTransfer thread word callerSlot g + \\rv s. \p. ksCurThread s \ set(ksReadyQueues s p)\" +proof - + have astct: "\t p. + \(\s. ksCurThread s \ set(ksReadyQueues s p) \ sch_act_sane s) + and (\s. ksCurThread s \ t)\ + possibleSwitchTo t \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" + apply (rule hoare_weaken_pre) + apply (wps possibleSwitchTo_ct') + apply (wp possibleSwitchTo_ksQ') + apply (clarsimp simp: sch_act_sane_def) + done + have stsct: "\t st p. + \(\s. ksCurThread s \ set(ksReadyQueues s p)) and sch_act_simple\ + setThreadState st t + \\rv s. ksCurThread s \ set(ksReadyQueues s p)\" + apply (rule hoare_weaken_pre) + apply (wps setThreadState_ct') + apply (wp hoare_vcg_all_lift sts_ksQ) + apply (clarsimp) + done + show ?thesis + apply (simp add: doReplyTransfer_def) + apply (wp, wpc) + apply (wp astct stsct hoare_vcg_all_lift + cteDeleteOne_ct_not_ksQ hoare_drop_imp + hoare_lift_Pf2 [OF cteDeleteOne_sch_act_not cteDeleteOne_ct'] + hoare_lift_Pf2 [OF doIPCTransfer_pred_tcb_at' doIPCTransfer_ct'] + hoare_lift_Pf2 [OF doIPCTransfer_ksQ doIPCTransfer_ct'] + hoare_lift_Pf2 [OF threadSet_ksQ threadSet_ct] + hoare_lift_Pf2 [OF handleFaultReply_ksQ handleFaultReply_ct'] + | simp add: ct_in_state'_def)+ + apply (fastforce simp: sch_act_simple_def sch_act_sane_def ct_in_state'_def)+ + done +qed + +lemma handleReply_ct_not_ksQ: + "\invs' and sch_act_simple + and ct_in_state' simple' + and (\s. \p. ksCurThread s \ set (ksReadyQueues s p))\ + handleReply + \\rv s. \p. ksCurThread s \ set (ksReadyQueues s p)\" + apply (simp add: handleReply_def del: split_paired_All) + apply (subst haskell_assert_def) + apply (wp | wpc)+ + apply (wp doReplyTransfer_ct_not_ksQ getThreadCallerSlot_inv)+ + apply (rule_tac Q="\cap. + (\s. \p. ksCurThread s \ set(ksReadyQueues s p)) + and invs' + and sch_act_simple + and (\s. 
thread = ksCurThread s) + and tcb_at' thread + and ct_in_state' simple' + and cte_wp_at' (\c. cteCap c = cap) callerSlot" + in hoare_post_imp) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def + cte_wp_at_ctes_of valid_cap'_def + dest!: ctes_of_valid') + apply (wp getSlotCap_cte_wp_at getThreadCallerSlot_inv)+ + apply (clarsimp) + done + +crunch valid_etcbs[wp]: handle_recv "valid_etcbs" + (wp: crunch_wps simp: crunch_simps) + +lemma handleReply_handleRecv_corres: + "corres dc (einvs and ct_running) + (invs' and ct_running' and (\s. ksSchedulerAction s = ResumeCurrentThread)) + (do x \ handle_reply; handle_recv True od) + (do x \ handleReply; handleRecv True od)" + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF handleReply_corres]) + apply (rule handleRecv_isBlocking_corres') + apply (wp handle_reply_nonz_cap_to_ct handleReply_sane + handleReply_nonz_cap_to_ct handleReply_ct_not_ksQ handle_reply_valid_sched)+ + apply (fastforce simp: ct_in_state_def ct_in_state'_def simple_sane_strg + elim!: st_tcb_weakenE st_tcb_ex_cap') + apply (clarsimp simp: ct_in_state'_def) + apply (frule(1) ct_not_ksQ) + apply (fastforce elim: pred_tcb'_weakenE) + done + +lemma handleHypervisorFault_corres: + "corres dc (einvs and st_tcb_at active thread and ex_nonz_cap_to thread) + (invs' and sch_act_not thread + and (\s. \p. thread \ set(ksReadyQueues s p)) + and st_tcb_at' simple' thread and ex_nonz_cap_to' thread) + (handle_hypervisor_fault thread fault) (handleHypervisorFault thread fault)" + apply (cases fault; clarsimp simp add: handleHypervisorFault_def returnOk_def2) + apply (corresK corres: handleFault_corres) + apply (clarsimp simp: valid_fault_def) + done + + +lemma dmo_machine_rest_lift: + "(\s m. P (s\ksMachineState := ksMachineState s\machine_state_rest := m\\) = P s) \ + \P\ doMachineOp (machine_op_lift f') \\rv. P\" + apply (wpsimp simp: doMachineOp_def machine_op_lift_def machine_rest_lift_def in_monad) + apply (clarsimp simp: select_f_def ignore_failure_def split: if_split_asm) + done + +lemma hvmf_invs_lift: + "(\s m. P (s\ksMachineState := ksMachineState s\machine_state_rest := m\\) = P s) \ + \P\ handleVMFault t flt \\_ _. True\, \\_. P\" + unfolding handleVMFault_def + by (wpsimp wp: dmo_machine_rest_lift asUser_inv dmo'_gets_wp + simp: getHSR_def addressTranslateS1_def getDFSR_def getFAR_def getIFSR_def + curVCPUActive_def doMachineOp_bind getRestartPC_def getRegister_def) + +lemma hvmf_invs_etc: + "\invs' and sch_act_not t and (\s. \p. t \ set (ksReadyQueues s p)) and st_tcb_at' simple' t and + ex_nonz_cap_to' t\ + handleVMFault t f + \\_ _. True\, + \\_. invs' and sch_act_not t and (\s. \p. t \ set (ksReadyQueues s p)) and + st_tcb_at' simple' t and ex_nonz_cap_to' t\" + apply (rule hvmf_invs_lift) + apply (clarsimp simp: invs'_def valid_state'_def valid_machine_state'_def) + done + +lemma handleEvent_corres: + "corres (dc \ dc) (einvs and (\s. event \ Interrupt \ ct_running s) and + (\s. scheduler_action s = resume_cur_thread)) + (invs' and (\s. event \ Interrupt \ ct_running' s) and + (\s. ksSchedulerAction s = ResumeCurrentThread)) + (handle_event event) (handleEvent event)" +proof - + have hw: + "\isBlocking. corres dc (einvs and ct_running and (\s. scheduler_action s = resume_cur_thread)) + (invs' and ct_running' + and (\s. 
ksSchedulerAction s = ResumeCurrentThread)) + (handle_recv isBlocking) (handleRecv isBlocking)" + apply (rule corres_guard_imp [OF handleRecv_isBlocking_corres]) + apply (clarsimp simp: ct_in_state_def ct_in_state'_def + elim!: st_tcb_weakenE pred_tcb'_weakenE + dest!: ct_not_ksQ)+ + done + show ?thesis + apply (case_tac event) + apply (simp_all add: handleEvent_def) + + apply (rename_tac syscall) + apply (case_tac syscall) + apply (auto intro: corres_guard_imp[OF handleSend_corres] + corres_guard_imp[OF hw] + corres_guard_imp [OF handleReply_corres] + corres_guard_imp[OF handleReply_handleRecv_corres] + corres_guard_imp[OF handleCall_corres] + corres_guard_imp[OF handleYield_corres] + active_from_running active_from_running' + simp: simple_sane_strg)[8] + apply (rule corres_underlying_split) + apply (rule corres_guard_imp[OF getCurThread_corres], simp+) + apply (rule handleFault_corres) + apply simp + apply (simp add: valid_fault_def) + apply wp + apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE + simp: ct_in_state_def) + apply wp + apply (clarsimp) + apply (frule(1) ct_not_ksQ) + apply (auto simp: ct_in_state'_def sch_act_simple_def + sch_act_sane_def + elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] + apply (rule corres_underlying_split) + apply (rule corres_guard_imp, rule getCurThread_corres, simp+) + apply (rule handleFault_corres) + apply (simp add: valid_fault_def) + apply wp + apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE + simp: ct_in_state_def valid_fault_def) + apply wp + apply clarsimp + apply (frule(1) ct_not_ksQ) + apply (auto simp: ct_in_state'_def sch_act_simple_def + sch_act_sane_def + elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[where R="\rv. einvs" + and R'="\rv s. \x. rv = Some x \ R'' x s" + for R'']) + apply (rule corres_machine_op) + apply (rule corres_Id; wpsimp) + apply (case_tac rv, simp_all add: doMachineOp_return)[1] + apply (rule handleInterrupt_corres) + apply (wp hoare_vcg_all_lift + doMachineOp_getActiveIRQ_IRQ_active' + | simp + | simp add: imp_conjR | wp (once) hoare_drop_imps)+ + apply force + apply simp + apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def valid_queues_def + valid_queues_no_bitmap_def) + apply (rule_tac corres_underlying_split) + apply (rule corres_guard_imp, rule getCurThread_corres, simp+) + apply (rule corres_split_catch) + apply (rule handleVMFault_corres) + apply (erule handleFault_corres) + apply (wp handle_vm_fault_valid_fault) + apply (wp hvmf_invs_etc) + apply wp + apply (clarsimp simp: simple_from_running tcb_at_invs) + apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE simp: ct_in_state_def) + apply wp + apply (clarsimp) + apply (frule(1) ct_not_ksQ) + apply (fastforce simp: simple_sane_strg sch_act_simple_def ct_in_state'_def + elim: st_tcb_ex_cap'' pred_tcb'_weakenE) + apply (rule corres_underlying_split) + apply (rule corres_guard_imp[OF getCurThread_corres], simp+) + apply (rule handleHypervisorFault_corres) + apply wp + apply (fastforce elim!: st_tcb_ex_cap st_tcb_weakenE + simp: ct_in_state_def) + apply wp + apply (clarsimp) + apply (frule(1) ct_not_ksQ) + apply (auto simp: ct_in_state'_def sch_act_simple_def + sch_act_sane_def + elim: pred_tcb'_weakenE st_tcb_ex_cap'')[1] + done +qed + +crunches handleVMFault + for st_tcb_at'[wp]: "st_tcb_at' P t" + and cap_to'[wp]: "ex_nonz_cap_to' t" + and norq[wp]: "\s. P (ksReadyQueues s)" + +crunches handleVMFault, handleHypervisorFault + for ksit[wp]: "\s. 
P (ksIdleThread s)" + (wp: crunch_wps getSlotCap_wp simp: getThreadReplySlot_def getThreadCallerSlot_def locateSlotTCB_def locateSlotBasic_def) + +lemma hh_invs'[wp]: + "\invs' and sch_act_not p and (\s. \a b. p \ set (ksReadyQueues s (a, b))) and + st_tcb_at' simple' p and ex_nonz_cap_to' p and (\s. p \ ksIdleThread s)\ + handleHypervisorFault p t \\_. invs'\" + apply (simp add: AARCH64_H.handleHypervisorFault_def) + apply (cases t; wpsimp) + done + +lemma ct_not_idle': + fixes s + assumes vi: "valid_idle' s" + and cts: "ct_in_state' (\tcb. \idle' tcb) s" + shows "ksCurThread s \ ksIdleThread s" +proof + assume "ksCurThread s = ksIdleThread s" + with vi have "ct_in_state' idle' s" + unfolding ct_in_state'_def valid_idle'_def + by (clarsimp simp: pred_tcb_at'_def obj_at'_def idle_tcb'_def) + + with cts show False + unfolding ct_in_state'_def + by (fastforce dest: pred_tcb_at_conj') +qed + +lemma ct_running_not_idle'[simp]: + "\invs' s; ct_running' s\ \ ksCurThread s \ ksIdleThread s" + apply (rule ct_not_idle') + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def + elim: pred_tcb'_weakenE)+ + done + +lemma ct_active_not_idle'[simp]: + "\invs' s; ct_active' s\ \ ksCurThread s \ ksIdleThread s" + apply (rule ct_not_idle') + apply (fastforce simp: invs'_def valid_state'_def ct_in_state'_def + elim: pred_tcb'_weakenE)+ + done + +lemma deleteCallerCap_st_tcb_at_runnable[wp]: + "\st_tcb_at' runnable' t\ deleteCallerCap t' \\rv. st_tcb_at' runnable' t\" + apply (simp add: deleteCallerCap_def getThreadCallerSlot_def + locateSlot_conv) + apply (wp cteDeleteOne_tcb_at_runnable' hoare_drop_imps | simp)+ + done + +crunches handleFault,receiveSignal,receiveIPC,asUser + for ksCurThread[wp]: "\s. P (ksCurThread s)" + (wp: hoare_drop_imps crunch_wps simp: crunch_simps) + +lemma handleRecv_ksCurThread[wp]: + "\\s. P (ksCurThread s) \ handleRecv b \\rv s. P (ksCurThread s) \" + unfolding handleRecv_def + by ((simp, wp hoare_drop_imps) | wpc | wpsimp wp: hoare_drop_imps)+ + +lemma he_invs'[wp]: + "\invs' and + (\s. event \ Interrupt \ ct_running' s) and + (\s. ksSchedulerAction s = ResumeCurrentThread)\ + handleEvent event + \\rv. invs'\" +proof - + have nidle: "\s. invs' s \ ct_active' s \ ksCurThread s \ ksIdleThread s" + by (clarsimp) + show ?thesis + apply (case_tac event, simp_all add: handleEvent_def) + apply (rename_tac syscall) + apply (case_tac syscall, + (wp handleReply_sane handleReply_nonz_cap_to_ct handleReply_ksCurThread + handleReply_ct_not_ksQ + | clarsimp simp: active_from_running' simple_from_running' simple_sane_strg simp del: split_paired_All + | rule conjI active_ex_cap' + | drule ct_not_ksQ[rotated] + | strengthen nidle)+) + apply (rule hoare_strengthen_post, + rule hoare_weaken_pre, + rule hy_invs') + apply (simp add: active_from_running') + apply simp + apply (simp add: active_from_running') + apply (wp + | rule conjI + | erule pred_tcb'_weakenE st_tcb_ex_cap'' + | clarsimp simp: tcb_at_invs ct_in_state'_def simple_sane_strg sch_act_simple_def + | drule st_tcb_at_idle_thread' + | drule ct_not_ksQ[rotated] + | wpc | wp (once) hoare_drop_imps hoare_vcg_all_lift)+ + done +qed + +lemma inv_irq_IRQInactive: + "\\\ performIRQControl irqcontrol_invocation + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: performIRQControl_def) + apply (rule hoare_pre) + apply (wpc|wp|simp add: AARCH64_H.performIRQControl_def)+ + done + +lemma inv_arch_IRQInactive: + "\\\ Arch.performInvocation invocation + -, \\rv s. 
intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (wpsimp simp: performARMMMUInvocation_def AARCH64_H.performInvocation_def) + done + +lemma retype_pi_IRQInactive: + "\valid_irq_states'\ RetypeDecls_H.performInvocation blocking call v + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: Retype_H.performInvocation_def) + apply (rule hoare_pre) + apply (wpc | + wp inv_tcb_IRQInactive inv_cnode_IRQInactive inv_irq_IRQInactive + inv_untyped_IRQInactive inv_arch_IRQInactive | + simp)+ + done + +lemma hi_IRQInactive: + "\valid_irq_states'\ handleInvocation call blocking + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: handleInvocation_def split_def) + apply (wp syscall_valid' retype_pi_IRQInactive) + done + +lemma handleSend_IRQInactive: + "\invs'\ handleSend blocking + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: handleSend_def) + apply (rule hoare_pre) + apply (wp hi_IRQInactive) + apply (simp add: invs'_def valid_state'_def) + done + +lemma handleCall_IRQInactive: + "\invs'\ handleCall + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + apply (simp add: handleCall_def) + apply (rule hoare_pre) + apply (wp hi_IRQInactive) + apply (simp add: invs'_def valid_state'_def) + done + +end + +end diff --git a/proof/refine/AARCH64/TcbAcc_R.thy b/proof/refine/AARCH64/TcbAcc_R.thy new file mode 100644 index 0000000000..2ae604d46e --- /dev/null +++ b/proof/refine/AARCH64/TcbAcc_R.thy @@ -0,0 +1,4723 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory TcbAcc_R +imports CSpace_R ArchMove_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare if_weak_cong [cong] +declare hoare_in_monad_post[wp] +declare trans_state_update'[symmetric,simp] +declare storeWordUser_typ_at' [wp] + +(* Auxiliaries and basic properties of priority bitmap functions *) + +lemma countLeadingZeros_word_clz[simp]: + "countLeadingZeros w = word_clz w" + unfolding countLeadingZeros_def word_clz_def + by (simp add: to_bl_upt) + +lemma wordLog2_word_log2[simp]: + "wordLog2 = word_log2" + apply (rule ext) + unfolding wordLog2_def word_log2_def + by (simp add: word_size wordBits_def) + +lemmas bitmap_fun_defs = addToBitmap_def removeFromBitmap_def + modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def + +(* lookupBitmapPriority is a cleaner version of getHighestPrio *) +definition + "lookupBitmapPriority d \ \s. + l1IndexToPrio (word_log2 (ksReadyQueuesL1Bitmap s d)) || + of_nat (word_log2 (ksReadyQueuesL2Bitmap s (d, + invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d)))))" + +lemma getHighestPrio_def'[simp]: + "getHighestPrio d = gets (lookupBitmapPriority d)" + unfolding getHighestPrio_def gets_def + by (fastforce simp: gets_def get_bind_apply lookupBitmapPriority_def bitmap_fun_defs) + +(* isHighestPrio_def' is a cleaner version of isHighestPrio_def *) +lemma isHighestPrio_def': + "isHighestPrio d p = gets (\s. ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" + unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' + apply (rule ext) + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def + split: if_splits) + done + +lemma getHighestPrio_inv[wp]: + "\ P \ getHighestPrio d \\_. 
P \" + unfolding bitmap_fun_defs by simp + +lemma valid_bitmapQ_bitmapQ_simp: + "\ valid_bitmapQ s \ \ + bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" + unfolding valid_bitmapQ_def + by simp + +lemma prioToL1Index_l1IndexToPrio_or_id: + "\ unat (w'::priority) < 2 ^ wordRadix ; w < 2^(size w' - wordRadix) \ + \ prioToL1Index ((l1IndexToPrio w) || w') = w" + unfolding l1IndexToPrio_def prioToL1Index_def + apply (simp add: shiftr_over_or_dist shiftr_le_0 wordRadix_def') + apply (subst shiftl_shiftr_id, simp, simp add: word_size) + apply (rule word_of_nat_less) + apply simp + apply (subst unat_of_nat_eq, simp_all add: word_size) + done + +lemma bitmapQ_no_L1_orphansD: + "\ bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d !! i \ + \ ksReadyQueuesL2Bitmap s (d, invertL1Index i) \ 0 \ i < l2BitmapSize" + unfolding bitmapQ_no_L1_orphans_def by simp + +lemma l1IndexToPrio_wordRadix_mask[simp]: + "l1IndexToPrio i && mask wordRadix = 0" + unfolding l1IndexToPrio_def + by (simp add: wordRadix_def') + +definition + (* when in the middle of updates, a particular queue might not be entirely valid *) + valid_queues_no_bitmap_except :: "machine_word \ kernel_state \ bool" +where + "valid_queues_no_bitmap_except t' \ \s. + (\d p. (\t \ set (ksReadyQueues s (d, p)). t \ t' \ obj_at' (inQ d p and runnable' \ tcbState) t s) + \ distinct (ksReadyQueues s (d, p)) + \ (d > maxDomain \ p > maxPriority \ ksReadyQueues s (d,p) = []))" + +lemma valid_queues_no_bitmap_exceptI[intro]: + "valid_queues_no_bitmap s \ valid_queues_no_bitmap_except t s" + unfolding valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def + by simp + +lemma st_tcb_at_coerce_abstract: + assumes t: "st_tcb_at' P t c" + assumes sr: "(a, c) \ state_relation" + shows "st_tcb_at (\st. \st'. thread_state_relation st st' \ P st') t a" + using assms + apply (clarsimp simp: state_relation_def pred_tcb_at'_def obj_at'_def objBits_simps) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE, simp_all) + apply (clarsimp simp: st_tcb_at_def obj_at_def other_obj_relation_def + tcb_relation_def + split: Structures_A.kernel_object.split_asm if_split_asm + AARCH64_A.arch_kernel_obj.split_asm)+ + apply fastforce + done + +lemma st_tcb_at_runnable_coerce_concrete: + assumes t: "st_tcb_at runnable t a" + assumes sr: "(a, c) \ state_relation" + assumes tcb: "tcb_at' t c" + shows "st_tcb_at' runnable' t c" + using t + apply - + apply (rule ccontr) + apply (drule pred_tcb_at'_Not[THEN iffD2, OF conjI, OF tcb]) + apply (drule st_tcb_at_coerce_abstract[OF _ sr]) + apply (clarsimp simp: st_tcb_def2) + apply (case_tac "tcb_state tcb"; simp) + done + +lemma pspace_relation_tcb_at': + assumes p: "pspace_relation (kheap a) (ksPSpace c)" + assumes t: "tcb_at t a" + assumes aligned: "pspace_aligned' c" + assumes distinct: "pspace_distinct' c" + shows "tcb_at' t c" using assms + apply (clarsimp simp: obj_at_def) + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: is_tcb other_obj_relation_def) + apply (simp split: kernel_object.split_asm) + apply (drule(2) aligned_distinct_obj_atI'[where 'a=tcb], simp) + apply (erule obj_at'_weakenE) + apply simp + done + +lemma tcb_at_cross: + "\ tcb_at t s; pspace_aligned s; pspace_distinct s; + pspace_relation (kheap s) (ksPSpace s') \ \ tcb_at' t s'" + apply (drule (2) pspace_distinct_cross) + apply (drule (1) pspace_aligned_cross) + apply (erule (3) pspace_relation_tcb_at') + done + +lemma st_tcb_at_runnable_cross: + "\ st_tcb_at runnable t s; pspace_aligned s; pspace_distinct s; (s, s') \ 
state_relation \ + \ st_tcb_at' runnable' t s'" + apply (frule (1) pspace_distinct_cross, fastforce simp: state_relation_def) + apply (frule pspace_aligned_cross, fastforce simp: state_relation_def) + apply (prop_tac "tcb_at t s", clarsimp simp: st_tcb_at_def obj_at_def is_tcb) + apply (drule (2) tcb_at_cross, fastforce simp: state_relation_def) + apply (erule (2) st_tcb_at_runnable_coerce_concrete) + done + +lemma cur_tcb_cross: + "\ cur_tcb s; pspace_aligned s; pspace_distinct s; (s,s') \ state_relation \ \ cur_tcb' s'" + apply (clarsimp simp: cur_tcb'_def cur_tcb_def state_relation_def) + apply (erule (3) tcb_at_cross) + done + +lemma valid_objs_valid_tcbE: "\s t.\ valid_objs' s; tcb_at' t s; \tcb. valid_tcb' tcb s \ R s tcb \ \ obj_at' (R s) t s" + apply (clarsimp simp add: valid_objs'_def ran_def typ_at'_def + ko_wp_at'_def valid_obj'_def valid_tcb'_def obj_at'_def) + apply (fastforce simp: projectKO_def projectKO_opt_tcb return_def valid_tcb'_def) + done + +lemma valid_objs'_maxDomain: + "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbDomain tcb \ maxDomain) t s" + apply (erule (1) valid_objs_valid_tcbE) + apply (clarsimp simp: valid_tcb'_def) + done + +lemma valid_objs'_maxPriority: + "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. tcbPriority tcb \ maxPriority) t s" + apply (erule (1) valid_objs_valid_tcbE) + apply (clarsimp simp: valid_tcb'_def) + done + +lemma doMachineOp_irq_states': + assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" + shows "\valid_irq_states'\ doMachineOp f \\rv. valid_irq_states'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (drule use_valid) + apply (rule_tac P="\m. m = irq_masks (ksMachineState s)" in masks) + apply simp + apply simp + done + +lemma dmo_invs': + assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" + shows "\(\s. \m. \(r,m')\fst (f m). \p. + pointerInUserData p s \ pointerInDeviceData p s \ + underlying_memory m' p = underlying_memory m p) and + invs'\ doMachineOp f \\r. invs'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subst invs'_machine) + apply (drule use_valid) + apply (rule_tac P="\m. m = irq_masks (ksMachineState s)" in masks, simp+) + apply (fastforce simp add: valid_machine_state'_def) + apply assumption + done + +lemma dmo_invs_no_cicd': + assumes masks: "\P. \\s. P (irq_masks s)\ f \\_ s. P (irq_masks s)\" + shows "\(\s. \m. \(r,m')\fst (f m). \p. + pointerInUserData p s \ pointerInDeviceData p s \ + underlying_memory m' p = underlying_memory m p) and + invs_no_cicd'\ doMachineOp f \\r. invs_no_cicd'\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (subst invs_no_cicd'_machine) + apply (drule use_valid) + apply (rule_tac P="\m. m = irq_masks (ksMachineState s)" in masks, simp+) + apply (fastforce simp add: valid_machine_state'_def) + apply assumption + done + +lemma dmo_lift': + assumes f: "\P\ f \Q\" + shows "\\s. P (ksMachineState s)\ doMachineOp f + \\rv s. Q rv (ksMachineState s)\" + apply (simp add: doMachineOp_def split_def) + apply wp + apply clarsimp + apply (erule (1) use_valid [OF _ f]) + done + +lemma doMachineOp_getActiveIRQ_IRQ_active: + "\valid_irq_states'\ + doMachineOp (getActiveIRQ in_kernel) + \\rv s. \irq. 
rv = Some irq \ intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" + apply (rule hoare_lift_Pf3 [where f="ksInterruptState"]) + prefer 2 + apply wp + apply (simp add: irq_state_independent_H_def) + apply assumption + apply (rule dmo_lift') + apply (rule getActiveIRQ_masked) + done + +lemma doMachineOp_getActiveIRQ_IRQ_active': + "\valid_irq_states'\ + doMachineOp (getActiveIRQ in_kernel) + \\rv s. rv = Some irq \ intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule doMachineOp_getActiveIRQ_IRQ_active) + apply simp + done + +lemma preemptionPoint_irq [wp]: + "\valid_irq_states'\ preemptionPoint -, + \\irq s. intStateIRQTable (ksInterruptState s) irq \ IRQInactive\" + apply (simp add: preemptionPoint_def setWorkUnits_def modifyWorkUnits_def getWorkUnits_def) + apply (wp whenE_wp|wpc)+ + apply (rule hoare_post_imp) + prefer 2 + apply (rule doMachineOp_getActiveIRQ_IRQ_active) + apply clarsimp + apply wp+ + apply clarsimp + done + +lemmas doMachineOp_obj_at = doMachineOp_obj_at' + +lemma updateObject_tcb_inv: + "\P\ updateObject (obj::tcb) ko p q n \\rv. P\" + by simp (rule updateObject_default_inv) + +lemma setObject_update_TCB_corres': + assumes tcbs: "tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'" + assumes tables: "\(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" + assumes r: "r () ()" + assumes exst: "exst_same tcb' tcbu'" + shows "corres r (ko_at (TCB tcb) add) + (ko_at' tcb' add) + (set_object add (TCB tcbu)) (setObject add tcbu')" + apply (rule_tac F="tcb_relation tcb tcb' \ exst_same tcb' tcbu'" in corres_req) + apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) + apply (frule(1) pspace_relation_absD) + apply (clarsimp simp: other_obj_relation_def exst) + apply (rule corres_guard_imp) + apply (rule corres_rel_imp) + apply (rule setObject_other_corres[where P="(=) tcb'"]) + apply (rule ext)+ + apply simp + defer + apply (simp add: is_other_obj_relation_type_def + objBits_simps' other_obj_relation_def tcbs r)+ + apply (fastforce elim!: obj_at_weakenE dest: bspec[OF tables]) + apply (subst(asm) eq_commute, assumption) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (subst map_to_ctes_upd_tcb, assumption+) + apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) + apply (subst if_not_P) + apply (fastforce dest: bspec [OF tables', OF ranI]) + apply simp + done + +lemma setObject_update_TCB_corres: + "\ tcb_relation tcb tcb' \ tcb_relation tcbu tcbu'; + \(getF, v) \ ran tcb_cap_cases. getF tcbu = getF tcb; + \(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'; + r () (); exst_same tcb' tcbu'\ + \ corres r (\s. get_tcb add s = Some tcb) + (\s'. 
(tcb', s') \ fst (getObject add s')) + (set_object add (TCB tcbu)) (setObject add tcbu')" + apply (rule corres_guard_imp) + apply (erule (3) setObject_update_TCB_corres', force) + apply fastforce + apply (clarsimp simp: getObject_def in_monad split_def obj_at'_def + loadObject_default_def objBits_simps' in_magnitude_check) + done + +lemma getObject_TCB_corres: + "corres tcb_relation (tcb_at t and pspace_aligned and pspace_distinct) \ + (gets_the (get_tcb t)) (getObject t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_gets_the) + apply (rule corres_get_tcb) + apply (simp add: tcb_at_def) + apply assumption + done + +lemma threadGet_corres: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ r (f tcb) (f' tcb')" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get f t) (threadGet f' t)" + apply (simp add: thread_get_def threadGet_def) + apply (fold liftM_def) + apply simp + apply (rule corres_rel_imp) + apply (rule getObject_TCB_corres) + apply (simp add: x) + done + +lemma threadGet_inv [wp]: "\P\ threadGet f t \\rv. P\" + by (simp add: threadGet_def getObject_inv_tcb | wp)+ + +lemma ball_tcb_cte_casesI: + "\ P (tcbCTable, tcbCTable_update); + P (tcbVTable, tcbVTable_update); + P (tcbReply, tcbReply_update); + P (tcbCaller, tcbCaller_update); + P (tcbIPCBufferFrame, tcbIPCBufferFrame_update) \ + \ \x \ ran tcb_cte_cases. P x" + by (simp add: tcb_cte_cases_def cteSizeBits_def) + +lemma all_tcbI: + "\ \a b c d e f g h i j k l m n p q. P (Thread a b c d e f g h i j k l m n p q) \ \ \tcb. P tcb" + by (rule allI, case_tac tcb, simp) + +lemma threadset_corresT: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation (f tcb) (f' tcb')" + assumes y: "\tcb. \(getF, setF) \ ran tcb_cap_cases. getF (f tcb) = getF tcb" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (f' tcb) = getF tcb" + assumes e: "\tcb'. exst_same tcb' (f' tcb')" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ + (thread_set f t) (threadSet f' t)" + apply (simp add: thread_set_def threadSet_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getObject_TCB_corres]) + apply (rule setObject_update_TCB_corres') + apply (erule x) + apply (rule y) + apply (clarsimp simp: bspec_split [OF spec [OF z]]) + apply fastforce + apply simp + apply (rule e) + apply wp+ + apply (clarsimp simp add: tcb_at_def obj_at_def) + apply (drule get_tcb_SomeD) + apply fastforce + apply simp + done + +lemmas threadset_corres = + threadset_corresT [OF _ _ all_tcbI, OF _ ball_tcb_cap_casesI ball_tcb_cte_casesI] + +lemma pspace_relation_tcb_at: + assumes p: "pspace_relation (kheap a) (ksPSpace c)" + assumes t: "tcb_at' t c" + shows "tcb_at t a" using assms + apply (clarsimp simp: obj_at'_def) + apply (erule(1) pspace_dom_relatedE) + apply (erule(1) obj_relation_cutsE) + apply (clarsimp simp: other_obj_relation_def is_tcb obj_at_def + split: Structures_A.kernel_object.split_asm if_split_asm + AARCH64_A.arch_kernel_obj.split_asm)+ + done + +lemma threadSet_corres_noopT: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation tcb (fn tcb')" + assumes y: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (fn tcb) = getF tcb" + assumes e: "\tcb'. exst_same tcb' (fn tcb')" + shows "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (return v) (threadSet fn t)" +proof - + have S: "\t s. tcb_at t s \ return v s = (thread_set id t >>= (\x. 
return v)) s" + apply (clarsimp simp: tcb_at_def) + apply (simp add: return_def thread_set_def gets_the_def assert_def + assert_opt_def simpler_gets_def set_object_def get_object_def + put_def get_def bind_def) + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s", simp) + apply (simp add: map_upd_triv get_tcb_SomeD)+ + done + show ?thesis + apply (rule stronger_corres_guard_imp) + apply (subst corres_cong [OF refl refl S refl refl]) + defer + apply (subst bind_return [symmetric], + rule corres_underlying_split [OF threadset_corresT]) + apply (simp add: x) + apply simp + apply (rule y) + apply (rule e) + apply (rule corres_noop [where P=\ and P'=\]) + apply simp + apply (rule no_fail_pre, wpsimp+)[1] + apply wpsimp+ + done +qed + +lemmas threadSet_corres_noop = + threadSet_corres_noopT [OF _ all_tcbI, OF _ ball_tcb_cte_casesI] + +lemma threadSet_corres_noop_splitT: + assumes x: "\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation tcb (fn tcb')" + assumes y: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (fn tcb) = getF tcb" + assumes z: "corres r P Q' m m'" + assumes w: "\P'\ threadSet fn t \\x. Q'\" + assumes e: "\tcb'. exst_same tcb' (fn tcb')" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct and P) P' + m (threadSet fn t >>= (\rv. m'))" + apply (rule corres_guard_imp) + apply (subst return_bind[symmetric]) + apply (rule corres_split_nor[OF threadSet_corres_noopT]) + apply (simp add: x) + apply (rule y) + apply (rule e) + apply (rule z) + apply (wp w)+ + apply simp + apply simp + done + +lemmas threadSet_corres_noop_split = + threadSet_corres_noop_splitT [OF _ all_tcbI, OF _ ball_tcb_cte_casesI] + +lemma threadSet_tcb' [wp]: + "\tcb_at' t\ threadSet f t' \\rv. tcb_at' t\" + by (simp add: threadSet_def) wp + +lemma threadSet_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ threadSet f t \\rv s. P (ksSchedulerAction s)\" + unfolding threadSet_def + by (simp add: updateObject_default_def | wp setObject_nosch)+ + +(* The function "thread_set f p" updates a TCB at p using function f. + It should not be used to change capabilities, though. *) +lemma setObject_tcb_valid_objs: + "\valid_objs' and (tcb_at' t and valid_obj' (injectKO v))\ setObject t (v :: tcb) \\rv. valid_objs'\" + apply (rule setObject_valid_objs') + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma setObject_tcb_at': + "\tcb_at' t'\ setObject t (v :: tcb) \\rv. tcb_at' t'\" + apply (rule obj_at_setObject1) + apply (clarsimp simp: updateObject_default_def return_def in_monad) + apply (simp add: objBits_simps) + done + +lemma setObject_sa_unchanged: + "\\s. P (ksSchedulerAction s)\ setObject t (v :: tcb) \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setObject_def split_def) + apply (wp | simp add: updateObject_default_def)+ + done + +lemma setObject_queues_unchanged: + assumes inv: "\P p q n obj. \P\ updateObject v obj p q n \\r. P\" + shows "\\s. P (ksReadyQueues s)\ setObject t v \\rv s. P (ksReadyQueues s)\" + apply (simp add: setObject_def split_def) + apply (wp inv | simp)+ + done + +lemma setObject_queues_unchanged_tcb[wp]: + "\\s. P (ksReadyQueues s)\ setObject t (v :: tcb) \\rv s. P (ksReadyQueues s)\" + apply (rule setObject_queues_unchanged) + apply (wp|simp add: updateObject_default_def)+ + done + +lemma setObject_queuesL1_unchanged_tcb[wp]: + "\\s. P (ksReadyQueuesL1Bitmap s)\ setObject t (v :: tcb) \\rv s. 
P (ksReadyQueuesL1Bitmap s)\" + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma setObject_queuesL2_unchanged_tcb[wp]: + "\\s. P (ksReadyQueuesL2Bitmap s)\ setObject t (v :: tcb) \\rv s. P (ksReadyQueuesL2Bitmap s)\" + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma setObject_tcb_ctes_of[wp]: + "\\s. P (ctes_of s) \ + obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF v) t s\ + setObject t v + \\rv s. P (ctes_of s)\" + apply (rule setObject_ctes_of) + apply (clarsimp simp: updateObject_default_def in_monad prod_eq_iff + obj_at'_def objBits_simps' in_magnitude_check) + apply fastforce + apply (clarsimp simp: updateObject_default_def in_monad prod_eq_iff + obj_at'_def objBits_simps in_magnitude_check bind_def) + done + +lemma setObject_tcb_mdb' [wp]: + "\ valid_mdb' and + obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF v) t\ + setObject t (v :: tcb) + \\rv. valid_mdb'\" + unfolding valid_mdb'_def pred_conj_def + by (rule setObject_tcb_ctes_of) + +lemma setObject_tcb_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (t := tcb_st_refs_of' (tcbState v) + \ tcb_bound_refs' (tcbBoundNotification v)))\ + setObject t (v :: tcb) \\rv s. P (state_refs_of' s)\" + by (wp setObject_state_refs_of', + simp_all add: objBits_simps' fun_upd_def) + +lemma setObject_tcb_iflive': + "\\s. if_live_then_nonz_cap' s \ + (live' (injectKO v) \ ex_nonz_cap_to' t s) + \ obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF v) t s\ + setObject t (v :: tcb) + \\rv. if_live_then_nonz_cap'\" + apply (rule setObject_iflive') + apply (simp add: objBits_simps')+ + apply (clarsimp simp: updateObject_default_def in_monad obj_at'_def + in_magnitude_check objBits_simps' prod_eq_iff) + apply fastforce + apply (clarsimp simp: updateObject_default_def bind_def) + done + +lemma setObject_tcb_idle': + "\\s. valid_idle' s \ + (t = ksIdleThread s \ idle_tcb' v)\ + setObject t (v :: tcb) \\rv. valid_idle'\" + apply (rule hoare_pre) + apply (rule setObject_idle') + apply (simp add: objBits_simps')+ + apply (simp add: updateObject_default_inv) + apply (simp add: idle_tcb_ps_def) + done + +lemma setObject_tcb_irq_node'[wp]: + "\\s. P (irq_node' s)\ setObject t (v :: tcb) \\rv s. P (irq_node' s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_tcb_ifunsafe': + "\if_unsafe_then_cap' and obj_at' (\t. \(getF, setF) \ ran tcb_cte_cases. getF t = getF v) t\ + setObject t (v :: tcb) \\rv. if_unsafe_then_cap'\" + unfolding pred_conj_def + apply (rule setObject_ifunsafe') + apply (clarsimp simp: updateObject_default_def in_monad obj_at'_def + in_magnitude_check objBits_simps' prod_eq_iff) + apply fastforce + apply (clarsimp simp: updateObject_default_def bind_def) + apply wp + done + +lemma setObject_tcb_arch' [wp]: + "\\s. P (ksArchState s)\ setObject t (v :: tcb) \\rv s. P (ksArchState s)\" + apply (simp add: setObject_def split_def updateObject_default_def) + apply wp + apply simp + done + +lemma setObject_tcb_valid_arch' [wp]: + "\valid_arch_state'\ setObject t (v :: tcb) \\rv. valid_arch_state'\" + by (wpsimp wp: valid_arch_state_lift' setObject_typ_at' setObject_ko_wp_at + simp: objBits_simps', rule refl; simp add: pred_conj_def) + (clarsimp simp: is_vcpu'_def ko_wp_at'_def obj_at'_def) + +lemma setObject_tcb_refs' [wp]: + "\\s. P (global_refs' s)\ setObject t (v::tcb) \\rv s. 
P (global_refs' s)\" + apply (clarsimp simp: setObject_def split_def updateObject_default_def) + apply wp + apply (simp add: global_refs'_def) + done + +lemma setObject_tcb_valid_globals' [wp]: + "\valid_global_refs' and + obj_at' (\tcb. (\(getF, setF) \ ran tcb_cte_cases. getF tcb = getF v)) t\ + setObject t (v :: tcb) + \\rv. valid_global_refs'\" + unfolding pred_conj_def valid_global_refs'_def + apply (rule hoare_lift_Pf2 [where f="global_refs'"]) + apply (rule hoare_lift_Pf2 [where f="gsMaxObjectSize"]) + apply (rule setObject_ctes_of) + apply (clarsimp simp: updateObject_default_def in_monad obj_at'_def + in_magnitude_check objBits_simps' prod_eq_iff) + apply fastforce + apply (clarsimp simp: updateObject_default_def in_monad prod_eq_iff + obj_at'_def objBits_simps in_magnitude_check bind_def) + apply (wp | wp setObject_ksPSpace_only updateObject_default_inv | simp)+ + done + +lemma setObject_tcb_irq_states' [wp]: + "\valid_irq_states'\ setObject t (v :: tcb) \\rv. valid_irq_states'\" + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=ksInterruptState, OF setObject_ksInterrupt]) + apply (simp, rule updateObject_default_inv) + apply (rule hoare_use_eq [where f=ksMachineState, OF setObject_ksMachine]) + apply (simp, rule updateObject_default_inv) + apply wp + apply assumption + done + +lemma getObject_tcb_wp: + "\\s. tcb_at' p s \ (\t::tcb. ko_at' t p s \ Q t s)\ getObject p \Q\" + by (clarsimp simp: getObject_def valid_def in_monad split_def objBits_simps' + loadObject_default_def obj_at'_def in_magnitude_check) + +lemma setObject_tcb_pspace_no_overlap': + "\pspace_no_overlap' w s and tcb_at' t\ + setObject t (tcb::tcb) + \\rv. pspace_no_overlap' w s\" + apply (clarsimp simp: setObject_def split_def valid_def in_monad) + apply (clarsimp simp: obj_at'_def) + apply (erule (1) ps_clear_lookupAround2) + apply (rule order_refl) + apply (erule is_aligned_no_overflow) + apply simp + apply (clarsimp simp: updateObject_default_def in_monad objBits_simps in_magnitude_check) + apply (fastforce simp: pspace_no_overlap'_def objBits_simps) + done + +lemma threadSet_pspace_no_overlap' [wp]: + "\pspace_no_overlap' w s\ threadSet f t \\rv. pspace_no_overlap' w s\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_pspace_no_overlap' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma threadSet_global_refsT: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (F tcb) = getF tcb" + shows "\valid_global_refs'\ threadSet F t \\rv. valid_global_refs'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_valid_globals' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def bspec_split [OF spec [OF x]]) + done + +lemmas threadSet_global_refs[wp] = + threadSet_global_refsT [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_valid_pspace'T_P: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + assumes z: "\tcb. (P \ Q (tcbState tcb)) \ + (\s. valid_tcb_state' (tcbState tcb) s + \ valid_tcb_state' (tcbState (F tcb)) s)" + assumes v: "\tcb. (P \ Q' (tcbBoundNotification tcb)) \ + (\s. valid_bound_ntfn' (tcbBoundNotification tcb) s + \ valid_bound_ntfn' (tcbBoundNotification (F tcb)) s)" + + assumes y: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits + \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + assumes u: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + assumes w: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + assumes w': "\tcb. 
tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes v': "\tcb s. valid_arch_tcb' (tcbArch tcb) s \ valid_arch_tcb' (tcbArch (F tcb)) s" + shows + "\valid_pspace' and (\s. P \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s)\ + threadSet F t + \\rv. valid_pspace'\" + apply (simp add: valid_pspace'_def threadSet_def) + apply (rule hoare_pre, + wp setObject_tcb_valid_objs getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + apply (erule(1) valid_objsE') + apply (clarsimp simp add: valid_obj'_def valid_tcb'_def + bspec_split [OF spec [OF x]] z + split_paired_Ball y u w v w' v') + done + +lemmas threadSet_valid_pspace'T = + threadSet_valid_pspace'T_P[where P=False, simplified] + +lemmas threadSet_valid_pspace' = + threadSet_valid_pspace'T [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_ifunsafe'T: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + shows "\if_unsafe_then_cap'\ threadSet F t \\rv. if_unsafe_then_cap'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_ifunsafe' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def bspec_split [OF spec [OF x]]) + done + +lemmas threadSet_ifunsafe' = + threadSet_ifunsafe'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_state_refs_of'_helper[simp]: + "{r. (r \ tcb_st_refs_of' ts \ + r \ tcb_bound_refs' ntfnptr) \ + snd r = TCBBound} = + tcb_bound_refs' ntfnptr" + by (auto simp: tcb_st_refs_of'_def tcb_bound_refs'_def + split: thread_state.splits) + +lemma threadSet_state_refs_of'_helper'[simp]: + "{r. (r \ tcb_st_refs_of' ts \ + r \ tcb_bound_refs' ntfnptr) \ + snd r \ TCBBound} = + tcb_st_refs_of' ts" + by (auto simp: tcb_st_refs_of'_def tcb_bound_refs'_def + split: thread_state.splits) + +lemma threadSet_state_refs_of'T_P: + assumes x: "\tcb. (P' \ Q (tcbState tcb)) \ + tcb_st_refs_of' (tcbState (F tcb)) + = f' (tcb_st_refs_of' (tcbState tcb))" + assumes y: "\tcb. (P' \ Q' (tcbBoundNotification tcb)) \ + tcb_bound_refs' (tcbBoundNotification (F tcb)) + = g' (tcb_bound_refs' (tcbBoundNotification tcb))" + shows + "\\s. P ((state_refs_of' s) (t := f' {r \ state_refs_of' s t. snd r \ TCBBound} + \ g' {r \ state_refs_of' s t. snd r = TCBBound})) + \ (P' \ st_tcb_at' Q t s \ bound_tcb_at' Q' t s)\ + threadSet F t + \\rv s. P (state_refs_of' s)\" + apply (simp add: threadSet_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def + elim!: rsubst[where P=P] intro!: ext) + apply (cut_tac s=s and p=t and 'a=tcb in ko_at_state_refs_ofD') + apply (simp add: obj_at'_def) + apply (clarsimp simp: x y) + done + +lemmas threadSet_state_refs_of'T = + threadSet_state_refs_of'T_P [where P'=False, simplified] + +lemmas threadSet_state_refs_of' = + threadSet_state_refs_of'T [OF all_tcbI all_tcbI] + +lemma threadSet_state_hyp_refs_of': + assumes y: "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows "\\s. P (state_hyp_refs_of' s)\ threadSet F t \\rv s. P (state_hyp_refs_of' s)\" + apply (simp add: threadSet_def) + apply (wpsimp wp: setObject_state_hyp_refs_of' getObject_tcb_wp + simp: objBits_simps' obj_at'_def state_hyp_refs_of'_def) + apply (clarsimp simp:objBits_simps' y state_hyp_refs_of'_def + elim!: rsubst[where P=P] intro!: ext)+ + done + +lemma threadSet_iflive'T: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + shows + "\\s. if_live_then_nonz_cap' s + \ ((\tcb. 
\ bound (tcbBoundNotification tcb) \ bound (tcbBoundNotification (F tcb)) + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. (tcbState tcb = Inactive \ tcbState tcb = IdleThreadState) + \ tcbState (F tcb) \ Inactive + \ tcbState (F tcb) \ IdleThreadState + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb) + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s) + \ ((\tcb. \ bound (atcbVCPUPtr (tcbArch tcb)) \ bound (atcbVCPUPtr (tcbArch (F tcb))) + \ ko_at' tcb t s) \ ex_nonz_cap_to' t s)\ + threadSet F t + \\rv. if_live_then_nonz_cap'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_iflive' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def live'_def hyp_live'_def) + apply (subst conj_assoc[symmetric], subst imp_disjL[symmetric])+ + apply (rule conjI) + apply (rule impI, clarsimp) + apply (erule if_live_then_nonz_capE') + apply (clarsimp simp: ko_wp_at'_def live'_def hyp_live'_def) + apply (clarsimp simp: bspec_split [OF spec [OF x]]) + done + +lemmas threadSet_iflive' = + threadSet_iflive'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_cte_wp_at'T: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (F tcb) = getF tcb" + shows "\\s. P' (cte_wp_at' P p s)\ threadSet F t \\rv s. P' (cte_wp_at' P p s)\" + apply (simp add: threadSet_def) + apply (rule hoare_seq_ext [where B="\rv s. P' (cte_wp_at' P p s) \ obj_at' ((=) rv) t s"]) + apply (rename_tac tcb) + apply (rule setObject_cte_wp_at2') + apply (clarsimp simp: updateObject_default_def in_monad objBits_simps' + obj_at'_def in_magnitude_check prod_eq_iff) + apply (case_tac tcb, clarsimp simp: bspec_split [OF spec [OF x]]) + apply (clarsimp simp: updateObject_default_def in_monad bind_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemmas threadSet_cte_wp_at' = + threadSet_cte_wp_at'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma threadSet_ctes_ofT: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (F tcb) = getF tcb" + shows "\\s. P (ctes_of s)\ threadSet F t \\rv s. P (ctes_of s)\" + apply (simp add: threadSet_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + apply (case_tac obj) + apply (simp add: bspec_split [OF spec [OF x]]) + done + +lemmas threadSet_ctes_of = + threadSet_ctes_ofT [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemmas threadSet_cap_to' = ex_nonz_cap_to_pres' [OF threadSet_cte_wp_at'] + +lemma threadSet_idle'T: + assumes x: "\tcb. \(getF, setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + shows + "\\s. valid_idle' s + \ (t = ksIdleThread s \ + (\tcb. ko_at' tcb t s \ idle_tcb' tcb \ idle_tcb' (F tcb)))\ + threadSet F t + \\rv. valid_idle'\" + apply (simp add: threadSet_def) + apply (wp setObject_tcb_idle' getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def valid_idle'_def pred_tcb_at'_def) + done + +lemmas threadSet_idle' = + threadSet_idle'T [OF all_tcbI, OF ball_tcb_cte_casesI] + +lemma set_tcb_valid_bitmapQ[wp]: + "\ valid_bitmapQ \ setObject t (f tcb :: tcb) \\_. valid_bitmapQ \" + apply (rule setObject_tcb_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_tcb_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ setObject t (f tcb :: tcb) \\_. 
bitmapQ_no_L1_orphans \" + apply (rule setObject_tcb_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma set_tcb_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ setObject t (f tcb :: tcb) \\_. bitmapQ_no_L2_orphans \" + apply (rule setObject_tcb_pre) + apply (simp add: bitmapQ_defs setObject_def split_def) + apply (wp hoare_Ball_helper hoare_vcg_all_lift updateObject_default_inv | simp add: bitmapQ_def)+ + done + +lemma threadSet_valid_queues_no_bitmap: + "\ valid_queues_no_bitmap and + (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ + \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) + \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ + \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s + \ t \ set (ksReadyQueues s (d, p)) + )\ + threadSet f t + \\rv. valid_queues_no_bitmap \" + apply (simp add: threadSet_def) + apply wp + apply (simp add: Invariants_H.valid_queues_no_bitmap_def' pred_tcb_at'_def) + + apply (wp setObject_queues_unchanged_tcb + hoare_Ball_helper + hoare_vcg_all_lift + setObject_tcb_strongest)[1] + apply (wp getObject_tcb_wp) + apply (clarsimp simp: valid_queues_no_bitmap_def' pred_tcb_at'_def) + apply (clarsimp simp: obj_at'_def) + apply (fastforce) + done + +lemma threadSet_valid_bitmapQ[wp]: + "\ valid_bitmapQ \ threadSet f t \ \rv. valid_bitmapQ \" + unfolding bitmapQ_defs threadSet_def + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma threadSet_valid_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ threadSet f t \ \rv. bitmapQ_no_L1_orphans \" + unfolding bitmapQ_defs threadSet_def + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma threadSet_valid_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ threadSet f t \ \rv. bitmapQ_no_L2_orphans \" + unfolding bitmapQ_defs threadSet_def + by (clarsimp simp: setObject_def split_def) + (wp | simp add: updateObject_default_def)+ + +lemma threadSet_valid_queues: + "\Invariants_H.valid_queues and + (\s. \d p. (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ + \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) + \ obj_at' (\tcb. (inQ d p tcb \ runnable' (tcbState tcb)) \ + \(inQ d p (f tcb) \ runnable' (tcbState (f tcb)))) t s + \ t \ set (ksReadyQueues s (d, p)) + )\ + threadSet f t + \\rv. Invariants_H.valid_queues\" + unfolding valid_queues_def + by (wp threadSet_valid_queues_no_bitmap;simp) + +definition + addToQs :: "(Structures_H.tcb \ Structures_H.tcb) + \ machine_word \ (domain \ priority \ machine_word list) + \ (domain \ priority \ machine_word list)" +where + "addToQs F t \ \qs (qdom, prio). if (\ko. \ inQ qdom prio (F ko)) + then t # qs (qdom, prio) + else qs (qdom, prio)" + +lemma addToQs_set_def: + "(t' \ set (addToQs F t qs (qdom, prio))) = (t' \ set (qs (qdom, prio)) + \ (t' = t \ (\ko. \ inQ qdom prio (F ko))))" + by (auto simp add: addToQs_def) + +lemma threadSet_valid_queues_addToQs: + "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko + \ t \ set (ksReadyQueues s (qdom, prio))) + \ valid_queues' (ksReadyQueues_update (addToQs F t) s)\ + threadSet F t + \\rv. 
valid_queues'\" + apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def + split del: if_split) + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ + apply (wp getObject_tcb_wp updateObject_default_inv + | simp split del: if_split)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def objBits_simps addToQs_set_def + split del: if_split cong: if_cong) + apply (fastforce split: if_split_asm) + done + +lemma threadSet_valid_queues_Qf: + "\\s. (\ko qdom prio. ko_at' ko t s \ inQ qdom prio (F ko) \ \ inQ qdom prio ko + \ t \ set (ksReadyQueues s (qdom, prio))) + \ valid_queues' (ksReadyQueues_update Qf s) + \ (\prio. set (Qf (ksReadyQueues s) prio) + \ set (addToQs F t (ksReadyQueues s) prio))\ + threadSet F t + \\rv. valid_queues'\" + apply (wp threadSet_valid_queues_addToQs) + apply (clarsimp simp: valid_queues'_def subset_iff) + done + +lemma addToQs_subset: + "set (qs p) \ set (addToQs F t qs p)" +by (clarsimp simp: addToQs_def split_def) + +lemmas threadSet_valid_queues' + = threadSet_valid_queues_Qf + [where Qf=id, simplified ksReadyQueues_update_id + id_apply addToQs_subset simp_thms] + +lemma threadSet_cur: + "\\s. cur_tcb' s\ threadSet f t \\rv s. cur_tcb' s\" + apply (simp add: threadSet_def cur_tcb'_def) + apply (wp hoare_lift_Pf [OF setObject_tcb_at'] setObject_ct_inv) + done + +lemma modifyReadyQueuesL1Bitmap_obj_at[wp]: + "\obj_at' P t\ modifyReadyQueuesL1Bitmap a b \\rv. obj_at' P t\" + apply (simp add: modifyReadyQueuesL1Bitmap_def getReadyQueuesL1Bitmap_def) + apply wp + apply (fastforce intro: obj_at'_pspaceI) + done + +crunches setThreadState, setBoundNotification + for valid_arch' [wp]: valid_arch_state' + (simp: unless_def crunch_simps) + +crunch ksInterrupt'[wp]: threadSet "\s. P (ksInterruptState s)" + (wp: setObject_ksInterrupt updateObject_default_inv) + +crunch ksArchState[wp]: threadSet "\s. P (ksArchState s)" + +lemma threadSet_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ threadSet t F \\rv s. P (typ_at' T p s)\" + by (simp add: threadSet_def, wp setObject_typ_at') + +lemmas threadSet_typ_at_lifts[wp] = typ_at_lifts [OF threadSet_typ_at'] + +crunch irq_states' [wp]: threadSet valid_irq_states' + +crunch pspace_domain_valid [wp]: threadSet "pspace_domain_valid" + +lemma threadSet_obj_at'_really_strongest: + "\\s. tcb_at' t s \ obj_at' (\obj. if t = t' then P (f obj) else P obj) + t' s\ threadSet f t \\rv. obj_at' P t'\" + apply (simp add: threadSet_def) + apply (rule hoare_wp_splits) + apply (rule setObject_tcb_strongest) + apply (simp only: imp_conv_disj) + apply (subst simp_thms(32)[symmetric], rule hoare_vcg_disj_lift) + apply (rule hoare_post_imp [where Q="\rv s. \ tcb_at' t s \ tcb_at' t s"]) + apply simp + apply (subst simp_thms(21)[symmetric], rule hoare_vcg_conj_lift) + apply (rule getObject_inv_tcb) + apply (rule hoare_strengthen_post [OF getObject_ko_at]) + apply simp + apply (simp add: objBits_simps') + apply (erule obj_at'_weakenE) + apply simp + apply (cases "t = t'", simp_all) + apply (rule OMG_getObject_tcb) + apply wp + done + +(* FIXME: move *) +lemma tcb_at_typ_at': + "tcb_at' p s = typ_at' TCBT p s" + unfolding typ_at'_def + apply (rule iffI) + apply (clarsimp simp add: obj_at'_def ko_wp_at'_def) + apply (clarsimp simp add: obj_at'_def ko_wp_at'_def) + apply (case_tac ko; simp) + done + +(* FIXME: move *) +lemma not_obj_at': + "(\obj_at' (\tcb::tcb. 
P tcb) t s) = (\typ_at' TCBT t s \ obj_at' (Not \ P) t s)" + apply (simp add: obj_at'_real_def typ_at'_def ko_wp_at'_def objBits_simps) + apply (rule iffI) + apply (clarsimp) + apply (case_tac ko) + apply (clarsimp)+ + done + +(* FIXME: move *) +lemma not_obj_at_elim': + assumes typat: "typ_at' TCBT t s" + and nobj: "\obj_at' (\tcb::tcb. P tcb) t s" + shows "obj_at' (Not \ P) t s" + using assms + apply - + apply (drule not_obj_at' [THEN iffD1]) + apply (clarsimp) + done + +(* FIXME: move *) +lemmas tcb_at_not_obj_at_elim' = not_obj_at_elim' [OF tcb_at_typ_at' [THEN iffD1]] + +(* FIXME: move *) +lemma lift_neg_pred_tcb_at': + assumes typat: "\P T p. \\s. P (typ_at' T p s)\ f \\_ s. P (typ_at' T p s)\" + and sttcb: "\S p. \pred_tcb_at' proj S p\ f \\_. pred_tcb_at' proj S p\" + shows "\\s. P (pred_tcb_at' proj S p s)\ f \\_ s. P (pred_tcb_at' proj S p s)\" + apply (rule_tac P=P in P_bool_lift) + apply (rule sttcb) + apply (simp add: pred_tcb_at'_def not_obj_at') + apply (wp hoare_convert_imp) + apply (rule typat) + prefer 2 + apply assumption + apply (rule hoare_chain [OF sttcb]) + apply (fastforce simp: pred_tcb_at'_def comp_def) + apply (clarsimp simp: pred_tcb_at'_def elim!: obj_at'_weakenE) + done + +lemma threadSet_obj_at'_strongish[wp]: + "\obj_at' (\obj. if t = t' then P (f obj) else P obj) t'\ + threadSet f t \\rv. obj_at' P t'\" + by (simp add: hoare_weaken_pre [OF threadSet_obj_at'_really_strongest]) + +lemma threadSet_pred_tcb_no_state: + assumes "\tcb. proj (tcb_to_itcb' (f tcb)) = proj (tcb_to_itcb' tcb)" + shows "\\s. P (pred_tcb_at' proj P' t' s)\ threadSet f t \\rv s. P (pred_tcb_at' proj P' t' s)\" +proof - + have pos: "\P' t' t. + \pred_tcb_at' proj P' t'\ threadSet f t \\rv. pred_tcb_at' proj P' t'\" + apply (simp add: pred_tcb_at'_def) + apply (wp threadSet_obj_at'_strongish) + apply clarsimp + apply (erule obj_at'_weakenE) + apply (insert assms) + apply clarsimp + done + show ?thesis + apply (rule_tac P=P in P_bool_lift) + apply (rule pos) + apply (rule_tac Q="\_ s. \ tcb_at' t' s \ pred_tcb_at' proj (\tcb. \ P' tcb) t' s" + in hoare_post_imp) + apply (erule disjE) + apply (clarsimp dest!: pred_tcb_at') + apply (clarsimp) + apply (frule_tac P=P' and Q="\tcb. \ P' tcb" in pred_tcb_at_conj') + apply (clarsimp)+ + apply (wp hoare_convert_imp) + apply (simp add: typ_at_tcb' [symmetric]) + apply (wp pos)+ + apply (clarsimp simp: pred_tcb_at'_def not_obj_at' elim!: obj_at'_weakenE) + done +qed + +lemma threadSet_ct[wp]: + "\\s. P (ksCurThread s)\ threadSet f t \\rv s. P (ksCurThread s)\" + apply (simp add: threadSet_def) + apply (wp setObject_ct_inv) + done + +lemma threadSet_cd[wp]: + "\\s. P (ksCurDomain s)\ threadSet f t \\rv s. P (ksCurDomain s)\" + apply (simp add: threadSet_def) + apply (wp setObject_cd_inv) + done + + +lemma threadSet_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ threadSet f t \\rv s. P (ksDomSchedule s)\" + apply (simp add: threadSet_def) + apply (wp setObject_ksDomSchedule_inv) + done + +lemma threadSet_it[wp]: + "\\s. P (ksIdleThread s)\ threadSet f t \\rv s. P (ksIdleThread s)\" + apply (simp add: threadSet_def) + apply (wp setObject_it_inv) + done + +lemma threadSet_sch_act: + "(\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb) \ + \\s. sch_act_wf (ksSchedulerAction s) s\ + threadSet F t + \\rv s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (wp sch_act_wf_lift threadSet_pred_tcb_no_state | simp add: tcb_in_cur_domain'_def)+ + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp threadSet_obj_at'_strongish | simp)+ + done + +lemma threadSet_sch_actT_P: + assumes z: "\ P \ (\tcb. tcbState (F tcb) = tcbState tcb + \ tcbDomain (F tcb) = tcbDomain tcb)" + assumes z': "P \ (\tcb. tcbState (F tcb) = Inactive \ tcbDomain (F tcb) = tcbDomain tcb ) + \ (\st. Q st \ st = Inactive)" + shows "\\s. sch_act_wf (ksSchedulerAction s) s \ (P \ st_tcb_at' Q t s)\ + threadSet F t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + using z z' + apply (case_tac P, simp_all add: threadSet_sch_act) + apply (clarsimp simp: valid_def) + apply (frule_tac P1="\sa. sch_act_wf sa s" + in use_valid [OF _ threadSet_nosch], assumption) + apply (frule_tac P1="(=) (ksCurThread s)" + in use_valid [OF _ threadSet_ct], rule refl) + apply (frule_tac P1="(=) (ksCurDomain s)" + in use_valid [OF _ threadSet_cd], rule refl) + apply (case_tac "ksSchedulerAction b", + simp_all add: ct_in_state'_def pred_tcb_at'_def) + apply (subgoal_tac "t \ ksCurThread s") + apply (drule_tac t'1="ksCurThread s" + and P1="activatable' \ tcbState" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp simp: o_def) + apply (clarsimp simp: o_def) + apply (fastforce simp: obj_at'_def) + apply (rename_tac word) + apply (subgoal_tac "t \ word") + apply (frule_tac t'1=word + and P1="runnable' \ tcbState" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp simp: o_def) + apply (rule conjI) + apply (clarsimp simp: o_def) + apply (clarsimp simp: tcb_in_cur_domain'_def) + apply (frule_tac t'1=word + and P1="\tcb. ksCurDomain b = tcbDomain tcb" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp simp: o_def)+ + apply (fastforce simp: obj_at'_def) + done + +lemma threadSet_ksMachine[wp]: + "\\s. P (ksMachineState s)\ threadSet F t \\_ s. P (ksMachineState s)\" + apply (simp add: threadSet_def) + by (wp setObject_ksMachine updateObject_default_inv | + simp)+ + +lemma threadSet_vms'[wp]: + "\valid_machine_state'\ threadSet F t \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + by (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) + +lemma threadSet_not_inQ: + "\ct_not_inQ and (\s. (\tcb. tcbQueued (F tcb) \ \ tcbQueued tcb) + \ ksSchedulerAction s = ResumeCurrentThread + \ t \ ksCurThread s)\ + threadSet F t \\_. ct_not_inQ\" + apply (simp add: threadSet_def ct_not_inQ_def) + apply (wp) + apply (rule hoare_convert_imp [OF setObject_nosch]) + apply (rule updateObject_tcb_inv) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest getObject_tcb_wp)+ + apply (case_tac "t = ksCurThread s") + apply (clarsimp simp: obj_at'_def)+ + done + +lemma threadSet_invs_trivial_helper[simp]: + "{r \ state_refs_of' s t. snd r \ TCBBound} + \ {r \ state_refs_of' s t. snd r = TCBBound} = state_refs_of' s t" + by auto + +lemma threadSet_ct_idle_or_in_cur_domain': + "(\tcb. tcbDomain (F tcb) = tcbDomain tcb) \ \ct_idle_or_in_cur_domain'\ threadSet F t \\_. ct_idle_or_in_cur_domain'\" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift| simp)+ + done + +crunch ksDomScheduleIdx[wp]: threadSet "\s. P (ksDomScheduleIdx s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) +crunch gsUntypedZeroRanges[wp]: threadSet "\s. 
P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma setObject_tcb_ksDomScheduleIdx [wp]: + "\\s. P (ksDomScheduleIdx s) \ setObject t (v::tcb) \\_ s. P (ksDomScheduleIdx s)\" + apply (simp add:setObject_def updateObject_default_def in_monad) + apply wpsimp + done + +lemma threadSet_valid_dom_schedule': + "\ valid_dom_schedule'\ threadSet F t \\_. valid_dom_schedule'\" + unfolding threadSet_def + by (wp setObject_ksDomSchedule_inv hoare_Ball_helper) + +lemma threadSet_invs_trivialT: + assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" + assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes r: "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows + "\\s. invs' s \ + (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) \ + (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) \ + ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) \ + (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ + threadSet F t + \\rv. invs'\" +proof - + from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast + note threadSet_sch_actT_P[where P=False, simplified] + have y: "\tcb. 
tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ + valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" + by (auto simp: z) + show ?thesis + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (rule hoare_pre) + apply (wp x w v u b + threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_valid_queues + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_valid_queues' + threadSet_cur + untyped_ranges_zero_lift + |clarsimp simp: y z a r domains cteCaps_of_def valid_arch_tcb'_def | rule refl)+ + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) + by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) +qed + +lemmas threadSet_invs_trivial = + threadSet_invs_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma zobj_refs'_capRange: + "s \' cap \ zobj_refs' cap \ capRange cap" + apply (cases cap; simp add: valid_cap'_def capAligned_def capRange_def is_aligned_no_overflow) + apply (rename_tac aobj_cap) + apply (case_tac aobj_cap; clarsimp dest!: is_aligned_no_overflow) + done + +lemma global'_no_ex_cap: + "\valid_global_refs' s; valid_pspace' s\ \ \ ex_nonz_cap_to' (ksIdleThread s) s" + apply (clarsimp simp: ex_nonz_cap_to'_def valid_global_refs'_def valid_refs'_def2 valid_pspace'_def) + apply (drule cte_wp_at_norm', clarsimp) + apply (frule(1) cte_wp_at_valid_objs_valid_cap', clarsimp) + apply (clarsimp simp: cte_wp_at'_def dest!: zobj_refs'_capRange, blast) + done + +lemma getObject_tcb_sp: + "\P\ getObject r \\t::tcb. P and ko_at' t r\" + by (wp getObject_obj_at'; simp) + +lemma threadSet_valid_objs': + "\valid_objs' and (\s. \tcb. valid_tcb' tcb s \ valid_tcb' (f tcb) s)\ + threadSet f t + \\rv. valid_objs'\" + apply (simp add: threadSet_def) + apply wp + prefer 2 + apply (rule getObject_tcb_sp) + apply (rule hoare_weaken_pre) + apply (rule setObject_tcb_valid_objs) + prefer 2 + apply assumption + apply (clarsimp simp: valid_obj'_def) + apply (frule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def) + apply (clarsimp elim!: obj_at'_weakenE) + done + +lemma atcbVCPUPtr_atcbContextSet_id[simp]: + "atcbVCPUPtr (atcbContextSet f (tcbArch tcb)) = atcbVCPUPtr (tcbArch tcb)" + by (simp add: atcbContextSet_def) + +lemma asUser_corres': + assumes y: "corres_underlying Id False True r \ \ f g" + shows "corres r (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t f) (asUser t g)" +proof - + note arch_tcb_context_get_def[simp] + note atcbContextGet_def[simp] + note arch_tcb_context_set_def[simp] + note atcbContextSet_def[simp] + have L1: "corres (\tcb con. 
(arch_tcb_context_get o tcb_arch) tcb = con) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (gets_the (get_tcb t)) (threadGet (atcbContextGet o tcbArch) t)" + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_gets_the) + apply (simp add: threadGet_def) + apply (rule corres_rel_imp [OF corres_get_tcb]) + apply (simp add: tcb_relation_def arch_tcb_relation_def) + apply (simp add: tcb_at_def)+ + done + have L2: "\tcb tcb' con con'. \ tcb_relation tcb tcb'; con = con'\ + \ tcb_relation (tcb \ tcb_arch := arch_tcb_context_set con (tcb_arch tcb) \) + (tcb' \ tcbArch := atcbContextSet con' (tcbArch tcb') \)" + by (simp add: tcb_relation_def arch_tcb_relation_def) + have L3: "\r add tcb tcb' con con'. \ r () (); con = con'\ \ + corres r (\s. get_tcb add s = Some tcb) + (\s'. (tcb', s') \ fst (getObject add s')) + (set_object add (TCB (tcb \ tcb_arch := arch_tcb_context_set con (tcb_arch tcb) \))) + (setObject add (tcb' \ tcbArch := atcbContextSet con' (tcbArch tcb') \))" + by (rule setObject_update_TCB_corres [OF L2], + (simp add: tcb_cte_cases_def tcb_cap_cases_def cteSizeBits_def exst_same_def)+) + have L4: "\con con'. con = con' \ + corres (\(irv, nc) (irv', nc'). r irv irv' \ nc = nc') + \ \ (select_f (f con)) (select_f (g con'))" + using y + by (fastforce simp: corres_underlying_def select_f_def split_def Id_def) + show ?thesis + apply (rule corres_cross_over_guard[where Q="tcb_at' t"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (simp add: as_user_def asUser_def) + apply (rule corres_guard_imp) + apply (rule_tac r'="\tcb con. (arch_tcb_context_get o tcb_arch) tcb = con" + in corres_split) + apply simp + apply (rule L1[simplified]) + apply (rule corres_split[OF L4]) + apply simp + apply clarsimp + apply (rule corres_split_nor) + apply (simp add: threadSet_def) + apply (rule corres_symb_exec_r) + apply (rule L3[simplified]) + prefer 5 + apply (rule no_fail_pre_and, wp) + apply (wp select_f_inv | simp)+ + done +qed + +lemma asUser_corres: + assumes y: "corres_underlying Id False True r \ \ f g" + shows "corres r (tcb_at t and invs) (tcb_at' t and invs') (as_user t f) (asUser t g)" + apply (rule corres_guard_imp) + apply (rule asUser_corres' [OF y]) + apply (simp add: invs_def valid_state_def valid_pspace_def) + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + done + +lemma asUser_inv: + assumes x: "\P. \P\ f \\x. P\" + shows "\P\ asUser t f \\x. P\" +proof - + have P: "\a b input. (a, b) \ fst (f input) \ b = input" + by (rule use_valid [OF _ x], assumption, rule refl) + have R: "\x. tcbArch_update (\_. tcbArch x) x = x" + by (case_tac x, simp) + show ?thesis + apply (simp add: asUser_def split_def threadGet_def threadSet_def + liftM_def bind_assoc) + apply (clarsimp simp: valid_def in_monad getObject_def setObject_def + loadObject_default_def objBits_simps' + modify_def split_def updateObject_default_def + in_magnitude_check select_f_def + dest!: P) + apply (simp add: R map_upd_triv) + done +qed + +lemma asUser_getRegister_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (getRegister r)) (asUser t (getRegister r))" + apply (rule asUser_corres') + apply (clarsimp simp: getRegister_def) + done + +lemma user_getreg_inv'[wp]: + "\P\ asUser t (getRegister r) \\x. P\" + apply (rule asUser_inv) + apply (simp_all add: getRegister_def) + done + +lemma asUser_typ_at' [wp]: + "\\s. 
P (typ_at' T p s)\ asUser t' f \\rv s. P (typ_at' T p s)\" + by (simp add: asUser_def bind_assoc split_def, wp select_f_inv) + +lemmas asUser_typ_ats[wp] = typ_at_lifts [OF asUser_typ_at'] + +lemma asUser_invs[wp]: + "\invs' and tcb_at' t\ asUser t m \\rv. invs'\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + apply (wp threadSet_invs_trivial hoare_drop_imps | simp)+ + done + +lemma asUser_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ asUser t m \\rv s. P (ksSchedulerAction s)\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + done + +crunch aligned'[wp]: asUser pspace_aligned' + (simp: crunch_simps wp: crunch_wps) +crunch distinct'[wp]: asUser pspace_distinct' + (simp: crunch_simps wp: crunch_wps) + +lemma asUser_valid_objs [wp]: + "\valid_objs'\ asUser t f \\rv. valid_objs'\" + by (simp add: asUser_def split_def) + (wpsimp wp: threadSet_valid_objs' hoare_drop_imps + simp: valid_tcb'_def tcb_cte_cases_def valid_arch_tcb'_def cteSizeBits_def + atcbContextSet_def)+ + +lemma asUser_valid_pspace'[wp]: + "\valid_pspace'\ asUser t m \\rv. valid_pspace'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_valid_pspace' hoare_drop_imps + simp: atcbContextSet_def valid_arch_tcb'_def)+ + done + +lemma asUser_valid_queues[wp]: + "\Invariants_H.valid_queues\ asUser t m \\rv. Invariants_H.valid_queues\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + apply (wp threadSet_valid_queues hoare_drop_imps | simp)+ + done + +lemma asUser_ifunsafe'[wp]: + "\if_unsafe_then_cap'\ asUser t m \\rv. if_unsafe_then_cap'\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_ifunsafe' hoare_drop_imps | simp)+ + done + +lemma asUser_st_refs_of'[wp]: + "\\s. P (state_refs_of' s)\ + asUser t m + \\rv s. P (state_refs_of' s)\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_state_refs_of' hoare_drop_imps | simp)+ + done + +lemma asUser_st_hyp_refs_of'[wp]: + "\\s. P (state_hyp_refs_of' s)\ + asUser t m + \\rv s. P (state_hyp_refs_of' s)\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_state_hyp_refs_of' hoare_drop_imps | simp add: atcbContextSet_def atcbVCPUPtr_atcbContext_update)+ + done + +lemma asUser_iflive'[wp]: + "\if_live_then_nonz_cap'\ asUser t m \\rv. if_live_then_nonz_cap'\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_iflive' hoare_drop_imps | clarsimp | auto)+ + done + +lemma asUser_cur_tcb[wp]: + "\cur_tcb'\ asUser t m \\rv. cur_tcb'\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_cur hoare_drop_imps | simp)+ + done + +lemma asUser_cte_wp_at'[wp]: + "\cte_wp_at' P p\ asUser t m \\rv. cte_wp_at' P p\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_cte_wp_at' hoare_drop_imps | simp)+ + done + +lemma asUser_cap_to'[wp]: + "\ex_nonz_cap_to' p\ asUser t m \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma asUser_pred_tcb_at' [wp]: + "\pred_tcb_at' proj P t\ asUser t' f \\_. pred_tcb_at' proj P t\" + apply (simp add: asUser_def split_def) + apply (wp threadSet_pred_tcb_no_state) + apply (case_tac tcb) + apply (simp add: tcb_to_itcb'_def) + apply (wpsimp wp: select_f_inv)+ + done + +crunches asUser + for ct[wp]: "\s. P (ksCurThread s)" + and cur_domain[wp]: "\s. P (ksCurDomain s)" + (simp: crunch_simps wp: hoare_drop_imps getObject_inv_tcb setObject_ct_inv) + +lemma asUser_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ asUser t m \\_. 
tcb_in_cur_domain' t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp | wpc | simp)+ + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | simp)+ + apply (clarsimp simp: obj_at'_def) + done + +lemma asUser_tcbDomain_inv[wp]: + "\obj_at' (\tcb. P (tcbDomain tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbDomain tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ + done + +lemma asUser_tcbPriority_inv[wp]: + "\obj_at' (\tcb. P (tcbPriority tcb)) t'\ asUser t m \\_. obj_at' (\tcb. P (tcbPriority tcb)) t'\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ + done + +lemma asUser_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + asUser t m \\rv s. sch_act_wf (ksSchedulerAction s) s\" + by (wp sch_act_wf_lift) + +lemma asUser_idle'[wp]: + "\valid_idle'\ asUser t m \\rv. valid_idle'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_idle' select_f_inv) + done + +lemma no_fail_asUser [wp]: + "no_fail \ f \ no_fail (tcb_at' t) (asUser t f)" + apply (simp add: asUser_def split_def) + apply wp + apply (simp add: no_fail_def) + apply (wp hoare_drop_imps) + apply simp + done + +lemma asUser_setRegister_corres: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (as_user t (setRegister r v)) + (asUser t (setRegister r v))" + apply (simp add: setRegister_def) + apply (rule asUser_corres') + apply (rule corres_modify'; simp) + done + +lemma getThreadState_corres: + "corres thread_state_relation (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_thread_state t) (getThreadState t)" + apply (simp add: get_thread_state_def getThreadState_def) + apply (rule threadGet_corres) + apply (simp add: tcb_relation_def) + done + +lemma gts_wf'[wp]: "\tcb_at' t and invs'\ getThreadState t \valid_tcb_state'\" + apply (simp add: getThreadState_def threadGet_def liftM_def) + apply (wp getObject_tcb_wp) + apply clarsimp + apply (drule obj_at_ko_at', clarsimp) + apply (frule ko_at_valid_objs', fastforce, simp) + apply (fastforce simp: valid_obj'_def valid_tcb'_def) + done + +lemma gts_st_tcb_at'[wp]: "\st_tcb_at' P t\ getThreadState t \\rv s. P rv\" + apply (simp add: getThreadState_def threadGet_def liftM_def) + apply wp + apply (rule hoare_chain) + apply (rule obj_at_getObject) + apply (clarsimp simp: loadObject_default_def in_monad) + apply assumption + apply simp + apply (simp add: pred_tcb_at'_def) + done + +lemma gts_inv'[wp]: "\P\ getThreadState t \\rv. P\" + by (simp add: getThreadState_def) wp + +lemma getBoundNotification_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_bound_notification t) (getBoundNotification t)" + apply (simp add: get_bound_notification_def getBoundNotification_def) + apply (rule threadGet_corres) + apply (simp add: tcb_relation_def) + done + +lemma gbn_bound_tcb_at'[wp]: "\bound_tcb_at' P t\ getBoundNotification t \\rv s. P rv\" + apply (simp add: getBoundNotification_def threadGet_def liftM_def) + apply wp + apply (rule hoare_strengthen_post) + apply (rule obj_at_getObject) + apply (clarsimp simp: loadObject_default_def in_monad) + apply simp + apply (simp add: pred_tcb_at'_def) + done + +lemma gbn_inv'[wp]: "\P\ getBoundNotification t \\rv. 
P\" + by (simp add: getBoundNotification_def) wp + +lemma isStopped_def2: + "isStopped t = liftM (Not \ activatable') (getThreadState t)" + apply (unfold isStopped_def fun_app_def) + apply (fold liftM_def) + apply (rule arg_cong [where f="\f. liftM f (getThreadState t)"]) + apply (rule ext) + apply (simp split: Structures_H.thread_state.split) + done + +lemma isRunnable_def2: + "isRunnable t = liftM runnable' (getThreadState t)" + apply (simp add: isRunnable_def isStopped_def2 liftM_def) + apply (rule bind_eqI, rule ext, rule arg_cong) + apply (case_tac state) + apply (clarsimp)+ + done + +lemma isStopped_inv[wp]: + "\P\ isStopped t \\rv. P\" + by (simp add: isStopped_def2 | wp gts_inv')+ + +lemma isRunnable_inv[wp]: + "\P\ isRunnable t \\rv. P\" + by (simp add: isRunnable_def2 | wp gts_inv')+ + +lemma isRunnable_wp[wp]: + "\\s. Q (st_tcb_at' (runnable') t s) s\ isRunnable t \Q\" + apply (simp add: isRunnable_def2) + apply (wpsimp simp: getThreadState_def threadGet_def wp: getObject_tcb_wp) + apply (clarsimp simp: getObject_def valid_def in_monad st_tcb_at'_def + loadObject_default_def obj_at'_def + split_def objBits_simps in_magnitude_check) + done + +lemma setQueue_obj_at[wp]: + "\obj_at' P t\ setQueue d p q \\rv. obj_at' P t\" + apply (simp add: setQueue_def) + apply wp + apply (fastforce intro: obj_at'_pspaceI) + done + +lemma setQueue_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ + setQueue d p ts + \\rv s. P (ksSchedulerAction s)\" + apply (simp add: setQueue_def) + apply wp + apply simp + done + +lemma gq_wp[wp]: "\\s. Q (ksReadyQueues s (d, p)) s\ getQueue d p \Q\" + by (simp add: getQueue_def, wp) + +lemma no_fail_getQueue [wp]: + "no_fail \ (getQueue d p)" + by (simp add: getQueue_def) + +lemma no_fail_setQueue [wp]: + "no_fail \ (setQueue d p xs)" + by (simp add: setQueue_def) + +lemma in_magnitude_check': + "\ is_aligned x n; (1 :: machine_word) < 2 ^ n; ksPSpace s x = Some y; ps = ksPSpace s \ + \ ((v, s') \ fst (magnitudeCheck x (snd (lookupAround2 x ps)) n s)) = + (s' = s \ ps_clear x n s)" + by (simp add: in_magnitude_check) + +lemma cdt_relation_trans_state[simp]: + "cdt_relation (swp cte_at (trans_state f s)) m m' = cdt_relation (swp cte_at s) m m'" + by (simp add: cdt_relation_def) + + +lemma getObject_obj_at_tcb: + "\obj_at' (\t. P t t) p\ getObject p \\t::tcb. obj_at' (P t) p\" + apply (wp getObject_tcb_wp) + apply (drule obj_at_ko_at') + apply clarsimp + apply (rule exI, rule conjI, assumption) + apply (erule obj_at'_weakenE) + apply simp + done + +lemma threadGet_obj_at': + "\obj_at' (\t. P (f t) t) t\ threadGet f t \\rv. obj_at' (P rv) t\" + by (simp add: threadGet_def o_def | wp getObject_obj_at_tcb)+ + +lemma fun_if_triv[simp]: + "(\x. 
if x = y then f y else f x) = f" + by (force) + +lemma corres_get_etcb: + "corres (etcb_relation) (is_etcb_at t) (tcb_at' t) + (gets_the (get_etcb t)) (getObject t)" + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp add: get_etcb_def gets_the_def gets_def + get_def assert_opt_def bind_def + return_def fail_def + split: option.splits + ) + apply (frule in_inv_by_hoareD [OF getObject_inv_tcb]) + apply (clarsimp simp add: is_etcb_at_def obj_at'_def projectKO_def + projectKO_opt_tcb split_def + getObject_def loadObject_default_def in_monad) + apply (case_tac bb) + apply (simp_all add: fail_def return_def) + apply (clarsimp simp add: state_relation_def ekheap_relation_def) + apply (drule bspec) + apply clarsimp + apply blast + apply (clarsimp simp add: other_obj_relation_def lookupAround2_known1) + done + + +lemma ethreadget_corres: + assumes x: "\etcb tcb'. etcb_relation etcb tcb' \ r (f etcb) (f' tcb')" + shows "corres r (is_etcb_at t) (tcb_at' t) (ethread_get f t) (threadGet f' t)" + apply (simp add: ethread_get_def threadGet_def) + apply (fold liftM_def) + apply simp + apply (rule corres_rel_imp) + apply (rule corres_get_etcb) + apply (simp add: x) + done + +lemma setQueue_corres: + "corres dc \ \ (set_tcb_queue d p q) (setQueue d p q)" + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp: setQueue_def in_monad set_tcb_queue_def return_def simpler_modify_def) + apply (fastforce simp: state_relation_def ready_queues_relation_def) + done + + +lemma getQueue_corres: "corres (=) \ \ (get_tcb_queue qdom prio) (getQueue qdom prio)" + apply (clarsimp simp add: getQueue_def state_relation_def ready_queues_relation_def get_tcb_queue_def gets_def) + apply (fold gets_def) + apply simp + done + +lemma no_fail_return: + "no_fail x (return y)" + by wp + +lemma addToBitmap_noop_corres: + "corres dc \ \ (return ()) (addToBitmap d p)" + unfolding addToBitmap_def modifyReadyQueuesL1Bitmap_def getReadyQueuesL1Bitmap_def + modifyReadyQueuesL2Bitmap_def getReadyQueuesL2Bitmap_def + by (rule corres_noop) + (wp | simp add: state_relation_def | rule no_fail_pre)+ + +lemma addToBitmap_if_null_noop_corres: (* used this way in Haskell code *) + "corres dc \ \ (return ()) (if null queue then addToBitmap d p else return ())" + by (cases "null queue", simp_all add: addToBitmap_noop_corres) + +lemma removeFromBitmap_corres_noop: + "corres dc \ \ (return ()) (removeFromBitmap tdom prioa)" + unfolding removeFromBitmap_def + by (rule corres_noop) + (wp | simp add: bitmap_fun_defs state_relation_def | rule no_fail_pre)+ + +crunch typ_at'[wp]: addToBitmap "\s. P (typ_at' T p s)" + (wp: hoare_drop_imps setCTE_typ_at') + +crunch typ_at'[wp]: removeFromBitmap "\s. P (typ_at' T p s)" + (wp: hoare_drop_imps setCTE_typ_at') + +lemmas addToBitmap_typ_ats [wp] = typ_at_lifts [OF addToBitmap_typ_at'] +lemmas removeFromBitmap_typ_ats [wp] = typ_at_lifts [OF removeFromBitmap_typ_at'] + +lemma tcbSchedEnqueue_corres: + "corres dc (tcb_at t and is_etcb_at t and pspace_aligned and pspace_distinct) + (Invariants_H.valid_queues and valid_queues') + (tcb_sched_action (tcb_sched_enqueue) t) (tcbSchedEnqueue t)" +proof - + have ready_queues_helper: + "\t tcb a b. 
\ ekheap a t = Some tcb; obj_at' tcbQueued t b ; valid_queues' b ; + ekheap_relation (ekheap a) (ksPSpace b) \ + \ t \ set (ksReadyQueues b (tcb_domain tcb, tcb_priority tcb))" + unfolding valid_queues'_def + by (fastforce dest: ekheap_relation_absD simp: obj_at'_def inQ_def etcb_relation_def) + + show ?thesis unfolding tcbSchedEnqueue_def tcb_sched_action_def + apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, + where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t"]) + defer + apply (wp threadGet_obj_at'; simp_all) + apply (rule no_fail_pre, wp, blast) + apply (case_tac queued; simp_all) + apply (rule corres_no_failI; simp add: no_fail_return) + apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc + assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def + set_tcb_queue_def simpler_modify_def ready_queues_relation_def + state_relation_def tcb_sched_enqueue_def) + apply (rule ready_queues_helper; auto) + apply (clarsimp simp: when_def) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split[where r'="(=)", OF ethreadget_corres]) + apply (simp add: etcb_relation_def) + apply (rule corres_split[where r'="(=)", OF ethreadget_corres]) + apply (simp add: etcb_relation_def) + apply (rule corres_split[where r'="(=)"]) + apply simp + apply (rule getQueue_corres) + apply (rule corres_split_noop_rhs2) + apply simp + apply (simp add: tcb_sched_enqueue_def split del: if_split) + apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) + apply simp + apply (rule setQueue_corres[unfolded dc_def]) + apply (rule corres_split_noop_rhs2) + apply (fastforce intro: addToBitmap_noop_corres) + apply (fastforce intro: threadSet_corres_noop simp: tcb_relation_def exst_same_def) + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def + project_inject) + done +qed + +definition + weak_sch_act_wf :: "scheduler_action \ kernel_state \ bool" +where + "weak_sch_act_wf sa = (\s. \t. 
sa = SwitchToThread t \ st_tcb_at' runnable' t s \ tcb_in_cur_domain' t s)" + +lemma weak_sch_act_wf_updateDomainTime[simp]: + "weak_sch_act_wf m (ksDomainTime_update f s) = weak_sch_act_wf m s" + by (simp add:weak_sch_act_wf_def tcb_in_cur_domain'_def ) + +lemma setSchedulerAction_corres: + "sched_act_relation sa sa' + \ corres dc \ \ (set_scheduler_action sa) (setSchedulerAction sa')" + apply (simp add: setSchedulerAction_def set_scheduler_action_def) + apply (rule corres_no_failI) + apply wp + apply (clarsimp simp: in_monad simpler_modify_def state_relation_def) + done + +lemma getSchedulerAction_corres: + "corres sched_act_relation \ \ (gets scheduler_action) getSchedulerAction" + apply (simp add: getSchedulerAction_def) + apply (clarsimp simp: state_relation_def) + done + +lemma rescheduleRequired_corres: + "corres dc (weak_valid_sched_action and valid_etcbs and pspace_aligned and pspace_distinct) + (Invariants_H.valid_queues and valid_queues') + (reschedule_required) rescheduleRequired" + apply (simp add: rescheduleRequired_def reschedule_required_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getSchedulerAction_corres]) + apply (rule_tac P="case action of switch_thread t \ P t | _ \ \" + and P'="case actiona of SwitchToThread t \ P' t | _ \ \" for P P' + in corres_split[where r'=dc]) + apply (case_tac action) + apply simp + apply simp + apply (rule tcbSchedEnqueue_corres) + apply simp + apply (rule setSchedulerAction_corres) + apply simp + apply (wp | wpc | simp)+ + apply (force dest: st_tcb_weakenE simp: in_monad weak_valid_sched_action_def valid_etcbs_def st_tcb_at_def obj_at_def is_tcb + split: Deterministic_A.scheduler_action.split) + apply (clarsimp split: scheduler_action.splits) + done + +lemma rescheduleRequired_corres_simple: + "corres dc \ sch_act_simple + (set_scheduler_action choose_new_thread) rescheduleRequired" + apply (simp add: rescheduleRequired_def) + apply (rule corres_symb_exec_r[where Q'="\rv s. rv = ResumeCurrentThread \ rv = ChooseNewThread"]) + apply (rule corres_symb_exec_r) + apply (rule setSchedulerAction_corres, simp) + apply (wp | clarsimp split: scheduler_action.split)+ + apply (wp | clarsimp simp: sch_act_simple_def split: scheduler_action.split)+ + apply (simp add: getSchedulerAction_def) + done + +lemma weak_sch_act_wf_lift: + assumes pre: "\P. \\s. P (sa s)\ f \\rv s. P (sa s)\" + "\t. \st_tcb_at' runnable' t\ f \\rv. st_tcb_at' runnable' t\" + "\t. \tcb_in_cur_domain' t\ f \\rv. tcb_in_cur_domain' t\" + shows "\\s. weak_sch_act_wf (sa s) s\ f \\rv s. weak_sch_act_wf (sa s) s\" + apply (simp only: weak_sch_act_wf_def imp_conv_disj) + apply (intro hoare_vcg_all_lift hoare_vcg_conj_lift hoare_vcg_disj_lift pre | simp)+ + done + +lemma asUser_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + asUser t m \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + by (wp weak_sch_act_wf_lift) + +lemma doMachineOp_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + doMachineOp m \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + by (simp add: doMachineOp_def split_def tcb_in_cur_domain'_def | wp weak_sch_act_wf_lift)+ + +lemma weak_sch_act_wf_setQueue[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s \ + setQueue qdom prio queue + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s \" + by (simp add: setQueue_def weak_sch_act_wf_def tcb_in_cur_domain'_def | wp)+ + +lemma threadSet_tcbDomain_triv: + assumes "\tcb. tcbDomain (f tcb) = tcbDomain tcb" + shows "\tcb_in_cur_domain' t'\ threadSet f t \\_. 
tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | simp add: assms)+ + done + +lemmas threadSet_weak_sch_act_wf + = weak_sch_act_wf_lift[OF threadSet_nosch threadSet_pred_tcb_no_state threadSet_tcbDomain_triv, simplified] + +lemma removeFromBitmap_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ removeFromBitmap d p \\rv s. P (ksSchedulerAction s)\" + unfolding removeFromBitmap_def + by (simp add: bitmap_fun_defs|wp setObject_nosch)+ + +lemma addToBitmap_nosch[wp]: + "\\s. P (ksSchedulerAction s)\ addToBitmap d p \\rv s. P (ksSchedulerAction s)\" + unfolding addToBitmap_def + by (simp add: bitmap_fun_defs|wp setObject_nosch)+ + +lemmas removeFromBitmap_weak_sch_act_wf[wp] + = weak_sch_act_wf_lift[OF removeFromBitmap_nosch] + +lemmas addToBitmap_weak_sch_act_wf[wp] + = weak_sch_act_wf_lift[OF addToBitmap_nosch] + +crunch st_tcb_at'[wp]: removeFromBitmap "st_tcb_at' P t" +crunch pred_tcb_at'[wp]: removeFromBitmap "pred_tcb_at' proj P t" + +crunch not_st_tcb_at'[wp]: removeFromBitmap "\s. \ (st_tcb_at' P' t) s" +crunch not_pred_tcb_at'[wp]: removeFromBitmap "\s. \ (pred_tcb_at' proj P' t) s" + +crunch st_tcb_at'[wp]: addToBitmap "st_tcb_at' P' t" +crunch pred_tcb_at'[wp]: addToBitmap "pred_tcb_at' proj P' t" + +crunch not_st_tcb_at'[wp]: addToBitmap "\s. \ (st_tcb_at' P' t) s" +crunch not_pred_tcb_at'[wp]: addToBitmap "\s. \ (pred_tcb_at' proj P' t) s" + +crunch obj_at'[wp]: removeFromBitmap "obj_at' P t" + +crunch obj_at'[wp]: addToBitmap "obj_at' P t" + +lemma removeFromBitmap_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ removeFromBitmap tdom prio \\ya. tcb_in_cur_domain' t\" + unfolding tcb_in_cur_domain'_def removeFromBitmap_def + apply (rule_tac f="\s. ksCurDomain s" in hoare_lift_Pf) + apply (wp setObject_cte_obj_at_tcb' | simp add: bitmap_fun_defs)+ + done + +lemma addToBitmap_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ addToBitmap tdom prio \\ya. tcb_in_cur_domain' t\" + unfolding tcb_in_cur_domain'_def addToBitmap_def + apply (rule_tac f="\s. ksCurDomain s" in hoare_lift_Pf) + apply (wp setObject_cte_obj_at_tcb' | simp add: bitmap_fun_defs)+ + done + +lemma tcbSchedDequeue_weak_sch_act_wf[wp]: + "\ \s. weak_sch_act_wf (ksSchedulerAction s) s \ tcbSchedDequeue a \ \_ s. weak_sch_act_wf (ksSchedulerAction s) s \" + apply (simp add: tcbSchedDequeue_def) + apply (wp threadSet_weak_sch_act_wf removeFromBitmap_weak_sch_act_wf | simp add: crunch_simps)+ + done + +lemma dequeue_nothing_eq[simp]: + "t \ set list \ tcb_sched_dequeue t list = list" + apply (clarsimp simp: tcb_sched_dequeue_def) + apply (induct list) + apply simp + apply clarsimp + done + +lemma gets_the_exec: "f s \ None \ (do x \ gets_the f; g x od) s = g (the (f s)) s" + apply (clarsimp simp add: gets_the_def bind_def gets_def get_def + return_def assert_opt_def) + done + +lemma tcbSchedDequeue_corres: + "corres dc (is_etcb_at t and tcb_at t and pspace_aligned and pspace_distinct) + (Invariants_H.valid_queues) + (tcb_sched_action tcb_sched_dequeue t) (tcbSchedDequeue t)" + apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (simp only: tcbSchedDequeue_def tcb_sched_action_def) + apply (rule corres_symb_exec_r[OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and obj_at' (\obj. 
tcbQueued obj = rv) t"]) + defer + apply (wp threadGet_obj_at', simp, simp) + apply (rule no_fail_pre, wp, simp) + apply (case_tac queued) + defer + apply (simp add: when_def) + apply (rule corres_no_failI) + apply (wp) + apply (clarsimp simp: in_monad ethread_get_def set_tcb_queue_def is_etcb_at_def state_relation_def) + apply (subgoal_tac "t \ set (ready_queues a (tcb_domain y) (tcb_priority y))") + prefer 2 + subgoal by (force simp: tcb_sched_dequeue_def Invariants_H.valid_queues_def valid_queues_no_bitmap_def + ready_queues_relation_def obj_at'_def inQ_def project_inject) + apply (subst gets_the_exec) + apply (simp add: get_etcb_def) + apply (subst gets_the_exec) + apply (simp add: get_etcb_def) + apply (simp add: exec_gets simpler_modify_def get_etcb_def ready_queues_relation_def cong: if_cong get_tcb_queue_def) + apply (simp add: when_def) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="(=)"]) + apply (rule ethreadget_corres, simp add: etcb_relation_def) + apply (rule corres_split[where r'="(=)"]) + apply (rule ethreadget_corres, simp add: etcb_relation_def) + apply (rule corres_split[where r'="(=)"]) + apply (simp, rule getQueue_corres) + apply (rule corres_split_noop_rhs2) + apply (simp add: tcb_sched_dequeue_def) + apply (rule setQueue_corres) + apply (rule corres_split_noop_rhs) + apply (clarsimp, rule removeFromBitmap_corres_noop) + apply (rule threadSet_corres_noop; simp_all add: tcb_relation_def exst_same_def) + apply (wp | simp)+ + done + +lemma thread_get_test: "do cur_ts \ get_thread_state cur; g (test cur_ts) od = + do t \ (thread_get (\tcb. test (tcb_state tcb)) cur); g t od" + apply (simp add: get_thread_state_def thread_get_def) + done + +lemma thread_get_isRunnable_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (\tcb. 
runnable (tcb_state tcb)) t) (isRunnable t)" + apply (simp add: isRunnable_def getThreadState_def threadGet_def + thread_get_def) + apply (fold liftM_def) + apply simp + apply (rule corres_rel_imp) + apply (rule getObject_TCB_corres) + apply (clarsimp simp add: tcb_relation_def thread_state_relation_def) + apply (case_tac "tcb_state x",simp_all) + done + +lemma setThreadState_corres: + "thread_state_relation ts ts' \ + corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (set_thread_state t ts) (setThreadState ts' t)" + (is "?tsr \ corres dc ?Pre ?Pre' ?sts ?sts'") + apply (simp add: set_thread_state_def setThreadState_def) + apply (simp add: set_thread_state_ext_def[abs_def]) + apply (subst bind_assoc[symmetric], subst thread_set_def[simplified, symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'=dc]) + apply (rule threadset_corres, (simp add: tcb_relation_def exst_same_def)+) + apply (subst thread_get_test[where test="runnable"]) + apply (rule corres_split[OF thread_get_isRunnable_corres]) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split[OF getSchedulerAction_corres]) + apply (simp only: when_def) + apply (rule corres_if[where Q=\ and Q'=\]) + apply (rule iffI) + apply clarsimp+ + apply (case_tac rva,simp_all)[1] + apply (wp rescheduleRequired_corres_simple corres_return_trivial | simp)+ + apply (wp hoare_vcg_conj_lift[where Q'="\\"] | simp add: sch_act_simple_def)+ + done + +lemma setBoundNotification_corres: + "corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (set_bound_notification t ntfn) (setBoundNotification ntfn t)" + apply (simp add: set_bound_notification_def setBoundNotification_def) + apply (subst thread_set_def[simplified, symmetric]) + apply (rule threadset_corres, simp_all add:tcb_relation_def exst_same_def) + done + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for tcb'[wp]: "tcb_at' addr" + +crunches rescheduleRequired, removeFromBitmap + for valid_objs'[wp]: valid_objs' + (simp: unless_def crunch_simps) + + +lemma tcbSchedDequeue_valid_objs' [wp]: "\ valid_objs' \ tcbSchedDequeue t \\_. valid_objs' \" + unfolding tcbSchedDequeue_def + apply (wp threadSet_valid_objs') + apply (clarsimp simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + apply wp + apply (simp add: if_apply_def2) + apply (wp hoare_drop_imps) + apply (wp | simp cong: if_cong add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def if_apply_def2)+ + done + +lemma sts_valid_objs': + "\valid_objs' and valid_tcb_state' st\ + setThreadState st t + \\rv. valid_objs'\" + apply (simp add: setThreadState_def setQueue_def isRunnable_def isStopped_def) + apply (wp threadSet_valid_objs') + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + apply (wp threadSet_valid_objs' | simp)+ + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma sbn_valid_objs': + "\valid_objs' and valid_bound_ntfn' ntfn\ + setBoundNotification ntfn t + \\rv. valid_objs'\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_valid_objs') + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma ssa_wp[wp]: + "\\s. P (s \ksSchedulerAction := sa\)\ setSchedulerAction sa \\_. P\" + by (wpsimp simp: setSchedulerAction_def) + +crunches rescheduleRequired, tcbSchedDequeue + for aligned'[wp]: "pspace_aligned'" + and distinct'[wp]: "pspace_distinct'" + and ctes_of[wp]: "\s. 
P (ctes_of s)" + +crunches rescheduleRequired, tcbSchedDequeue + for no_0_obj'[wp]: "no_0_obj'" + +lemma sts'_valid_pspace'_inv[wp]: + "\ valid_pspace' and tcb_at' t and valid_tcb_state' st \ + setThreadState st t + \ \rv. valid_pspace' \" + apply (simp add: valid_pspace'_def) + apply (rule hoare_pre) + apply (wp sts_valid_objs') + apply (simp add: setThreadState_def threadSet_def + setQueue_def bind_assoc valid_mdb'_def) + apply (wp getObject_obj_at_tcb | simp)+ + apply (clarsimp simp: valid_mdb'_def) + apply (drule obj_at_ko_at') + apply clarsimp + apply (erule obj_at'_weakenE) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + done + +crunch ct[wp]: setQueue "\s. P (ksCurThread s)" + +crunch cur_domain[wp]: setQueue "\s. P (ksCurDomain s)" + +crunch ct'[wp]: addToBitmap "\s. P (ksCurThread s)" +crunch ct'[wp]: removeFromBitmap "\s. P (ksCurThread s)" + +lemma setQueue_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ setQueue d p xs \\_. tcb_in_cur_domain' t\" + apply (simp add: setQueue_def tcb_in_cur_domain'_def) + apply wp + apply (simp add: ps_clear_def obj_at'_def) + done + +lemma sbn'_valid_pspace'_inv[wp]: + "\ valid_pspace' and tcb_at' t and valid_bound_ntfn' ntfn \ + setBoundNotification ntfn t + \ \rv. valid_pspace' \" + apply (simp add: valid_pspace'_def) + apply (rule hoare_pre) + apply (wp sbn_valid_objs') + apply (simp add: setBoundNotification_def threadSet_def bind_assoc valid_mdb'_def) + apply (wp getObject_obj_at_tcb | simp)+ + apply (clarsimp simp: valid_mdb'_def) + apply (drule obj_at_ko_at') + apply clarsimp + apply (erule obj_at'_weakenE) + apply (simp add: tcb_cte_cases_def cteSizeBits_def) + done + +crunch pred_tcb_at'[wp]: setQueue "\s. P (pred_tcb_at' proj P' t s)" + +lemma setQueue_sch_act: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setQueue d p xs + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + by (wp sch_act_wf_lift) + +lemma setQueue_valid_bitmapQ_except[wp]: + "\ valid_bitmapQ_except d p \ + setQueue d p ts + \\_. valid_bitmapQ_except d p \" + unfolding setQueue_def bitmapQ_defs + by (wp, clarsimp simp: bitmapQ_def) + +lemma setQueue_valid_bitmapQ: (* enqueue only *) + "\ valid_bitmapQ and (\s. (ksReadyQueues s (d, p) = []) = (ts = [])) \ + setQueue d p ts + \\_. valid_bitmapQ \" + unfolding setQueue_def bitmapQ_defs + by (wp, clarsimp simp: bitmapQ_def) + +lemma setQueue_valid_queues': + "\valid_queues' and (\s. \t. obj_at' (inQ d p) t s \ t \ set ts)\ + setQueue d p ts \\_. valid_queues'\" + by (wp | simp add: valid_queues'_def setQueue_def)+ + +lemma setQueue_cur: + "\\s. cur_tcb' s\ setQueue d p ts \\rv s. cur_tcb' s\" + unfolding setQueue_def cur_tcb'_def + by (wp, clarsimp) + +lemma ssa_sch_act[wp]: + "\sch_act_wf sa\ setSchedulerAction sa + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + by (simp add: setSchedulerAction_def | wp)+ + +lemma threadSet_runnable_sch_act: + "(\tcb. runnable' (tcbState (F tcb)) \ tcbDomain (F tcb) = tcbDomain tcb \ tcbPriority (F tcb) = tcbPriority tcb) \ + \\s. sch_act_wf (ksSchedulerAction s) s\ + threadSet F t + \\rv s. 
sch_act_wf (ksSchedulerAction s) s\" + apply (clarsimp simp: valid_def) + apply (frule_tac P1="(=) (ksSchedulerAction s)" + in use_valid [OF _ threadSet_nosch], + rule refl) + apply (frule_tac P1="(=) (ksCurThread s)" + in use_valid [OF _ threadSet_ct], + rule refl) + apply (frule_tac P1="(=) (ksCurDomain s)" + in use_valid [OF _ threadSet_cd], + rule refl) + apply (case_tac "ksSchedulerAction b", + simp_all add: sch_act_simple_def ct_in_state'_def pred_tcb_at'_def) + apply (drule_tac t'1="ksCurThread s" + and P1="activatable' \ tcbState" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp elim!: obj_at'_weakenE) + apply (simp add: o_def) + apply (rename_tac word) + apply (rule conjI) + apply (frule_tac t'1=word + and P1="runnable' \ tcbState" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp elim!: obj_at'_weakenE, clarsimp simp: obj_at'_def) + apply (simp add: tcb_in_cur_domain'_def) + apply (frule_tac t'1=word + and P1="\tcb. ksCurDomain b = tcbDomain tcb" + in use_valid [OF _ threadSet_obj_at'_really_strongest]) + apply (clarsimp simp: o_def tcb_in_cur_domain'_def) + apply clarsimp + done + +lemma threadSet_pred_tcb_at_state: + "\\s. tcb_at' t s \ (if p = t + then obj_at' (\tcb. P (proj (tcb_to_itcb' (f tcb)))) t s + else pred_tcb_at' proj P p s)\ + threadSet f t \\_. pred_tcb_at' proj P p\" + apply (rule hoare_chain) + apply (rule threadSet_obj_at'_really_strongest) + prefer 2 + apply (simp add: pred_tcb_at'_def) + apply (clarsimp split: if_splits simp: pred_tcb_at'_def o_def) + done + +lemma threadSet_tcbDomain_triv': + "\tcb_in_cur_domain' t' and K (t \ t')\ threadSet f t \\_. tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_assume_pre) + apply simp + apply (rule_tac f="ksCurDomain" in hoare_lift_Pf) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | simp)+ + done + +lemma threadSet_sch_act_wf: + "\\s. sch_act_wf (ksSchedulerAction s) s \ sch_act_not t s \ + (ksCurThread s = t \ \(\tcb. activatable' (tcbState (F tcb))) \ + ksSchedulerAction s \ ResumeCurrentThread) \ + threadSet F t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (rule hoare_lift_Pf2 [where f=ksSchedulerAction]) + prefer 2 + apply wp + apply (case_tac x, simp_all) + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf2 [where f=ksCurThread]) + prefer 2 + apply wp[1] + apply (wp threadSet_pred_tcb_at_state) + apply clarsimp + apply wp + apply (clarsimp) + apply (wp threadSet_pred_tcb_at_state threadSet_tcbDomain_triv' | clarsimp)+ + done + +lemma rescheduleRequired_sch_act'[wp]: + "\\\ + rescheduleRequired + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp)+ + done + +lemma setObject_queued_pred_tcb_at'[wp]: + "\pred_tcb_at' proj P t' and obj_at' ((=) tcb) t\ + setObject t (tcbQueued_update f tcb) + \\_. pred_tcb_at' proj P t'\" + apply (simp add: pred_tcb_at'_def) + apply (rule hoare_pre) + apply (wp setObject_tcb_strongest) + apply (clarsimp simp: obj_at'_def tcb_to_itcb'_def) + done + +lemma setObject_queued_ct_activatable'[wp]: + "\ct_in_state' activatable' and obj_at' ((=) tcb) t\ + setObject t (tcbQueued_update f tcb) + \\_. ct_in_state' activatable'\" + apply (clarsimp simp: ct_in_state'_def pred_tcb_at'_def) + apply (rule hoare_pre) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest) + apply (clarsimp simp: obj_at'_def) + done + +lemma threadSet_queued_sch_act_wf[wp]: + "\\s. 
sch_act_wf (ksSchedulerAction s) s\ + threadSet (tcbQueued_update f) t + \\_ s. sch_act_wf (ksSchedulerAction s) s\" + including no_pre + apply (simp add: sch_act_wf_cases + split: scheduler_action.split) + apply (wp hoare_vcg_conj_lift) + apply (simp add: threadSet_def) + apply (wp hoare_weak_lift_imp) + apply (wps setObject_sa_unchanged) + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ + apply (clarsimp simp: obj_at'_def) + apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ + apply (simp add: threadSet_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + apply (wp tcb_in_cur_domain'_lift | simp add: obj_at'_def)+ + done + +lemma tcbSchedEnqueue_pred_tcb_at'[wp]: + "\\s. pred_tcb_at' proj P' t' s \ tcbSchedEnqueue t \\_ s. pred_tcb_at' proj P' t' s\" + apply (simp add: tcbSchedEnqueue_def when_def unless_def) + apply (wp threadSet_pred_tcb_no_state crunch_wps | clarsimp simp: tcb_to_itcb'_def)+ + done + +lemma tcbSchedDequeue_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + tcbSchedDequeue t + \\_ s. sch_act_wf (ksSchedulerAction s) s\" + unfolding tcbSchedDequeue_def + by (wp setQueue_sch_act | wp sch_act_wf_lift | simp add: if_apply_def2)+ + +crunch nosch: tcbSchedDequeue "\s. P (ksSchedulerAction s)" + +lemma sts_sch_act': + "\\s. (\ runnable' st \ sch_act_not t s) \ sch_act_wf (ksSchedulerAction s) s\ + setThreadState st t \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setThreadState_def) + apply (wp | simp)+ + prefer 2 + apply assumption + apply (case_tac "runnable' st") + apply ((wp threadSet_runnable_sch_act hoare_drop_imps | simp)+)[1] + apply (rule_tac Q="\rv s. st_tcb_at' (Not \ runnable') t s \ + (ksCurThread s \ t \ ksSchedulerAction s \ ResumeCurrentThread \ + sch_act_wf (ksSchedulerAction s) s)" + in hoare_post_imp) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (simp only: imp_conv_disj) + apply (wp threadSet_pred_tcb_at_state threadSet_sch_act_wf + hoare_vcg_disj_lift|simp)+ + done + +lemma sts_sch_act[wp]: + "\\s. (\ runnable' st \ sch_act_simple s) \ sch_act_wf (ksSchedulerAction s) s\ + setThreadState st t + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setThreadState_def) + apply wp + apply simp + prefer 2 + apply assumption + apply (case_tac "runnable' st") + apply (rule_tac Q="\s. sch_act_wf (ksSchedulerAction s) s" + in hoare_pre_imp, simp) + apply ((wp hoare_drop_imps threadSet_runnable_sch_act | simp)+)[1] + apply (rule_tac Q="\rv s. st_tcb_at' (Not \ runnable') t s \ + (ksCurThread s \ t \ ksSchedulerAction s \ ResumeCurrentThread \ + sch_act_wf (ksSchedulerAction s) s)" + in hoare_post_imp) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + apply (simp only: imp_conv_disj) + apply (rule hoare_pre) + apply (wp threadSet_pred_tcb_at_state threadSet_sch_act_wf + hoare_vcg_disj_lift|simp)+ + apply (auto simp: sch_act_simple_def) + done + +lemma sbn_sch_act': + "\\s. sch_act_wf (ksSchedulerAction s) s\ + setBoundNotification ntfn t \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_sch_act | simp)+ + done + +lemma ssa_sch_act_simple[wp]: + "sa = ResumeCurrentThread \ sa = ChooseNewThread \ + \\\ setSchedulerAction sa \\rv. sch_act_simple\" + unfolding setSchedulerAction_def sch_act_simple_def + by (wp | simp)+ + +lemma sch_act_simple_lift: + "(\P. \\s. P (ksSchedulerAction s)\ f \\rv s. P (ksSchedulerAction s)\) \ + \sch_act_simple\ f \\rv. 
sch_act_simple\" + by (simp add: sch_act_simple_def) assumption + +lemma rescheduleRequired_sch_act_simple[wp]: + "\sch_act_simple\ rescheduleRequired \\rv. sch_act_simple\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp)+ + done + +crunch no_sa[wp]: tcbSchedDequeue "\s. P (ksSchedulerAction s)" + +lemma sts_sch_act_simple[wp]: + "\sch_act_simple\ setThreadState st t \\rv. sch_act_simple\" + apply (simp add: setThreadState_def) + apply (wp hoare_drop_imps | rule sch_act_simple_lift | simp)+ + done + +lemma setQueue_after: + "(setQueue d p q >>= (\rv. threadSet f t)) = + (threadSet f t >>= (\rv. setQueue d p q))" + apply (simp add: setQueue_def) + apply (rule oblivious_modify_swap) + apply (simp add: threadSet_def getObject_def setObject_def + loadObject_default_def + split_def projectKO_def2 alignCheck_assert + magnitudeCheck_assert updateObject_default_def) + apply (intro oblivious_bind, simp_all) + done + +lemma tcbSchedEnqueue_sch_act[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ + tcbSchedEnqueue t + \\_ s. sch_act_wf (ksSchedulerAction s) s\" + by (simp add: tcbSchedEnqueue_def unless_def) + (wp setQueue_sch_act | wp sch_act_wf_lift | clarsimp)+ + +lemma tcbSchedEnqueue_weak_sch_act[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + tcbSchedEnqueue t + \\_ s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wp setQueue_sch_act threadSet_weak_sch_act_wf | clarsimp)+ + done + +lemma threadGet_wp: "\\s. tcb_at' t s \ (\tcb. ko_at' tcb t s \ P (f tcb) s)\ threadGet f t \P\" + apply (simp add: threadGet_def) + apply (wp getObject_tcb_wp) + apply clarsimp + done + +lemma threadGet_const: + "\\s. tcb_at' t s \ obj_at' (P \ f) t s\ threadGet f t \\rv s. P (rv)\" + apply (simp add: threadGet_def liftM_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +schematic_goal l2BitmapSize_def': (* arch specific consequence *) + "l2BitmapSize = numeral ?X" + by (simp add: l2BitmapSize_def wordBits_def word_size numPriorities_def) + +lemma prioToL1Index_size [simp]: + "prioToL1Index w < l2BitmapSize" + unfolding prioToL1Index_def wordRadix_def l2BitmapSize_def' + by (fastforce simp: shiftr_div_2n' nat_divide_less_eq + intro: order_less_le_trans[OF unat_lt2p]) + +lemma prioToL1Index_max: + "prioToL1Index p < 2 ^ wordRadix" + unfolding prioToL1Index_def wordRadix_def + by (insert unat_lt2p[where x=p], simp add: shiftr_div_2n') + +lemma prioToL1Index_bit_set: + "((2 :: machine_word) ^ prioToL1Index p) !! prioToL1Index p" + using l2BitmapSize_def' + by (fastforce simp: nth_w2p_same intro: order_less_le_trans[OF prioToL1Index_size]) + +lemma prioL2Index_bit_set: + fixes p :: priority + shows "((2::machine_word) ^ unat (ucast p && (mask wordRadix :: machine_word))) !! unat (p && mask wordRadix)" + apply (simp add: nth_w2p wordRadix_def ucast_and_mask[symmetric] unat_ucast_upcast is_up) + apply (rule unat_less_helper) + apply (insert and_mask_less'[where w=p and n=wordRadix], simp add: wordRadix_def) + done + +lemma addToBitmap_bitmapQ: + "\ \s. True \ addToBitmap d p \\_. bitmapQ d p \" + unfolding addToBitmap_def + modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def + by (wpsimp simp: bitmap_fun_defs bitmapQ_def prioToL1Index_bit_set prioL2Index_bit_set + simp_del: bit_exp_iff) + +lemma addToBitmap_valid_queues_no_bitmap_except: +" \ valid_queues_no_bitmap_except t \ + addToBitmap d p + \\_. 
valid_queues_no_bitmap_except t \" + unfolding addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def valid_queues_no_bitmap_except_def + by (wp, clarsimp) + +crunch norq[wp]: addToBitmap "\s. P (ksReadyQueues s)" + (wp: updateObject_cte_inv hoare_drop_imps) +crunch norq[wp]: removeFromBitmap "\s. P (ksReadyQueues s)" + (wp: updateObject_cte_inv hoare_drop_imps) + +lemma prioToL1Index_lt: + "2 ^ wordRadix \ x \ prioToL1Index p < x" + unfolding prioToL1Index_def wordRadix_def + by (insert unat_lt2p[where x=p], simp add: shiftr_div_2n') + +lemma prioToL1Index_bits_low_high_eq: + "\ pa \ p; prioToL1Index pa = prioToL1Index (p::priority) \ + \ unat (pa && mask wordRadix) \ unat (p && mask wordRadix)" + unfolding prioToL1Index_def + by (fastforce simp: nth_w2p wordRadix_def is_up bits_low_high_eq) + +lemma prioToL1Index_bit_not_set: + "\ (~~ ((2 :: machine_word) ^ prioToL1Index p)) !! prioToL1Index p" + apply (subst word_ops_nth_size, simp_all add: prioToL1Index_bit_set del: bit_exp_iff) + apply (fastforce simp: prioToL1Index_def wordRadix_def word_size + intro: order_less_le_trans[OF word_shiftr_lt]) + done + +lemma prioToL1Index_complement_nth_w2p: + fixes p pa :: priority + shows "(~~ ((2 :: machine_word) ^ prioToL1Index p)) !! prioToL1Index p' + = (prioToL1Index p \ prioToL1Index p')" + by (fastforce simp: complement_nth_w2p prioToL1Index_lt wordRadix_def word_size)+ + +lemma valid_bitmapQ_exceptE: + "\ valid_bitmapQ_except d' p' s ; d \ d' \ p \ p' \ + \ bitmapQ d p s = (ksReadyQueues s (d, p) \ [])" + unfolding valid_bitmapQ_except_def + by blast + +lemma invertL1Index_eq_cancelD: + "\ invertL1Index i = invertL1Index j ; i < l2BitmapSize ; j < l2BitmapSize \ + \ i = j" + by (simp add: invertL1Index_def l2BitmapSize_def') + +lemma invertL1Index_eq_cancel: + "\ i < l2BitmapSize ; j < l2BitmapSize \ + \ (invertL1Index i = invertL1Index j) = (i = j)" + by (rule iffI, simp_all add: invertL1Index_eq_cancelD) + +lemma removeFromBitmap_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ removeFromBitmap d p \\_. bitmapQ_no_L1_orphans \" + unfolding bitmap_fun_defs + apply (wp | simp add: bitmap_fun_defs bitmapQ_no_L1_orphans_def)+ + apply (fastforce simp: invertL1Index_eq_cancel prioToL1Index_bit_not_set + prioToL1Index_complement_nth_w2p) + done + +lemma removeFromBitmap_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans \ + removeFromBitmap d p + \\_. bitmapQ_no_L2_orphans \" + unfolding bitmap_fun_defs + apply (wp, clarsimp simp: bitmap_fun_defs bitmapQ_no_L2_orphans_def)+ + apply (rule conjI, clarsimp) + apply (clarsimp simp: complement_nth_w2p l2BitmapSize_def') + apply clarsimp + apply metis + done + +lemma removeFromBitmap_valid_bitmapQ_except: + "\ valid_bitmapQ_except d p \ + removeFromBitmap d p + \\_. valid_bitmapQ_except d p \" +proof - + have unat_ucast_mask[simp]: + "\x. 
unat ((ucast (p::priority) :: machine_word) && mask x) = unat (p && mask x)" + by (simp add: ucast_and_mask[symmetric] unat_ucast_upcast is_up) + + note bit_exp_iff[simp del] bit_not_iff[simp del] bit_not_exp_iff[simp del] + show ?thesis + unfolding removeFromBitmap_def + apply (simp add: let_into_return[symmetric]) + unfolding bitmap_fun_defs when_def + apply wp + apply clarsimp + apply (rule conjI) + (* after clearing bit in L2, all bits in L2 field are clear *) + apply clarsimp + apply (subst valid_bitmapQ_except_def, clarsimp)+ + apply (clarsimp simp: bitmapQ_def) + apply (rule conjI; clarsimp) + apply (rename_tac p') + apply (rule conjI; clarsimp simp: invertL1Index_eq_cancel) + apply (drule_tac p=p' in valid_bitmapQ_exceptE[where d=d], clarsimp) + apply (clarsimp simp: bitmapQ_def) + apply (drule_tac n'="unat (p' && mask wordRadix)" in no_other_bits_set) + apply (erule (1) prioToL1Index_bits_low_high_eq) + apply (rule order_less_le_trans[OF word_unat_mask_lt]) + apply ((simp add: wordRadix_def' word_size)+)[2] + apply (rule order_less_le_trans[OF word_unat_mask_lt]) + apply ((simp add: wordRadix_def' word_size)+)[3] + apply (drule_tac p=p' and d=d in valid_bitmapQ_exceptE, simp) + apply (clarsimp simp: bitmapQ_def prioToL1Index_complement_nth_w2p) + apply (drule_tac p=pa and d=da in valid_bitmapQ_exceptE, simp) + apply (clarsimp simp: bitmapQ_def prioToL1Index_complement_nth_w2p) + (* after clearing bit in L2, some bits in L2 field are still set *) + apply clarsimp + apply (subst valid_bitmapQ_except_def, clarsimp)+ + apply (clarsimp simp: bitmapQ_def invertL1Index_eq_cancel) + apply (rule conjI; clarsimp) + apply (frule (1) prioToL1Index_bits_low_high_eq) + apply (drule_tac d=d and p=pa in valid_bitmapQ_exceptE, simp) + apply (clarsimp simp: bitmapQ_def) + apply (subst complement_nth_w2p) + apply (rule order_less_le_trans[OF word_unat_mask_lt]) + apply ((simp add: wordRadix_def' word_size)+)[3] + apply (clarsimp simp: valid_bitmapQ_except_def bitmapQ_def) + done +qed + +lemma addToBitmap_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ addToBitmap d p \\_. bitmapQ_no_L1_orphans \" + unfolding bitmap_fun_defs bitmapQ_defs + using word_unat_mask_lt[where w=p and m=wordRadix] + apply wp + apply (clarsimp simp: word_or_zero prioToL1Index_bit_set ucast_and_mask[symmetric] + unat_ucast_upcast is_up wordRadix_def' word_size nth_w2p + wordBits_def numPriorities_def) + done + +lemma addToBitmap_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ addToBitmap d p \\_. bitmapQ_no_L2_orphans \" + unfolding bitmap_fun_defs bitmapQ_defs + supply bit_exp_iff[simp del] + apply wp + apply clarsimp + apply (fastforce simp: invertL1Index_eq_cancel prioToL1Index_bit_set) + done + +lemma addToBitmap_valid_bitmapQ_except: + "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans \ + addToBitmap d p + \\_. valid_bitmapQ_except d p \" + unfolding bitmap_fun_defs bitmapQ_defs + apply wp + apply (clarsimp simp: bitmapQ_def invertL1Index_eq_cancel + ucast_and_mask[symmetric] unat_ucast_upcast is_up nth_w2p) + apply (fastforce simp: priority_mask_wordRadix_size[simplified wordBits_def'] + dest: prioToL1Index_bits_low_high_eq) + done + +lemma addToBitmap_valid_bitmapQ: +" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and + (\s. ksReadyQueues s (d,p) \ []) \ + addToBitmap d p + \\_. valid_bitmapQ \" +proof - + have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and + (\s. ksReadyQueues s (d,p) \ []) \ + addToBitmap d p + \\_. valid_bitmapQ_except d p and + bitmapQ_no_L2_orphans and (\s. 
bitmapQ d p s \ ksReadyQueues s (d,p) \ []) \" + by (wp addToBitmap_valid_queues_no_bitmap_except addToBitmap_valid_bitmapQ_except + addToBitmap_bitmapQ_no_L2_orphans addToBitmap_bitmapQ; simp) + + thus ?thesis + by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) +qed + +lemma threadGet_const_tcb_at: + "\\s. tcb_at' t s \ obj_at' (P s \ f) t s\ threadGet f t \\rv s. P s rv \" + apply (simp add: threadGet_def liftM_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma threadGet_const_tcb_at_imp_lift: + "\\s. tcb_at' t s \ obj_at' (P s \ f) t s \ obj_at' (Q s \ f) t s \ + threadGet f t + \\rv s. P s rv \ Q s rv \" + apply (simp add: threadGet_def liftM_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma valid_queues_no_bitmap_objD: + "\ valid_queues_no_bitmap s; t \ set (ksReadyQueues s (d, p))\ + \ obj_at' (inQ d p and runnable' \ tcbState) t s" + unfolding valid_queues_no_bitmap_def + by metis + +lemma setQueue_bitmapQ_no_L1_orphans[wp]: + "\ bitmapQ_no_L1_orphans \ + setQueue d p ts + \\rv. bitmapQ_no_L1_orphans \" + unfolding setQueue_def bitmapQ_no_L1_orphans_def null_def + by (wp, auto) + +lemma setQueue_bitmapQ_no_L2_orphans[wp]: + "\ bitmapQ_no_L2_orphans \ + setQueue d p ts + \\rv. bitmapQ_no_L2_orphans \" + unfolding setQueue_def bitmapQ_no_L2_orphans_def null_def + by (wp, auto) + +lemma setQueue_sets_queue[wp]: + "\d p ts P. \ \s. P ts \ setQueue d p ts \\rv s. P (ksReadyQueues s (d, p)) \" + unfolding setQueue_def + by (wp, simp) + +lemma tcbSchedEnqueueOrAppend_valid_queues: + (* f is either (t#ts) or (ts @ [t]), so we define its properties generally *) + assumes f_set[simp]: "\ts. t \ set (f ts)" + assumes f_set_insert[simp]: "\ts. set (f ts) = insert t (set ts)" + assumes f_not_empty[simp]: "\ts. f ts \ []" + assumes f_distinct: "\ts. \ distinct ts ; t \ set ts \ \ distinct (f ts)" + shows "\Invariants_H.valid_queues and st_tcb_at' runnable' t and valid_objs' \ + do queued \ threadGet tcbQueued t; + unless queued $ + do tdom \ threadGet tcbDomain t; + prio \ threadGet tcbPriority t; + queue \ getQueue tdom prio; + setQueue tdom prio $ f queue; + when (null queue) $ addToBitmap tdom prio; + threadSet (tcbQueued_update (\_. True)) t + od + od + \\_. Invariants_H.valid_queues\" +proof - + + define could_run where "could_run == + \d p t. obj_at' (\tcb. inQ d p (tcbQueued_update (\_. True) tcb) \ runnable' (tcbState tcb)) t" + + have addToBitmap_could_run: + "\d p. \\s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\ + addToBitmap d p + \\_ s. \d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s\" + unfolding bitmap_fun_defs + by (wp, clarsimp simp: could_run_def) + + have setQueue_valid_queues_no_bitmap_except: + "\d p ts. + \ valid_queues_no_bitmap_except t and + (\s. ksReadyQueues s (d, p) = ts \ p \ maxPriority \ d \ maxDomain \ t \ set ts) \ + setQueue d p (f ts) + \\rv. valid_queues_no_bitmap_except t\" + unfolding setQueue_def valid_queues_no_bitmap_except_def null_def + by (wp, auto intro: f_distinct) + + have threadSet_valid_queues_could_run: + "\f. \ valid_queues_no_bitmap_except t and + (\s. \d p. t \ set (ksReadyQueues s (d,p)) \ could_run d p t s) and + valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans \ + threadSet (tcbQueued_update (\_. True)) t + \\rv. 
Invariants_H.valid_queues \" + unfolding threadSet_def could_run_def + apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) + apply (rule hoare_pre) + apply (simp add: valid_queues_def valid_queues_no_bitmap_def) + apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift + setObject_tcb_strongest) + apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def) + done + + have setQueue_could_run: "\d p ts. + \ valid_queues and (\_. t \ set ts) and + (\s. could_run d p t s) \ + setQueue d p ts + \\rv s. (\d p. t \ set (ksReadyQueues s (d, p)) \ could_run d p t s)\" + unfolding setQueue_def valid_queues_def could_run_def + by wp (fastforce dest: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def) + + note hoare_vcg_if_lift[wp] hoare_vcg_conj_lift[wp] hoare_vcg_const_imp_lift[wp] + + show ?thesis + unfolding tcbSchedEnqueue_def null_def + apply (rule hoare_pre) + apply (rule hoare_seq_ext) + apply (simp add: unless_def) + apply (wp threadSet_valid_queues_could_run) + apply (wp addToBitmap_could_run addToBitmap_valid_bitmapQ + addToBitmap_valid_queues_no_bitmap_except addToBitmap_bitmapQ_no_L2_orphans)+ + apply (wp setQueue_valid_queues_no_bitmap_except setQueue_could_run + setQueue_valid_bitmapQ_except setQueue_sets_queue setQueue_valid_bitmapQ)+ + apply (wp threadGet_const_tcb_at_imp_lift | simp add: if_apply_def2)+ + apply clarsimp + apply (frule pred_tcb_at') + apply (frule (1) valid_objs'_maxDomain) + apply (frule (1) valid_objs'_maxPriority) + apply (clarsimp simp: valid_queues_def st_tcb_at'_def obj_at'_def valid_queues_no_bitmap_exceptI) + apply (fastforce dest!: valid_queues_no_bitmap_objD simp: obj_at'_def inQ_def could_run_def) + done +qed + +lemma tcbSchedEnqueue_valid_queues[wp]: + "\Invariants_H.valid_queues + and st_tcb_at' runnable' t + and valid_objs' \ + tcbSchedEnqueue t + \\_. Invariants_H.valid_queues\" + unfolding tcbSchedEnqueue_def + by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) + +lemma tcbSchedAppend_valid_queues[wp]: + "\Invariants_H.valid_queues + and st_tcb_at' runnable' t + and valid_objs' \ + tcbSchedAppend t + \\_. Invariants_H.valid_queues\" + unfolding tcbSchedAppend_def + by (fastforce intro: tcbSchedEnqueueOrAppend_valid_queues) + +lemma rescheduleRequired_valid_queues[wp]: + "\\s. Invariants_H.valid_queues s \ valid_objs' s \ + weak_sch_act_wf (ksSchedulerAction s) s\ + rescheduleRequired + \\_. Invariants_H.valid_queues\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp)+ + apply (fastforce simp: weak_sch_act_wf_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) + done + +lemma rescheduleRequired_valid_queues_sch_act_simple: + "\Invariants_H.valid_queues and sch_act_simple\ + rescheduleRequired + \\_. Invariants_H.valid_queues\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp | fastforce simp: Invariants_H.valid_queues_def sch_act_simple_def)+ + done + +lemma rescheduleRequired_valid_bitmapQ_sch_act_simple: + "\ valid_bitmapQ and sch_act_simple\ + rescheduleRequired + \\_. valid_bitmapQ \" + including no_pre + apply (simp add: rescheduleRequired_def sch_act_simple_def) + apply (rule_tac B="\rv s. valid_bitmapQ s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply wpsimp + apply (case_tac x; simp) + apply (wp, fastforce) + done + +lemma rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple: + "\ bitmapQ_no_L1_orphans and sch_act_simple\ + rescheduleRequired + \\_. 
bitmapQ_no_L1_orphans \" + including no_pre + apply (simp add: rescheduleRequired_def sch_act_simple_def) + apply (rule_tac B="\rv s. bitmapQ_no_L1_orphans s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply wpsimp + apply (case_tac x; simp) + apply (wp, fastforce) + done + +lemma rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple: + "\ bitmapQ_no_L2_orphans and sch_act_simple\ + rescheduleRequired + \\_. bitmapQ_no_L2_orphans \" + including no_pre + apply (simp add: rescheduleRequired_def sch_act_simple_def) + apply (rule_tac B="\rv s. bitmapQ_no_L2_orphans s \ + (rv = ResumeCurrentThread \ rv = ChooseNewThread)" in hoare_seq_ext) + apply wpsimp + apply (case_tac x; simp) + apply (wp, fastforce) + done + +lemma sts_valid_bitmapQ_sch_act_simple: + "\valid_bitmapQ and sch_act_simple\ + setThreadState st t + \\_. valid_bitmapQ \" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_valid_bitmapQ_sch_act_simple + threadSet_valid_bitmapQ [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + done + +lemma sts_valid_bitmapQ_no_L2_orphans_sch_act_simple: + "\ bitmapQ_no_L2_orphans and sch_act_simple\ + setThreadState st t + \\_. bitmapQ_no_L2_orphans \" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_bitmapQ_no_L2_orphans_sch_act_simple + threadSet_valid_bitmapQ_no_L2_orphans [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + done + +lemma sts_valid_bitmapQ_no_L1_orphans_sch_act_simple: + "\ bitmapQ_no_L1_orphans and sch_act_simple\ + setThreadState st t + \\_. bitmapQ_no_L1_orphans \" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_bitmapQ_no_L1_orphans_sch_act_simple + threadSet_valid_bitmapQ_no_L1_orphans [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + done + +lemma sts_valid_queues: + "\\s. Invariants_H.valid_queues s \ + ((\p. t \ set(ksReadyQueues s p)) \ runnable' st)\ + setThreadState st t \\rv. Invariants_H.valid_queues\" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_valid_queues_sch_act_simple + threadSet_valid_queues [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + done + +lemma sbn_valid_queues: + "\\s. Invariants_H.valid_queues s\ + setBoundNotification ntfn t \\rv. Invariants_H.valid_queues\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_valid_queues [THEN hoare_strengthen_post]) + apply (clarsimp simp: sch_act_simple_def Invariants_H.valid_queues_def inQ_def)+ + done + + + +lemma addToBitmap_valid_queues'[wp]: + "\ valid_queues' \ addToBitmap d p \\_. valid_queues' \" + unfolding valid_queues'_def addToBitmap_def + modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def + by (wp, simp) + +lemma tcbSchedEnqueue_valid_queues'[wp]: + "\valid_queues' and st_tcb_at' runnable' t \ + tcbSchedEnqueue t + \\_. valid_queues'\" + apply (simp add: tcbSchedEnqueue_def) + apply (rule hoare_pre) + apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t" + in hoare_seq_ext) + apply (rename_tac queued) + apply (case_tac queued; simp_all add: unless_def when_def) + apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ + apply (subst conj_commute, wp) + apply (rule hoare_pre_post, assumption) + apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) + apply wp + apply fastforce + apply wp + apply (subst conj_commute) + apply clarsimp + apply (rule_tac Q="\rv. valid_queues' + and obj_at' (\obj. \ tcbQueued obj) t + and obj_at' (\obj. tcbPriority obj = prio) t + and obj_at' (\obj. tcbDomain obj = tdom) t + and (\s. t \ set (ksReadyQueues s (tdom, prio)))" + in hoare_post_imp) + apply (clarsimp simp: valid_queues'_def obj_at'_def inQ_def) + apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (clarsimp simp: obj_at'_def inQ_def valid_queues'_def) + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (clarsimp simp: obj_at'_def) + done + +lemma rescheduleRequired_valid_queues'_weak[wp]: + "\\s. valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s\ + rescheduleRequired + \\_. valid_queues'\" + apply (simp add: rescheduleRequired_def) + apply wpsimp + apply (clarsimp simp: weak_sch_act_wf_def) + done + +lemma rescheduleRequired_valid_queues'_sch_act_simple: + "\valid_queues' and sch_act_simple\ + rescheduleRequired + \\_. valid_queues'\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp | fastforce simp: valid_queues'_def sch_act_simple_def)+ + done + +lemma setThreadState_valid_queues'[wp]: + "\\s. valid_queues' s\ setThreadState st t \\rv. valid_queues'\" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_valid_queues'_sch_act_simple) + apply (rule_tac Q="\_. valid_queues'" in hoare_post_imp) + apply (clarsimp simp: sch_act_simple_def) + apply (wp threadSet_valid_queues') + apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) + done + +lemma setBoundNotification_valid_queues'[wp]: + "\\s. valid_queues' s\ setBoundNotification ntfn t \\rv. valid_queues'\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_valid_queues') + apply (fastforce simp: inQ_def obj_at'_def pred_tcb_at'_def) + done + +lemma valid_tcb'_tcbState_update: + "\ valid_tcb_state' st s; valid_tcb' tcb s \ \ valid_tcb' (tcbState_update (\_. st) tcb) s" + apply (clarsimp simp: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def valid_tcb_state'_def) + done + +lemma setThreadState_valid_objs'[wp]: + "\ valid_tcb_state' st and valid_objs' \ setThreadState st t \ \_. valid_objs' \" + apply (simp add: setThreadState_def) + apply (wp threadSet_valid_objs' | clarsimp simp: valid_tcb'_tcbState_update)+ + done + +lemma rescheduleRequired_ksQ: + "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ + rescheduleRequired + \\_ s. P (ksReadyQueues s p)\" + including no_pre + apply (simp add: rescheduleRequired_def sch_act_simple_def) + apply (rule_tac B="\rv s. (rv = ResumeCurrentThread \ rv = ChooseNewThread) + \ P (ksReadyQueues s p)" in hoare_seq_ext) + apply wpsimp + apply (case_tac x; simp) + apply wp + done + +lemma setSchedulerAction_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ setSchedulerAction act \\_ s. P (ksReadyQueues s)\" + by (wp, simp) + +lemma threadSet_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ threadSet f t \\rv s. 
P (ksReadyQueues s)\" + by (simp add: threadSet_def | wp updateObject_default_inv)+ + +lemma sbn_ksQ: + "\\s. P (ksReadyQueues s p)\ setBoundNotification ntfn t \\rv s. P (ksReadyQueues s p)\" + by (simp add: setBoundNotification_def, wp) + +lemma sts_ksQ: + "\\s. sch_act_simple s \ P (ksReadyQueues s p)\ + setThreadState st t + \\_ s. P (ksReadyQueues s p)\" + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_ksQ) + apply (rule_tac Q="\_ s. P (ksReadyQueues s p)" in hoare_post_imp) + apply (clarsimp simp: sch_act_simple_def)+ + apply (wp, simp) + done + +lemma setQueue_ksQ[wp]: + "\\s. P ((ksReadyQueues s)((d, p) := q))\ + setQueue d p q + \\rv s. P (ksReadyQueues s)\" + by (simp add: setQueue_def fun_upd_def[symmetric] + | wp)+ + +lemma tcbSchedEnqueue_ksQ: + "\\s. t' \ set (ksReadyQueues s p) \ t' \ t \ + tcbSchedEnqueue t \\_ s. t' \ set (ksReadyQueues s p)\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wpsimp wp: hoare_vcg_imp_lift threadGet_wp) + apply (drule obj_at_ko_at') + apply fastforce + done + +lemma rescheduleRequired_ksQ': + "\\s. t \ set (ksReadyQueues s p) \ sch_act_not t s \ + rescheduleRequired \\_ s. t \ set (ksReadyQueues s p)\" + apply (simp add: rescheduleRequired_def) + apply (wpsimp wp: tcbSchedEnqueue_ksQ) + done + +lemma threadSet_tcbState_st_tcb_at': + "\\s. P st \ threadSet (tcbState_update (\_. st)) t \\_. st_tcb_at' P t\" + apply (simp add: threadSet_def pred_tcb_at'_def) + apply (wpsimp wp: setObject_tcb_strongest) + done + +lemma isRunnable_const: + "\st_tcb_at' runnable' t\ isRunnable t \\runnable _. runnable \" + by (rule isRunnable_wp) + +lemma sts_ksQ': + "\\s. (runnable' st \ ksCurThread s \ t) \ P (ksReadyQueues s p)\ + setThreadState st t + \\_ s. P (ksReadyQueues s p)\" + apply (simp add: setThreadState_def) + apply (rule hoare_pre_disj') + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] + threadSet_ksQ]]) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) + apply (clarsimp simp: when_def) + apply (case_tac x) + apply (clarsimp, wp)[1] + apply (clarsimp) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF threadSet_ct threadSet_ksQ]]) + apply (rule hoare_seq_ext [OF _ isRunnable_inv]) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF gct_wp gct_wp]]) + apply (rename_tac ct) + apply (case_tac "ct\t") + apply (clarsimp simp: when_def) + apply (wp)[1] + apply (clarsimp) + done + +lemma valid_ipc_buffer_ptr'D: + assumes yv: "y < unat max_ipc_words" + and buf: "valid_ipc_buffer_ptr' a s" + shows "pointerInUserData (a + of_nat y * 8) s" + using buf unfolding valid_ipc_buffer_ptr'_def pointerInUserData_def + apply clarsimp + apply (subgoal_tac + "(a + of_nat y * 8) && ~~ mask pageBits = a && ~~ mask pageBits") + apply simp + apply (rule mask_out_first_mask_some [where n = msg_align_bits]) + apply (erule is_aligned_add_helper [THEN conjunct2]) + apply (rule word_less_power_trans_ofnat [where k = 3, simplified]) + apply (rule order_less_le_trans [OF yv]) + apply (simp add: msg_align_bits max_ipc_words) + apply (simp add: msg_align_bits) + apply (simp_all add: msg_align_bits pageBits_def) + done + +lemma in_user_frame_eq: + assumes y: "y < unat max_ipc_words" + and al: "is_aligned a msg_align_bits" + shows "in_user_frame (a + of_nat y * 8) s = in_user_frame a s" +proof - + have "\sz. 
(a + of_nat y * 8) && ~~ mask (pageBitsForSize sz) = + a && ~~ mask (pageBitsForSize sz)" + apply (rule mask_out_first_mask_some [where n = msg_align_bits]) + apply (rule is_aligned_add_helper [OF al, THEN conjunct2]) + apply (rule word_less_power_trans_ofnat [where k = 3, simplified]) + apply (rule order_less_le_trans [OF y]) + apply (simp add: msg_align_bits max_ipc_words) + apply (simp add: msg_align_bits) + apply (simp add: msg_align_bits pageBits_def) + apply (case_tac sz, simp_all add: msg_align_bits bit_simps) + done + + thus ?thesis by (simp add: in_user_frame_def) +qed + +lemma loadWordUser_corres: + assumes y: "y < unat max_ipc_words" + shows "corres (=) \ (valid_ipc_buffer_ptr' a) (load_word_offs a y) (loadWordUser (a + of_nat y * 8))" + unfolding loadWordUser_def + apply (rule corres_stateAssert_assume [rotated]) + apply (erule valid_ipc_buffer_ptr'D[OF y]) + apply (rule corres_guard_imp) + apply (simp add: load_word_offs_def word_size_def) + apply (rule_tac F = "is_aligned a msg_align_bits" in corres_gen_asm2) + apply (rule corres_machine_op) + apply (rule corres_Id [OF refl refl]) + apply (rule no_fail_pre) + apply wp + apply (erule aligned_add_aligned) + apply (rule is_aligned_mult_triv2 [where n = 3, simplified]) + apply (simp add: word_bits_conv msg_align_bits)+ + apply (simp add: valid_ipc_buffer_ptr'_def msg_align_bits) + done + +lemma storeWordUser_corres: + assumes y: "y < unat max_ipc_words" + shows "corres dc (in_user_frame a) (valid_ipc_buffer_ptr' a) + (store_word_offs a y w) (storeWordUser (a + of_nat y * 8) w)" + apply (simp add: storeWordUser_def bind_assoc[symmetric] + store_word_offs_def word_size_def) + apply (rule corres_guard2_imp) + apply (rule_tac F = "is_aligned a msg_align_bits" in corres_gen_asm2) + apply (rule corres_guard1_imp) + apply (rule_tac r'=dc in corres_split) + apply (simp add: stateAssert_def) + apply (rule_tac r'=dc in corres_split) + apply (rule corres_trivial) + apply simp + apply (rule corres_assert) + apply wp+ + apply (rule corres_machine_op) + apply (rule corres_Id [OF refl]) + apply simp + apply (rule no_fail_pre) + apply (wp no_fail_storeWord) + apply (erule_tac n=msg_align_bits in aligned_add_aligned) + apply (rule is_aligned_mult_triv2 [where n = 3, simplified]) + apply (simp add: word_bits_conv msg_align_bits)+ + apply wp+ + apply (simp add: in_user_frame_eq[OF y]) + apply simp + apply (rule conjI) + apply (frule (1) valid_ipc_buffer_ptr'D [OF y]) + apply (simp add: valid_ipc_buffer_ptr'_def) + done + +lemma load_word_corres: + "corres (=) \ + (typ_at' UserDataT (a && ~~ mask pageBits) and (\s. 
is_aligned a word_size_bits)) + (do_machine_op (loadWord a)) (loadWordUser a)" + unfolding loadWordUser_def + apply (rule corres_gen_asm2) + apply (rule corres_stateAssert_assume [rotated]) + apply (simp add: pointerInUserData_def) + apply (rule corres_guard_imp) + apply simp + apply (rule corres_machine_op) + apply (rule corres_Id [OF refl refl]) + apply (rule no_fail_pre) + apply (wpsimp simp: word_size_bits_def)+ + done + +lemmas msgRegisters_unfold + = AARCH64_H.msgRegisters_def + msg_registers_def + AARCH64.msgRegisters_def + [unfolded upto_enum_def, simplified, + unfolded fromEnum_def enum_register, simplified, + unfolded toEnum_def enum_register, simplified] + +lemma thread_get_registers: + "thread_get (arch_tcb_get_registers \ tcb_arch) t = as_user t (gets user_regs)" + apply (simp add: thread_get_def as_user_def arch_tcb_get_registers_def + arch_tcb_context_get_def arch_tcb_context_set_def) + apply (rule bind_cong [OF refl]) + apply (clarsimp simp: gets_the_member) + apply (simp add: get_def the_run_state_def set_object_def get_object_def + put_def bind_def return_def gets_def) + apply (drule get_tcb_SomeD) + apply (clarsimp simp: map_upd_triv select_f_def image_def return_def) + done + +lemma getMRs_corres: + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct) + (case_option \ valid_ipc_buffer_ptr' buf) + (get_mrs t buf mi) (getMRs t buf (message_info_map mi))" + proof - + have S: "get = gets id" + by (simp add: gets_def) + have T: "corres (\con regs. regs = map con msg_registers) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (thread_get (arch_tcb_get_registers o tcb_arch) t) + (asUser t (mapM getRegister AARCH64_H.msgRegisters))" + apply (subst thread_get_registers) + apply (rule asUser_corres') + apply (subst mapM_gets) + apply (simp add: getRegister_def) + apply (simp add: S AARCH64_H.msgRegisters_def msg_registers_def) + done + show ?thesis + apply (case_tac mi, simp add: get_mrs_def getMRs_def split del: if_split) + apply (case_tac buf) + apply (rule corres_guard_imp) + apply (rule corres_split [where R = "\_. \" and R' = "\_. \", OF T]) + apply simp + apply wp+ + apply simp + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF T]) + apply (simp only: option.simps return_bind fun_app_def + load_word_offs_def doMachineOp_mapM loadWord_empty_fail) + apply (rule corres_split_eqr) + apply (simp only: mapM_map_simp msgMaxLength_def msgLengthBits_def + msg_max_length_def o_def upto_enum_word) + apply (rule corres_mapM [where r'="(=)" and S="{a. fst a = snd a \ fst a < unat max_ipc_words}"]) + apply simp + apply simp + apply (simp add: word_size wordSize_def wordBits_def) + apply (rule loadWordUser_corres) + apply simp + apply wp+ + apply simp + apply (unfold msgRegisters_unfold)[1] + apply simp + apply (clarsimp simp: set_zip) + apply (simp add: msgRegisters_unfold max_ipc_words nth_append) + apply (rule corres_trivial, simp) + apply (wp hoare_vcg_all_lift | simp add: valid_ipc_buffer_ptr'_def)+ + done +qed + +lemmas doMachineOp_return = submonad_doMachineOp.return + +lemma doMachineOp_bind: + "\ empty_fail a; \x. empty_fail (b x) \ \ doMachineOp (a >>= b) = (doMachineOp a >>= (\rv. doMachineOp (b rv)))" + by (blast intro: submonad_bind submonad_doMachineOp) + +lemma zipWithM_x_corres: + assumes x: "\x x' y y'. ((x, y), (x', y')) \ S \ corres dc P P' (f x y) (f' x' y')" + assumes y: "\x x' y y'. ((x, y), (x', y')) \ S \ \P\ f x y \\rv. P\" + and z: "\x x' y y'. ((x, y), (x', y')) \ S \ \P'\ f' x' y' \\rv. 
P'\" + and a: "set (zip (zip xs ys) (zip xs' ys')) \ S" + and b: "length (zip xs ys) = length (zip xs' ys')" + shows "corres dc P P' (zipWithM_x f xs ys) (zipWithM_x f' xs' ys')" + apply (simp add: zipWithM_x_mapM) + apply (rule corres_underlying_split) + apply (rule corres_mapM) + apply (rule dc_simp)+ + apply clarsimp + apply (rule x) + apply assumption + apply (clarsimp simp: y) + apply (clarsimp simp: z) + apply (rule b) + apply (rule a) + apply (rule corres_trivial, simp) + apply (rule hoare_post_taut)+ + done + + +lemma valid_ipc_buffer_ptr'_def2: + "valid_ipc_buffer_ptr' = (\p s. (is_aligned p msg_align_bits \ typ_at' UserDataT (p && ~~ mask pageBits) s))" + apply (rule ext, rule ext) + apply (simp add: valid_ipc_buffer_ptr'_def) + done + +lemma storeWordUser_valid_ipc_buffer_ptr' [wp]: + "\valid_ipc_buffer_ptr' p\ storeWordUser p' w \\_. valid_ipc_buffer_ptr' p\" + unfolding valid_ipc_buffer_ptr'_def2 + by (wp hoare_vcg_all_lift storeWordUser_typ_at') + +lemma thread_set_as_user_registers: + "thread_set (\tcb. tcb \ tcb_arch := arch_tcb_set_registers (f (arch_tcb_get_registers (tcb_arch tcb))) + (tcb_arch tcb) \) t + = as_user t (modify (modify_registers f))" +proof - + have P: "\f. det (modify f)" + by (simp add: modify_def) + thus ?thesis + apply (simp add: as_user_def P thread_set_def) + apply (clarsimp simp: select_f_def simpler_modify_def bind_def image_def modify_registers_def + arch_tcb_set_registers_def arch_tcb_get_registers_def + arch_tcb_context_set_def arch_tcb_context_get_def) + done +qed + +lemma UserContext_fold: + "UserContext (fpu_state s) (foldl (\s (x, y). s(x := y)) (user_regs s) xs) = + foldl (\s (r, v). UserContext (fpu_state s) ((user_regs s)(r := v))) s xs" + apply (induct xs arbitrary: s; simp) + apply (clarsimp split: prod.splits) + apply (metis user_context.sel) + done + +lemma setMRs_corres: + assumes m: "mrs' = mrs" + shows + "corres (=) (tcb_at t and pspace_aligned and pspace_distinct and case_option \ in_user_frame buf) + (case_option \ valid_ipc_buffer_ptr' buf) + (set_mrs t buf mrs) (setMRs t buf mrs')" +proof - + have setRegister_def2: + "setRegister = (\r v. modify (\s. UserContext (fpu_state s) ((user_regs s)(r := v))))" + by ((rule ext)+, simp add: setRegister_def) + + have S: "\xs ys n m. 
m - n \ length xs \ (zip xs (drop n (take m ys))) = zip xs (drop n ys)" + by (simp add: zip_take_triv2 drop_take) + + note upt.simps[simp del] upt_rec_numeral[simp del] + + show ?thesis using m + unfolding setMRs_def set_mrs_def + apply (clarsimp cong: option.case_cong split del: if_split) + apply (subst bind_assoc[symmetric]) + apply (fold thread_set_def[simplified]) + apply (subst thread_set_as_user_registers) + apply (cases buf) + apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_modify + take_min_len zip_take_triv2 min.commute) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF asUser_corres']) + apply (rule corres_modify') + apply (fastforce simp: fold_fun_upd[symmetric] msgRegisters_unfold UserContext_fold + modify_registers_def + cong: if_cong simp del: the_index.simps) + apply simp + apply (rule corres_trivial, simp) + apply ((wp |simp)+)[4] + \ \buf = Some a\ + using if_split[split del] + apply (clarsimp simp: msgRegisters_unfold setRegister_def2 zipWithM_x_modify + take_min_len zip_take_triv2 min.commute + msgMaxLength_def msgLengthBits_def) + apply (simp add: msg_max_length_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF asUser_corres']) + apply (rule corres_modify') + apply (simp only: msgRegisters_unfold cong: if_cong) + apply (fastforce simp: fold_fun_upd[symmetric] modify_registers_def UserContext_fold) + apply simp + apply (rule corres_split_nor) + apply (rule_tac S="{((x, y), (x', y')). y = y' \ x' = (a + (of_nat x * 8)) \ x < unat max_ipc_words}" + in zipWithM_x_corres) + apply (fastforce intro: storeWordUser_corres) + apply wp+ + apply (clarsimp simp add: S msgMaxLength_def msg_max_length_def set_zip) + apply (simp add: wordSize_def wordBits_def word_size max_ipc_words + upt_Suc_append[symmetric] upto_enum_word) + apply simp + apply (rule corres_trivial, clarsimp simp: min.commute) + apply wp+ + apply (wp | clarsimp simp: valid_ipc_buffer_ptr'_def)+ + done +qed + +lemma copyMRs_corres: + "corres (=) (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct + and case_option \ in_user_frame sb + and case_option \ in_user_frame rb + and K (unat n \ msg_max_length)) + (case_option \ valid_ipc_buffer_ptr' sb + and case_option \ valid_ipc_buffer_ptr' rb) + (copy_mrs s sb r rb n) (copyMRs s sb r rb n)" +proof - + have U: "unat n \ msg_max_length \ + map (toEnum :: nat \ machine_word) [7 ..< Suc (unat n)] = map of_nat [7 ..< Suc (unat n)]" + unfolding msg_max_length_def by auto + note R'=msgRegisters_unfold[THEN meta_eq_to_obj_eq, THEN arg_cong[where f=length]] + note R=R'[simplified] + + have as_user_bit: + "\v :: machine_word. + corres dc (tcb_at s and tcb_at r and pspace_aligned and pspace_distinct) + \ + (mapM + (\ra. do v \ as_user s (getRegister ra); + as_user r (setRegister ra v) + od) + (take (unat n) msg_registers)) + (mapM + (\ra. 
do v \ asUser s (getRegister ra); + asUser r (setRegister ra v) + od) + (take (unat n) msgRegisters))" + apply (rule corres_guard_imp) + apply (rule_tac S=Id in corres_mapM, simp+) + apply (rule corres_split_eqr[OF asUser_getRegister_corres asUser_setRegister_corres]) + apply (wp | clarsimp simp: msg_registers_def msgRegisters_def)+ + done + + have wordSize[simp]: "of_nat wordSize = 8" + by (simp add: wordSize_def wordBits_def word_size) + + show ?thesis + apply (rule corres_assume_pre) + apply (simp add: copy_mrs_def copyMRs_def word_size + cong: option.case_cong + split del: if_split del: upt.simps) + apply (cases sb) + apply (simp add: R) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF as_user_bit]) + apply (rule corres_trivial, simp) + apply wp+ + apply simp + apply simp + apply (cases rb) + apply (simp add: R) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF as_user_bit]) + apply (rule corres_trivial, simp) + apply wp+ + apply simp + apply simp + apply (simp add: R del: upt.simps) + apply (rule corres_guard_imp) + apply (rename_tac sb_ptr rb_ptr) + apply (rule corres_split_nor[OF as_user_bit]) + apply (rule corres_split_eqr) + apply (rule_tac S="{(x, y). y = of_nat x \ x < unat max_ipc_words}" + in corres_mapM, simp+) + apply (rule corres_split_eqr) + apply (rule loadWordUser_corres) + apply simp + apply (rule storeWordUser_corres) + apply simp + apply (wp hoare_vcg_all_lift | simp)+ + apply (clarsimp simp: upto_enum_def) + apply arith + apply (subst set_zip) + apply (simp add: upto_enum_def U del: upt.simps) + apply (clarsimp simp del: upt.simps) + apply (clarsimp simp: msg_max_length_def word_le_nat_alt nth_append + max_ipc_words) + apply (erule order_less_trans) + apply simp + apply (rule corres_trivial, simp) + apply (wp hoare_vcg_all_lift mapM_wp' + | simp add: valid_ipc_buffer_ptr'_def)+ + done +qed + +lemma cte_at_tcb_at_32': + "tcb_at' t s \ cte_at' (t + 32) s" + apply (simp add: cte_at'_obj_at') + apply (rule disjI2, rule bexI[where x=32]) + apply simp + apply fastforce + done + +lemma get_tcb_cap_corres: + "tcb_cap_cases ref = Some (getF, v) \ + corres cap_relation (tcb_at t and valid_objs) (tcb_at' t and pspace_aligned' and pspace_distinct') + (liftM getF (gets_the (get_tcb t))) + (getSlotCap (cte_map (t, ref)))" + apply (simp add: getSlotCap_def liftM_def[symmetric]) + apply (rule corres_no_failI) + apply (rule no_fail_pre, wp) + apply (cases v, simp) + apply (frule tcb_cases_related) + apply (clarsimp simp: cte_at'_obj_at') + apply (drule spec[where x=t]) + apply (drule bspec, erule domI) + apply simp + apply clarsimp + apply (clarsimp simp: gets_the_def simpler_gets_def + bind_def assert_opt_def tcb_at_def + return_def + dest!: get_tcb_SomeD) + apply (drule use_valid [OF _ getCTE_sp[where P="(=) s'" for s'], OF _ refl]) + apply (clarsimp simp: get_tcb_def return_def) + apply (drule pspace_relation_ctes_ofI[OF state_relation_pspace_relation]) + apply (rule cte_wp_at_tcbI[where t="(t, ref)"], fastforce+)[1] + apply assumption+ + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemmas get_vtable_cap_corres = + get_tcb_cap_corres[where ref="tcb_cnode_index 1", simplified, OF conjI [OF refl refl]] + +lemma pspace_dom_dom: + "dom ps \ pspace_dom ps" + unfolding pspace_dom_def + apply clarsimp + apply (rule rev_bexI [OF domI], assumption) + apply (simp add: obj_relation_cuts_def2 image_Collect cte_map_def range_composition [symmetric] + split: Structures_A.kernel_object.splits arch_kernel_obj.splits + cong: arch_kernel_obj.case_cong) + apply 
safe + (* CNode *) + apply (force dest: wf_cs_0 simp: of_bl_def) + (* PageTable *) + apply (fastforce simp add: image_Collect image_image intro: image_eqI[where x=0]) + (* DataPage *) + apply (rule exI[where x=0]) + apply (simp add: pageBitsForSize_def bit_simps split: vmpage_size.split) + done + +lemma no_0_obj_kheap: + assumes no0: "no_0_obj' s'" + and psr: "pspace_relation (kheap s) (ksPSpace s')" + shows "kheap s 0 = None" +proof (rule ccontr) + assume "kheap s 0 \ None" + hence "0 \ dom (kheap s)" .. + hence "0 \ pspace_dom (kheap s)" by (rule set_mp [OF pspace_dom_dom]) + moreover + from no0 have "0 \ dom (ksPSpace s')" + unfolding no_0_obj'_def by clarsimp + ultimately show False using psr + by (clarsimp simp: pspace_relation_def) +qed + +lemmas valid_ipc_buffer_cap_simps = valid_ipc_buffer_cap_def [split_simps cap.split arch_cap.split] + +lemma lookupIPCBuffer_corres': + "corres (=) (tcb_at t and valid_objs and pspace_aligned and pspace_distinct) + (no_0_obj') + (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" + apply (rule corres_cross_over_guard[where P'=Q and + Q="pspace_aligned' and pspace_distinct' and Q" for Q]) + apply (fastforce simp: pspace_aligned_cross pspace_distinct_cross state_relation_def) + apply (simp add: lookup_ipc_buffer_def AARCH64_H.lookupIPCBuffer_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF threadGet_corres]) + apply (simp add: tcb_relation_def) + apply (simp add: getThreadBufferSlot_def locateSlot_conv) + apply (rule corres_split[OF getSlotCap_corres]) + apply (simp add: cte_map_def tcb_cnode_index_def cte_level_bits_def tcbIPCBufferSlot_def) + apply (rule_tac F="valid_ipc_buffer_cap rv buffer_ptr" + in corres_gen_asm) + apply (rule_tac P="valid_cap rv" and Q="no_0_obj'" + in corres_assume_pre) + apply (simp add: Let_def split: cap.split arch_cap.split + split del: if_split cong: if_cong) + apply (safe, simp_all add: isCap_simps valid_ipc_buffer_cap_simps split:bool.split_asm)[1] + apply (rename_tac word rights vmpage_size d option) + apply (subgoal_tac "word + (buffer_ptr && + mask (pageBitsForSize vmpage_size)) \ 0") + apply (simp add: cap_aligned_def + valid_ipc_buffer_cap_def + vmrights_map_def vm_read_only_def vm_read_write_def) + apply auto[1] + apply (subgoal_tac "word \ 0") + apply (subgoal_tac "word \ word + (buffer_ptr && + mask (pageBitsForSize vmpage_size))") + apply fastforce + apply (rule_tac b="2 ^ (pageBitsForSize vmpage_size) - 1" + in word_plus_mono_right2) + apply (clarsimp simp: valid_cap_def cap_aligned_def + intro!: is_aligned_no_overflow') + apply (clarsimp simp: word_bits_def bit_simps + intro!: word_less_sub_1 and_mask_less') + apply (case_tac vmpage_size, simp_all add: bit_simps)[1] + apply (drule state_relation_pspace_relation) + apply (clarsimp simp: valid_cap_def obj_at_def no_0_obj_kheap + obj_relation_cuts_def3 no_0_obj'_def + split: if_split_asm) + apply (wp get_cap_valid_ipc get_cap_aligned)+ + apply (wp thread_get_obj_at_eq)+ + apply (clarsimp elim!: tcb_at_cte_at) + apply clarsimp + done + +lemma lookupIPCBuffer_corres: + "corres (=) (tcb_at t and invs) (no_0_obj') (lookup_ipc_buffer w t) (lookupIPCBuffer w t)" + using lookupIPCBuffer_corres' + by (rule corres_guard_imp, auto simp: invs'_def valid_state'_def) + + +crunch inv[wp]: lookupIPCBuffer P + (wp: crunch_wps simp: crunch_simps) + +crunch pred_tcb_at'[wp]: rescheduleRequired "pred_tcb_at' proj P t" + +lemma setThreadState_st_tcb': + "\\\ setThreadState st t \\rv. st_tcb_at' (\s. 
s = st) t\" + apply (simp add: setThreadState_def) + apply (wp threadSet_pred_tcb_at_state | simp add: if_apply_def2)+ + done + +lemma setThreadState_st_tcb: + "\\s. P st\ setThreadState st t \\rv. st_tcb_at' P t\" + apply (cases "P st") + apply simp + apply (rule hoare_post_imp [OF _ setThreadState_st_tcb']) + apply (erule pred_tcb'_weakenE, simp) + apply simp + done + +lemma setBoundNotification_bound_tcb': + "\\\ setBoundNotification ntfn t \\rv. bound_tcb_at' (\s. s = ntfn) t\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_pred_tcb_at_state | simp add: if_apply_def2)+ + done + +lemma setBoundNotification_bound_tcb: + "\\s. P ntfn\ setBoundNotification ntfn t \\rv. bound_tcb_at' P t\" + apply (cases "P ntfn") + apply simp + apply (rule hoare_post_imp [OF _ setBoundNotification_bound_tcb']) + apply (erule pred_tcb'_weakenE, simp) + apply simp + done + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for ct'[wp]: "\s. P (ksCurThread s)" + +lemma ct_in_state'_decomp: + assumes x: "\\s. t = (ksCurThread s)\ f \\rv s. t = (ksCurThread s)\" + assumes y: "\Pre\ f \\rv. st_tcb_at' Prop t\" + shows "\\s. Pre s \ t = (ksCurThread s)\ f \\rv. ct_in_state' Prop\" + apply (rule hoare_post_imp [where Q="\rv s. t = ksCurThread s \ st_tcb_at' Prop t s"]) + apply (clarsimp simp add: ct_in_state'_def) + apply (rule hoare_vcg_precond_imp) + apply (wp x y) + apply simp + done + +lemma ct_in_state'_set: + "\\s. tcb_at' t s \ P st \ t = ksCurThread s\ setThreadState st t \\rv. ct_in_state' P\" + apply (rule hoare_vcg_precond_imp) + apply (rule ct_in_state'_decomp[where t=t]) + apply (wp setThreadState_ct') + apply (wp setThreadState_st_tcb) + apply clarsimp + done + +crunches setQueue, rescheduleRequired, tcbSchedDequeue + for idle'[wp]: "valid_idle'" + (simp: crunch_simps) + +lemma sts_valid_idle'[wp]: + "\valid_idle' and valid_pspace' and + (\s. t = ksIdleThread s \ idle' ts)\ + setThreadState ts t + \\rv. valid_idle'\" + apply (simp add: setThreadState_def) + apply (wpsimp wp: threadSet_idle' simp: idle_tcb'_def) + done + +lemma sbn_valid_idle'[wp]: + "\valid_idle' and valid_pspace' and + (\s. t = ksIdleThread s \ \bound ntfn)\ + setBoundNotification ntfn t + \\rv. valid_idle'\" + apply (simp add: setBoundNotification_def) + apply (wpsimp wp: threadSet_idle' simp: idle_tcb'_def) + done + +lemma gts_sp': + "\P\ getThreadState t \\rv. st_tcb_at' (\st. st = rv) t and P\" + apply (simp add: getThreadState_def threadGet_def) + apply wp + apply (simp add: o_def pred_tcb_at'_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma gbn_sp': + "\P\ getBoundNotification t \\rv. bound_tcb_at' (\st. st = rv) t and P\" + apply (simp add: getBoundNotification_def threadGet_def) + apply wp + apply (simp add: o_def pred_tcb_at'_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma tcbSchedDequeue_tcbState_obj_at'[wp]: + "\obj_at' (P \ tcbState) t'\ tcbSchedDequeue t \\rv. obj_at' (P \ tcbState) t'\" + apply (simp add: tcbSchedDequeue_def) + apply (wp | simp add: o_def split del: if_split cong: if_cong)+ + done + +crunch typ_at'[wp]: setQueue "\s. P' (typ_at' P t s)" + +lemma setQueue_pred_tcb_at[wp]: + "\\s. P' (pred_tcb_at' proj P t s)\ setQueue d p q \\rv s. P' (pred_tcb_at' proj P t s)\" + unfolding pred_tcb_at'_def + apply (rule_tac P=P' in P_bool_lift) + apply (rule setQueue_obj_at) + apply (rule_tac Q="\_ s. 
\typ_at' TCBT t s \ obj_at' (Not \ (P \ proj \ tcb_to_itcb')) t s" + in hoare_post_imp, simp add: not_obj_at' o_def) + apply (wp hoare_vcg_disj_lift) + apply (clarsimp simp: not_obj_at' o_def) + done + +lemma tcbSchedDequeue_pred_tcb_at'[wp]: + "\\s. P' (pred_tcb_at' proj P t' s)\ tcbSchedDequeue t \\_ s. P' (pred_tcb_at' proj P t' s)\" + apply (rule_tac P=P' in P_bool_lift) + apply (simp add: tcbSchedDequeue_def) + apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ + apply (simp add: tcbSchedDequeue_def) + apply (wp threadSet_pred_tcb_no_state | clarsimp simp: tcb_to_itcb'_def)+ + done + +lemma sts_st_tcb': + "\if t = t' then K (P st) else st_tcb_at' P t\ + setThreadState st t' + \\_. st_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setThreadState_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +lemma sts_bound_tcb_at': + "\bound_tcb_at' P t\ + setThreadState st t' + \\_. bound_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setThreadState_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +lemma sbn_st_tcb': + "\st_tcb_at' P t\ + setBoundNotification ntfn t' + \\_. st_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setBoundNotification_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +lemma sbn_bound_tcb_at': + "\if t = t' then K (P ntfn) else bound_tcb_at' P t\ + setBoundNotification ntfn t' + \\_. bound_tcb_at' P t\" + apply (cases "t = t'", + simp_all add: setBoundNotification_def + split del: if_split) + apply ((wp threadSet_pred_tcb_at_state | simp)+)[1] + apply (wp threadSet_obj_at'_really_strongest + | simp add: pred_tcb_at'_def)+ + done + +crunches rescheduleRequired, tcbSchedDequeue, setThreadState, setBoundNotification + for typ_at'[wp]: "\s. P (typ_at' T p s)" + +lemmas setThreadState_typ_ats[wp] = typ_at_lifts [OF setThreadState_typ_at'] +lemmas setBoundNotification_typ_ats[wp] = typ_at_lifts [OF setBoundNotification_typ_at'] + +crunches setThreadState, setBoundNotification + for aligned'[wp]: pspace_aligned' + and distinct'[wp]: pspace_distinct' + and cte_wp_at'[wp]: "cte_wp_at' P p" + +crunch refs_of'[wp]: rescheduleRequired "\s. P (state_refs_of' s)" + (wp: threadSet_state_refs_of') + +lemma setThreadState_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (t := tcb_st_refs_of' st + \ {r \ state_refs_of' s t. snd r = TCBBound}))\ + setThreadState st t + \\rv s. P (state_refs_of' s)\" + by (simp add: setThreadState_def fun_upd_def + | wp threadSet_state_refs_of')+ + +crunch hyp_refs_of'[wp]: rescheduleRequired "\s. P (state_hyp_refs_of' s)" + (simp: unless_def crunch_simps wp: threadSet_state_hyp_refs_of' ignore: threadSet) + +lemma setThreadState_state_hyp_refs_of'[wp]: + "\\s. P ((state_hyp_refs_of' s))\ + setThreadState st t + \\rv s. P (state_hyp_refs_of' s)\" + apply (simp add: setThreadState_def fun_upd_def + | wp threadSet_state_hyp_refs_of')+ + done + +lemma setBoundNotification_state_refs_of'[wp]: + "\\s. P ((state_refs_of' s) (t := tcb_bound_refs' ntfn + \ {r \ state_refs_of' s t. snd r \ TCBBound}))\ + setBoundNotification ntfn t + \\rv s. 
P (state_refs_of' s)\" + by (simp add: setBoundNotification_def Un_commute fun_upd_def + | wp threadSet_state_refs_of' )+ + +lemma setBoundNotification_state_hyp_refs_of'[wp]: + "\\s. P (state_hyp_refs_of' s)\ + setBoundNotification ntfn t + \\rv s. P (state_hyp_refs_of' s)\" + by (simp add: setBoundNotification_def fun_upd_def + | wp threadSet_state_hyp_refs_of')+ + +lemma sts_cur_tcb'[wp]: + "\cur_tcb'\ setThreadState st t \\rv. cur_tcb'\" + by (wp cur_tcb_lift) + +lemma sbn_cur_tcb'[wp]: + "\cur_tcb'\ setBoundNotification ntfn t \\rv. cur_tcb'\" + by (wp cur_tcb_lift) + +crunch iflive'[wp]: setQueue if_live_then_nonz_cap' +crunch nonz_cap[wp]: setQueue "ex_nonz_cap_to' t" +crunch iflive'[wp]: addToBitmap if_live_then_nonz_cap' +crunch nonz_cap[wp]: addToBitmap "ex_nonz_cap_to' t" +crunch iflive'[wp]: removeFromBitmap if_live_then_nonz_cap' +crunch nonz_cap[wp]: removeFromBitmap "ex_nonz_cap_to' t" + +lemma tcbSchedEnqueue_iflive'[wp]: + "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ + tcbSchedEnqueue tcb \\_. if_live_then_nonz_cap'\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ + done + +lemma rescheduleRequired_iflive'[wp]: + "\if_live_then_nonz_cap' + and (\s. \t. ksSchedulerAction s = SwitchToThread t + \ st_tcb_at' runnable' t s)\ + rescheduleRequired + \\rv. if_live_then_nonz_cap'\" + apply (simp add: rescheduleRequired_def) + apply (wp | wpc | simp)+ + apply (clarsimp simp: pred_tcb_at'_def obj_at'_real_def) + apply (erule(1) if_live_then_nonz_capD') + apply (fastforce simp: live'_def) + done + +lemma sts_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ (st \ Inactive \ \ idle' st \ ex_nonz_cap_to' t s)\ + setThreadState st t + \\rv. if_live_then_nonz_cap'\" + apply (simp add: setThreadState_def setQueue_def) + apply (rule hoare_pre) + apply (wp | simp)+ + apply (rule_tac Q="\rv. if_live_then_nonz_cap'" in hoare_post_imp) + apply clarsimp + apply (wp threadSet_iflive' | simp)+ + apply auto + done + +lemma sbn_iflive'[wp]: + "\\s. if_live_then_nonz_cap' s + \ (bound ntfn \ ex_nonz_cap_to' t s)\ + setBoundNotification ntfn t + \\rv. if_live_then_nonz_cap'\" + apply (simp add: setBoundNotification_def) + apply (rule hoare_pre) + apply (wp threadSet_iflive' | simp)+ + apply auto + done + +crunches setThreadState, setBoundNotification + for ifunsafe'[wp]: "if_unsafe_then_cap'" + +lemma st_tcb_ex_cap'': + "\ st_tcb_at' P t s; if_live_then_nonz_cap' s; + \st. P st \ st \ Inactive \ \ idle' st \ \ ex_nonz_cap_to' t s" + by (clarsimp simp: pred_tcb_at'_def obj_at'_real_def live'_def + elim!: ko_wp_at'_weakenE + if_live_then_nonz_capE') + +lemma bound_tcb_ex_cap'': + "\ bound_tcb_at' P t s; if_live_then_nonz_cap' s; + \ntfn. P ntfn \ bound ntfn \ \ ex_nonz_cap_to' t s" + by (clarsimp simp: pred_tcb_at'_def obj_at'_real_def live'_def + elim!: ko_wp_at'_weakenE + if_live_then_nonz_capE') + +crunches setThreadState, setBoundNotification + for arch' [wp]: "\s. P (ksArchState s)" + (simp: unless_def crunch_simps) + +crunches setThreadState, setBoundNotification + for it' [wp]: "\s. P (ksIdleThread s)" + (wp: getObject_inv_tcb + simp: updateObject_default_def unless_def crunch_simps) + +crunch it' [wp]: removeFromBitmap "\s. P (ksIdleThread s)" + +lemma sts_ctes_of [wp]: + "\\s. P (ctes_of s)\ setThreadState st t \\rv s. P (ctes_of s)\" + apply (simp add: setThreadState_def) + apply (wp threadSet_ctes_ofT | simp add: tcb_cte_cases_def cteSizeBits_def)+ + done + +lemma sbn_ctes_of [wp]: + "\\s. 
P (ctes_of s)\ setBoundNotification ntfn t \\rv s. P (ctes_of s)\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_ctes_ofT | simp add: tcb_cte_cases_def cteSizeBits_def)+ + done + +crunches setThreadState, setBoundNotification + for ksInterruptState[wp]: "\s. P (ksInterruptState s)" + (simp: unless_def crunch_simps) + +crunches setThreadState, setBoundNotification + for gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + (simp: unless_def crunch_simps wp: setObject_ksPSpace_only updateObject_default_inv) + +lemmas setThreadState_irq_handlers[wp] + = valid_irq_handlers_lift'' [OF sts_ctes_of setThreadState_ksInterruptState] + +lemmas setBoundNotification_irq_handlers[wp] + = valid_irq_handlers_lift'' [OF sbn_ctes_of setBoundNotification_ksInterruptState] + +lemma sts_global_reds' [wp]: + "\valid_global_refs'\ setThreadState st t \\_. valid_global_refs'\" + by (rule valid_global_refs_lift'; wp) + +lemma sbn_global_reds' [wp]: + "\valid_global_refs'\ setBoundNotification ntfn t \\_. valid_global_refs'\" + by (rule valid_global_refs_lift'; wp) + +crunches setThreadState, setBoundNotification + for irq_states' [wp]: valid_irq_states' + (simp: unless_def crunch_simps) + +lemma addToBitmap_ksMachine[wp]: + "\\s. P (ksMachineState s)\ addToBitmap d p \\rv s. P (ksMachineState s)\" + unfolding bitmap_fun_defs + by (wp, simp) + +lemma removeFromBitmap_ksMachine[wp]: + "\\s. P (ksMachineState s)\ removeFromBitmap d p \\rv s. P (ksMachineState s)\" + unfolding bitmap_fun_defs + by (wp|simp add: bitmap_fun_defs)+ + +lemma tcbSchedEnqueue_ksMachine[wp]: + "\\s. P (ksMachineState s)\ tcbSchedEnqueue x \\_ s. P (ksMachineState s)\" + by (simp add: tcbSchedEnqueue_def unless_def setQueue_def | wp)+ + +crunches setThreadState, setBoundNotification + for ksMachine[wp]: "\s. P (ksMachineState s)" + and pspace_domain_valid[wp]: "pspace_domain_valid" + +lemma setThreadState_vms'[wp]: + "\valid_machine_state'\ setThreadState F t \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) + done + +lemma ct_not_inQ_addToBitmap[wp]: + "\ ct_not_inQ \ addToBitmap d p \\_. ct_not_inQ \" + unfolding bitmap_fun_defs + by (wp, clarsimp simp: ct_not_inQ_def) + +lemma ct_not_inQ_removeFromBitmap[wp]: + "\ ct_not_inQ \ removeFromBitmap d p \\_. ct_not_inQ \" + unfolding bitmap_fun_defs + by (wp|simp add: bitmap_fun_defs ct_not_inQ_def comp_def)+ + +lemma setBoundNotification_vms'[wp]: + "\valid_machine_state'\ setBoundNotification ntfn t \\rv. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (intro hoare_vcg_all_lift hoare_vcg_disj_lift; wp) + done + +lemma tcbSchedEnqueue_ct_not_inQ: + "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + proof - + have ts: "\?PRE\ threadSet (tcbQueued_update (\_. True)) t \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (rule_tac Q="\s. ksSchedulerAction s = ResumeCurrentThread + \ obj_at' (Not \ tcbQueued) (ksCurThread s) s \ ksCurThread s \ t" + in hoare_pre_imp, clarsimp) + apply (rule hoare_convert_imp [OF threadSet_nosch]) + apply (rule hoare_weaken_pre) + apply (wps setObject_ct_inv) + apply (rule threadSet_obj_at'_strongish) + apply (clarsimp simp: comp_def) + done + have sq: "\d p q. \ct_not_inQ\ setQueue d p q \\_. 
ct_not_inQ\" + apply (simp add: ct_not_inQ_def setQueue_def) + apply (wp) + apply (clarsimp) + done + show ?thesis + apply (simp add: tcbSchedEnqueue_def unless_def null_def) + apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) + apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) + apply wp + apply assumption + done + qed + +lemma tcbSchedAppend_ct_not_inQ: + "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + proof - + have ts: "\?PRE\ threadSet (tcbQueued_update (\_. True)) t \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (rule_tac Q="\s. ksSchedulerAction s = ResumeCurrentThread + \ obj_at' (Not \ tcbQueued) (ksCurThread s) s \ ksCurThread s \ t" + in hoare_pre_imp, clarsimp) + apply (rule hoare_convert_imp [OF threadSet_nosch]) + apply (rule hoare_weaken_pre) + apply (wps setObject_ct_inv) + apply (rule threadSet_obj_at'_strongish) + apply (clarsimp simp: comp_def) + done + have sq: "\d p q. \ct_not_inQ\ setQueue d p q \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def setQueue_def) + apply (wp) + apply (clarsimp) + done + show ?thesis + apply (simp add: tcbSchedAppend_def unless_def null_def) + apply (wp ts sq hoare_convert_imp [OF addToBitmap_nosch addToBitmap_ct'])+ + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) + apply (wp sq hoare_convert_imp [OF setQueue_nosch setQueue_ct])+ + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) + apply wp + apply assumption + done + qed + +lemma setSchedulerAction_direct: + "\\\ setSchedulerAction sa \\_ s. ksSchedulerAction s = sa\" + by (wpsimp simp: setSchedulerAction_def) + +lemma rescheduleRequired_ct_not_inQ: + "\\\ rescheduleRequired \\_. ct_not_inQ\" + apply (simp add: rescheduleRequired_def ct_not_inQ_def) + apply (rule_tac Q="\_ s. ksSchedulerAction s = ChooseNewThread" + in hoare_post_imp, clarsimp) + apply (wp setSchedulerAction_direct) + done + +crunch nosch[wp]: tcbSchedEnqueue "\s. P (ksSchedulerAction s)" + (simp: unless_def) +crunch nosch[wp]: tcbSchedAppend "\s. P (ksSchedulerAction s)" + (simp: unless_def) + +lemma rescheduleRequired_sa_cnt[wp]: + "\\s. True \ rescheduleRequired \\_ s. ksSchedulerAction s = ChooseNewThread \" + unfolding rescheduleRequired_def setSchedulerAction_def + by wpsimp + +lemma possibleSwitchTo_ct_not_inQ: + "\ct_not_inQ and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + possibleSwitchTo t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + apply (simp add: possibleSwitchTo_def curDomain_def) + apply (wpsimp wp: hoare_weak_lift_imp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ + threadGet_wp + | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ + apply (fastforce simp: obj_at'_def) + done + +lemma threadSet_tcbState_update_ct_not_inQ[wp]: + "\ct_not_inQ\ threadSet (tcbState_update f) t \\_. 
ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (rule hoare_convert_imp [OF threadSet_nosch]) + apply (simp add: threadSet_def) + apply (wp) + apply (wps setObject_ct_inv) + apply (rule setObject_tcb_strongest) + prefer 2 + apply assumption + apply (clarsimp) + apply (rule hoare_conjI) + apply (rule hoare_weaken_pre) + apply (wps, wp hoare_weak_lift_imp) + apply (wp OMG_getObject_tcb)+ + apply (clarsimp simp: comp_def) + apply (wp hoare_drop_imp) + done + +lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: + "\ct_not_inQ\ threadSet (tcbBoundNotification_update f) t \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (rule hoare_convert_imp [OF threadSet_nosch]) + apply (simp add: threadSet_def) + apply (wp) + apply (wps setObject_ct_inv) + apply (rule setObject_tcb_strongest) + prefer 2 + apply assumption + apply (clarsimp) + apply (rule hoare_conjI) + apply (rule hoare_weaken_pre) + apply wps + apply (wp hoare_weak_lift_imp) + apply (wp OMG_getObject_tcb) + apply (clarsimp simp: comp_def) + apply (wp hoare_drop_imp) + done + +lemma setThreadState_ct_not_inQ: + "\ct_not_inQ\ setThreadState st t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + including no_pre + apply (simp add: setThreadState_def) + apply (wp rescheduleRequired_ct_not_inQ) + apply (rule_tac Q="\_. ?PRE" in hoare_post_imp, clarsimp) + apply (wp) + done + +lemma setBoundNotification_ct_not_inQ: + "\ct_not_inQ\ setBoundNotification ntfn t \\_. ct_not_inQ\" + (is "\?PRE\ _ \_\") + by (simp add: setBoundNotification_def, wp) + +crunch ct_not_inQ[wp]: setQueue "ct_not_inQ" + +lemma tcbSchedDequeue_ct_not_inQ[wp]: + "\ct_not_inQ\ tcbSchedDequeue t \\_. ct_not_inQ\" + proof - + have TSNIQ: "\F t. + \ct_not_inQ and (\_. \tcb. \tcbQueued (F tcb))\ + threadSet F t \\_. ct_not_inQ\" + apply (simp add: ct_not_inQ_def) + apply (wp hoare_convert_imp [OF threadSet_nosch]) + apply (simp add: threadSet_def) + apply (wp) + apply (wps setObject_ct_inv) + apply (wp setObject_tcb_strongest getObject_tcb_wp)+ + apply (case_tac "t = ksCurThread s") + apply (clarsimp simp: obj_at'_def)+ + done + show ?thesis + apply (simp add: tcbSchedDequeue_def) + apply (wp TSNIQ | simp cong: if_cong)+ + done + qed + +lemma tcbSchedEnqueue_not_st: + "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) + \ \obj_at' P t'\ tcbSchedEnqueue t \\_. obj_at' P t'\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wp threadGet_wp | simp)+ + apply (clarsimp simp: obj_at'_def) + apply (case_tac obja) + apply fastforce + done + +lemma setThreadState_not_st: + "(\tcb st qd. P (tcb\tcbState := st, tcbQueued := qd\) \ P tcb) + \ \obj_at' P t'\ setThreadState st t \\_. obj_at' P t'\" + apply (simp add: setThreadState_def rescheduleRequired_def) + apply (wp hoare_vcg_conj_lift tcbSchedEnqueue_not_st + | wpc + | rule hoare_drop_imps + | simp)+ + apply (clarsimp simp: obj_at'_def) + apply (case_tac obj) + apply fastforce + done + +crunch ct_idle_or_in_cur_domain'[wp]: setQueue ct_idle_or_in_cur_domain' + (simp: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +crunch ksDomSchedule[wp]: setQueue "\s. P (ksDomSchedule s)" + +crunch ksCurDomain[wp]: addToBitmap "\s. P (ksCurDomain s)" + (wp: crunch_wps ) +crunch ksDomSchedule[wp]: addToBitmap "\s. P (ksDomSchedule s)" + (wp: crunch_wps ) +crunch ksCurDomain[wp]: removeFromBitmap "\s. P (ksCurDomain s)" + (wp: crunch_wps ) +crunch ksDomSchedule[wp]: removeFromBitmap "\s. 
P (ksDomSchedule s)" + (wp: crunch_wps ) + +lemma addToBitmap_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ addToBitmap d p \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift| rule obj_at_setObject2 + | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ + done + +lemma removeFromBitmap_ct_idle_or_in_cur_domain'[wp]: + "\ ct_idle_or_in_cur_domain' \ removeFromBitmap d p \ \_. ct_idle_or_in_cur_domain' \" + apply (rule ct_idle_or_in_cur_domain'_lift) + apply (wp hoare_vcg_disj_lift| rule obj_at_setObject2 + | clarsimp simp: updateObject_default_def in_monad setNotification_def)+ + done + +lemma tcbSchedEnqueue_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s)\ tcbSchedEnqueue tptr \\_ s. P (ksCurDomain s)\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply wpsimp + done + +lemma tcbSchedEnqueue_ksDomSchedule[wp]: + "\ \s. P (ksDomSchedule s)\ tcbSchedEnqueue tptr \\_ s. P (ksDomSchedule s)\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply wpsimp + done + +lemma tcbSchedEnqueue_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ tcbSchedEnqueue tptr \\_. ct_idle_or_in_cur_domain'\" + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wp threadSet_ct_idle_or_in_cur_domain' | simp)+ + done + +lemma setSchedulerAction_spec: + "\\\setSchedulerAction ChooseNewThread + \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add:setSchedulerAction_def) + apply wp + apply (simp add:ct_idle_or_in_cur_domain'_def) + done + +lemma rescheduleRequired_ct_idle_or_in_cur_domain'[wp]: + "\\\ rescheduleRequired \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add: rescheduleRequired_def) + apply (wp setSchedulerAction_spec) + done + +lemma rescheduleRequired_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s) \ rescheduleRequired \\_ s. P (ksCurDomain s) \" + apply (simp add: rescheduleRequired_def) + apply wpsimp + done + +lemma rescheduleRequired_ksDomSchedule[wp]: + "\ \s. P (ksDomSchedule s) \ rescheduleRequired \\_ s. P (ksDomSchedule s) \" + by (simp add: rescheduleRequired_def) wpsimp + +lemma setThreadState_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ setThreadState st tptr \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add: setThreadState_def) + apply (wp threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps | simp)+ + done + +lemma setThreadState_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s) \ setThreadState st tptr \\_ s. P (ksCurDomain s) \" + apply (simp add: setThreadState_def) + apply wpsimp + done + +lemma setThreadState_ksDomSchedule[wp]: + "\ \s. P (ksDomSchedule s) \ setThreadState st tptr \\_ s. P (ksDomSchedule s) \" + apply (simp add: setThreadState_def) + apply wpsimp + done + +lemma setBoundNotification_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ setBoundNotification t a \\rv. ct_idle_or_in_cur_domain'\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_ct_idle_or_in_cur_domain' hoare_drop_imps | simp)+ + done + +lemma setBoundNotification_ksCurDomain[wp]: + "\ \s. P (ksCurDomain s) \ setBoundNotification st tptr \\_ s. P (ksCurDomain s) \" + apply (simp add: setBoundNotification_def) + apply wpsimp + done + +lemma setBoundNotification_ksDomSchedule[wp]: + "\ \s. P (ksDomSchedule s) \ setBoundNotification st tptr \\_ s. P (ksDomSchedule s) \" + apply (simp add: setBoundNotification_def) + apply wpsimp + done + +crunches rescheduleRequired, setBoundNotification, setThreadState + for ksDomScheduleIdx[wp]: "\s. 
P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + +lemma sts_utr[wp]: + "\untyped_ranges_zero'\ setThreadState st t \\_. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (wp untyped_ranges_zero_lift) + done + +lemma sts_invs_minor': + "\st_tcb_at' (\st'. tcb_st_refs_of' st' = tcb_st_refs_of' st + \ (st \ Inactive \ \ idle' st \ + st' \ Inactive \ \ idle' st')) t + and (\s. t = ksIdleThread s \ idle' st) + and (\s. (\p. t \ set(ksReadyQueues s p)) \ runnable' st) + and (\s. runnable' st \ obj_at' tcbQueued t s \ st_tcb_at' runnable' t s) + and sch_act_simple + and invs'\ + setThreadState st t + \\rv. invs'\" + including no_pre + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp sts_valid_queues valid_irq_node_lift irqs_masked_lift + setThreadState_ct_not_inQ + | simp add: cteCaps_of_def o_def)+ + apply (clarsimp simp: sch_act_simple_def) + apply (intro conjI) + apply clarsimp + defer + apply (clarsimp dest!: st_tcb_at_state_refs_ofD' + elim!: rsubst[where P=sym_refs] + intro!: ext) + apply (clarsimp elim!: st_tcb_ex_cap'') + apply (frule tcb_in_valid_state', clarsimp+) + apply (cases st, simp_all add: valid_tcb_state'_def + split: Structures_H.thread_state.split_asm) + done + +lemma sts_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setThreadState st t \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma sts_pred_tcb_neq': + "\pred_tcb_at' proj P t and K (t \ t')\ + setThreadState st t' + \\_. pred_tcb_at' proj P t\" + apply (simp add: setThreadState_def) + apply (wp threadSet_pred_tcb_at_state | simp)+ + done + +lemma sbn_pred_tcb_neq': + "\pred_tcb_at' proj P t and K (t \ t')\ + setBoundNotification ntfn t' + \\_. pred_tcb_at' proj P t\" + apply (simp add: setBoundNotification_def) + apply (wp threadSet_pred_tcb_at_state | simp)+ + done + +lemmas isTS_defs = + isRunning_def isBlockedOnSend_def isBlockedOnReceive_def + isBlockedOnNotification_def isBlockedOnReply_def + isRestart_def isInactive_def + isIdleThreadState_def + +lemma sts_st_tcb_at'_cases: + "\\s. ((t = t') \ (P ts \ tcb_at' t' s)) \ ((t \ t') \ st_tcb_at' P t' s)\ + setThreadState ts t + \\rv. st_tcb_at' P t'\" + apply (wp sts_st_tcb') + apply fastforce + done + +lemma threadSet_ct_running': + "(\tcb. tcbState (f tcb) = tcbState tcb) \ + \ct_running'\ threadSet f t \\rv. ct_running'\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply (wp threadSet_pred_tcb_no_state; simp) + apply wp + done + +lemma setThreadState_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ setThreadState st t \\_. tcb_in_cur_domain' t'\" + apply (simp add: tcb_in_cur_domain'_def) + apply (rule hoare_pre) + apply wps + apply (wp setThreadState_not_st | simp)+ + done + +lemma asUser_global_refs': "\valid_global_refs'\ asUser t f \\rv. valid_global_refs'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_global_refs select_f_inv) + done + +lemma sch_act_sane_lift: + assumes "\P. \\s. P (ksSchedulerAction s)\ f \\rv s. P (ksSchedulerAction s)\" + assumes "\P. \\s. P (ksCurThread s)\ f \\rv s. P (ksCurThread s)\" + shows "\sch_act_sane\ f \\rv. sch_act_sane\" + apply (simp add: sch_act_sane_def) + apply (rule hoare_vcg_all_lift) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply (wp assms)+ + done + +lemma storeWord_invs'[wp]: + "\pointerInUserData p and invs'\ doMachineOp (storeWord p w) \\rv. invs'\" +proof - + have aligned_offset_ignore: + "\l. 
l<8 \ p && mask word_size_bits = 0 \ p + l && ~~ mask 12 = p && ~~ mask 12" + proof - + fix l + assume al: "p && mask word_size_bits = 0" + assume "(l::machine_word) < 8" hence less: "l<2^word_size_bits" by (simp add: word_size_bits_def) + have le: "(word_size_bits::nat) \ 12" by (simp add: word_size_bits_def) + show "?thesis l" + by (rule is_aligned_add_helper[simplified is_aligned_mask, + THEN conjunct2, THEN mask_out_first_mask_some, OF al less le]) + qed + + show ?thesis + apply (wp dmo_invs' no_irq_storeWord no_irq) + apply (clarsimp simp: storeWord_def invs'_def valid_state'_def) + apply (clarsimp simp: valid_machine_state'_def pointerInUserData_def + assert_def simpler_modify_def fail_def bind_def return_def + aligned_offset_ignore bit_simps upto0_7_def + split: if_split_asm) + done +qed + +lemma storeWord_invs_no_cicd'[wp]: + "\pointerInUserData p and invs_no_cicd'\ doMachineOp (storeWord p w) \\rv. invs_no_cicd'\" +proof - + have aligned_offset_ignore: + "\l. l<8 \ p && mask 3 = 0 \ p + l && ~~ mask 12 = p && ~~ mask 12" + proof - + fix l + assume al: "p && mask 3 = 0" + assume "(l::machine_word) < 8" hence less: "l<2^3" by simp + have le: "(3::nat) \ 12" by simp + show "?thesis l" + by (rule is_aligned_add_helper[simplified is_aligned_mask, + THEN conjunct2, THEN mask_out_first_mask_some, OF al less le]) + qed + + show ?thesis + apply (wp dmo_invs_no_cicd' no_irq_storeWord no_irq) + apply (clarsimp simp: storeWord_def invs'_def valid_state'_def) + apply (clarsimp simp: valid_machine_state'_def pointerInUserData_def + assert_def simpler_modify_def fail_def bind_def return_def + pageBits_def aligned_offset_ignore upto0_7_def + split: if_split_asm) + done +qed + +lemma storeWordUser_invs[wp]: + "\invs'\ storeWordUser p w \\rv. invs'\" + by (simp add: storeWordUser_def | wp)+ + +lemma hoare_valid_ipc_buffer_ptr_typ_at': + "(\q. \typ_at' UserDataT q\ a \\_. typ_at' UserDataT q\) + \ \valid_ipc_buffer_ptr' p\ a \\_. valid_ipc_buffer_ptr' p\" + unfolding valid_ipc_buffer_ptr'_def2 including no_pre + apply wp + apply assumption + done + +lemma gts_wp': + "\\s. \st. st_tcb_at' ((=) st) t s \ P st s\ getThreadState t \P\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule gts_sp') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + done + +lemma gbn_wp': + "\\s. \ntfn. bound_tcb_at' ((=) ntfn) t s \ P ntfn s\ getBoundNotification t \P\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule gbn_sp') + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + done + +lemmas threadSet_irq_handlers' = valid_irq_handlers_lift'' [OF threadSet_ctes_ofT] + +lemma get_cap_corres_all_rights_P: + "cte_ptr' = cte_map cte_ptr \ + corres (\x y. cap_relation x y \ P x) + (cte_wp_at P cte_ptr) (pspace_aligned' and pspace_distinct') + (get_cap cte_ptr) (getSlotCap cte_ptr')" + apply (simp add: getSlotCap_def mask_cap_def) + apply (subst bind_return [symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres_P [where P=P]]) + apply (insert cap_relation_masks, simp) + apply (wp getCTE_wp')+ + apply simp + apply fastforce + done + +lemma asUser_irq_handlers': + "\valid_irq_handlers'\ asUser t f \\rv. valid_irq_handlers'\" + apply (simp add: asUser_def split_def) + apply (wpsimp wp: threadSet_irq_handlers' [OF all_tcbI, OF ball_tcb_cte_casesI] select_f_inv) + done + +(* the brave can try to move this up to near setObject_update_TCB_corres' *) + +definition non_exst_same :: "Structures_H.tcb \ Structures_H.tcb \ bool" +where + "non_exst_same tcb tcb' \ \d p ts. 
tcb' = tcb\tcbDomain := d, tcbPriority := p, tcbTimeSlice := ts\" + +fun non_exst_same' :: "Structures_H.kernel_object \ Structures_H.kernel_object \ bool" +where + "non_exst_same' (KOTCB tcb) (KOTCB tcb') = non_exst_same tcb tcb'" | + "non_exst_same' _ _ = True" + +lemma non_exst_same_prio_upd[simp]: + "non_exst_same tcb (tcbPriority_update f tcb)" + by (cases tcb, simp add: non_exst_same_def) + +lemma non_exst_same_timeSlice_upd[simp]: + "non_exst_same tcb (tcbTimeSlice_update f tcb)" + by (cases tcb, simp add: non_exst_same_def) + +lemma non_exst_same_domain_upd[simp]: + "non_exst_same tcb (tcbDomain_update f tcb)" + by (cases tcb, simp add: non_exst_same_def) + +lemma set_eobject_corres': + assumes e: "etcb_relation etcb tcb'" + assumes z: "\s. obj_at' P ptr s + \ map_to_ctes ((ksPSpace s) (ptr \ KOTCB tcb')) = map_to_ctes (ksPSpace s)" + shows "corres dc (tcb_at ptr and is_etcb_at ptr) + (obj_at' (\ko. non_exst_same ko tcb') ptr + and obj_at' P ptr) + (set_eobject ptr etcb) (setObject ptr tcb')" + apply (rule corres_no_failI) + apply (rule no_fail_pre) + apply wp + apply (clarsimp simp: obj_at'_def) + apply (unfold set_eobject_def setObject_def) + apply (clarsimp simp: in_monad split_def bind_def gets_def get_def Bex_def + put_def return_def modify_def get_object_def + updateObject_default_def in_magnitude_check objBits_simps') + apply (clarsimp simp add: state_relation_def z) + apply (clarsimp simp add: obj_at_def is_etcb_at_def) + apply (simp only: pspace_relation_def dom_fun_upd2 simp_thms) + apply (elim conjE) + apply (frule bspec, erule domI) + apply (rule conjI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: is_other_obj_relation_type) + apply (drule(1) bspec) + apply (clarsimp simp: non_exst_same_def) + apply (case_tac bb; simp) + apply (clarsimp simp: obj_at'_def other_obj_relation_def cte_relation_def tcb_relation_def + split: if_split_asm)+ + apply (clarsimp simp: aobj_relation_cuts_def split: AARCH64_A.arch_kernel_obj.splits) + apply (rename_tac arch_kernel_obj obj d p ts) + apply (case_tac arch_kernel_obj; simp) + apply (clarsimp simp: pte_relation_def is_tcb_def + split: if_split_asm)+ + apply (simp only: ekheap_relation_def dom_fun_upd2 simp_thms) + apply (frule bspec, erule domI) + apply (rule ballI, drule(1) bspec) + apply (drule domD) + apply (clarsimp simp: obj_at'_def) + apply (insert e) + apply (clarsimp simp: other_obj_relation_def etcb_relation_def is_other_obj_relation_type + split: Structures_A.kernel_object.splits kernel_object.splits arch_kernel_obj.splits) + done + +lemma set_eobject_corres: + assumes tcbs: "non_exst_same tcb' tcbu'" + assumes e: "etcb_relation etcb tcb' \ etcb_relation etcbu tcbu'" + assumes tables': "\(getF, v) \ ran tcb_cte_cases. getF tcbu' = getF tcb'" + assumes r: "r () ()" + shows "corres r (tcb_at add and (\s. 
ekheap s add = Some etcb)) + (ko_at' tcb' add) + (set_eobject add etcbu) (setObject add tcbu')" + apply (rule_tac F="non_exst_same tcb' tcbu' \ etcb_relation etcbu tcbu'" in corres_req) + apply (clarsimp simp: state_relation_def obj_at_def obj_at'_def) + apply (frule(1) pspace_relation_absD) + apply (clarsimp simp: other_obj_relation_def ekheap_relation_def e tcbs) + apply (drule bspec, erule domI) + apply (clarsimp simp: e) + apply (erule conjE) + apply (rule corres_guard_imp) + apply (rule corres_rel_imp) + apply (rule set_eobject_corres'[where P="(=) tcb'"]) + apply simp + defer + apply (simp add: r) + apply (fastforce simp: is_etcb_at_def elim!: obj_at_weakenE) + apply (subst(asm) eq_commute) + apply (clarsimp simp: obj_at'_def) + apply (clarsimp simp: obj_at'_def objBits_simps) + apply (subst map_to_ctes_upd_tcb, assumption+) + apply (simp add: ps_clear_def3 field_simps objBits_defs mask_def) + apply (subst if_not_P) + apply (fastforce dest: bspec [OF tables', OF ranI]) + apply simp + done + +lemma ethread_set_corresT: + assumes x: "\tcb'. non_exst_same tcb' (f' tcb')" + assumes z: "\tcb. \(getF, setF) \ ran tcb_cte_cases. + getF (f' tcb) = getF tcb" + assumes e: "\etcb tcb'. etcb_relation etcb tcb' \ + etcb_relation (f etcb) (f' tcb')" + shows "corres dc (tcb_at t and valid_etcbs) + (tcb_at' t) + (ethread_set f t) (threadSet f' t)" + apply (simp add: ethread_set_def threadSet_def bind_assoc) + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_get_etcb set_eobject_corres]) + apply (rule x) + apply (erule e) + apply (simp add: z)+ + apply wp+ + apply clarsimp + apply (simp add: valid_etcbs_def tcb_at_st_tcb_at[symmetric]) + apply (force simp: tcb_at_def get_etcb_def obj_at_def) + apply simp + done + +lemmas ethread_set_corres = + ethread_set_corresT [OF _ all_tcbI, OF _ ball_tcb_cte_casesI] + +lemma archTcbUpdate_aux2: "(\tcb. tcb\ tcbArch := f (tcbArch tcb)\) = tcbArch_update f" + by (rule ext, case_tac tcb, simp) + +end +end diff --git a/proof/refine/AARCH64/Tcb_R.thy b/proof/refine/AARCH64/Tcb_R.thy new file mode 100644 index 0000000000..0c2df3888a --- /dev/null +++ b/proof/refine/AARCH64/Tcb_R.thy @@ -0,0 +1,2715 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Tcb_R +imports CNodeInv_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma asUser_setNextPC_corres: + "corres dc (tcb_at t and invs) (tcb_at' t and invs') + (as_user t (setNextPC v)) (asUser t (setNextPC v))" + apply (rule asUser_corres) + apply (rule corres_Id, simp, simp) + apply (rule no_fail_setNextPC) + done + +lemma activateIdleThread_corres: + "corres dc (invs and st_tcb_at idle t) + (invs' and st_tcb_at' idle' t) + (arch_activate_idle_thread t) (activateIdleThread t)" + by (simp add: arch_activate_idle_thread_def activateIdleThread_def) + +lemma activateThread_corres: + "corres dc (invs and ct_in_state activatable) (invs' and ct_in_state' activatable') + activate_thread activateThread" + supply subst_all [simp del] + apply (simp add: activate_thread_def activateThread_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule_tac R="\ts s. valid_tcb_state ts s \ (idle ts \ runnable ts) + \ invs s \ st_tcb_at ((=) ts) thread s" + and R'="\ts s. valid_tcb_state' ts s \ (idle' ts \ runnable' ts) + \ invs' s \ st_tcb_at' (\ts'. 
ts' = ts) thread s" + in corres_split[OF getThreadState_corres]) + apply (rule_tac F="idle rv \ runnable rv" in corres_req, simp) + apply (rule_tac F="idle' rv' \ runnable' rv'" in corres_req, simp) + apply (case_tac rv, simp_all add: + isRunning_def isRestart_def, + safe, simp_all)[1] + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) + apply (rule corres_split_nor[OF asUser_setNextPC_corres]) + apply (rule setThreadState_corres) + apply (simp | wp weak_sch_act_wf_lift_linear)+ + apply (clarsimp simp: st_tcb_at_tcb_at invs_distinct) + apply fastforce + apply (rule corres_guard_imp) + apply (rule activateIdleThread_corres) + apply (clarsimp elim!: st_tcb_weakenE) + apply (clarsimp elim!: pred_tcb'_weakenE) + apply (wp gts_st_tcb gts_st_tcb' gts_st_tcb_at)+ + apply (clarsimp simp: ct_in_state_def tcb_at_invs invs_distinct invs_psp_aligned + elim!: st_tcb_weakenE) + apply (clarsimp simp: tcb_at_invs' ct_in_state'_def + elim!: pred_tcb'_weakenE) + done + + +lemma bindNotification_corres: + "corres dc + (invs and tcb_at t and ntfn_at a) (invs' and tcb_at' t and ntfn_at' a) + (bind_notification t a) (bindNotification t a)" + apply (simp add: bind_notification_def bindNotification_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getNotification_corres]) + apply (rule corres_split[OF setNotification_corres]) + apply (clarsimp simp: ntfn_relation_def split: Structures_A.ntfn.splits) + apply (rule setBoundNotification_corres) + apply (wp)+ + apply auto + done + + +abbreviation + "ct_idle' \ ct_in_state' idle'" + +lemma gts_st_tcb': + "\tcb_at' t\ getThreadState t \\rv. st_tcb_at' (\st. st = rv) t\" + apply (rule hoare_vcg_precond_imp) + apply (rule hoare_post_imp[where Q="\rv s. \rv'. rv = rv' \ st_tcb_at' (\st. st = rv') t s"]) + apply simp + apply (wp hoare_vcg_ex_lift) + apply (clarsimp simp add: pred_tcb_at'_def obj_at'_def) + done + +lemma activateIdle_invs: + "\invs' and ct_idle'\ + activateIdleThread thread + \\rv. invs' and ct_idle'\" + by (simp add: activateIdleThread_def) + +lemma activate_invs': + "\invs' and sch_act_simple and ct_in_state' activatable'\ + activateThread + \\rv. invs' and (ct_running' or ct_idle')\" + apply (simp add: activateThread_def) + apply (rule hoare_seq_ext) + apply (rule_tac B="\state s. invs' s \ sch_act_simple s + \ st_tcb_at' (\st. st = state) thread s + \ thread = ksCurThread s + \ (runnable' state \ idle' state)" in hoare_seq_ext) + apply (case_tac x; simp add: isTS_defs split del: if_split cong: if_cong) + apply (wp) + apply (clarsimp simp: ct_in_state'_def) + apply (rule_tac Q="\rv. invs' and ct_idle'" in hoare_post_imp, simp) + apply (wp activateIdle_invs) + apply (clarsimp simp: ct_in_state'_def) + apply (rule_tac Q="\rv. invs' and ct_running' and sch_act_simple" + in hoare_post_imp, simp) + apply (rule hoare_weaken_pre) + apply (wp ct_in_state'_set asUser_ct sts_invs_minor' + | wp (once) sch_act_simple_lift)+ + apply (rule_tac Q="\_. st_tcb_at' runnable' thread + and sch_act_simple and invs' + and (\s. thread = ksCurThread s)" + in hoare_post_imp, clarsimp) + apply (wp sch_act_simple_lift)+ + apply (clarsimp simp: valid_idle'_def invs'_def valid_state'_def + pred_tcb_at'_def obj_at'_def idle_tcb'_def + elim!: pred_tcb'_weakenE) + apply (wp gts_st_tcb')+ + apply (clarsimp simp: tcb_at_invs' ct_in_state'_def + pred_disj_def) + done + +declare not_psubset_eq[dest!] + +lemma setThreadState_runnable_simp: + "runnable' ts \ setThreadState ts t = + threadSet (tcbState_update (\x. 
ts)) t" + apply (simp add: setThreadState_def isRunnable_def isStopped_def liftM_def) + apply (subst bind_return[symmetric], rule bind_cong[OF refl]) + apply (drule use_valid[OF _ threadSet_pred_tcb_at_state[where proj="itcbState" and p=t and P="(=) ts"]]) + apply simp + apply (subst bind_known_operation_eq) + apply wp+ + apply clarsimp + apply (subst eq_commute, erule conjI[OF _ refl]) + apply (rule empty_fail_getThreadState) + apply (simp add: getCurThread_def getSchedulerAction_def exec_gets) + apply (auto simp: when_def split: Structures_H.thread_state.split) + done + +lemma activate_sch_act: + "\ct_in_state' activatable' and (\s. P (ksSchedulerAction s))\ + activateThread \\rv s. P (ksSchedulerAction s)\" + apply (simp add: activateThread_def getCurThread_def + cong: if_cong Structures_H.thread_state.case_cong) + apply (rule hoare_seq_ext [OF _ gets_sp]) + apply (rule hoare_seq_ext[where B="\st s. (runnable' or idle') st + \ P (ksSchedulerAction s)"]) + apply (rule hoare_pre) + apply (wp | wpc | simp add: setThreadState_runnable_simp)+ + apply (clarsimp simp: ct_in_state'_def cur_tcb'_def pred_tcb_at' + elim!: pred_tcb'_weakenE) + done + +lemma runnable_tsr: + "thread_state_relation ts ts' \ runnable' ts' = runnable ts" + by (case_tac ts, auto) + +lemma idle_tsr: + "thread_state_relation ts ts' \ idle' ts' = idle ts" + by (case_tac ts, auto) + +crunches cancelIPC, setupReplyMaster + for cur [wp]: cur_tcb' + (wp: crunch_wps simp: crunch_simps o_def) + +lemma setCTE_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setCTE c cte + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: weak_sch_act_wf_def) + apply (wp hoare_vcg_all_lift hoare_convert_imp setCTE_pred_tcb_at' setCTE_tcb_in_cur_domain') + done + +lemma setupReplyMaster_weak_sch_act_wf[wp]: + "\\s. weak_sch_act_wf (ksSchedulerAction s) s\ + setupReplyMaster thread + \\rv s. weak_sch_act_wf (ksSchedulerAction s) s\" + apply (simp add: setupReplyMaster_def) + apply (wp) + apply (rule_tac Q="\_ s. weak_sch_act_wf (ksSchedulerAction s) s" + in hoare_post_imp, clarsimp) + apply (wp)+ + apply assumption + done + +crunches setupReplyMaster + for valid_queues[wp]: "Invariants_H.valid_queues" + and valid_queues'[wp]: "valid_queues'" + (wp: crunch_wps simp: crunch_simps) + +crunches setup_reply_master, Tcb_A.restart, arch_post_modify_registers + for pspace_aligned[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (wp: crunch_wps simp: crunch_simps) + +lemma restart_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t) + (Tcb_A.restart t) (ThreadDecls_H.restart t)" + apply (simp add: Tcb_A.restart_def Thread_H.restart_def) + apply (simp add: isStopped_def2 liftM_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (clarsimp simp add: runnable_tsr idle_tsr when_def) + apply (rule corres_split_nor[OF cancel_ipc_corres]) + apply (rule corres_split_nor[OF setupReplyMaster_corres]) + apply (rule corres_split_nor[OF setThreadState_corres], simp) + apply (rule corres_split[OF tcbSchedEnqueue_corres possibleSwitchTo_corres]) + apply (wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + sts_valid_queues sts_st_tcb' + | clarsimp simp: valid_tcb_state'_def)+ + apply (rule_tac Q="\rv. valid_sched and cur_tcb and pspace_aligned and pspace_distinct" + in hoare_strengthen_post) + apply wp + apply (simp add: valid_sched_def valid_sched_action_def) + apply (rule_tac Q="\rv. 
invs' and tcb_at' t" in hoare_strengthen_post) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak valid_pspace'_def) + apply wp+ + apply (simp add: valid_sched_def invs_def tcb_at_is_etcb_at invs_psp_aligned invs_distinct) + apply (clarsimp simp add: invs'_def valid_state'_def sch_act_wf_weak) + done + +lemma restart_invs': + "\invs' and ex_nonz_cap_to' t and (\s. t \ ksIdleThread s)\ + ThreadDecls_H.restart t \\rv. invs'\" + apply (simp add: restart_def isStopped_def2) + apply (wp setThreadState_nonqueued_state_update + cancelIPC_simple setThreadState_st_tcb + | wp (once) sch_act_simple_lift)+ + apply (wp hoare_convert_imp) + apply (wp setThreadState_nonqueued_state_update + setThreadState_st_tcb) + apply (clarsimp) + apply (wp hoare_convert_imp)[1] + apply (clarsimp) + apply (wp)+ + apply (clarsimp simp: comp_def) + apply (rule hoare_strengthen_post, rule gts_sp') + prefer 2 + apply assumption + apply (clarsimp simp: pred_tcb_at' invs'_def valid_state'_def + ct_in_state'_def) + apply (fastforce simp: pred_tcb_at'_def obj_at'_def) + done + +lemma restart_tcb'[wp]: + "\tcb_at' t'\ ThreadDecls_H.restart t \\rv. tcb_at' t'\" + apply (simp add: restart_def isStopped_def2) + apply wpsimp + done + +lemma no_fail_setRegister: "no_fail \ (setRegister r v)" + by (simp add: setRegister_def) + +lemma suspend_cap_to'[wp]: + "\ex_nonz_cap_to' p\ suspend t \\rv. ex_nonz_cap_to' p\" + apply (simp add: suspend_def) + unfolding updateRestartPC_def + apply (wp threadSet_cap_to' | simp)+ + done + +declare det_getRegister[simp] +declare det_setRegister[simp] + +lemma + no_fail_getRegister[wp]: "no_fail \ (getRegister r)" + by (simp add: getRegister_def) + +lemma invokeTCB_ReadRegisters_corres: + "corres (dc \ (=)) + (einvs and tcb_at src and ex_nonz_cap_to src) + (invs' and sch_act_simple and tcb_at' src and ex_nonz_cap_to' src) + (invoke_tcb (tcb_invocation.ReadRegisters src susp n arch)) + (invokeTCB (tcbinvocation.ReadRegisters src susp n arch'))" + apply (simp add: invokeTCB_def performTransfer_def genericTake_def + frame_registers_def gp_registers_def + frameRegisters_def gpRegisters_def) + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (rule corres_when[OF refl]) + apply (rule suspend_corres) + apply (rule corres_split[OF getCurThread_corres]) + apply (simp add: liftM_def[symmetric]) + apply (rule asUser_corres) + apply (rule corres_Id) + apply simp + apply simp + apply (rule no_fail_mapM) + apply (simp add: no_fail_getRegister) + apply wp+ + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def + dest!: idle_no_ex_cap) + apply (clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) + done + +lemma asUser_postModifyRegisters_corres: + "corres dc (tcb_at t) (tcb_at' t and tcb_at' ct) + (arch_post_modify_registers ct t) + (asUser t $ postModifyRegisters ct t)" + apply (rule corres_guard_imp) + apply (clarsimp simp: arch_post_modify_registers_def postModifyRegisters_def when_def) + apply safe + apply (subst submonad_asUser.return) + apply (rule corres_stateAssert_assume) + by simp+ + +lemma invokeTCB_WriteRegisters_corres: + "corres (dc \ (=)) (einvs and tcb_at dest and ex_nonz_cap_to dest) + (invs' and sch_act_simple and tcb_at' dest and ex_nonz_cap_to' dest) + (invoke_tcb (tcb_invocation.WriteRegisters dest resume values arch)) + (invokeTCB (tcbinvocation.WriteRegisters dest resume values arch'))" + apply (simp add: invokeTCB_def performTransfer_def arch_get_sanitise_register_info_def + sanitiseRegister_def sanitise_register_def 
getSanitiseRegisterInfo_def + frameRegisters_def gpRegisters_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (fold archThreadGet_def[simplified]) + apply (rule corres_split[OF archThreadGet_VCPU_corres]) + apply (rule corres_split_nor) + apply (rule asUser_corres) + apply (simp add: zipWithM_mapM getRestartPC_def setNextPC_def) + apply (rule corres_Id) + apply (clarsimp simp: mask_def user_vtop_def + cong: if_cong register.case_cong) + apply simp + apply (rule no_fail_pre, wp no_fail_mapM) + apply (clarsimp, (wp no_fail_setRegister | simp)+) + apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]]) + apply (rule corres_split_nor[OF corres_when[OF refl restart_corres]]) + apply (rule corres_split_nor[OF corres_when[OF refl rescheduleRequired_corres]]) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply simp + apply (wp+)[2] + apply ((wp hoare_weak_lift_imp restart_invs' + | strengthen valid_sched_weak_strg einvs_valid_etcbs invs_valid_queues' invs_queues + invs_weak_sch_act_wf + | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def + dest!: global'_no_ex_cap idle_no_ex_cap)+)[2] + apply (rule_tac Q="\_. einvs and tcb_at dest and ex_nonz_cap_to dest" in hoare_post_imp) + apply (fastforce simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def + dest!: idle_no_ex_cap) + prefer 2 + apply (rule_tac Q="\_. invs' and tcb_at' dest and ex_nonz_cap_to' dest" in hoare_post_imp) + apply (fastforce simp: sch_act_wf_weak invs'_def valid_state'_def dest!: global'_no_ex_cap) + apply (wpsimp simp: archThreadGet_def)+ + apply fastforce + apply fastforce + done + +lemma tcbSchedDequeue_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + tcbSchedDequeue t + \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + by (wp hoare_convert_imp) + +lemma updateRestartPC_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + updateRestartPC t + \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + unfolding updateRestartPC_def + apply (wp hoare_convert_imp) + done + +lemma suspend_ResumeCurrentThread_imp_notct[wp]: + "\\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\ + suspend t + \\rv s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t'\" + by (wpsimp simp: suspend_def) + +lemma invokeTCB_CopyRegisters_corres: + "corres (dc \ (=)) + (einvs and simple_sched_action and tcb_at dest and tcb_at src and ex_nonz_cap_to src and + ex_nonz_cap_to dest) + (invs' and sch_act_simple and tcb_at' dest and tcb_at' src + and ex_nonz_cap_to' src and ex_nonz_cap_to' dest) + (invoke_tcb (tcb_invocation.CopyRegisters dest src susp resume frames ints arch)) + (invokeTCB (tcbinvocation.CopyRegisters dest src susp resume frames ints arch'))" +proof - + have Q: "\src src' des des' r r'. 
\ src = src'; des = des' \ \ + corres dc (tcb_at src and tcb_at des and invs) + (tcb_at' src' and tcb_at' des' and invs') + (do v \ as_user src (getRegister r); + as_user des (setRegister r' v) + od) + (do v \ asUser src' (getRegister r); + asUser des' (setRegister r' v) + od)" + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split_eqr) + apply (rule asUser_getRegister_corres) + apply (simp add: setRegister_def) + apply (rule asUser_corres) + apply (rule corres_modify') + apply simp + apply simp + apply (simp add: invs_distinct invs_psp_aligned| wp)+ + done + have R: "\src src' des des' xs ys. \ src = src'; des = des'; xs = ys \ \ + corres dc (tcb_at src and tcb_at des and invs) + (tcb_at' src' and tcb_at' des' and invs') + (mapM_x (\r. do v \ as_user src (getRegister r); + as_user des (setRegister r v) + od) xs) + (mapM_x (\r'. do v \ asUser src' (getRegister r'); + asUser des' (setRegister r' v) + od) ys)" + apply (rule corres_mapM_x [where S=Id]) + apply simp + apply (rule Q) + apply (clarsimp simp: set_zip_same | wp)+ + done + have U: "\t. corres dc (tcb_at t and invs) (tcb_at' t and invs') + (do pc \ as_user t getRestartPC; as_user t (setNextPC pc) od) + (do pc \ asUser t getRestartPC; asUser t (setNextPC pc) od)" + apply (rule corres_guard_imp) + apply (rule corres_split_eqr[OF asUser_getRestartPC_corres]) + apply (rule asUser_setNextPC_corres) + apply wp+ + apply (simp add: invs_distinct invs_psp_aligned)+ + done + show ?thesis + apply (simp add: invokeTCB_def performTransfer_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF corres_when [OF refl suspend_corres]], simp) + apply (rule corres_split[OF corres_when [OF refl restart_corres]], simp) + apply (rule corres_split_nor) + apply (rule corres_when[OF refl]) + apply (rule corres_split_nor) + apply (rule R[OF refl refl]) + apply (simp add: frame_registers_def frameRegisters_def) + apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) + apply (rule Q[OF refl refl]) + apply (wp mapM_x_wp' | simp)+ + apply (rule corres_split_nor) + apply (rule corres_when[OF refl]) + apply (rule R[OF refl refl]) + apply (simp add: gpRegisters_def) + apply (rule corres_split_eqr[OF getCurThread_corres]) + apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]]) + apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) + apply (rule_tac P=\ and P'=\ in corres_inst) + apply simp + apply (solves \wp hoare_weak_lift_imp\)+ + apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_post_imp) + apply (clarsimp simp: invs_def valid_state_def valid_pspace_def valid_sched_weak_strg valid_sched_def) + prefer 2 + apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_post_imp) + apply (clarsimp simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) + apply ((wp mapM_x_wp' hoare_weak_lift_imp | simp+)+)[4] + apply ((wp hoare_weak_lift_imp restart_invs' | wpc | clarsimp simp: if_apply_def2)+)[2] + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp | simp add: if_apply_def2)+ + apply (fastforce simp: invs_def valid_state_def valid_pspace_def + dest!: idle_no_ex_cap) + by (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) +qed + +lemma readreg_invs': + "\invs' and sch_act_simple and tcb_at' src and ex_nonz_cap_to' src\ + invokeTCB (tcbinvocation.ReadRegisters src susp n arch) + \\rv. 
invs'\" + by (simp add: invokeTCB_def performTransfer_def | wp + | clarsimp simp: invs'_def valid_state'_def + dest!: global'_no_ex_cap)+ + +crunches getSanitiseRegisterInfo + for invs'[wp]: invs' + and ex_nonz_cap_to'[wp]: "ex_nonz_cap_to' d" + and it'[wp]: "\s. P (ksIdleThread s)" + +lemma writereg_invs': + "\invs' and sch_act_simple and tcb_at' dest and ex_nonz_cap_to' dest\ + invokeTCB (tcbinvocation.WriteRegisters dest resume values arch) + \\rv. invs'\" + by (simp add: invokeTCB_def performTransfer_def | wp restart_invs' | rule conjI + | clarsimp + | clarsimp simp: invs'_def valid_state'_def + dest!: global'_no_ex_cap)+ + +lemma copyreg_invs'': + "\invs' and sch_act_simple and tcb_at' src and tcb_at' dest and ex_nonz_cap_to' src and ex_nonz_cap_to' dest\ + invokeTCB (tcbinvocation.CopyRegisters dest src susp resume frames ints arch) + \\rv. invs' and tcb_at' dest\" + apply (simp add: invokeTCB_def performTransfer_def if_apply_def2) + apply (wpsimp wp: mapM_x_wp' restart_invs' hoare_drop_imps + split_del: if_split + simp: if_apply_def2 invs_cur' cur_tcb'_def[symmetric] + cong: rev_conj_cong) + by (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) + +lemma copyreg_invs': + "\invs' and sch_act_simple and tcb_at' src and + tcb_at' dest and ex_nonz_cap_to' src and ex_nonz_cap_to' dest\ + invokeTCB (tcbinvocation.CopyRegisters dest src susp resume frames ints arch) + \\rv. invs'\" + by (rule hoare_strengthen_post, rule copyreg_invs'', simp) + +lemma threadSet_valid_queues_no_state: + "\Invariants_H.valid_queues and (\s. \p. t \ set (ksReadyQueues s p))\ + threadSet f t \\_. Invariants_H.valid_queues\" + apply (simp add: threadSet_def) + apply wp + apply (simp add: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) + apply (wp setObject_queues_unchanged_tcb + hoare_Ball_helper + hoare_vcg_all_lift + setObject_tcb_strongest)[1] + apply (wp getObject_tcb_wp) + apply (clarsimp simp: valid_queues_def valid_queues_no_bitmap_def' pred_tcb_at'_def) + apply (clarsimp simp: obj_at'_def) + done + +lemma threadSet_valid_queues'_no_state: + "(\tcb. tcbQueued tcb = tcbQueued (f tcb)) + \ \valid_queues' and (\s. \p. t \ set (ksReadyQueues s p))\ + threadSet f t \\_. valid_queues'\" + apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def + split del: if_split) + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ + apply (wp getObject_tcb_wp updateObject_default_inv + | simp split del: if_split)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def + objBits_simps addToQs_def + split del: if_split cong: if_cong) + apply (fastforce simp: inQ_def split: if_split_asm) + done + +lemma isRunnable_corres: + "corres (\ts runn. runnable ts = runn) + (tcb_at t and pspace_aligned and pspace_distinct) \ + (get_thread_state t) (isRunnable t)" + apply (simp add: isRunnable_def) + apply (subst bind_return[symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getThreadState_corres]) + apply (case_tac rv, clarsimp+) + apply (wp hoare_TrueI)+ + apply auto + done + +lemma tcbSchedDequeue_not_queued: + "\\\ tcbSchedDequeue t + \\rv. obj_at' (Not \ tcbQueued) t\" + apply (simp add: tcbSchedDequeue_def) + apply (wp | simp)+ + apply (rule_tac Q="\rv. obj_at' (\obj. tcbQueued obj = rv) t" + in hoare_post_imp) + apply (clarsimp simp: obj_at'_def) + apply (wp tg_sp' [where P=\, simplified] | simp)+ + done + +lemma tcbSchedDequeue_not_in_queue: + "\p. 
\Invariants_H.valid_queues and tcb_at' t and valid_objs'\ tcbSchedDequeue t + \\rv s. t \ set (ksReadyQueues s p)\" + apply (rule_tac Q="\rv. Invariants_H.valid_queues and obj_at' (Not \ tcbQueued) t" + in hoare_post_imp) + apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def ) + apply (wp tcbSchedDequeue_not_queued tcbSchedDequeue_valid_queues | + simp add: valid_objs'_maxDomain valid_objs'_maxPriority)+ + done + +lemma threadSet_ct_in_state': + "(\tcb. tcbState (f tcb) = tcbState tcb) \ + \ct_in_state' test\ threadSet f t \\rv. ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]) + apply (wp threadSet_pred_tcb_no_state)+ + apply simp+ + apply wp + done + +lemma valid_tcb'_tcbPriority_update: + "\valid_tcb' tcb s; f (tcbPriority tcb) \ maxPriority \ \ + valid_tcb' (tcbPriority_update f tcb) s" + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma threadSet_valid_objs_tcbPriority_update: + "\valid_objs' and (\_. x \ maxPriority)\ + threadSet (tcbPriority_update (\_. x)) t + \\_. valid_objs'\" + including no_pre + apply (simp add: threadSet_def) + apply wp + prefer 2 + apply (rule getObject_tcb_sp) + apply (rule hoare_weaken_pre) + apply (rule setObject_tcb_valid_objs) + apply (clarsimp simp: valid_obj'_def) + apply (frule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def) + apply (subgoal_tac "tcb_at' t s") + apply simp + apply (rule valid_tcb'_tcbPriority_update) + apply (fastforce simp: obj_at'_def)+ + done + +lemma tcbSchedDequeue_ct_in_state'[wp]: + "\ct_in_state' test\ tcbSchedDequeue t \\rv. ct_in_state' test\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_lift_Pf [where f=ksCurThread]; wp) + done + +crunch cur[wp]: tcbSchedDequeue cur_tcb' + +lemma sp_corres2: + "corres dc (valid_etcbs and weak_valid_sched_action and cur_tcb and pspace_aligned and pspace_distinct) + (Invariants_H.valid_queues and valid_queues' and tcb_at' t and + (\s. weak_sch_act_wf (ksSchedulerAction s) s) and valid_objs' and (\_. 
x \ maxPriority)) + (set_priority t x) (setPriority t x)" + apply (simp add: setPriority_def set_priority_def thread_set_priority_def) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split[OF tcbSchedDequeue_corres]) + apply (rule corres_split[OF ethread_set_corres], simp_all)[1] + apply (simp add: etcb_relation_def) + apply (rule corres_split[OF isRunnable_corres]) + apply (erule corres_when) + apply(rule corres_split[OF getCurThread_corres]) + apply (wp corres_if; clarsimp) + apply (rule rescheduleRequired_corres) + apply (rule possibleSwitchTo_corres) + apply ((clarsimp + | wp hoare_weak_lift_imp hoare_vcg_if_lift hoare_wp_combs gts_wp + isRunnable_wp)+)[4] + apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift) + apply clarsimp + apply ((wp hoare_drop_imps hoare_vcg_if_lift hoare_vcg_all_lift + isRunnable_wp threadSet_pred_tcb_no_state threadSet_valid_queues_no_state + threadSet_valid_queues'_no_state threadSet_cur threadSet_valid_objs_tcbPriority_update + threadSet_weak_sch_act_wf threadSet_ct_in_state'[simplified ct_in_state'_def] + | simp add: etcb_relation_def)+)[1] + apply ((wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift hoare_vcg_disj_lift + tcbSchedDequeue_not_in_queue tcbSchedDequeue_valid_queues + tcbSchedDequeue_ct_in_state'[simplified ct_in_state'_def] + | simp add: etcb_relation_def)+)[2] + apply (force simp: valid_etcbs_def tcb_at_st_tcb_at[symmetric] state_relation_def + dest: pspace_relation_tcb_at intro: st_tcb_at_opeqI) + apply (force simp: state_relation_def elim: valid_objs'_maxDomain valid_objs'_maxPriority) + done + +lemma setPriority_corres: + "corres dc (einvs and tcb_at t) (invs' and tcb_at' t and valid_objs' and (\_. x \ maxPriority)) + (set_priority t x) (setPriority t x)" + apply (rule corres_guard_imp) + apply (rule sp_corres2) + apply (simp add: valid_sched_def valid_sched_action_def invs_psp_aligned invs_distinct invs_def) + apply (clarsimp simp: invs'_def valid_state'_def sch_act_wf_weak) + done + +lemma setMCPriority_corres: + "corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_mcpriority t x) (setMCPriority t x)" + apply (rule corres_guard_imp) + apply (clarsimp simp: setMCPriority_def set_mcpriority_def) + apply (rule threadset_corresT) + by (clarsimp simp: tcb_relation_def tcb_cap_cases_tcb_mcpriority + tcb_cte_cases_def cteSizeBits_def exst_same_def)+ + +definition + "out_rel fn fn' v v' \ + ((v = None) = (v' = None)) \ + (\tcb tcb'. tcb_relation tcb tcb' \ + tcb_relation (case_option id fn v tcb) + (case_option id fn' v' tcb'))" + +lemma out_corresT: + assumes x: "\tcb v. \(getF, setF)\ran tcb_cap_cases. getF (fn v tcb) = getF tcb" + assumes y: "\v. \tcb. \(getF, setF)\ran tcb_cte_cases. getF (fn' v tcb) = getF tcb" + assumes e: "\tcb v. exst_same tcb (fn' v tcb)" + shows + "out_rel fn fn' v v' \ + corres dc (tcb_at t and pspace_aligned and pspace_distinct) + \ + (option_update_thread t fn v) + (case_option (return ()) (\x. threadSet (fn' x) t) v')" + apply (case_tac v, simp_all add: out_rel_def + option_update_thread_def) + apply clarsimp + apply (clarsimp simp add: threadset_corresT [OF _ x y e]) + done + +lemmas out_corres = out_corresT [OF _ all_tcbI, OF ball_tcb_cap_casesI ball_tcb_cte_casesI] + +lemma tcbSchedDequeue_sch_act_simple[wp]: + "tcbSchedDequeue t \sch_act_simple\" + by (wpsimp simp: sch_act_simple_def) + +lemma setP_invs': + "\invs' and tcb_at' t and K (p \ maxPriority)\ setPriority t p \\rv. 
invs'\" + apply (rule hoare_gen_asm) + apply (simp add: setPriority_def) + apply (wp rescheduleRequired_all_invs_but_ct_not_inQ) + apply simp + apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift') + unfolding st_tcb_at'_def + apply (strengthen not_obj_at'_strengthen, wp) + apply (wp hoare_vcg_imp_lift') + apply (rule_tac Q="\rv s. invs' s" in hoare_post_imp) + apply (clarsimp simp: invs_sch_act_wf' invs'_def invs_queues) + apply (clarsimp simp: valid_state'_def) + apply (wp hoare_drop_imps threadSet_invs_trivial, + simp_all add: inQ_def cong: conj_cong)[1] + apply (wp hoare_drop_imps threadSet_invs_trivial, + simp_all add: inQ_def cong: conj_cong)[1] + apply (wp hoare_drop_imps threadSet_invs_trivial, + simp_all add: inQ_def cong: conj_cong)[1] + apply (rule_tac Q="\_. invs' and obj_at' (Not \ tcbQueued) t + and (\s. \d p. t \ set (ksReadyQueues s (d,p)))" + in hoare_post_imp) + apply (clarsimp simp: obj_at'_def inQ_def) + apply (wp tcbSchedDequeue_not_queued)+ + apply clarsimp + done + +crunches setPriority, setMCPriority + for typ_at'[wp]: "\s. P (typ_at' T p s)" + (simp: crunch_simps) + +lemmas setPriority_typ_ats [wp] = typ_at_lifts [OF setPriority_typ_at'] + +crunches setPriority, setMCPriority + for valid_cap[wp]: "valid_cap' c" + (wp: getObject_inv_tcb) + + +definition + newroot_rel :: "(cap \ cslot_ptr) option \ (capability \ machine_word) option \ bool" +where + "newroot_rel \ opt_rel (\(cap, ptr) (cap', ptr'). + cap_relation cap cap' + \ ptr' = cte_map ptr)" + +function recursive :: "nat \ ((nat \ nat), unit) nondet_monad" +where + "recursive (Suc n) s = (do f \ gets fst; s \ gets snd; put ((f + s), n); recursive n od) s" +| "recursive 0 s = (modify (\(a, b). (a, 0))) s" + by (case_tac "fst x", fastforce+) + +termination recursive + apply (rule recursive.termination) + apply (rule wf_measure [where f=fst]) + apply simp + done + +lemma cte_map_tcb_0: + "cte_map (t, tcb_cnode_index 0) = t" + by (simp add: cte_map_def tcb_cnode_index_def) + +lemma cte_map_tcb_1: + "cte_map (t, tcb_cnode_index 1) = t + 2^cteSizeBits" + by (simp add: cte_map_def tcb_cnode_index_def to_bl_1 objBits_defs cte_level_bits_def) + +lemma sameRegion_corres2: + "\ cap_relation c c'; cap_relation d d' \ + \ same_region_as c d = sameRegionAs c' d'" + by (erule(1) same_region_as_relation) + +lemma sameObject_corres2: + "\ cap_relation c c'; cap_relation d d' \ + \ same_object_as c d = sameObjectAs c' d'" + apply (frule(1) sameRegion_corres2[symmetric, where c=c and d=d]) + apply (case_tac c; simp add: sameObjectAs_def same_object_as_def + isCap_simps is_cap_simps bits_of_def) + apply (case_tac d; simp) + apply (case_tac d'; simp) + apply (rename_tac arch_cap) + apply clarsimp + apply (case_tac d, (simp_all split: arch_cap.split)[11]) + apply (rename_tac arch_capa) + apply (clarsimp simp add: AARCH64_H.sameObjectAs_def Let_def) + apply (intro conjI impI) + apply (case_tac arch_cap; simp add: sameRegionAs_def isCap_simps) + apply (case_tac arch_capa; fastforce simp add: add_mask_fold) + apply (case_tac arch_cap; simp add: sameRegionAs_def isCap_simps) + apply (case_tac arch_capa; simp) + done + +lemma checkCapAt_corres: + assumes r: "cap_relation cap cap'" + assumes c: "corres dc Q Q' f f'" + assumes Q: "\s. P s \ cte_wp_at (same_object_as cap) slot s \ Q s" + assumes Q': "\s. 
P' s \ cte_wp_at' (sameObjectAs cap' o cteCap) (cte_map slot) s \ Q' s" + shows "corres dc (P and cte_at slot and invs) (P' and pspace_aligned' and pspace_distinct') + (check_cap_at cap slot f) + (checkCapAt cap' (cte_map slot) f')" using r c + apply (simp add: check_cap_at_def checkCapAt_def liftM_def when_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (rule corres_if [unfolded if_apply_def2]) + apply (erule(1) sameObject_corres2) + apply assumption + apply (rule corres_trivial, simp) + apply (wp get_cap_wp getCTE_wp')+ + apply (fastforce elim: cte_wp_at_weakenE intro: Q) + apply (fastforce elim: cte_wp_at_weakenE' intro: Q') + done + +lemma checkCapAt_weak_corres: + assumes r: "cap_relation cap cap'" + assumes c: "corres dc P P' f f'" + shows "corres dc (P and cte_at slot and invs) (P' and pspace_aligned' and pspace_distinct') + (check_cap_at cap slot f) + (checkCapAt cap' (cte_map slot) f')" + apply (rule checkCapAt_corres, rule r, rule c) + apply auto + done + +defs + assertDerived_def: + "assertDerived src cap f \ + do stateAssert (\s. cte_wp_at' (is_derived' (ctes_of s) src cap o cteCap) src s) []; f od" + +lemma checkCapAt_cteInsert_corres: + "cap_relation new_cap newCap \ + corres dc (einvs and cte_wp_at (\c. c = cap.NullCap) (target, ref) + and cte_at slot and K (is_cnode_or_valid_arch new_cap + \ (is_pt_cap new_cap \ cap_asid new_cap \ None)) + and cte_wp_at (\c. obj_refs c = obj_refs new_cap + \ table_cap_ref c = table_cap_ref new_cap \ + vspace_asid c = vspace_asid new_cap) src_slot) + (invs' and cte_wp_at' (\cte. cteCap cte = NullCap) (cte_map (target, ref)) + and valid_cap' newCap) + (check_cap_at new_cap src_slot + (check_cap_at (cap.ThreadCap target) slot + (cap_insert new_cap src_slot (target, ref)))) + (checkCapAt newCap (cte_map src_slot) + (checkCapAt (ThreadCap target) (cte_map slot) + (assertDerived (cte_map src_slot) newCap (cteInsert newCap (cte_map src_slot) (cte_map (target, ref))))))" + apply (rule corres_guard_imp) + apply (rule_tac P="cte_wp_at (\c. c = cap.NullCap) (target, ref) and + cte_at slot and + cte_wp_at (\c. obj_refs c = obj_refs new_cap + \ table_cap_ref c = table_cap_ref new_cap \ vspace_asid c = vspace_asid new_cap) src_slot + and einvs and K (is_cnode_or_valid_arch new_cap + \ (is_pt_cap new_cap \ cap_asid new_cap \ None))" + and + P'="cte_wp_at' (\c. 
cteCap c = NullCap) (cte_map (target, ref)) + and invs' and valid_cap' newCap" + in checkCapAt_corres, assumption) + apply (rule checkCapAt_weak_corres, simp) + apply (unfold assertDerived_def)[1] + apply (rule corres_stateAssert_implied [where P'=\]) + apply simp + apply (erule cteInsert_corres [OF _ refl refl]) + apply clarsimp + apply (drule cte_wp_at_norm [where p=src_slot]) + apply (case_tac src_slot) + apply (clarsimp simp: state_relation_def) + apply (drule (1) pspace_relation_cte_wp_at) + apply fastforce + apply fastforce + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule (2) is_derived_eq [THEN iffD1]) + apply (erule cte_wp_at_weakenE, rule TrueI) + apply assumption + apply clarsimp + apply (rule conjI, fastforce)+ + apply (cases src_slot) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (rule conjI) + apply (frule same_object_as_cap_master) + apply (clarsimp simp: cap_master_cap_simps is_cnode_or_valid_arch_def + is_cap_simps is_valid_vtable_root_def + dest!: cap_master_cap_eqDs) + apply (erule(1) checked_insert_is_derived) + apply simp + apply simp + apply fastforce + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply clarsimp + apply fastforce + done + +lemma capBadgeNone_masters: + "capMasterCap cap = capMasterCap cap' + \ (capBadge cap = None) = (capBadge cap' = None)" + apply (rule master_eqI) + apply (auto simp add: capBadge_def capMasterCap_def isCap_simps + split: capability.split) + done + +definition + "vspace_asid' cap \ case cap of + ArchObjectCap (PageTableCap _ _ (Some (asid, _))) \ Some asid + | _ \ None" + +lemma untyped_derived_eq_from_sameObjectAs: + "sameObjectAs cap cap2 + \ untyped_derived_eq cap cap2" + by (clarsimp simp: untyped_derived_eq_def sameObjectAs_def2 isCap_Master) + +lemmas vspace_asid'_simps [simp] = + vspace_asid'_def [split_simps capability.split arch_capability.split option.split prod.split] + +lemma checked_insert_tcb_invs'[wp]: + "\invs' and cte_wp_at' (\cte. cteCap cte = NullCap) slot + and valid_cap' new_cap + and K (capBadge new_cap = None) + and K (slot \ cte_refs' (ThreadCap target) 0) + and K (\ isReplyCap new_cap \ \ isIRQControlCap new_cap)\ + checkCapAt new_cap src_slot + (checkCapAt (ThreadCap target) slot' + (assertDerived src_slot new_cap (cteInsert new_cap src_slot slot))) \\rv. invs'\" + supply option.case_cong[cong] + apply (simp add: checkCapAt_def liftM_def assertDerived_def stateAssert_def) + apply (wp getCTE_cteCap_wp cteInsert_invs) + apply (clarsimp split: option.splits) + apply (subst(asm) tree_cte_cteCap_eq[unfolded o_def]) + apply (clarsimp split: option.splits) + apply (rule conjI) + apply (clarsimp simp: sameObjectAs_def3) + apply (clarsimp simp: tree_cte_cteCap_eq[unfolded o_def] + is_derived'_def untyped_derived_eq_from_sameObjectAs + ex_cte_cap_to'_cteCap) + apply (erule sameObjectAsE)+ + apply (clarsimp simp: badge_derived'_def) + apply (frule capBadgeNone_masters, simp) + apply (rule conjI) + apply (rule_tac x=slot' in exI) + subgoal by (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps cteCaps_of_def) + apply (erule(1) valid_irq_handlers_ctes_ofD) + apply (clarsimp simp: invs'_def valid_state'_def) + done + +lemma checkCap_inv: + assumes x: "\P\ f \\rv. P\" + shows "\P\ checkCapAt cap slot f \\rv. 
P\" + unfolding checkCapAt_def + by (wp x | simp)+ + +lemma isValidVTableRootD: + "isValidVTableRoot cap + \ isArchObjectCap cap \ isPageTableCap (capCap cap) + \ capPTMappedAddress (capCap cap) \ None" + by (simp add: isValidVTableRoot_def isVTableRoot_def isCap_simps + split: capability.split_asm arch_capability.split_asm + option.split_asm) + +lemma assertDerived_wp: + "\P and (\s. cte_wp_at' (is_derived' (ctes_of s) slot cap o cteCap) slot s)\ f \Q\ \ + \P\ assertDerived slot cap f \Q\" + unfolding assertDerived_def by wpsimp + +lemma setMCPriority_invs': + "\invs' and tcb_at' t and K (prio \ maxPriority)\ setMCPriority t prio \\rv. invs'\" + unfolding setMCPriority_def + apply (rule hoare_gen_asm) + apply (rule hoare_pre) + by (wp threadSet_invs_trivial, (clarsimp simp: inQ_def)+) + +lemma valid_tcb'_tcbMCP_update: + "\valid_tcb' tcb s \ f (tcbMCP tcb) \ maxPriority\ \ valid_tcb' (tcbMCP_update f tcb) s" + apply (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + done + +lemma setMCPriority_valid_objs'[wp]: + "\valid_objs' and K (prio \ maxPriority)\ setMCPriority t prio \\rv. valid_objs'\" + unfolding setMCPriority_def + including no_pre + apply (simp add: threadSet_def) + apply wp + prefer 2 + apply (rule getObject_tcb_sp) + apply (rule hoare_weaken_pre) + apply (rule setObject_tcb_valid_objs) + apply (clarsimp simp: valid_obj'_def) + apply (frule (1) ko_at_valid_objs') + apply simp + apply (simp add: valid_obj'_def) + apply (subgoal_tac "tcb_at' t s") + apply simp + apply (rule valid_tcb'_tcbMCP_update) + apply (fastforce simp: obj_at'_def)+ + done + +crunch sch_act_simple[wp]: setMCPriority sch_act_simple + (wp: ssa_sch_act_simple crunch_wps rule: sch_act_simple_lift simp: crunch_simps) + +(* For some reason, when this was embedded in a larger expression clarsimp wouldn't remove it. + Adding it as a simp rule does *) +lemma inQ_tc_corres_helper: + "(\d p. (\tcb. tcbQueued tcb \ tcbPriority tcb = p \ tcbDomain tcb = d \ + (tcbQueued tcb \ tcbDomain tcb \ d)) \ a \ set (ksReadyQueues s (d, p)))" + by clarsimp + +abbreviation "valid_option_prio \ case_option True (\(p, auth). p \ maxPriority)" + +definition valid_tcb_invocation :: "tcbinvocation \ bool" where + "valid_tcb_invocation i \ case i of + ThreadControl _ _ _ mcp p _ _ _ \ valid_option_prio p \ valid_option_prio mcp + | _ \ True" + +lemma thread_set_ipc_weak_valid_sched_action: + "\ einvs and simple_sched_action\ + thread_set (tcb_ipc_buffer_update f) a + \\x. weak_valid_sched_action\" + apply (rule hoare_pre) + apply (simp add: thread_set_def) + apply (wp set_object_wp) + apply (simp | intro impI | elim exE conjE)+ + apply (frule get_tcb_SomeD) + apply (erule ssubst) + apply (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def + get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + done + +lemma threadcontrol_corres_helper2: + "is_aligned a msg_align_bits \ + \invs' and tcb_at' t\ + threadSet (tcbIPCBuffer_update (\_. a)) t + \\x s. Invariants_H.valid_queues s \ valid_queues' s\" + by (wp threadSet_invs_trivial + | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf + | clarsimp simp: inQ_def )+ + +lemma threadcontrol_corres_helper3: + "\ einvs and simple_sched_action\ + check_cap_at cap p (check_cap_at (cap.ThreadCap cap') slot (cap_insert cap p (t, tcb_cnode_index 4))) + \\x. 
weak_valid_sched_action and valid_etcbs \" + apply (rule hoare_pre) + apply (wp check_cap_inv | simp add:)+ + by (clarsimp simp add: weak_valid_sched_action_def valid_etcbs_2_def st_tcb_at_kh_def + get_tcb_def obj_at_kh_def obj_at_def is_etcb_at'_def valid_sched_def valid_sched_action_def) + +lemma threadcontrol_corres_helper4: + "isArchObjectCap ac \ + \invs' and cte_wp_at' (\cte. cteCap cte = capability.NullCap) (cte_map (a, tcb_cnode_index 4)) + and valid_cap' ac \ + checkCapAt ac (cte_map (ab, ba)) + (checkCapAt (capability.ThreadCap a) (cte_map slot) + (assertDerived (cte_map (ab, ba)) ac (cteInsert ac (cte_map (ab, ba)) (cte_map (a, tcb_cnode_index 4))))) + \\x. Invariants_H.valid_queues and valid_queues'\" + apply (wp + | strengthen invs_valid_queues' invs_queues invs_weak_sch_act_wf + | clarsimp simp: )+ + by (case_tac ac; + clarsimp simp: capBadge_def isCap_simps tcb_cnode_index_def cte_map_def cte_wp_at'_def + cte_level_bits_def) + +crunches cap_delete + for pspace_alinged[wp]: "pspace_aligned :: det_ext state \ _" + and pspace_distinct[wp]: "pspace_distinct :: det_ext state \ _" + (simp: crunch_simps preemption_point_def wp: crunch_wps OR_choiceE_weak_wp) + +lemmas check_cap_pspace_aligned[wp] = check_cap_inv[of pspace_aligned] +lemmas check_cap_pspace_distinct[wp] = check_cap_inv[of pspace_distinct] + +lemma is_valid_vtable_root_simp: + "is_valid_vtable_root cap = + (\r asid vref. cap = cap.ArchObjectCap (arch_cap.PageTableCap r VSRootPT_T (Some (asid, vref))))" + by (simp add: is_valid_vtable_root_def + split: cap.splits arch_cap.splits option.splits pt_type.splits) + +lemma threadSet_invs_trivialT2: + assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" + assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes r: "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows + "\\s. invs' s + \ tcb_at' t s \ (\tcb. is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits) + \ (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) + \ (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) + \ ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) + \ (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ + threadSet F t + \\rv. invs'\" +proof - + from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast + note threadSet_sch_actT_P[where P=False, simplified] + have y: "\tcb. tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ + valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" + by (auto simp: z) + show ?thesis + apply (simp add: invs'_def valid_state'_def split del: if_split) + apply (rule hoare_pre) + apply (rule hoare_gen_asm [where P="(\tcb. 
is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits)"]) + apply (wp x v u b + threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_valid_queues + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_valid_queues' + threadSet_cur + untyped_ranges_zero_lift + |clarsimp simp: y z a r domains cteCaps_of_def valid_arch_tcb'_def|rule refl)+ + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) + by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) +qed + +lemma threadSet_valid_queues'_no_state2: + "\ \tcb. tcbQueued tcb = tcbQueued (f tcb); + \tcb. tcbState tcb = tcbState (f tcb); + \tcb. tcbPriority tcb = tcbPriority (f tcb); + \tcb. tcbDomain tcb = tcbDomain (f tcb) \ + \ \valid_queues'\ threadSet f t \\_. valid_queues'\" + apply (simp add: valid_queues'_def threadSet_def obj_at'_real_def + split del: if_split) + apply (simp only: imp_conv_disj) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (wp setObject_ko_wp_at | simp add: objBits_simps')+ + apply (wp getObject_tcb_wp updateObject_default_inv + | simp split del: if_split)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def objBits_simps addToQs_def + split del: if_split cong: if_cong) + apply (fastforce simp: inQ_def split: if_split_asm) + done + +lemma getThreadBufferSlot_dom_tcb_cte_cases: + "\\\ getThreadBufferSlot a \\rv s. rv \ (+) a ` dom tcb_cte_cases\" + by (wpsimp simp: tcb_cte_cases_def getThreadBufferSlot_def locateSlot_conv cte_level_bits_def + tcbIPCBufferSlot_def cteSizeBits_def) + +lemma tcb_at'_cteInsert[wp]: + "\\s. tcb_at' (ksCurThread s) s\ cteInsert t x y \\_ s. tcb_at' (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps cteInsert_ct, wp, simp) + +lemma tcb_at'_asUser[wp]: + "\\s. tcb_at' (ksCurThread s) s\ asUser a (setTCBIPCBuffer b) \\_ s. tcb_at' (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps asUser_typ_ats(1), wp, simp) + +lemma tcb_at'_threadSet[wp]: + "\\s. tcb_at' (ksCurThread s) s\ threadSet (tcbIPCBuffer_update (\_. b)) a \\_ s. tcb_at' (ksCurThread s) s\" + by (rule hoare_weaken_pre, wps threadSet_tcb', wp, simp) + +lemma cteDelete_it [wp]: + "\\s. P (ksIdleThread s)\ cteDelete slot e \\_ s. P (ksIdleThread s)\" + by (rule cteDelete_preservation) (wp | clarsimp)+ + +lemmas threadSet_invs_trivial2 = + threadSet_invs_trivialT2 [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma valid_tcb_ipc_buffer_update: + "\buf s. is_aligned buf msg_align_bits + \ (\tcb. valid_tcb' tcb s \ valid_tcb' (tcbIPCBuffer_update (\_. buf) tcb) s)" + by (simp add: valid_tcb'_def tcb_cte_cases_def cteSizeBits_def) + +lemma checkCap_wp: + assumes x: "\P\ f \\rv. Q\" + and PQ: "\s. P s \ Q s" + shows "\P\ checkCapAt cap slot f \\rv. 
Q\" + unfolding checkCapAt_def + apply (wp x) + apply (rule hoare_strengthen_post[rotated]) + apply clarsimp + apply (strengthen PQ) + apply assumption + apply simp + apply (wp x | simp)+ + done + +lemma assertDerived_wp_weak: + "\P\ f \Q\ \ \P\ assertDerived slot cap f \Q\" + apply (wpsimp simp: assertDerived_def) + done + +crunches option_update_thread + for aligned[wp]: "pspace_aligned" + and distinct[wp]: "pspace_distinct" + +lemma transferCaps_corres: + assumes x: "newroot_rel e e'" and y: "newroot_rel f f'" + and z: "(case g of None \ g' = None + | Some (vptr, g'') \ \g'''. g' = Some (vptr, g''') + \ newroot_rel g'' g''')" + and u: "{e, f, option_map undefined g} \ {None} \ sl' = cte_map slot" + shows + "corres (dc \ (=)) + (einvs and simple_sched_action and tcb_at a and + (\s. {e, f, option_map undefined g} \ {None} \ cte_at slot s) and + case_option \ (valid_cap o fst) e and + case_option \ (cte_at o snd) e and + case_option \ (no_cap_to_obj_dr_emp o fst) e and + K (case_option True (is_cnode_cap o fst) e) and + case_option \ (valid_cap o fst) f and + case_option \ (cte_at o snd) f and + case_option \ (no_cap_to_obj_dr_emp o fst) f and + K (case_option True (is_valid_vtable_root o fst) f) + and case_option \ (case_option \ (cte_at o snd) o snd) g + and case_option \ (case_option \ (no_cap_to_obj_dr_emp o fst) o snd) g + and case_option \ (case_option \ (valid_cap o fst) o snd) g + and K (case_option True ((\v. is_aligned v msg_align_bits) o fst) g) + and K (case_option True (\v. case_option True ((swp valid_ipc_buffer_cap (fst v) + and is_arch_cap and is_cnode_or_valid_arch) o fst) (snd v)) g) + and (\s. case_option True (\(pr, auth). mcpriority_tcb_at (\m. pr \ m) auth s) p_auth) \ \only set prio \ mcp\ + and (\s. case_option True (\(mcp, auth). mcpriority_tcb_at (\m. mcp \ m) auth s) mcp_auth) \ \only set mcp \ prev_mcp\) + (invs' and sch_act_simple and case_option \ (valid_cap' o fst) e' and + (\s. {e', f', option_map undefined g'} \ {None} \ cte_at' (cte_map slot) s) and + K (case_option True (isCNodeCap o fst) e') and + case_option \ (valid_cap' o fst) f' and + K (case_option True (isValidVTableRoot o fst) f') and + K (case_option True ((\v. is_aligned v msg_align_bits) o fst) g') and + K (case_option True (case_option True (isArchObjectCap o fst) o snd) g') and + case_option \ (case_option \ (valid_cap' o fst) o snd) g' and + tcb_at' a and ex_nonz_cap_to' a and K (valid_option_prio p_auth \ valid_option_prio mcp_auth) and + (\s. case_option True (\(pr, auth). mcpriority_tcb_at' ((\) pr) auth s) p_auth) and + (\s. case_option True (\(m, auth). mcpriority_tcb_at' ((\) m) auth s) mcp_auth)) + (invoke_tcb (tcb_invocation.ThreadControl a slot (option_map to_bl b') mcp_auth p_auth e f g)) + (invokeTCB (tcbinvocation.ThreadControl a sl' b' mcp_auth p_auth e' f' g'))" +proof - + have P: "\t v. corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (option_update_thread t (tcb_fault_handler_update o (%x _. x)) + (option_map to_bl v)) + (case v of None \ return () + | Some x \ threadSet (tcbFaultHandler_update (%_. x)) t)" + apply (rule out_corres, simp_all add: exst_same_def) + apply (case_tac v, simp_all add: out_rel_def) + apply (safe, case_tac tcb', simp add: tcb_relation_def split: option.split) + done + have R: "\t v. corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (option_update_thread t (tcb_ipc_buffer_update o (%x _. x)) v) + (case v of None \ return () + | Some x \ threadSet (tcbIPCBuffer_update (%_. 
x)) t)" + apply (rule out_corres, simp_all add: exst_same_def) + apply (case_tac v, simp_all add: out_rel_def) + apply (safe, case_tac tcb', simp add: tcb_relation_def) + done + have S: "\t x. corres dc (einvs and tcb_at t) (invs' and tcb_at' t and valid_objs' and K (valid_option_prio p_auth)) + (case_option (return ()) (\(p, auth). set_priority t p) p_auth) + (case_option (return ()) (\p'. setPriority t (fst p')) p_auth)" + apply (case_tac p_auth; clarsimp simp: setPriority_corres) + done + have S': "\t x. corres dc + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (case_option (return ()) (\(mcp, auth). set_mcpriority t mcp) mcp_auth) + (case_option (return ()) (\mcp'. setMCPriority t (fst mcp')) mcp_auth)" + apply(case_tac mcp_auth; clarsimp simp: setMCPriority_corres) + done + have T: "\x x' ref getfn target. + \ newroot_rel x x'; getfn = return (cte_map (target, ref)); + x \ None \ {e, f, option_map undefined g} \ {None} \ \ + corres (dc \ dc) + (einvs and simple_sched_action and cte_at (target, ref) and emptyable (target, ref) and + (\s. \(sl, c) \ (case x of None \ {} | Some (c, sl) \ {(sl, c), (slot, c)}). + cte_at sl s \ no_cap_to_obj_dr_emp c s \ valid_cap c s) + and K (case x of None \ True + | Some (c, sl) \ is_cnode_or_valid_arch c)) + (invs' and sch_act_simple and cte_at' (cte_map (target, ref)) and + (\s. \cp \ (case x' of None \ {} | Some (c, sl) \ {c}). s \' cp)) + (case x of None \ returnOk () + | Some pr \ case_prod (\new_cap src_slot. + doE cap_delete (target, ref); + liftE $ check_cap_at new_cap src_slot $ + check_cap_at (cap.ThreadCap target) slot $ + cap_insert new_cap src_slot (target, ref) + odE) pr) + (case x' of + None \ returnOk () + | Some pr \ (\(newCap, srcSlot). + do slot \ getfn; + doE uu \ cteDelete slot True; + liftE (checkCapAt newCap srcSlot + (checkCapAt (capability.ThreadCap target) sl' + (assertDerived srcSlot newCap (cteInsert newCap srcSlot slot)))) + odE + od) pr)" + apply (case_tac "x = None") + apply (simp add: newroot_rel_def returnOk_def) + apply (drule(1) mp, drule mp [OF u]) + apply (clarsimp simp add: newroot_rel_def returnOk_def split_def) + apply (rule corres_gen_asm) + apply (rule corres_guard_imp) + apply (rule corres_split_norE[OF cteDelete_corres]) + apply (simp del: dc_simp) + apply (erule checkCapAt_cteInsert_corres) + apply (fold validE_R_def) + apply (wp cap_delete_deletes cap_delete_cte_at cap_delete_valid_cap + | strengthen use_no_cap_to_obj_asid_strg)+ + apply (wp cteDelete_invs' cteDelete_deletes) + apply (clarsimp dest!: is_cnode_or_valid_arch_cap_asid) + apply clarsimp + done + have U2: "getThreadBufferSlot a = return (cte_map (a, tcb_cnode_index 4))" + by (simp add: getThreadBufferSlot_def locateSlot_conv + cte_map_def tcb_cnode_index_def tcbIPCBufferSlot_def + cte_level_bits_def) + have T2: "corres (dc \ dc) + (einvs and simple_sched_action and tcb_at a and + (\s. \(sl, c) \ (case g of None \ {} | Some (x, v) \ {(slot, cap.NullCap)} \ + (case v of None \ {} | Some (c, sl) \ {(sl, c), (slot, c)})). + cte_at sl s \ no_cap_to_obj_dr_emp c s \ valid_cap c s) + and K (case g of None \ True | Some (x, v) \ (case v of + None \ True | Some (c, sl) \ is_cnode_or_valid_arch c + \ is_arch_cap c + \ valid_ipc_buffer_cap c x + \ is_aligned x msg_align_bits))) + (invs' and sch_act_simple and tcb_at' a and + (\s. \cp \ (case g' of None \ {} | Some (x, v) \ (case v of + None \ {} | Some (c, sl) \ {c})). 
s \' cp) and + K (case g' of None \ True | Some (x, v) \ is_aligned x msg_align_bits + \ (case v of None \ True | Some (ac, _) \ isArchObjectCap ac)) ) + (case_option (returnOk ()) + (case_prod + (\ptr frame. + doE cap_delete (a, tcb_cnode_index 4); + do y \ thread_set (tcb_ipc_buffer_update (\_. ptr)) a; + y \ case_option (return ()) + (case_prod + (\new_cap src_slot. + check_cap_at new_cap src_slot $ + check_cap_at (cap.ThreadCap a) slot $ + cap_insert new_cap src_slot (a, tcb_cnode_index 4))) + frame; + cur \ gets cur_thread; + liftE $ when (cur = a) (reschedule_required) + od + odE)) + g) + (case_option (returnOk ()) + (\(ptr, frame). + do bufferSlot \ getThreadBufferSlot a; + doE y \ cteDelete bufferSlot True; + do y \ threadSet (tcbIPCBuffer_update (\_. ptr)) a; + y \ (case_option (return ()) + (case_prod + (\newCap srcSlot. + checkCapAt newCap srcSlot $ + checkCapAt + (capability.ThreadCap a) + sl' $ + assertDerived srcSlot newCap $ + cteInsert newCap srcSlot bufferSlot)) + frame); + cur \ getCurThread; + liftE $ when (cur = a) rescheduleRequired + od odE od) + g')" (is "corres _ ?T2_pre ?T2_pre' _ _") + using z u + apply - + apply (rule corres_guard_imp[where P=P and P'=P' + and Q="P and cte_at (a, tcb_cnode_index 4)" + and Q'="P' and cte_at' (cte_map (a, cap))" for P P' a cap]) + apply (cases g) + apply (simp, simp add: returnOk_def) + apply (clarsimp simp: liftME_def[symmetric] U2 liftE_bindE) + apply (case_tac b, simp_all add: newroot_rel_def) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule cteDelete_corres) + apply (rule_tac F="is_aligned aa msg_align_bits" in corres_gen_asm2) + apply (rule corres_split_nor) + apply (rule threadset_corres, + (simp add: tcb_relation_def), (simp add: exst_same_def)+)[1] + apply (rule corres_split[OF getCurThread_corres], clarsimp) + apply (rule corres_when[OF refl rescheduleRequired_corres]) + apply (wpsimp wp: gct_wp)+ + apply (wp thread_set_ipc_weak_valid_sched_action|wp (once) hoare_drop_imp)+ + apply simp + apply (wp threadcontrol_corres_helper2 | wpc | simp)+ + apply (wp|strengthen einvs_valid_etcbs)+ + apply (wpsimp wp: cteDelete_invs' hoare_vcg_conj_lift) + apply (fastforce simp: emptyable_def) + apply fastforce + apply clarsimp + apply (rule corres_guard_imp) + apply (rule corres_split_norE[OF cteDelete_corres]) + apply (rule_tac F="is_aligned aa msg_align_bits" + in corres_gen_asm) + apply (rule_tac F="isArchObjectCap ac" in corres_gen_asm2) + apply (rule corres_split_nor) + apply (rule threadset_corres, + simp add: tcb_relation_def, (simp add: exst_same_def)+) + apply (rule corres_split) + apply (erule checkCapAt_cteInsert_corres) + apply (rule corres_split[OF getCurThread_corres], clarsimp) + apply (rule corres_when[OF refl rescheduleRequired_corres]) + apply (wp gct_wp)+ + apply (wp hoare_drop_imp threadcontrol_corres_helper3)[1] + apply (wp hoare_drop_imp threadcontrol_corres_helper4)[1] + apply (wp thread_set_tcb_ipc_buffer_cap_cleared_invs + thread_set_cte_wp_at_trivial thread_set_not_state_valid_sched + | simp add: ran_tcb_cap_cases)+ + apply (wp threadSet_invs_trivial + threadSet_cte_wp_at' | simp)+ + apply (wp cap_delete_deletes cap_delete_cte_at + cap_delete_valid_cap cteDelete_deletes + cteDelete_invs' + | strengthen use_no_cap_to_obj_asid_strg + | clarsimp simp: inQ_def inQ_tc_corres_helper)+ + apply (clarsimp simp: cte_wp_at_caps_of_state + dest!: is_cnode_or_valid_arch_cap_asid) + apply (fastforce simp: emptyable_def) + apply (clarsimp simp: inQ_def) + apply (clarsimp simp: obj_at_def is_tcb) 
+ apply (rule cte_wp_at_tcbI, simp, fastforce, simp) + apply (clarsimp simp: cte_map_def tcb_cnode_index_def obj_at'_def objBits_simps) + apply (erule(2) cte_wp_at_tcbI', fastforce simp: objBits_defs cte_level_bits_def, simp) + done + have U: "getThreadCSpaceRoot a = return (cte_map (a, tcb_cnode_index 0))" + apply (clarsimp simp add: getThreadCSpaceRoot) + apply (simp add: cte_map_def tcb_cnode_index_def + cte_level_bits_def word_bits_def) + done + have V: "getThreadVSpaceRoot a = return (cte_map (a, tcb_cnode_index 1))" + apply (clarsimp simp add: getThreadVSpaceRoot) + apply (simp add: cte_map_def tcb_cnode_index_def to_bl_1 objBits_defs + cte_level_bits_def word_bits_def) + done + have X: "\x P Q R M. (\y. x = Some y \ \P y\ M y \Q\,\R\) + \ \case_option (Q ()) P x\ case_option (returnOk ()) M x \Q\,\R\" + by (case_tac x, simp_all, wp) + have Y: "\x P Q M. (\y. x = Some y \ \P y\ M y \Q\,-) + \ \case_option (Q ()) P x\ case_option (returnOk ()) M x \Q\,-" + by (case_tac x, simp_all, wp) + have Z: "\P f R Q x. \P\ f \\rv. Q and R\ \ \P\ f \\rv. case_option Q (\y. R) x\" + apply (rule hoare_post_imp) + defer + apply assumption + apply (case_tac x, simp_all) + done + have A: "\x P Q M. (\y. x = Some y \ \P y\ M y \Q\) + \ \case_option (Q ()) P x\ case_option (return ()) M x \Q\" + by (case_tac x, simp_all, wp) + have B: "\t v. \invs' and tcb_at' t\ threadSet (tcbFaultHandler_update v) t \\rv. invs'\" + by (wp threadSet_invs_trivial | clarsimp simp: inQ_def)+ + note stuff = Z B out_invs_trivial hoare_case_option_wp + hoare_vcg_const_Ball_lift hoare_vcg_const_Ball_lift_R + cap_delete_deletes cap_delete_valid_cap out_valid_objs + cap_insert_objs + cteDelete_deletes cteDelete_sch_act_simple + out_valid_cap out_cte_at out_tcb_valid out_emptyable + CSpaceInv_AI.cap_insert_valid_cap cap_insert_cte_at cap_delete_cte_at + cap_delete_tcb cteDelete_invs' checkCap_inv [where P="valid_cap' c0" for c0] + check_cap_inv[where P="tcb_at p0" for p0] checkCap_inv [where P="tcb_at' p0" for p0] + check_cap_inv[where P="cte_at p0" for p0] checkCap_inv [where P="cte_at' p0" for p0] + check_cap_inv[where P="valid_cap c" for c] checkCap_inv [where P="valid_cap' c" for c] + check_cap_inv[where P="tcb_cap_valid c p1" for c p1] + check_cap_inv[where P=valid_sched] + check_cap_inv[where P=simple_sched_action] + checkCap_inv [where P=sch_act_simple] + out_no_cap_to_trivial [OF ball_tcb_cap_casesI] + checked_insert_no_cap_to + note if_cong [cong] option.case_cong [cong] + show ?thesis + apply (simp add: invokeTCB_def liftE_bindE) + apply (simp only: eq_commute[where a= "a"]) + apply (rule corres_guard_imp) + apply (rule corres_split_nor[OF P]) + apply (rule corres_split_nor[OF S', simplified]) + apply (rule corres_split_norE[OF T [OF x U], simplified]) + apply (rule corres_split_norE[OF T [OF y V], simplified]) + apply (rule corres_split_norE) + apply (rule T2[simplified]) + apply (rule corres_split_nor[OF S, simplified]) + apply (rule corres_returnOkTT, simp) + apply wp + apply wp + apply (wpsimp wp: hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift + hoare_vcg_all_lift_R hoare_vcg_all_lift + as_user_invs thread_set_ipc_tcb_cap_valid + thread_set_tcb_ipc_buffer_cap_cleared_invs + thread_set_cte_wp_at_trivial + thread_set_valid_cap + reschedule_preserves_valid_sched + check_cap_inv[where P=valid_sched] (* from stuff *) + check_cap_inv[where P="tcb_at p0" for p0] + thread_set_not_state_valid_sched + cap_delete_deletes + cap_delete_valid_cap + simp: ran_tcb_cap_cases) + apply (strengthen use_no_cap_to_obj_asid_strg) + 
apply (wpsimp wp: cap_delete_cte_at cap_delete_valid_cap) + apply (wpsimp wp: hoare_drop_imps) + apply ((wpsimp wp: hoare_vcg_const_imp_lift hoare_vcg_imp_lift' hoare_vcg_all_lift + threadSet_cte_wp_at' threadSet_invs_trivialT2 cteDelete_invs' + simp: tcb_cte_cases_def cteSizeBits_def), (fastforce+)[6]) + apply wpsimp + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + threadSet_invs_trivialT2 threadSet_cte_wp_at' + simp: tcb_cte_cases_def cteSizeBits_def, (fastforce+)[6]) + apply wpsimp + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + rescheduleRequired_invs' threadSet_cte_wp_at' + simp: tcb_cte_cases_def) + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' + simp: tcb_cte_cases_def cteSizeBits_def, (fastforce+)[6]) + apply wpsimp + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + rescheduleRequired_invs' threadSet_invs_trivialT2 threadSet_cte_wp_at' + simp: tcb_cte_cases_def cteSizeBits_def, (fastforce+)[6]) + apply wpsimp + apply (wpsimp wp: hoare_vcg_const_imp_lift hoare_drop_imps hoare_vcg_all_lift + rescheduleRequired_invs' threadSet_cap_to' threadSet_invs_trivialT2 + threadSet_cte_wp_at' hoare_drop_imps + simp: tcb_cte_cases_def cteSizeBits_def) + apply (clarsimp) + apply ((wpsimp wp: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift + threadSet_valid_objs' thread_set_not_state_valid_sched + thread_set_tcb_ipc_buffer_cap_cleared_invs thread_set_cte_wp_at_trivial + thread_set_no_cap_to_trivial getThreadBufferSlot_dom_tcb_cte_cases + assertDerived_wp_weak threadSet_cap_to' out_pred_tcb_at_preserved + checkCap_wp assertDerived_wp_weak cap_insert_objs' + | simp add: ran_tcb_cap_cases split_def U V + emptyable_def + | strengthen tcb_cap_always_valid_strg + tcb_at_invs + use_no_cap_to_obj_asid_strg + | (erule exE, clarsimp simp: word_bits_def))+) + apply (strengthen valid_tcb_ipc_buffer_update) + apply (strengthen invs_valid_objs') + apply (wpsimp wp: cteDelete_invs' hoare_vcg_imp_lift' hoare_vcg_all_lift) + apply wpsimp + apply wpsimp + apply (clarsimp cong: imp_cong conj_cong simp: emptyable_def) + apply (rule_tac Q'="\_. ?T2_pre" in hoare_post_imp_R[simplified validE_R_def, rotated]) + (* beginning to deal with is_nondevice_page_cap *) + apply (clarsimp simp: emptyable_def is_cap_simps + is_cnode_or_valid_arch_def obj_ref_none_no_asid cap_asid_def + cong: conj_cong imp_cong + split: option.split_asm) + apply (simp add: case_bool_If valid_ipc_buffer_cap_def + split: arch_cap.splits if_splits) + (* is_nondevice_page_cap discharged *) + apply ((wp stuff checkCap_wp assertDerived_wp_weak cap_insert_objs' + | simp add: ran_tcb_cap_cases split_def U V emptyable_def + | wpc | strengthen tcb_cap_always_valid_strg use_no_cap_to_obj_asid_strg)+)[1] + apply (clarsimp cong: imp_cong conj_cong) + apply (rule_tac Q'="\_. ?T2_pre' and (\s. 
valid_option_prio p_auth)" + in hoare_post_imp_R[simplified validE_R_def, rotated]) + apply (case_tac g'; clarsimp simp: isCap_simps ; clarsimp cong:imp_cong) + apply (wp add: stuff hoare_vcg_all_lift_R hoare_vcg_all_lift + hoare_vcg_const_imp_lift_R hoare_vcg_const_imp_lift setMCPriority_invs' + threadSet_valid_objs' thread_set_not_state_valid_sched setP_invs' + typ_at_lifts [OF setPriority_typ_at'] + typ_at_lifts [OF setMCPriority_typ_at'] + threadSet_cap_to' out_pred_tcb_at_preserved assertDerived_wp + del: cteInsert_invs + | simp add: ran_tcb_cap_cases split_def U V + emptyable_def + | wpc | strengthen tcb_cap_always_valid_strg + use_no_cap_to_obj_asid_strg + | wp (once) add: sch_act_simple_lift hoare_drop_imps del: cteInsert_invs + | (erule exE, clarsimp simp: word_bits_def))+ + apply (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] tcb_at_st_tcb_at[symmetric] + tcb_cap_valid_def is_cnode_or_valid_arch_def invs_valid_objs emptyable_def + obj_ref_none_no_asid no_cap_to_obj_with_diff_ref_Null is_valid_vtable_root_simp + is_cap_simps cap_asid_def vs_cap_ref_def arch_cap_fun_lift_def + invs_psp_aligned invs_distinct + cong: conj_cong imp_cong + split: option.split_asm) + by (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def objBits_defs + cte_map_tcb_0 cte_map_tcb_1[simplified] tcb_at_cte_at' cte_at_tcb_at_32' + isCap_simps domIff valid_tcb'_def tcb_cte_cases_def + split: option.split_asm + dest!: isValidVTableRootD) +qed + + +lemmas threadSet_ipcbuffer_trivial + = threadSet_invs_trivial[where F="tcbIPCBuffer_update F'" for F', + simplified inQ_def, simplified] + +crunches setPriority, setMCPriority + for cap_to'[wp]: "ex_nonz_cap_to' a" + (simp: crunch_simps) + +lemma cteInsert_sa_simple[wp]: + "cteInsert newCap srcSlot destSlot \sch_act_simple\" + by (simp add: sch_act_simple_def, wp) + +lemma isReplyCapD: + "isReplyCap cap \ \ptr master grant. cap = capability.ReplyCap ptr master grant" + by (simp add: isCap_simps) + +lemma tc_invs': + "\invs' and sch_act_simple and tcb_at' a and ex_nonz_cap_to' a and + K (valid_option_prio d \ valid_option_prio mcp) and + case_option \ (valid_cap' o fst) e' and + K (case_option True (isCNodeCap o fst) e') and + case_option \ (valid_cap' o fst) f' and + K (case_option True (isValidVTableRoot o fst) f') and + case_option \ (valid_cap') (case_option None (case_option None (Some o fst) o snd) g) and + K (case_option True isArchObjectCap (case_option None (case_option None (Some o fst) o snd) g)) + and K (case_option True (swp is_aligned msg_align_bits o fst) g) \ + invokeTCB (tcbinvocation.ThreadControl a sl b' mcp d e' f' g) + \\rv. invs'\" + apply (rule hoare_gen_asm) + apply (simp add: split_def invokeTCB_def getThreadCSpaceRoot getThreadVSpaceRoot + getThreadBufferSlot_def locateSlot_conv + cong: option.case_cong) + apply (simp only: eq_commute[where a="a"]) + apply (rule hoare_walk_assmsE) + apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp + hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] + apply (rule hoare_walk_assmsE) + apply (clarsimp simp: pred_conj_def option.splits [where P="\x. 
x s" for s]) + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' + typ_at_lifts[OF setMCPriority_typ_at'] + hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] + apply (wp add: setP_invs' hoare_weak_lift_imp hoare_vcg_all_lift)+ + apply (rule case_option_wp_None_return[OF setP_invs'[simplified pred_conj_assoc]]) + apply clarsimp + apply wpfix + apply assumption + apply (rule case_option_wp_None_returnOk) + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift + checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + threadSet_invs_trivial2 threadSet_tcb' hoare_vcg_all_lift threadSet_cte_wp_at')+ + apply (wpsimp wp: hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + cteDelete_invs' cteDelete_invs' cteDelete_typ_at'_lifts)+ + apply (assumption | clarsimp cong: conj_cong imp_cong | (rule case_option_wp_None_returnOk) + | wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + hoare_vcg_imp_lift' hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] + checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] + hoare_vcg_const_imp_lift_R assertDerived_wp_weak hoare_weak_lift_imp_R cteDelete_deletes + hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R + cteDelete_invs' cteDelete_typ_at'_lifts cteDelete_sch_act_simple)+ + apply (clarsimp simp: tcb_cte_cases_def cte_level_bits_def objBits_defs tcbIPCBufferSlot_def) + by (auto dest!: isCapDs isReplyCapD isValidVTableRootD simp: isCap_simps) + +lemma setSchedulerAction_invs'[wp]: + "\invs' and sch_act_wf sa + and (\s. sa = ResumeCurrentThread + \ obj_at' (Not \ tcbQueued) (ksCurThread s) s) + and (\s. sa = ResumeCurrentThread + \ ksCurThread s = ksIdleThread s \ tcb_in_cur_domain' (ksCurThread s) s)\ + setSchedulerAction sa + \\rv. invs'\" + apply (simp add: setSchedulerAction_def) + apply wp + apply (clarsimp simp add: invs'_def valid_state'_def valid_irq_node'_def + valid_queues_def valid_queues_no_bitmap_def bitmapQ_defs cur_tcb'_def + ct_not_inQ_def) + apply (simp add: ct_idle_or_in_cur_domain'_def) + done + +end + +consts + copyregsets_map :: "arch_copy_register_sets \ Arch.copy_register_sets" + +context begin interpretation Arch . (*FIXME: arch_split*) + +primrec + tcbinv_relation :: "tcb_invocation \ tcbinvocation \ bool" +where + "tcbinv_relation (tcb_invocation.ReadRegisters a b c d) x + = (x = tcbinvocation.ReadRegisters a b c (copyregsets_map d))" +| "tcbinv_relation (tcb_invocation.WriteRegisters a b c d) x + = (x = tcbinvocation.WriteRegisters a b c (copyregsets_map d))" +| "tcbinv_relation (tcb_invocation.CopyRegisters a b c d e f g) x + = (x = tcbinvocation.CopyRegisters a b c d e f (copyregsets_map g))" +| "tcbinv_relation (tcb_invocation.ThreadControl a sl flt_ep mcp prio croot vroot buf) x + = (\flt_ep' croot' vroot' sl' buf'. flt_ep = option_map to_bl flt_ep' \ + newroot_rel croot croot' \ newroot_rel vroot vroot' \ + ({croot, vroot, option_map undefined buf} \ {None} + \ sl' = cte_map sl) \ + (case buf of None \ buf' = None + | Some (vptr, g'') \ \g'''. 
buf' = Some (vptr, g''') + \ newroot_rel g'' g''') \ + x = tcbinvocation.ThreadControl a sl' flt_ep' mcp prio croot' vroot' buf')" +| "tcbinv_relation (tcb_invocation.Suspend a) x + = (x = tcbinvocation.Suspend a)" +| "tcbinv_relation (tcb_invocation.Resume a) x + = (x = tcbinvocation.Resume a)" +| "tcbinv_relation (tcb_invocation.NotificationControl t ntfnptr) x + = (x = tcbinvocation.NotificationControl t ntfnptr)" +| "tcbinv_relation (tcb_invocation.SetTLSBase ref w) x + = (x = tcbinvocation.SetTLSBase ref w)" + +primrec + tcb_inv_wf' :: "tcbinvocation \ kernel_state \ bool" +where + "tcb_inv_wf' (tcbinvocation.Suspend t) + = (tcb_at' t and ex_nonz_cap_to' t)" +| "tcb_inv_wf' (tcbinvocation.Resume t) + = (tcb_at' t and ex_nonz_cap_to' t)" +| "tcb_inv_wf' (tcbinvocation.ThreadControl t sl fe mcp p croot vroot buf) + = (tcb_at' t and ex_nonz_cap_to' t and + K (valid_option_prio p \ valid_option_prio mcp) and + case_option \ (valid_cap' o fst) croot and + K (case_option True (isCNodeCap o fst) croot) and + case_option \ (valid_cap' o fst) vroot and + K (case_option True (isValidVTableRoot o fst) vroot) and + case_option \ (case_option \ (valid_cap' o fst) o snd) buf and + case_option \ (case_option \ (cte_at' o snd) o snd) buf and + K (case_option True (swp is_aligned msg_align_bits o fst) buf) and + K (case_option True (case_option True (isArchObjectCap o fst) o snd) buf) and + (\s. {croot, vroot, option_map undefined buf} \ {None} + \ cte_at' sl s) and + (\s. case_option True (\(pr, auth). mcpriority_tcb_at' ((\) pr) auth s) p) and + (\s. case_option True (\(m, auth). mcpriority_tcb_at' ((\) m) auth s) mcp))" +| "tcb_inv_wf' (tcbinvocation.ReadRegisters src susp n arch) + = (tcb_at' src and ex_nonz_cap_to' src)" +| "tcb_inv_wf' (tcbinvocation.WriteRegisters dest resume values arch) + = (tcb_at' dest and ex_nonz_cap_to' dest)" +| "tcb_inv_wf' (tcbinvocation.CopyRegisters dest src suspend_source resume_target + trans_frame trans_int trans_arch) + = (tcb_at' dest and tcb_at' src and ex_nonz_cap_to' src and ex_nonz_cap_to' dest)" +| "tcb_inv_wf' (tcbinvocation.NotificationControl t ntfn) + = (tcb_at' t and ex_nonz_cap_to' t + and (case ntfn of None \ \ + | Some ntfnptr \ obj_at' (\ko. ntfnBoundTCB ko = None + \ (\q. 
ntfnObj ko \ WaitingNtfn q)) ntfnptr + and ex_nonz_cap_to' ntfnptr + and bound_tcb_at' ((=) None) t) )" +| "tcb_inv_wf' (tcbinvocation.SetTLSBase ref w) + = (tcb_at' ref and ex_nonz_cap_to' ref)" + +lemma invokeTCB_corres: + "tcbinv_relation ti ti' \ + corres (dc \ (=)) + (einvs and simple_sched_action and Tcb_AI.tcb_inv_wf ti) + (invs' and sch_act_simple and tcb_inv_wf' ti') + (invoke_tcb ti) (invokeTCB ti')" + apply (case_tac ti, simp_all only: tcbinv_relation.simps valid_tcb_invocation_def) + apply (rule corres_guard_imp [OF invokeTCB_WriteRegisters_corres], simp+)[1] + apply (rule corres_guard_imp [OF invokeTCB_ReadRegisters_corres], simp+)[1] + apply (rule corres_guard_imp [OF invokeTCB_CopyRegisters_corres], simp+)[1] + apply (clarsimp simp del: invoke_tcb.simps) + apply (rename_tac word one t2 mcp t3 t4 t5 t6 t7 t8 t9 t10 t11) + apply (rule_tac F="is_aligned word 5" in corres_req) + apply (clarsimp simp add: is_aligned_weaken [OF tcb_aligned]) + apply (rule corres_guard_imp [OF transferCaps_corres], clarsimp+) + apply (clarsimp simp: is_cnode_or_valid_arch_def + split: option.split option.split_asm) + apply clarsimp + apply (auto split: option.split_asm simp: newroot_rel_def)[1] + apply (simp add: invokeTCB_def liftM_def[symmetric] + o_def dc_def[symmetric]) + apply (rule corres_guard_imp [OF suspend_corres], simp+) + apply (simp add: invokeTCB_def liftM_def[symmetric] + o_def dc_def[symmetric]) + apply (rule corres_guard_imp [OF restart_corres], simp+) + apply (simp add:invokeTCB_def) + apply (rename_tac option) + apply (case_tac option) + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF unbindNotification_corres]) + apply (rule corres_trivial, simp) + apply wp+ + apply (clarsimp) + apply clarsimp + apply simp + apply (rule corres_guard_imp) + apply (rule corres_split[OF bindNotification_corres]) + apply (rule corres_trivial, simp) + apply wp+ + apply clarsimp + apply (clarsimp simp: obj_at_def is_ntfn) + apply (clarsimp simp: obj_at'_def) + apply (simp add: invokeTCB_def tlsBaseRegister_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF TcbAcc_R.asUser_setRegister_corres]) + apply (rule corres_split[OF Bits_R.getCurThread_corres]) + apply (rule corres_split[OF Corres_UL.corres_when]) + apply simp + apply (rule TcbAcc_R.rescheduleRequired_corres) + apply (rule corres_trivial, simp) + apply (wpsimp wp: hoare_drop_imp)+ + apply (clarsimp simp: valid_sched_weak_strg einvs_valid_etcbs invs_distinct) + apply (clarsimp simp: invs_valid_queues' invs_queues) + done + +lemma tcbBoundNotification_caps_safe[simp]: + "\(getF, setF)\ran tcb_cte_cases. + getF (tcbBoundNotification_update (\_. Some ntfnptr) tcb) = getF tcb" + by (case_tac tcb, simp add: tcb_cte_cases_def cteSizeBits_def) + +lemma valid_bound_ntfn_lift: + assumes P: "\P T p. \\s. P (typ_at' T p s)\ f \\rv s. P (typ_at' T p s)\" + shows "\\s. valid_bound_ntfn' a s\ f \\rv s. valid_bound_ntfn' a s\" + apply (simp add: valid_bound_ntfn'_def, case_tac a, simp_all) + apply (wp typ_at_lifts[OF P])+ + done + +lemma bindNotification_invs': + "\bound_tcb_at' ((=) None) tcbptr + and ex_nonz_cap_to' ntfnptr + and ex_nonz_cap_to' tcbptr + and obj_at' (\ntfn. ntfnBoundTCB ntfn = None \ (\q. ntfnObj ntfn \ WaitingNtfn q)) ntfnptr + and invs'\ + bindNotification tcbptr ntfnptr + \\_. 
invs'\" + including no_pre + apply (simp add: bindNotification_def invs'_def valid_state'_def) + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (rule hoare_pre) + apply (wp set_ntfn_valid_pspace' sbn_sch_act' sbn_valid_queues valid_irq_node_lift + setBoundNotification_ct_not_inQ valid_bound_ntfn_lift + untyped_ranges_zero_lift + | clarsimp dest!: global'_no_ex_cap simp: cteCaps_of_def)+ + apply (clarsimp simp: valid_pspace'_def) + apply (cases "tcbptr = ntfnptr") + apply (clarsimp dest!: pred_tcb_at' simp: obj_at'_def) + apply (clarsimp simp: pred_tcb_at' conj_comms o_def) + apply (subst delta_sym_refs, assumption) + apply (fastforce simp: ntfn_q_refs_of'_def obj_at'_def + dest!: symreftype_inverse' + split: ntfn.splits if_split_asm) + apply (clarsimp split: if_split_asm) + apply (fastforce simp: tcb_st_refs_of'_def + dest!: bound_tcb_at_state_refs_ofD' + split: if_split_asm thread_state.splits) + apply (fastforce simp: obj_at'_def state_refs_of'_def + dest!: symreftype_inverse') + apply (clarsimp simp: valid_pspace'_def) + apply (frule_tac P="\k. k=ntfn" in obj_at_valid_objs', simp) + apply (clarsimp simp: valid_obj'_def valid_ntfn'_def obj_at'_def + dest!: pred_tcb_at' + split: ntfn.splits) + done + +lemma tcbntfn_invs': + "\invs' and tcb_inv_wf' (tcbinvocation.NotificationControl tcb ntfnptr)\ + invokeTCB (tcbinvocation.NotificationControl tcb ntfnptr) + \\rv. invs'\" + apply (simp add: invokeTCB_def) + apply (case_tac ntfnptr, simp_all) + apply (wp unbindNotification_invs bindNotification_invs' | simp)+ + done + +lemma setTLSBase_invs'[wp]: + "\invs' and tcb_inv_wf' (tcbinvocation.SetTLSBase tcb tls_base)\ + invokeTCB (tcbinvocation.SetTLSBase tcb tls_base) + \\rv. invs'\" + by (wpsimp simp: invokeTCB_def) + +lemma tcbinv_invs': + "\invs' and sch_act_simple and ct_in_state' runnable' and tcb_inv_wf' ti\ + invokeTCB ti + \\rv. 
invs'\" + apply (case_tac ti, simp_all only:) + apply (simp add: invokeTCB_def) + apply wp + apply (clarsimp simp: invs'_def valid_state'_def + dest!: global'_no_ex_cap) + apply (simp add: invokeTCB_def) + apply (wp restart_invs') + apply (clarsimp simp: invs'_def valid_state'_def + dest!: global'_no_ex_cap) + apply (wp tc_invs') + apply (clarsimp split: option.split dest!: isCapDs) + apply (wp writereg_invs' readreg_invs' copyreg_invs' tcbntfn_invs' + | simp)+ + done + +declare assertDerived_wp [wp] + +lemma copyregsets_map_only[simp]: + "copyregsets_map v = ARMNoExtraRegisters" + by (cases "copyregsets_map v", simp) + +lemma decodeReadRegisters_corres: + "corres (ser \ tcbinv_relation) (invs and tcb_at t) (invs' and tcb_at' t) + (decode_read_registers args (cap.ThreadCap t)) + (decodeReadRegisters args (ThreadCap t))" + apply (simp add: decode_read_registers_def decodeReadRegisters_def) + apply (cases args, simp_all) + apply (case_tac list, simp_all) + apply (simp add: decodeTransfer_def) + apply (simp add: range_check_def rangeCheck_def frameRegisters_def gpRegisters_def) + apply (simp add: unlessE_def split del: if_split, simp add: returnOk_def split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule corres_trivial) + apply (fastforce simp: returnOk_def) + apply (simp add: liftE_bindE) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_trivial) + apply (clarsimp simp: whenE_def) + apply (wp|simp)+ + done + +lemma decodeWriteRegisters_corres: + notes if_cong [cong] + shows + "\ length args < 2 ^ word_bits \ \ + corres (ser \ tcbinv_relation) (invs and tcb_at t) (invs' and tcb_at' t) + (decode_write_registers args (cap.ThreadCap t)) + (decodeWriteRegisters args (ThreadCap t))" + apply (simp add: decode_write_registers_def decodeWriteRegisters_def) + apply (cases args, simp_all) + apply (case_tac list, simp_all) + apply (simp add: decodeTransfer_def genericLength_def) + apply (simp add: word_less_nat_alt unat_of_nat64) + apply (simp add: whenE_def, simp add: returnOk_def) + apply (simp add: genericTake_def) + apply clarsimp + apply (rule corres_guard_imp) + apply (simp add: liftE_bindE) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split_norE) + apply (rule corres_trivial, simp) + apply (rule corres_trivial, simp) + apply (wp)+ + apply simp+ + done + +lemma decodeCopyRegisters_corres: + "\ list_all2 cap_relation extras extras'; length args < 2 ^ word_bits \ \ + corres (ser \ tcbinv_relation) (invs and tcb_at t) (invs' and tcb_at' t) + (decode_copy_registers args (cap.ThreadCap t) extras) + (decodeCopyRegisters args (ThreadCap t) extras')" + apply (simp add: decode_copy_registers_def decodeCopyRegisters_def) + apply (cases args, simp_all) + apply (cases extras, simp_all add: decodeTransfer_def null_def) + apply (clarsimp simp: list_all2_Cons1 null_def) + apply (case_tac aa, simp_all) + apply (simp add: returnOk_def) + apply clarsimp + done + +lemma decodeReadReg_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + decodeReadRegisters args (ThreadCap t) + \tcb_inv_wf'\,-" + apply (simp add: decodeReadRegisters_def decodeTransfer_def whenE_def + cong: list.case_cong) + apply (rule hoare_pre) + apply (wp | wpc)+ + apply simp + done + +lemma decodeWriteReg_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + decodeWriteRegisters args (ThreadCap t) + \tcb_inv_wf'\,-" + apply (simp add: decodeWriteRegisters_def whenE_def decodeTransfer_def + cong: list.case_cong) + apply (rule hoare_pre) + apply (wp | 
wpc)+ + apply simp + done + +lemma decodeCopyReg_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t + and (\s. \x \ set extras. s \' x + \ (\y \ zobj_refs' x. ex_nonz_cap_to' y s))\ + decodeCopyRegisters args (ThreadCap t) extras + \tcb_inv_wf'\,-" + apply (simp add: decodeCopyRegisters_def whenE_def decodeTransfer_def + cong: list.case_cong capability.case_cong bool.case_cong + split del: if_split) + apply (rule hoare_pre) + apply (wp | wpc)+ + apply (clarsimp simp: null_def neq_Nil_conv + valid_cap'_def[where c="ThreadCap t" for t]) + done + +lemma eq_ucast_word8[simp]: + "((ucast (x :: 8 word) :: machine_word) = ucast y) = (x = y)" + apply safe + apply (drule_tac f="ucast :: (machine_word \ 8 word)" in arg_cong) + apply (simp add: ucast_up_ucast_id is_up_def + source_size_def target_size_def word_size) + done + +lemma checkPrio_corres: + "corres (ser \ dc) (tcb_at auth and pspace_aligned and pspace_distinct) \ + (check_prio p auth) (checkPrio p auth)" + apply (simp add: check_prio_def checkPrio_def) + apply (rule corres_guard_imp) + apply (simp add: liftE_bindE) + apply (rule corres_split[OF threadGet_corres[where r="(=)"]]) + apply (clarsimp simp: tcb_relation_def) + apply (rule_tac rvr = dc and + R = \ and + R' = \ in + whenE_throwError_corres'[where m="returnOk ()" and m'="returnOk ()", simplified]) + apply (simp add: minPriority_def) + apply (clarsimp simp: minPriority_def) + apply (rule corres_returnOkTT) + apply (simp add: minPriority_def) + apply (wp gct_wp)+ + apply (simp add: cur_tcb_def cur_tcb'_def)+ + done + +lemma decodeSetPriority_corres: + "\ cap_relation cap cap'; is_thread_cap cap; + list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ + corres (ser \ tcbinv_relation) + (cur_tcb and valid_etcbs and (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) + (invs' and (\s. \x \ set extras'. s \' (fst x))) + (decode_set_priority args cap slot extras) + (decodeSetPriority args cap' extras')" + apply (cases args; cases extras; cases extras'; + clarsimp simp: decode_set_priority_def decodeSetPriority_def) + apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') + apply (rule corres_split_eqrE) + apply corresKsimp + apply (rule corres_splitEE[OF checkPrio_corres]) + apply (rule corres_returnOkTT) + apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) + by (wpsimp simp: valid_cap_def valid_cap'_def)+ + +lemma decodeSetMCPriority_corres: + "\ cap_relation cap cap'; is_thread_cap cap; + list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ + corres (ser \ tcbinv_relation) + (cur_tcb and valid_etcbs and (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) + (invs' and (\s. \x \ set extras'. s \' (fst x))) + (decode_set_mcpriority args cap slot extras) + (decodeSetMCPriority args cap' extras')" + apply (cases args; cases extras; cases extras'; + clarsimp simp: decode_set_mcpriority_def decodeSetMCPriority_def) + apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') + apply (rule corres_split_eqrE) + apply corresKsimp + apply (rule corres_splitEE[OF checkPrio_corres]) + apply (rule corres_returnOkTT) + apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) + by (wpsimp simp: valid_cap_def valid_cap'_def)+ + +lemma valid_objs'_maxPriority': + "\s t. \ valid_objs' s; tcb_at' t s \ \ obj_at' (\tcb. 
tcbMCP tcb \ maxPriority) t s" + apply (erule (1) valid_objs_valid_tcbE) + apply (clarsimp simp: valid_tcb'_def) + done + +lemma getMCP_sp: + "\P\ threadGet tcbMCP t \\rv. mcpriority_tcb_at' (\st. st = rv) t and P\" + apply (simp add: threadGet_def) + apply wp + apply (simp add: o_def pred_tcb_at'_def) + apply (wp getObject_tcb_wp) + apply (clarsimp simp: obj_at'_def) + done + +lemma getMCP_wp: "\\s. \mcp. mcpriority_tcb_at' ((=) mcp) t s \ P mcp s\ threadGet tcbMCP t \P\" + apply (rule hoare_post_imp) + prefer 2 + apply (rule getMCP_sp) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + done + +lemma checkPrio_wp: + "\ \s. mcpriority_tcb_at' (\mcp. prio \ ucast mcp) auth s \ P s \ + checkPrio prio auth + \ \rv. P \,-" + apply (simp add: checkPrio_def) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) + by (auto simp add: pred_tcb_at'_def obj_at'_def) + +lemma checkPrio_lt_ct: + "\\\ checkPrio prio auth \\rv s. mcpriority_tcb_at' (\mcp. prio \ ucast mcp) auth s\, -" + by (wp checkPrio_wp) simp + +lemma checkPrio_lt_ct_weak: + "\\\ checkPrio prio auth \\rv s. mcpriority_tcb_at' (\mcp. ucast prio \ mcp) auth s\, -" + apply (rule hoare_post_imp_R) + apply (rule checkPrio_lt_ct) + apply (clarsimp simp: pred_tcb_at'_def obj_at'_def) + by (rule le_ucast_ucast_le) simp + +crunch inv: checkPrio "P" + +lemma decodeSetPriority_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t \ + decodeSetPriority args (ThreadCap t) extras \tcb_inv_wf'\,-" + unfolding decodeSetPriority_def + apply (wpsimp wp: checkPrio_lt_ct_weak | wp (once) checkPrio_inv)+ + apply (clarsimp simp: maxPriority_def numPriorities_def) + apply unat_arith + apply simp + done + +lemma decodeSetPriority_inv[wp]: + "\P\ decodeSetPriority args cap extras \\rv. P\" + apply (simp add: decodeSetPriority_def Let_def split del: if_split) + apply (rule hoare_pre) + apply (wp checkPrio_inv | simp add: whenE_def split del: if_split + | rule hoare_drop_imps + | wpcw)+ + done + +lemma decodeSetMCPriority_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t \ + decodeSetMCPriority args (ThreadCap t) extras \tcb_inv_wf'\,-" + unfolding decodeSetMCPriority_def Let_def + apply (rule hoare_pre) + apply (wp checkPrio_lt_ct_weak | wpc | simp | wp (once) checkPrio_inv)+ + apply (clarsimp simp: maxPriority_def numPriorities_def) + using max_word_max [of \UCAST(64 \ 8) x\ for x] + apply (simp add: max_word_mask numeral_eq_Suc mask_Suc) + done + +lemma decodeSetMCPriority_inv[wp]: + "\P\ decodeSetMCPriority args cap extras \\rv. P\" + apply (simp add: decodeSetMCPriority_def Let_def split del: if_split) + apply (rule hoare_pre) + apply (wp checkPrio_inv | simp add: whenE_def split del: if_split + | rule hoare_drop_imps + | wpcw)+ + done + +lemma decodeSetSchedParams_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t \ + decodeSetSchedParams args (ThreadCap t) extras + \tcb_inv_wf'\,-" + unfolding decodeSetSchedParams_def + apply (wpsimp wp: checkPrio_lt_ct_weak | wp (once) checkPrio_inv)+ + apply (clarsimp simp: maxPriority_def numPriorities_def) + using max_word_max [of \UCAST(64 \ 8) x\ for x] + apply (simp add: max_word_mask numeral_eq_Suc mask_Suc) + done + +lemma decodeSetSchedParams_corres: + "\ cap_relation cap cap'; is_thread_cap cap; + list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ + corres (ser \ tcbinv_relation) + (cur_tcb and valid_etcbs and + (pspace_aligned and pspace_distinct and (\s. \x \ set extras. s \ (fst x)))) + (invs' and (\s. \x \ set extras'. 
s \' (fst x))) + (decode_set_sched_params args cap slot extras) + (decodeSetSchedParams args cap' extras')" + apply (simp add: decode_set_sched_params_def decodeSetSchedParams_def) + apply (cases "length args < 2") + apply (clarsimp split: list.split) + apply (cases "length extras < 1") + apply (clarsimp split: list.split simp: list_all2_Cons2) + apply (clarsimp simp: list_all2_Cons1 neq_Nil_conv val_le_length_Cons linorder_not_less) + apply (rule corres_split_eqrE) + apply corresKsimp + apply (rule corres_split_norE[OF checkPrio_corres]) + apply (rule corres_splitEE[OF checkPrio_corres]) + apply (rule corres_returnOkTT) + apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) + apply (wpsimp wp: check_prio_inv checkPrio_inv + simp: valid_cap_def valid_cap'_def)+ + done + +lemma checkValidIPCBuffer_corres: + "cap_relation cap cap' \ + corres (ser \ dc) \ \ + (check_valid_ipc_buffer vptr cap) + (checkValidIPCBuffer vptr cap')" + apply (simp add: check_valid_ipc_buffer_def + checkValidIPCBuffer_def + unlessE_def Let_def + split: cap_relation_split_asm arch_cap.split_asm bool.splits) + apply (simp add: capTransferDataSize_def msgMaxLength_def + msg_max_length_def msgMaxExtraCaps_def + cap_transfer_data_size_def word_size ipcBufferSizeBits_def + msgLengthBits_def msgExtraCapBits_def msg_align_bits msgAlignBits_def + msg_max_extra_caps_def is_aligned_mask whenE_def split:vmpage_size.splits) + apply (auto simp add: returnOk_def) + done + +lemma checkValidIPCBuffer_ArchObject_wp: + "\\s. isArchObjectCap cap \ is_aligned x msg_align_bits \ P s\ + checkValidIPCBuffer x cap + \\rv s. P s\,-" + apply (simp add: checkValidIPCBuffer_def + whenE_def unlessE_def + cong: capability.case_cong + arch_capability.case_cong + split del: if_split) + apply (rule hoare_pre) + apply (wp whenE_throwError_wp + | wpc | clarsimp simp: ipcBufferSizeBits_def isCap_simps is_aligned_mask msg_align_bits msgAlignBits_def)+ + done + +lemma decodeSetIPCBuffer_corres: + notes if_cong [cong] + shows + "\ cap_relation cap cap'; is_thread_cap cap; + list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras' \ \ + corres (ser \ tcbinv_relation) (\s. \x \ set extras. cte_at (snd x) s) + (\s. invs' s \ (\x \ set extras'. cte_at' (snd x) s)) + (decode_set_ipc_buffer args cap slot extras) + (decodeSetIPCBuffer args cap' (cte_map slot) extras')" + apply (simp add: decode_set_ipc_buffer_def decodeSetIPCBuffer_def + split del: if_split) + apply (cases args) + apply simp + apply (cases extras) + apply simp + apply (clarsimp simp: list_all2_Cons1 liftME_def[symmetric] + is_cap_simps + split del: if_split) + apply (clarsimp simp add: returnOk_def newroot_rel_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule deriveCap_corres; simp) + apply (simp add: o_def newroot_rel_def split_def dc_def[symmetric]) + apply (erule checkValidIPCBuffer_corres) + apply (wp hoareE_TrueI | simp)+ + apply fastforce + done + +lemma decodeSetIPC_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t and cte_at' slot + and (\s. \v \ set extras. 
s \' fst v \ cte_at' (snd v) s)\ + decodeSetIPCBuffer args (ThreadCap t) slot extras + \tcb_inv_wf'\,-" + apply (simp add: decodeSetIPCBuffer_def Let_def whenE_def + split del: if_split cong: list.case_cong prod.case_cong) + apply (rule hoare_pre) + apply (wp | wpc | simp)+ + apply (rule checkValidIPCBuffer_ArchObject_wp) + apply simp + apply (wp hoare_drop_imps) + apply clarsimp + done + +lemma decodeSetIPCBuffer_is_tc[wp]: + "\\\ decodeSetIPCBuffer args cap slot extras \\rv s. isThreadControl rv\,-" + apply (simp add: decodeSetIPCBuffer_def Let_def + split del: if_split cong: list.case_cong prod.case_cong) + apply (rule hoare_pre) + apply (wp | wpc)+ + apply (simp only: isThreadControl_def tcbinvocation.simps) + apply wp+ + apply (clarsimp simp: isThreadControl_def) + done + +lemma decodeSetPriority_is_tc[wp]: + "\\\ decodeSetPriority args cap extras \\rv s. isThreadControl rv\,-" + apply (simp add: decodeSetPriority_def) + apply wpsimp + apply (clarsimp simp: isThreadControl_def) + done + +lemma decodeSetMCPriority_is_tc[wp]: + "\\\ decodeSetMCPriority args cap extras \\rv s. isThreadControl rv\,-" + apply (simp add: decodeSetMCPriority_def) + apply wpsimp + apply (clarsimp simp: isThreadControl_def) + done + +crunch inv[wp]: decodeSetIPCBuffer "P" + (simp: crunch_simps) + +lemma slotCapLongRunningDelete_corres: + "cte_map ptr = ptr' \ + corres (=) (cte_at ptr and invs) invs' + (slot_cap_long_running_delete ptr) + (slotCapLongRunningDelete ptr')" + apply (clarsimp simp: slot_cap_long_running_delete_def + slotCapLongRunningDelete_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_cap_corres]) + apply (auto split: cap_relation_split_asm arch_cap.split_asm + intro!: corres_rel_imp [OF isFinalCapability_corres[where ptr=ptr]] + simp: liftM_def[symmetric] final_matters'_def + long_running_delete_def + longRunningDelete_def isCap_simps)[1] + apply (wp get_cap_wp getCTE_wp)+ + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of) + apply fastforce + done + +lemma slot_long_running_inv'[wp]: + "\P\ slotCapLongRunningDelete ptr \\rv. P\" + apply (simp add: slotCapLongRunningDelete_def) + apply (rule hoare_seq_ext [OF _ getCTE_inv]) + apply (rule hoare_pre, wpcw, (wp isFinalCapability_inv)+) + apply simp + done + +lemma cap_CNode_case_throw: + "(case cap of CNodeCap a b c d \ m | _ \ throw x) + = (doE unlessE (isCNodeCap cap) (throw x); m odE)" + by (cases cap, simp_all add: isCap_simps unlessE_def) + +lemma isValidVTableRoot_eq: + "cap_relation cap cap' \ isValidVTableRoot cap' = is_valid_vtable_root cap" + apply (cases cap; simp add: isValidVTableRoot_def isVTableRoot_def is_valid_vtable_root_simp) + apply (rename_tac acap, case_tac acap; simp) + apply (auto split: pt_type.splits simp: mdata_map_def) + done + +lemma decodeSetSpace_corres: + notes if_cong [cong] + shows + "\ cap_relation cap cap'; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras'; + is_thread_cap cap \ \ + corres (ser \ tcbinv_relation) + (invs and valid_cap cap and (\s. \x \ set extras. cte_at (snd x) s)) + (invs' and valid_cap' cap' and (\s. \x \ set extras'. 
cte_at' (snd x) s)) + (decode_set_space args cap slot extras) + (decodeSetSpace args cap' (cte_map slot) extras')" + apply (simp add: decode_set_space_def decodeSetSpace_def + Let_def + split del: if_split) + apply (cases "3 \ length args \ 2 \ length extras'") + apply (clarsimp simp: val_le_length_Cons list_all2_Cons2 + split del: if_split) + apply (simp add: liftE_bindE liftM_def unlessE_throwError_returnOk unlessE_whenE + bindE_assoc cap_CNode_case_throw + getThreadCSpaceRoot getThreadVSpaceRoot + split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split[OF slotCapLongRunningDelete_corres]) + apply (clarsimp simp: is_cap_simps get_tcb_ctable_ptr_def cte_map_tcb_0) + apply (rule corres_split[OF slotCapLongRunningDelete_corres]) + apply (clarsimp simp: is_cap_simps get_tcb_vtable_ptr_def cte_map_tcb_1[simplified]) + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply simp + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_splitEE[OF deriveCap_corres]) + apply (fastforce dest: list_all2_nthD2[where p=0] simp: cap_map_update_data) + apply (fastforce dest: list_all2_nthD2[where p=0]) + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply simp + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_splitEE[OF deriveCap_corres]) + apply (clarsimp simp: cap_map_update_data) + apply simp + apply (rule corres_split_norE) + apply (rule corres_whenE) + apply (simp add: isValidVTableRoot_eq) + apply (rule corres_trivial, simp) + apply simp + apply (rule corres_trivial) + apply (clarsimp simp: returnOk_def newroot_rel_def is_cap_simps + list_all2_conv_all_nth split_def) + apply wp+ + apply ((simp only: simp_thms pred_conj_def | wp)+)[2] + apply (unfold whenE_def, wp+)[2] + apply ((simp split del: if_split | wp | rule hoare_drop_imps)+)[2] + apply (unfold whenE_def, wp+)[2] + apply simp + apply (wp hoare_drop_imps)+ + apply (clarsimp simp: get_tcb_ctable_ptr_def get_tcb_vtable_ptr_def + is_cap_simps valid_cap_def tcb_at_cte_at_0 + tcb_at_cte_at_1[simplified]) + apply fastforce + apply (frule list_all2_lengthD) + apply (clarsimp split: list.split) + done + +lemma decodeSetSpace_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t and cte_at' slot + and (\s. \x \ set extras. s \' fst x \ cte_at' (snd x) s \ t \ snd x \ t + 32 \ snd x)\ + decodeSetSpace args (ThreadCap t) slot extras + \tcb_inv_wf'\,-" + apply (simp add: decodeSetSpace_def Let_def split_def + unlessE_def getThreadVSpaceRoot getThreadCSpaceRoot + cap_CNode_case_throw + split del: if_split cong: if_cong list.case_cong) + apply (rule hoare_pre) + apply (wp + | simp add: o_def split_def + split del: if_split + | wpc + | rule hoare_drop_imps)+ + apply (clarsimp simp del: length_greater_0_conv + split del: if_split) + apply (simp del: length_greater_0_conv add: valid_updateCapDataI) + done + +lemma decodeSetSpace_inv[wp]: + "\P\ decodeSetSpace args cap slot extras \\rv. P\" + apply (simp add: decodeSetSpace_def Let_def split_def + unlessE_def getThreadVSpaceRoot getThreadCSpaceRoot + split del: if_split cong: if_cong list.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps + | simp add: o_def split_def split del: if_split + | wpcw)+ + done + +lemma decodeSetSpace_is_tc[wp]: + "\\\ decodeSetSpace args cap slot extras \\rv s. 
isThreadControl rv\,-" + apply (simp add: decodeSetSpace_def Let_def split_def + unlessE_def getThreadVSpaceRoot getThreadCSpaceRoot + split del: if_split cong: list.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps + | simp only: isThreadControl_def tcbinvocation.simps + | wpcw)+ + apply simp + done + +lemma decodeSetSpace_tc_target[wp]: + "\\s. P (capTCBPtr cap)\ decodeSetSpace args cap slot extras \\rv s. P (tcThread rv)\,-" + apply (simp add: decodeSetSpace_def Let_def split_def + unlessE_def getThreadVSpaceRoot getThreadCSpaceRoot + split del: if_split cong: list.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps + | simp only: tcbinvocation.sel + | wpcw)+ + apply simp + done + +lemma decodeSetSpace_tc_slot[wp]: + "\\s. P slot\ decodeSetSpace args cap slot extras \\rv s. P (tcThreadCapSlot rv)\,-" + apply (simp add: decodeSetSpace_def split_def unlessE_def + getThreadVSpaceRoot getThreadCSpaceRoot + cong: list.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpcw | simp only: tcbinvocation.sel)+ + apply simp + done + +lemma decodeTCBConfigure_corres: + notes if_cong [cong] option.case_cong [cong] + shows + "\ cap_relation cap cap'; list_all2 (\(c, sl) (c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras'; + is_thread_cap cap \ \ + corres (ser \ tcbinv_relation) (einvs and valid_cap cap and (\s. \x \ set extras. cte_at (snd x) s)) + (invs' and valid_cap' cap' and (\s. \x \ set extras'. cte_at' (snd x) s)) + (decode_tcb_configure args cap slot extras) + (decodeTCBConfigure args cap' (cte_map slot) extras')" + apply (clarsimp simp add: decode_tcb_configure_def decodeTCBConfigure_def) + apply (cases "length args < 4") + apply (clarsimp split: list.split) + apply (cases "length extras < 3") + apply (clarsimp split: list.split simp: list_all2_Cons2) + apply (clarsimp simp: linorder_not_less val_le_length_Cons list_all2_Cons1 + priorityBits_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE) + apply (rule decodeSetIPCBuffer_corres; simp) + apply (rule corres_splitEE) + apply (rule decodeSetSpace_corres; simp) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_params" in corres_gen_asm) + apply (rule_tac F="tcb_invocation.is_ThreadControl set_space" in corres_gen_asm) + apply (rule_tac F="tcThreadCapSlot setSpace = cte_map slot" in corres_gen_asm2) + apply (rule corres_trivial) + apply (clarsimp simp: tcb_invocation.is_ThreadControl_def returnOk_def is_cap_simps) + apply (wp | simp add: invs_def valid_sched_def)+ + done + +lemma isThreadControl_def2: + "isThreadControl tinv = (\a b c d e f g h. tinv = ThreadControl a b c d e f g h)" + by (cases tinv, simp_all add: isThreadControl_def) + +lemma decodeSetSpaceSome[wp]: + "\\\ decodeSetSpace xs cap y zs + \\rv s. tcNewCRoot rv \ None\,-" + apply (simp add: decodeSetSpace_def split_def cap_CNode_case_throw + cong: list.case_cong if_cong del: not_None_eq) + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpcw + | simp only: tcbinvocation.sel option.simps)+ + apply simp + done + +lemma decodeTCBConf_wf[wp]: + "\invs' and tcb_at' t and ex_nonz_cap_to' t and cte_at' slot + and (\s. \x \ set extras. s \' fst x \ cte_at' (snd x) s \ t \ snd x \ t + 2^cteSizeBits \ snd x)\ + decodeTCBConfigure args (ThreadCap t) slot extras + \tcb_inv_wf'\,-" + apply (clarsimp simp add: decodeTCBConfigure_def Let_def + split del: if_split cong: list.case_cong) + apply (rule hoare_pre) + apply (wp | wpc)+ + apply (rule_tac Q'="\setSpace s. 
tcb_inv_wf' setSpace s \ tcb_inv_wf' setIPCParams s + \ isThreadControl setSpace \ isThreadControl setIPCParams + \ tcThread setSpace = t \ tcNewCRoot setSpace \ None" + in hoare_post_imp_R) + apply wp + apply (clarsimp simp: isThreadControl_def2 cong: option.case_cong) + apply wpsimp + apply (fastforce simp: isThreadControl_def2 objBits_defs) + done + +declare hoare_True_E_R [simp del] + +lemma lsft_real_cte: + "\valid_objs'\ lookupSlotForThread t x \\rv. real_cte_at' rv\, -" + apply (simp add: lookupSlotForThread_def) + apply (wp resolveAddressBits_real_cte_at'|simp add: split_def)+ + done + +lemma tcb_real_cte_32: + "\ real_cte_at' (t + 2^cteSizeBits) s; tcb_at' t s \ \ False" + by (clarsimp simp: obj_at'_def objBitsKO_def ps_clear_32) + +lemma decodeBindNotification_corres: +notes if_cong[cong] shows + "\ list_all2 (\x y. cap_relation (fst x) (fst y)) extras extras' \ \ + corres (ser \ tcbinv_relation) + (invs and tcb_at t and (\s. \x \ set extras. s \ (fst x))) + (invs' and tcb_at' t and (\s. \x \ set extras'. s \' (fst x))) + (decode_bind_notification (cap.ThreadCap t) extras) + (decodeBindNotification (capability.ThreadCap t) extras')" + apply (simp add: decode_bind_notification_def decodeBindNotification_def) + apply (simp add: null_def returnOk_def) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule corres_trivial) + apply (auto simp: returnOk_def whenE_def)[1] + apply (rule_tac F="extras \ []" in corres_gen_asm) + apply (rule corres_split_eqrE) + apply simp + apply (rule getBoundNotification_corres) + apply (rule corres_split_norE) + apply (rule corres_trivial, simp split: option.splits add: returnOk_def) + apply (rule corres_splitEE_prod[where r'="\rv rv'. ((fst rv) = (fst rv')) \ ((snd rv') = (AllowRead \ (snd rv)))"]) + apply (rule corres_trivial, simp) + apply (case_tac extras, simp, clarsimp simp: list_all2_Cons1) + apply (fastforce split: cap.splits capability.splits simp: returnOk_def) + apply (rule corres_split_norE) + apply (rule corres_trivial, clarsimp simp: whenE_def returnOk_def) + apply (clarsimp split del: if_split) + apply (rule corres_splitEE[where r'=ntfn_relation]) + apply simp + apply (rule getNotification_corres) + apply (rule corres_trivial, simp split del: if_split) + apply (simp add: ntfn_relation_def + split: Structures_A.ntfn.splits Structures_H.ntfn.splits + option.splits) + apply wp+ + apply (wp | simp add: whenE_def split del: if_split)+ + apply (wp | wpc | simp)+ + apply (simp | wp gbn_wp gbn_wp')+ + apply (fastforce simp: valid_cap_def valid_cap'_def dest: hd_in_set)+ + done + +lemma decodeUnbindNotification_corres: + "corres (ser \ tcbinv_relation) + (tcb_at t and pspace_aligned and pspace_distinct) + \ + (decode_unbind_notification (cap.ThreadCap t)) + (decodeUnbindNotification (capability.ThreadCap t))" + apply (simp add: decode_unbind_notification_def decodeUnbindNotification_def) + apply (rule corres_guard_imp) + apply (rule corres_split_eqrE) + apply simp + apply (rule getBoundNotification_corres) + apply (rule corres_trivial) + apply (simp split: option.splits) + apply (simp add: returnOk_def) + apply wp+ + apply auto + done + +lemma decodeSetTLSBase_corres: + "corres (ser \ tcbinv_relation) (tcb_at t) (tcb_at' t) + (decode_set_tls_base w (cap.ThreadCap t)) + (decodeSetTLSBase w (capability.ThreadCap t))" + by (clarsimp simp: decode_set_tls_base_def decodeSetTLSBase_def returnOk_def + split: list.split) + +lemma decodeTCBInvocation_corres: + "\ c = Structures_A.ThreadCap t; cap_relation c c'; + list_all2 (\(c, sl) 
(c', sl'). cap_relation c c' \ sl' = cte_map sl) extras extras'; + length args < 2 ^ word_bits \ \ + corres (ser \ tcbinv_relation) (einvs and tcb_at t and (\s. \x \ set extras. s \ fst x \ cte_at (snd x) s)) + (invs' and tcb_at' t and (\s. \x \ set extras'. s \' fst x \ cte_at' (snd x) s)) + (decode_tcb_invocation label args c slot extras) + (decodeTCBInvocation label args c' (cte_map slot) extras')" + apply (rule_tac F="cap_aligned c \ capAligned c'" in corres_req) + apply (clarsimp simp: cap_aligned_def capAligned_def objBits_simps word_bits_def) + apply (drule obj_at_aligned', simp_all add: objBits_simps') + apply (clarsimp simp: decode_tcb_invocation_def + decodeTCBInvocation_def + split del: if_split split: gen_invocation_labels.split) + apply (simp add: returnOk_def) + apply (intro conjI impI + corres_guard_imp[OF decodeReadRegisters_corres] + corres_guard_imp[OF decodeWriteRegisters_corres] + corres_guard_imp[OF decodeCopyRegisters_corres] + corres_guard_imp[OF decodeTCBConfigure_corres] + corres_guard_imp[OF decodeSetPriority_corres] + corres_guard_imp[OF decodeSetMCPriority_corres] + corres_guard_imp[OF decodeSetSchedParams_corres] + corres_guard_imp[OF decodeSetIPCBuffer_corres] + corres_guard_imp[OF decodeSetSpace_corres] + corres_guard_imp[OF decodeBindNotification_corres] + corres_guard_imp[OF decodeUnbindNotification_corres] + corres_guard_imp[OF decodeSetTLSBase_corres], + simp_all add: valid_cap_simps valid_cap_simps' invs_def valid_state_def + valid_pspace_def valid_sched_def) + apply (auto simp: list_all2_map1 list_all2_map2 + elim!: list_all2_mono) + done + +crunch inv[wp]: decodeTCBInvocation P + (simp: crunch_simps) + +lemma real_cte_at_not_tcb_at': + "real_cte_at' x s \ \ tcb_at' x s" + "real_cte_at' (x + 2^cteSizeBits) s \ \ tcb_at' x s" + apply (clarsimp simp: obj_at'_def) + apply (clarsimp elim!: tcb_real_cte_32) + done + +lemma decodeBindNotification_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t + and (\s. \x \ set extras. s \' (fst x) \ (\y \ zobj_refs' (fst x). ex_nonz_cap_to' y s))\ + decodeBindNotification (capability.ThreadCap t) extras + \tcb_inv_wf'\,-" + apply (simp add: decodeBindNotification_def whenE_def + cong: list.case_cong split del: if_split) + apply (rule hoare_pre) + apply (wp getNotification_wp getObject_tcb_wp + | wpc + | simp add: threadGet_def getBoundNotification_def)+ + apply (fastforce simp: valid_cap'_def[where c="capability.ThreadCap t"] + is_ntfn invs_def valid_state'_def valid_pspace'_def + null_def pred_tcb_at'_def obj_at'_def + dest!: global'_no_ex_cap hd_in_set) + done + +lemma decodeUnbindNotification_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + decodeUnbindNotification (capability.ThreadCap t) + \tcb_inv_wf'\,-" + apply (simp add: decodeUnbindNotification_def) + apply (wp getObject_tcb_wp | wpc | simp add: threadGet_def getBoundNotification_def)+ + apply (auto simp: obj_at'_def pred_tcb_at'_def) + done + +lemma decodeSetTLSBase_wf: + "\invs' and tcb_at' t and ex_nonz_cap_to' t\ + decodeSetTLSBase w (capability.ThreadCap t) + \tcb_inv_wf'\,-" + apply (simp add: decodeSetTLSBase_def + cong: list.case_cong) + by wpsimp + +lemma decodeTCBInv_wf: + "\invs' and tcb_at' t and cte_at' slot and ex_nonz_cap_to' t + and (\s. \x \ set extras. real_cte_at' (snd x) s + \ s \' fst x \ (\y \ zobj_refs' (fst x). 
ex_nonz_cap_to' y s))\ + decodeTCBInvocation label args (capability.ThreadCap t) slot extras + \tcb_inv_wf'\,-" + apply (simp add: decodeTCBInvocation_def Let_def + cong: if_cong gen_invocation_labels.case_cong split del: if_split) + apply (rule hoare_pre) + apply (wpc, (wp decodeTCBConf_wf decodeReadReg_wf decodeWriteReg_wf decodeSetTLSBase_wf + decodeCopyReg_wf decodeBindNotification_wf decodeUnbindNotification_wf)+) + apply (clarsimp simp: real_cte_at') + apply (fastforce simp: real_cte_at_not_tcb_at' objBits_defs) + done + +lemma restart_makes_simple': + "\st_tcb_at' simple' t\ + restart t' + \\rv. st_tcb_at' simple' t\" + apply (simp add: restart_def) + apply (wp sts_st_tcb_at'_cases cancelIPC_simple + cancelIPC_st_tcb_at hoare_weak_lift_imp | simp)+ + apply (rule hoare_strengthen_post [OF isStopped_inv]) + prefer 2 + apply assumption + apply clarsimp + done + +lemma setPriority_st_tcb_at'[wp]: + "\st_tcb_at' P t\ setPriority t' p \\rv. st_tcb_at' P t\" + apply (simp add: setPriority_def) + apply (wp threadSet_pred_tcb_no_state | simp)+ + done + +lemma setMCPriority_st_tcb_at'[wp]: + "\st_tcb_at' P t\ setMCPriority t' p \\rv. st_tcb_at' P t\" + apply (simp add: setMCPriority_def) + apply (wp threadSet_pred_tcb_no_state | simp)+ + done + +lemma cteDelete_makes_simple': + "\st_tcb_at' simple' t\ cteDelete slot v \\rv. st_tcb_at' simple' t\" + by (wp cteDelete_st_tcb_at' | simp)+ + +crunches getThreadBufferSlot, setPriority, setMCPriority + for irq_states'[wp]: valid_irq_states' + (simp: crunch_simps) + +lemma inv_tcb_IRQInactive: + "\valid_irq_states'\ invokeTCB tcb_inv + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + including no_pre + apply (simp add: invokeTCB_def) + apply (rule hoare_pre) + apply (wpc | + wp withoutPreemption_R cteDelete_IRQInactive checkCap_inv + hoare_vcg_const_imp_lift_R cteDelete_irq_states' + hoare_vcg_const_imp_lift | + simp add: split_def)+ + done + +end + +end diff --git a/proof/refine/AARCH64/Untyped_R.thy b/proof/refine/AARCH64/Untyped_R.thy new file mode 100644 index 0000000000..091f209b24 --- /dev/null +++ b/proof/refine/AARCH64/Untyped_R.thy @@ -0,0 +1,5633 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Proofs about untyped invocations. *) + +theory Untyped_R +imports Detype_R Invocations_R InterruptAcc_R +begin + +unbundle l4v_word_context + +context begin interpretation Arch . (*FIXME: arch_split*) + +primrec + untypinv_relation :: "Invocations_A.untyped_invocation \ + Invocations_H.untyped_invocation \ bool" +where + "untypinv_relation + (Invocations_A.Retype c reset ob n ao n2 cl d) x = (\ao'. x = + (Invocations_H.Retype (cte_map c) reset ob n ao' n2 + (map cte_map cl) d) + \ ao = APIType_map2 (Inr ao'))" + +primrec + valid_untyped_inv_wcap' :: "Invocations_H.untyped_invocation + \ capability option \ kernel_state \ bool" +where + "valid_untyped_inv_wcap' (Invocations_H.Retype slot reset ptr_base ptr ty us slots d) + = (\co s. \sz idx. (cte_wp_at' (\cte. 
cteCap cte = UntypedCap d ptr_base sz idx + \ (co = None \ co = Some (cteCap cte))) slot s + \ range_cover ptr sz (APIType_capBits ty us) (length slots) + \ ((\ reset \ idx \ unat (ptr - ptr_base)) \ (reset \ ptr = ptr_base)) + \ (ptr && ~~ mask sz) = ptr_base) + \ (reset \ descendants_of' slot (ctes_of s) = {}) + \ distinct (slot # slots) + \ (ty = APIObjectType ArchTypes_H.CapTableObject \ us > 0) + \ (ty = APIObjectType ArchTypes_H.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits) + \ (\slot \ set slots. cte_wp_at' (\c. cteCap c = NullCap) slot s) + \ (\slot \ set slots. ex_cte_cap_to' slot s) + \ sch_act_simple s \ 0 < length slots + \ (d \ ty = APIObjectType ArchTypes_H.Untyped \ isFrameType ty) + \ APIType_capBits ty us \ maxUntypedSizeBits)" + +abbreviation + "valid_untyped_inv' ui \ valid_untyped_inv_wcap' ui None" + +lemma valid_untyped_inv_wcap': + "valid_untyped_inv' ui + = (\s. \sz idx. valid_untyped_inv_wcap' ui + (Some (case ui of Invocations_H.Retype slot reset ptr_base ptr ty us slots d + \ UntypedCap d (ptr && ~~ mask sz) sz idx)) s)" + by (cases ui, auto simp: fun_eq_iff cte_wp_at_ctes_of) + +lemma whenE_rangeCheck_eq: + "(rangeCheck (x :: 'a :: {linorder, integral}) y z) = + (whenE (x < fromIntegral y \ fromIntegral z < x) + (throwError (RangeError (fromIntegral y) (fromIntegral z))))" + by (simp add: rangeCheck_def unlessE_whenE linorder_not_le[symmetric]) + +lemma APIType_map2_CapTable[simp]: + "(APIType_map2 ty = Structures_A.CapTableObject) + = (ty = Inr (APIObjectType ArchTypes_H.CapTableObject))" + by (simp add: APIType_map2_def + split: sum.split AARCH64_H.object_type.split + apiobject_type.split + kernel_object.split arch_kernel_object.splits) + +lemma alignUp_H[simp]: + "Untyped_H.alignUp = More_Word_Operations.alignUp" + apply (rule ext)+ + apply (clarsimp simp:Untyped_H.alignUp_def More_Word_Operations.alignUp_def mask_def) + done + +(* FIXME: MOVE *) +lemma corres_check_no_children: + "corres (\x y. x = y) (cte_at slot) + (pspace_aligned' and pspace_distinct' and valid_mdb' and + cte_wp_at' (\_. True) (cte_map slot)) + (const_on_failure x + (doE z \ ensure_no_children slot; + returnOk y + odE)) + (constOnFailure x + (doE z \ ensureNoChildren (cte_map slot); + returnOk y + odE))" + apply (clarsimp simp:const_on_failure_def constOnFailure_def) + apply (rule corres_guard_imp) + apply (rule corres_split_catch[where E = dc and E'=dc]) + apply (rule corres_guard_imp[OF corres_splitEE]) + apply (rule ensureNoChildren_corres) + apply simp + apply (rule corres_returnOkTT) + apply simp + apply wp+ + apply simp+ + apply (clarsimp simp:dc_def,wp)+ + apply simp + apply simp + done + +lemma mapM_x_stateAssert: + "mapM_x (\x. stateAssert (f x) (ss x)) xs + = stateAssert (\s. \x \ set xs. f x s) []" + apply (induct xs) + apply (simp add: mapM_x_Nil) + apply (simp add: mapM_x_Cons) + apply (simp add: fun_eq_iff stateAssert_def bind_assoc exec_get assert_def) + done + +lemma mapM_locate_eq: + "isCNodeCap cap + \ mapM (\x. locateSlotCap cap x) xs + = (do stateAssert (\s. case gsCNodes s (capUntypedPtr cap) of None \ xs = [] | Some n + \ \x \ set xs. n = capCNodeBits cap \ x < 2 ^ n) []; + return (map (\x. 
(capCNodePtr cap) + 2 ^ cte_level_bits * x) xs) od)" + apply (clarsimp simp: isCap_simps) + apply (simp add: locateSlot_conv objBits_simps cte_level_bits_def + liftM_def[symmetric] mapM_liftM_const isCap_simps) + apply (simp add: liftM_def mapM_discarded mapM_x_stateAssert) + apply (intro bind_cong refl arg_cong2[where f=stateAssert] ext) + apply (simp add: isCap_simps split: option.split) + done + +lemmas is_frame_type_defs = is_frame_type_def isFrameType_def arch_is_frame_type_def + +lemma is_frame_type_isFrameType_eq[simp]: + "(is_frame_type (APIType_map2 (Inr (toEnum (unat arg0))))) = + (Types_H.isFrameType (toEnum (unat arg0)))" + by (simp add: APIType_map2_def is_frame_type_defs split: apiobject_type.splits object_type.splits)+ + +(* FIXME: remove *) +lemmas APIType_capBits = objSize_eq_capBits + +(* FIXME: move *) +lemma corres_whenE_throw_merge: + "corres r P P' f (doE _ \ whenE (A \ B) (throwError e); h odE) + \ corres r P P' f (doE _ \ whenE A (throwError e); _ \ whenE B (throwError e); h odE)" + by (auto simp: whenE_def split: if_splits) + +lemma decodeUntypedInvocation_corres: + assumes cap_rel: "list_all2 cap_relation cs cs'" + shows "corres + (ser \ untypinv_relation) + (invs and cte_wp_at ((=) (cap.UntypedCap d w n idx)) slot and (\s. \x \ set cs. s \ x)) + (invs' + and (\s. \x \ set cs'. s \' x)) + (decode_untyped_invocation label args slot (cap.UntypedCap d w n idx) cs) + (decodeUntypedInvocation label args (cte_map slot) + (capability.UntypedCap d w n idx) cs')" +proof (cases "6 \ length args \ cs \ [] + \ gen_invocation_type label = UntypedRetype") + case False + show ?thesis using False cap_rel + apply (clarsimp simp: decode_untyped_invocation_def + decodeUntypedInvocation_def + whenE_whenE_body unlessE_whenE + split del: if_split cong: list.case_cong) + apply (auto split: list.split) + done +next + case True + have val_le_length_Cons: (* clagged from Tcb_R *) + "\n xs. n \ 0 \ (n \ length xs) = (\y ys. xs = y # ys \ (n - 1) \ length ys)" + apply (case_tac xs, simp_all) + apply (case_tac n, simp_all) + done + + obtain arg0 arg1 arg2 arg3 arg4 arg5 argsmore cap cap' csmore csmore' + where args: "args = arg0 # arg1 # arg2 # arg3 # arg4 # arg5 # argsmore" + and cs: "cs = cap # csmore" + and cs': "cs' = cap' # csmore'" + and crel: "cap_relation cap cap'" + using True cap_rel + by (clarsimp simp: neq_Nil_conv list_all2_Cons1 val_le_length_Cons) + + have il: "gen_invocation_type label = UntypedRetype" + using True by simp + + have word_unat_power2: + "\bits. \ bits < 64 \ bits < word_bits \ \ unat (2 ^ bits :: machine_word) = 2 ^ bits" + by (simp add: word_bits_def) + + have P: "\P. corres (ser \ dc) \ \ + (whenE P (throwError ExceptionTypes_A.syscall_error.TruncatedMessage)) + (whenE P (throwError Fault_H.syscall_error.TruncatedMessage))" + by (simp add: whenE_def returnOk_def) + have Q: "\v. corres (ser \ (\a b. APIType_map2 (Inr (toEnum (unat v))) = a)) \ \ + (data_to_obj_type v) + (whenE (fromEnum (maxBound :: AARCH64_H.object_type) < unat v) + (throwError (Fault_H.syscall_error.InvalidArgument 0)))" + apply (simp only: data_to_obj_type_def returnOk_bindE fun_app_def) + apply (simp add: maxBound_def enum_apiobject_type + fromEnum_def whenE_def) + apply (simp add: returnOk_def APIType_map2_def toEnum_def + enum_apiobject_type enum_object_type) + apply (intro conjI impI) + apply (subgoal_tac "unat v - 5 > 5") + apply (simp add: arch_data_to_obj_type_def) + apply simp + apply (subgoal_tac "\n. 
unat v = n + 5") + apply (clarsimp simp: arch_data_to_obj_type_def returnOk_def) + apply (rule_tac x="unat v - 5" in exI) + apply arith + done + have S: "\x (y :: ('g :: len) word) (z :: 'g word) bits. \ bits < len_of TYPE('g); x < 2 ^ bits \ \ toEnum x = (of_nat x :: 'g word)" + apply (rule toEnum_of_nat) + apply (erule order_less_trans) + apply simp + done + obtain xs where xs: "xs = [unat arg4..ref bits. + \ is_aligned ref bits; + Suc (unat arg4 + unat arg5 - Suc 0) \ 2 ^ bits; + bits < 64; 1 \ arg4 + arg5; + arg4 \ arg4 + arg5 \ \ + (map (\x. ref + 2 ^ cte_level_bits * x) [arg4 .e. arg4 + arg5 - 1]) + = map cte_map + (map (Pair ref) + (map (nat_to_cref bits) xs))" + apply (subgoal_tac "Suc (unat (arg4 + arg5 - 1)) = unat arg4 + unat arg5") + apply (simp add: upto_enum_def xs del: upt.simps) + apply (clarsimp simp: cte_map_def) + apply (subst of_bl_nat_to_cref) + apply simp + apply (simp add: word_bits_def) + apply (subst S) + apply simp + apply simp + apply (simp add: cte_level_bits_def shiftl_t2n) + apply unat_arith + done + have another: + "\bits a. \ (a::machine_word) \ 2 ^ bits; bits < word_bits\ + \ 2 ^ bits - a = of_nat (2 ^ bits - unat a)" + apply (subst of_nat_diff) + apply (subst (asm) word_le_nat_alt) + apply (simp add: word_unat_power2) + apply simp + done + have ty_size: + "\x y. (obj_bits_api (APIType_map2 (Inr x)) y) = (Types_H.getObjectSize x y)" + apply (clarsimp simp:obj_bits_api_def APIType_map2_def getObjectSize_def simp del: APIType_capBits) + apply (case_tac x) + apply (simp_all add:arch_kobj_size_def default_arch_object_def pageBits_def ptBits_def) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add: apiGetObjectSize_def tcbBlockSizeBits_def epSizeBits_def + ntfnSizeBits_def slot_bits_def cteSizeBits_def bit_simps) + done + obtain if_res where if_res_def: "\reset. if_res reset = (if reset then 0 else idx)" + by auto + have if_res_2n: + "\d res. (\s. s \ cap.UntypedCap d w n idx) \ if_res res \ 2 ^ n" + by (simp add: if_res_def valid_cap_def) + + note word_unat_power [symmetric, simp del] + show ?thesis + apply (rule corres_name_pre) + apply clarsimp + apply (subgoal_tac "cte_wp_at' (\cte. cteCap cte = (capability.UntypedCap d w n idx)) (cte_map slot) s'") + prefer 2 + apply (drule state_relation_pspace_relation) + apply (case_tac slot) + apply simp + apply (drule(1) pspace_relation_cte_wp_at) + apply fastforce+ + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (frule caps_of_state_valid_cap[unfolded valid_cap_def]) + apply fastforce + apply (clarsimp simp:cap_aligned_def) +(* ugh yuck. who likes a word proof? furthermore, some more restriction of + the returnOk_bindE stuff needs to be done in order to give you a single + target to do the word proof against or else it needs repeating. ugh. + maybe could seperate out the equality Isar-style? 
*) + apply (simp add: decodeUntypedInvocation_def decode_untyped_invocation_def + args cs cs' xs[symmetric] il whenE_rangeCheck_eq + cap_case_CNodeCap unlessE_whenE case_bool_If lookupTargetSlot_def + untypedBits_defs untyped_min_bits_def untyped_max_bits_def + del: upt.simps + split del: if_split + cong: if_cong list.case_cong) + apply (rule corres_guard_imp) + apply (rule corres_splitEE[OF Q]) + apply (rule corres_whenE_throw_merge) + apply (rule whenE_throwError_corres) + apply (simp add: word_bits_def word_size) + apply (clarsimp simp: word_size word_bits_def fromIntegral_def ty_size + toInteger_nat fromInteger_nat wordBits_def) + apply (simp add: not_le) + apply (rule whenE_throwError_corres, simp) + apply (clarsimp simp: fromAPIType_def) + apply (rule whenE_throwError_corres, simp) + apply (clarsimp simp: fromAPIType_def) + apply (rule_tac r' = "\cap cap'. cap_relation cap cap'" + in corres_splitEE[OF corres_if]) + apply simp + apply (rule corres_returnOkTT) + apply (rule crel) + apply simp + apply (rule corres_splitEE[OF lookupSlotForCNodeOp_corres]) + apply (rule crel) + apply simp + apply simp + apply (rule getSlotCap_corres,simp) + apply wp+ + apply (rule_tac corres_split_norE) + apply (rule corres_if) + apply simp + apply (rule corres_returnOkTT,clarsimp) + apply (rule corres_trivial) + apply (clarsimp simp: fromAPIType_def lookup_failure_map_def) + apply (rule_tac F="is_cnode_cap rva \ cap_aligned rva" in corres_gen_asm) + apply (subgoal_tac "is_aligned (obj_ref_of rva) (bits_of rva) \ bits_of rva < 64") + prefer 2 + apply (clarsimp simp: is_cap_simps bits_of_def cap_aligned_def word_bits_def + is_aligned_weaken) + apply (rule whenE_throwError_corres) + apply (clarsimp simp:Kernel_Config.retypeFanOutLimit_def is_cap_simps bits_of_def)+ + apply (simp add: unat_arith_simps(2) unat_2p_sub_1 word_bits_def) + apply (rule whenE_throwError_corres) + apply (clarsimp simp:Kernel_Config.retypeFanOutLimit_def is_cap_simps bits_of_def)+ + apply (simp add: unat_eq_0 word_less_nat_alt) + apply (rule whenE_throwError_corres) + apply (clarsimp simp:Kernel_Config.retypeFanOutLimit_def is_cap_simps bits_of_def)+ + apply (clarsimp simp:toInteger_word unat_arith_simps(2) cap_aligned_def) + apply (subst unat_sub) + apply (simp add: linorder_not_less word_le_nat_alt) + apply (fold neq0_conv) + apply (simp add: unat_eq_0 cap_aligned_def) + apply (clarsimp simp:fromAPIType_def) + apply (clarsimp simp:liftE_bindE mapM_locate_eq) + apply (subgoal_tac "unat (arg4 + arg5) = unat arg4 + unat arg5") + prefer 2 + apply (clarsimp simp:not_less) + apply (subst unat_word_ariths(1)) + apply (rule mod_less) + apply (unfold word_bits_len_of)[1] + apply (subgoal_tac "2 ^ bits_of rva < (2 :: nat) ^ word_bits") + apply arith + apply (rule power_strict_increasing, simp add: word_bits_conv) + apply simp + apply (rule_tac P'="valid_cap rva" in corres_stateAssert_implied) + apply (frule_tac bits2 = "bits_of rva" in YUCK) + apply (simp) + apply (simp add: word_bits_conv) + apply (simp add: word_le_nat_alt) + apply (simp add: word_le_nat_alt) + apply (simp add:liftE_bindE[symmetric] free_index_of_def) + apply (rule corres_split_norE) + apply (clarsimp simp:is_cap_simps simp del:ser_def) + apply (simp add: mapME_x_map_simp del: ser_def) + apply (rule_tac P = "valid_cap (cap.CNodeCap r bits g) and invs" in corres_guard_imp [where P' = invs']) + apply (rule mapME_x_corres_inv [OF _ _ _ refl]) + apply (simp del: ser_def) + apply (rule ensureEmptySlot_corres) + apply (clarsimp simp: is_cap_simps) + apply (simp, wp) + apply 
(simp, wp) + apply clarsimp + apply (clarsimp simp add: xs is_cap_simps bits_of_def valid_cap_def) + apply (erule cap_table_at_cte_at) + apply (simp add: nat_to_cref_def word_bits_conv) + apply simp + apply (subst liftE_bindE)+ + apply (rule corres_split_eqr[OF corres_check_no_children]) + apply (simp only: free_index_of_def cap.simps if_res_def[symmetric]) + apply (rule_tac F="if_res reset \ 2 ^ n" in corres_gen_asm) + apply (rule whenE_throwError_corres) + apply (clarsimp simp:shiftL_nat word_less_nat_alt shiftr_div_2n' + split del: if_split)+ + apply (simp add: word_of_nat_le another) + apply (drule_tac x = "if_res reset" in unat_of_nat64[OF le_less_trans]) + apply (simp add:ty_size shiftR_nat)+ + apply (simp add:unat_of_nat64 le_less_trans[OF div_le_dividend] + le_less_trans[OF diff_le_self]) + apply (rule whenE_throwError_corres) + apply (clarsimp) + apply (clarsimp simp: fromAPIType_def) + apply (rule corres_returnOkTT) + apply (clarsimp simp:ty_size getFreeRef_def get_free_ref_def is_cap_simps) + apply simp + apply (strengthen if_res_2n, wp) + apply simp + apply wp + apply (wp mapME_x_inv_wp + validE_R_validE[OF valid_validE_R[OF ensure_empty_inv]] + validE_R_validE[OF valid_validE_R[OF ensureEmpty_inv]])+ + apply (clarsimp simp: is_cap_simps valid_cap_simps + cap_table_at_gsCNodes bits_of_def + linorder_not_less) + apply (erule order_le_less_trans) + apply (rule word_leq_le_minus_one) + apply (simp add: word_le_nat_alt) + apply (simp add: unat_arith_simps) + apply wpsimp+ + apply (rule hoare_strengthen_post [where Q = "\r. invs and valid_cap r and cte_at slot"]) + apply wp+ + apply (clarsimp simp: is_cap_simps bits_of_def cap_aligned_def + valid_cap_def word_bits_def) + apply (frule caps_of_state_valid_cap, clarsimp+) + apply (strengthen refl exI[mk_strg I E] exI[where x=d])+ + apply simp + apply wp+ + apply (rule hoare_strengthen_post [where Q = "\r. invs' and cte_at' (cte_map slot)"]) + apply wp+ + apply (clarsimp simp:invs_pspace_aligned' invs_pspace_distinct') + apply (wp whenE_throwError_wp | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct' + cte_wp_at_caps_of_state cte_wp_at_ctes_of ) + apply (clarsimp simp: invs_valid_objs invs_psp_aligned) + apply (frule caps_of_state_valid_cap, clarsimp+) + apply (strengthen refl[where t=True] refl exI[mk_strg I E] exI[where x=d])+ + apply (clarsimp simp: is_cap_simps valid_cap_def bits_of_def cap_aligned_def + cte_level_bits_def word_bits_conv) + apply (clarsimp simp: invs_valid_objs' invs_pspace_aligned' invs_pspace_distinct' + cte_wp_at_caps_of_state cte_wp_at_ctes_of ) + done +qed + +lemma decodeUntyped_inv[wp]: + "\P\ decodeUntypedInvocation label args slot (UntypedCap d w n idx) cs \\rv. P\" + apply (simp add: decodeUntypedInvocation_def whenE_def + split_def unlessE_def Let_def + split del: if_split cong: if_cong list.case_cong) + apply (rule hoare_pre) + apply (wp mapME_x_inv_wp hoare_drop_imps constOnFailure_wp + mapM_wp' + | wpcw + | simp add: lookupTargetSlot_def locateSlot_conv)+ + done + +declare inj_Pair[simp] + +declare upt_Suc[simp del] + +lemma descendants_of_cte_at': + "\p \ descendants_of' x (ctes_of s); valid_mdb' s\ \ cte_wp_at' (\_. True) p s" + by (clarsimp simp: descendants_of'_def cte_wp_at_ctes_of dest!: subtree_target_Some) + +lemma ctes_of_ko: + "valid_cap' cap s \ + isUntypedCap cap \ + (\ptr\capRange cap. \optr ko. 
ksPSpace s optr = Some ko \ ptr \ obj_range' optr ko)" + apply (case_tac cap; simp add: isCap_simps capRange_def) + \ \TCB case\ + apply (clarsimp simp: valid_cap'_def obj_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: objBits_def obj_range'_def mask_def add_diff_eq + dest!: projectKO_opt_tcbD simp: objBitsKO_def) + \ \NTFN case\ + apply (clarsimp simp: valid_cap'_def obj_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: objBits_def mask_def add_diff_eq obj_range'_def objBitsKO_def) + \ \EP case\ + apply (clarsimp simp: valid_cap'_def obj_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: objBits_def mask_def add_diff_eq obj_range'_def objBitsKO_def) + \ \Zombie case\ + apply (rename_tac word zombie_type nat) + apply (case_tac zombie_type) + apply (clarsimp simp: valid_cap'_def obj_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: mask_def add_ac objBits_simps' obj_range'_def dest!: projectKO_opt_tcbD) + apply (clarsimp simp: valid_cap'_def obj_at'_def capAligned_def objBits_simps') + apply (frule_tac ptr=ptr and sz=cte_level_bits + in nasty_range [where 'a=machine_word_len, folded word_bits_def]) + apply (simp add: cte_level_bits_def)+ + apply clarsimp + apply (drule_tac x=idx in spec) + apply (clarsimp simp: less_mask_eq) + apply (fastforce simp: obj_range'_def objBits_simps' mask_def field_simps) + \ \Arch cases\ + apply (rename_tac arch_capability) + apply (case_tac arch_capability) + \ \ASID control\ + apply clarsimp + \ \ASIDPool\ + apply (clarsimp simp: valid_cap'_def valid_acap'_def valid_arch_cap_ref'_def typ_at'_def ko_wp_at'_def) + apply (intro exI conjI, assumption) + apply (clarsimp simp: obj_range'_def archObjSize_def objBitsKO_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; + simp add: archObjSize_def asid_low_bits_def bit_simps mask_def add_ac) + \ \Frame case\ + apply (rename_tac word vmrights vmpage_size option) + apply (clarsimp simp: valid_cap'_def valid_acap'_def valid_arch_cap_ref'_def typ_at'_def + ko_wp_at'_def capAligned_def) + apply (frule_tac ptr = ptr and sz = "pageBits" in + nasty_range[where 'a=machine_word_len, folded word_bits_def, rotated]) + apply simp + apply (simp add: pbfs_atleast_pageBits)+ + apply (clarsimp simp: frame_at'_def) + apply (drule_tac x = idx in spec, clarsimp simp: typ_at'_def ko_wp_at'_def) + apply (intro exI conjI,assumption) + apply (clarsimp simp: obj_range'_def shiftl_t2n mask_def add_diff_eq) + apply (case_tac ko, simp_all split: if_splits, + (simp add: objBitsKO_def archObjSize_def field_simps shiftl_t2n)+)[1] + \ \PT case\ + apply (rename_tac word pt_t option) + apply (clarsimp simp: valid_cap'_def valid_acap'_def valid_arch_cap_ref'_def obj_at'_def + page_table_at'_def typ_at'_def ko_wp_at'_def) + apply (cut_tac ptr=ptr and bz="ptBits pt_t" and word=word and sz=pte_bits in + nasty_range[where 'a=machine_word_len]; simp?) 
+ apply (simp add: pt_bits_def) + apply clarsimp + apply (drule_tac x="ucast idx" in spec) + apply (clarsimp simp: pt_bits_def table_size_def le_mask_iff_lt_2n[THEN iffD1]) + apply (intro exI conjI,assumption) + apply (clarsimp simp: obj_range'_def) + apply (case_tac ko; simp) + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp) + apply (simp add: objBitsKO_def archObjSize_def bit_simps mask_def ucast_ucast_len field_simps + shiftl_t2n) + \ \VCPU case\ + apply (clarsimp simp: valid_cap'_def typ_at'_def ko_wp_at'_def objBits_simps) + apply (intro exI conjI, assumption) + apply (clarsimp simp: obj_range'_def archObjSize_def objBitsKO_def) + apply (case_tac ko, simp+)[1] + apply (rename_tac arch_kernel_object) + apply (case_tac arch_kernel_object; simp add: archObjSize_def bit_simps mask_def add_ac) + \ \CNode case\ + apply (clarsimp simp: valid_cap'_def obj_at'_def capAligned_def objBits_simps) + apply (frule_tac ptr=ptr and sz=cte_level_bits + in nasty_range [where 'a=machine_word_len, folded word_bits_def]) + apply (simp add: cte_level_bits_def objBits_defs)+ + apply clarsimp + apply (drule_tac x=idx in spec) + apply (clarsimp simp: less_mask_eq) + apply (fastforce simp: obj_range'_def mask_def objBits_simps' field_simps)[1] + done + +lemma untypedCap_descendants_range': + "\valid_pspace' s; ctes_of s p = Some cte; + isUntypedCap (cteCap cte); valid_mdb' s; + q \ descendants_of' p (ctes_of s) \ + \ cte_wp_at' (\c. (capRange (cteCap c) \ + usableUntypedRange (cteCap cte) = {})) q s" + apply (clarsimp simp: valid_pspace'_def) + apply (frule(1) descendants_of_cte_at') + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (clarsimp simp:valid_mdb'_def) + apply (frule valid_mdb_no_loops) + apply (case_tac "isUntypedCap (cteCap ctea)") + apply (case_tac ctea) + apply (rename_tac cap node) + apply (case_tac cte) + apply (rename_tac cap' node') + apply clarsimp + apply (frule(1) valid_capAligned[OF ctes_of_valid_cap']) + apply (frule_tac c = cap in valid_capAligned[OF ctes_of_valid_cap']) + apply (simp add:untypedCapRange)+ + apply (frule_tac c = cap' in aligned_untypedRange_non_empty) + apply simp + apply (frule_tac c = cap in aligned_untypedRange_non_empty) + apply simp + apply (clarsimp simp:valid_mdb'_def valid_mdb_ctes_def) + apply (drule untyped_incD', simp+) + apply clarify + apply (erule subset_splitE) + apply simp + apply (thin_tac "P \ Q" for P Q)+ + apply (elim conjE) + apply (simp add:descendants_of'_def) + apply (drule(1) subtree_trans) + apply (simp add:no_loops_no_subtree) + apply simp + apply (clarsimp simp:descendants_of'_def | erule disjE)+ + apply (drule(1) subtree_trans) + apply (simp add:no_loops_no_subtree)+ + apply (thin_tac "P \ Q" for P Q)+ + apply (erule(1) disjoint_subset2[OF usableRange_subseteq]) + apply (simp add:Int_ac) + apply (case_tac ctea) + apply (rename_tac cap node) + apply (case_tac cte) + apply clarsimp + apply (drule(1) ctes_of_valid_cap')+ + apply (frule_tac cap = cap in ctes_of_ko; assumption?) + apply (elim disjE) + apply clarsimp+ + apply (thin_tac "s \' cap") + apply (clarsimp simp: valid_cap'_def isCap_simps valid_untyped'_def + simp del: usableUntypedRange.simps untypedRange.simps) + apply (thin_tac "\x y z. 
P x y z" for P) + apply (rule ccontr) + apply (clarsimp dest!: int_not_emptyD + simp del: usableUntypedRange.simps untypedRange.simps) + apply (drule(1) bspec) + apply (clarsimp simp: ko_wp_at'_def simp del: usableUntypedRange.simps untypedRange.simps) + apply (drule_tac x = optr in spec) + apply (clarsimp simp: ko_wp_at'_def simp del: usableUntypedRange.simps untypedRange.simps) + apply (frule(1) pspace_alignedD') + apply (frule(1) pspace_distinctD') + apply (erule(1) impE) + apply (clarsimp simp del: usableUntypedRange.simps untypedRange.simps) + apply blast + done + +lemma cte_wp_at_caps_descendants_range_inI': + "\invs' s; cte_wp_at' (\c. cteCap c = UntypedCap d (ptr && ~~ mask sz) sz idx) cref s; + idx \ unat (ptr && mask sz); sz < word_bits\ + \ descendants_range_in' {ptr .. (ptr && ~~ mask sz) + mask sz} + cref (ctes_of s)" + apply (frule invs_mdb') + apply (frule(1) le_mask_le_2p) + apply (clarsimp simp: descendants_range_in'_def cte_wp_at_ctes_of) + apply (drule untypedCap_descendants_range'[rotated]) + apply (simp add: isCap_simps)+ + apply (simp add: invs_valid_pspace') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule disjoint_subset2[rotated]) + apply clarsimp + apply (rule le_plus'[OF word_and_le2]) + apply simp + apply (erule word_of_nat_le) + done + +lemma checkFreeIndex_wp: + "\\s. if descendants_of' slot (ctes_of s) = {} then Q y s else Q x s\ + constOnFailure x (doE z \ ensureNoChildren slot; returnOk y odE) + \Q\" + apply (clarsimp simp:constOnFailure_def const_def) + apply (wp ensureNoChildren_wp) + apply simp + done + +declare upt_Suc[simp] + +lemma ensureNoChildren_sp: + "\P\ ensureNoChildren sl \\rv s. P s \ descendants_of' sl (ctes_of s) = {}\,-" + by (wp ensureNoChildren_wp, simp) + +lemma dui_sp_helper': + "\P\ if Q then returnOk root_cap + else doE slot \ + lookupTargetSlot root_cap cref dpth; + liftE (getSlotCap slot) + odE \\rv s. (rv = root_cap \ (\slot. cte_wp_at' ((=) rv o cteCap) slot s)) \ P s\, -" + apply (cases Q, simp_all add: lookupTargetSlot_def) + apply (wp, simp) + apply (simp add: getSlotCap_def split_def) + apply wp + apply (rule hoare_strengthen_post [OF getCTE_sp[where P=P]]) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (elim allE, drule(1) mp) + apply simp + apply wpsimp + apply simp + done + +lemma map_ensure_empty': + "\\s. (\slot \ set slots. cte_wp_at' (\cte. cteCap cte = NullCap) slot s) \ P s\ + mapME_x ensureEmptySlot slots + \\rv s. P s \,-" + apply (induct slots arbitrary: P) + apply (simp add: mapME_x_def sequenceE_x_def) + apply wp + apply (simp add: mapME_x_def sequenceE_x_def) + apply (rule_tac Q="\rv s. (\slot\set slots. cte_wp_at' (\cte. cteCap cte = NullCap) slot s) \ P s" + in validE_R_sp) + apply (simp add: ensureEmptySlot_def unlessE_def) + apply (wp getCTE_wp') + apply (clarsimp elim!: cte_wp_at_weakenE') + apply (erule meta_allE) + apply (erule hoare_post_imp_R) + apply clarsimp + done + +lemma irq_nodes_global: + "irq_node' s + (ucast (irq :: irq) << cteSizeBits) \ global_refs' s" + by (simp add: global_refs'_def) + +lemma valid_global_refsD2': + "\ctes_of s p = Some cte; valid_global_refs' s\ \ global_refs' s \ capRange (cteCap cte) = {}" + by (blast dest: valid_global_refsD') + +lemma cte_cap_in_untyped_range: + "\ ptr \ x; x \ ptr + mask bits; cte_wp_at' (\cte. 
cteCap cte = UntypedCap d ptr bits idx) cref s; + descendants_of' cref (ctes_of s) = {}; invs' s; + ex_cte_cap_to' x s; valid_global_refs' s \ \ False" + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (case_tac ctea, simp) + apply (rename_tac cap node) + apply (frule ctes_of_valid_cap', clarsimp) + apply (case_tac "\irq. cap = IRQHandlerCap irq") + apply (drule (1) equals0D[where a=x, OF valid_global_refsD2'[where p=cref]]) + apply (clarsimp simp: irq_nodes_global add_mask_fold) + apply (frule_tac p=crefa and p'=cref in caps_containedD', assumption) + apply (clarsimp dest!: isCapDs) + apply (rule_tac x=x in notemptyI) + apply (simp add: subsetD[OF cte_refs_capRange] add_mask_fold) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + apply (frule_tac p=cref and p'=crefa in untyped_mdbD', assumption) + apply (simp_all add: isUntypedCap_def add_mask_fold) + apply (frule valid_capAligned) + apply (frule capAligned_capUntypedPtr) + apply (case_tac cap; simp) + apply blast + apply (case_tac cap; simp) + apply (clarsimp simp: invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def valid_mdb_ctes_def) + done + +lemma cap_case_CNodeCap_True_throw: + "(case cap of CNodeCap a b c d \ returnOk () + | _ \ throw $ e) + = (whenE (\isCNodeCap cap) (throwError e))" + by (simp split: capability.split bool.split + add: whenE_def isCNodeCap_def) + +lemma empty_descendants_range_in': + "\descendants_of' slot m = {}\ \ descendants_range_in' S slot m " + by (clarsimp simp:descendants_range_in'_def) + +lemma liftE_validE_R: + "\P\ f \Q\ \ \P\ liftE f \Q\,-" + by wpsimp + +lemma decodeUntyped_wf[wp]: + "\invs' and cte_wp_at' (\cte. cteCap cte = UntypedCap d w sz idx) slot + and sch_act_simple + and (\s. \x \ set cs. s \' x) + and (\s. \x \ set cs. \r \ cte_refs' x (irq_node' s). 
ex_cte_cap_to' r s)\ + decodeUntypedInvocation label args slot + (UntypedCap d w sz idx) cs + \valid_untyped_inv'\,-" + unfolding decodeUntypedInvocation_def + apply (simp add: unlessE_def[symmetric] unlessE_whenE rangeCheck_def whenE_def[symmetric] + returnOk_liftE[symmetric] Let_def cap_case_CNodeCap_True_throw + split del: if_split cong: if_cong list.case_cong) + apply (rule list_case_throw_validE_R) + apply (clarsimp split del: if_split split: list.splits) + apply (intro conjI impI allI) + apply (wp+)[6] + apply (clarsimp split del: if_split) + apply (rename_tac ty us nodeIndexW nodeDepthW nodeOffset nodeWindow rootCap cs' xs') + apply (rule validE_R_sp[OF map_ensure_empty'] validE_R_sp[OF whenE_throwError_sp] + validE_R_sp[OF dui_sp_helper'])+ + apply (case_tac "\ isCNodeCap nodeCap") + apply (simp add: validE_R_def) + apply (simp add: mapM_locate_eq bind_liftE_distrib bindE_assoc returnOk_liftE[symmetric]) + apply (rule validE_R_sp, rule liftE_validE_R, rule stateAssert_sp) + apply (rule hoare_pre, wp whenE_throwError_wp checkFreeIndex_wp map_ensure_empty') + apply (clarsimp simp:cte_wp_at_ctes_of not_less shiftL_nat) + apply (case_tac cte) + apply clarsimp + apply (frule(1) valid_capAligned[OF ctes_of_valid_cap'[OF _ invs_valid_objs']]) + apply (clarsimp simp:capAligned_def) + apply (subgoal_tac "idx \ 2^ sz") + prefer 2 + apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) + apply (clarsimp simp:valid_cap'_def valid_untyped_def) + apply (subgoal_tac "(2 ^ sz - idx) < 2^ word_bits") + prefer 2 + apply (rule le_less_trans[where y = "2^sz"]) + apply simp+ + apply (subgoal_tac "of_nat (2 ^ sz - idx) = (2::machine_word)^sz - of_nat idx") + prefer 2 + apply (simp add:word_of_nat_minus) + apply (subgoal_tac "valid_cap' nodeCap s") + prefer 2 + apply (erule disjE) + apply (fastforce dest: cte_wp_at_valid_objs_valid_cap') + apply clarsimp + apply (case_tac cte) + apply clarsimp + apply (drule(1) ctes_of_valid_cap'[OF _ invs_valid_objs'])+ + apply simp + apply (clarsimp simp: toEnum_of_nat [OF less_Suc_unat_less_bound]) + apply (subgoal_tac "args ! 4 \ 2 ^ capCNodeBits nodeCap") + prefer 2 + apply (clarsimp simp: isCap_simps) + apply (subst (asm) le_m1_iff_lt[THEN iffD1]) + apply (clarsimp simp:valid_cap'_def isCap_simps p2_gt_0 capAligned_def word_bits_def) + apply (rule less_imp_le) + apply simp + apply (subgoal_tac + "distinct (map (\y. capCNodePtr nodeCap + y * 2^cte_level_bits) [args ! 4 .e. args ! 4 + args ! 
5 - 1])") + prefer 2 + apply (simp add: distinct_map upto_enum_def del: upt_Suc) + apply (rule comp_inj_on) + apply (rule inj_onI) + apply (clarsimp dest!: less_Suc_unat_less_bound) + apply (erule word_unat.Abs_eqD) + apply (simp add: unats_def) + apply (simp add: unats_def) + apply (rule inj_onI) + apply (clarsimp simp: toEnum_of_nat[OF less_Suc_unat_less_bound] isCap_simps) + apply (erule(2) inj_bits, simp add: cte_level_bits_def word_bits_def) + apply (subst Suc_unat_diff_1) + apply (rule word_le_plus_either,simp) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply (drule(1) le_plus) + apply (rule unat_le_helper) + apply (erule order_trans) + apply (subst unat_power_lower64[symmetric], simp add: word_bits_def cte_level_bits_def) + apply (simp add: word_less_nat_alt[symmetric]) + apply (rule two_power_increasing) + apply (clarsimp dest!: valid_capAligned + simp: capAligned_def objBits_def objBitsKO_def) + apply (simp_all add: word_bits_def cte_level_bits_def objBits_defs)[2] + apply (clarsimp simp: AARCH64_H.fromAPIType_def) + apply (subgoal_tac "Suc (unat (args ! 4 + args ! 5 - 1)) = unat (args ! 4) + unat (args ! 5)") + prefer 2 + apply simp + apply (subst Suc_unat_diff_1) + apply (rule word_le_plus_either,simp) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply (rule unat_plus_simple[THEN iffD1]) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply clarsimp + apply (subgoal_tac "(\x. (args ! 4) \ x \ x \ (args ! 4) + (args ! 5) - 1 \ + ex_cte_cap_wp_to' (\_. True) (capCNodePtr nodeCap + x * 2^cteSizeBits) s)") + prefer 2 + apply clarsimp + apply (erule disjE) + apply (erule bspec) + apply (clarsimp simp:isCap_simps image_def shiftl_t2n mult_ac) + apply (rule_tac x = x in bexI,simp) + apply (simp add: mask_def) + apply (erule order_trans) + apply (frule(1) le_plus) + apply (rule word_l_diffs,simp+) + apply (rule word_le_plus_either,simp) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply (clarsimp simp:ex_cte_cap_wp_to'_def) + apply (rule_tac x = nodeSlot in exI) + apply (case_tac cte) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps image_def + shiftl_t2n) + apply (rule_tac x = x in bexI,simp) + apply (simp add: mask_def) + apply (erule order_trans) + apply (frule(1) le_plus) + apply (rule word_l_diffs,simp+) + apply (rule word_le_plus_either,simp) + apply (subst olen_add_eqv) + apply (subst add.commute) + apply (erule(1) plus_minus_no_overflow_ab) + apply (simp add: fromIntegral_def toInteger_nat fromInteger_nat) + apply (rule conjI) + apply (simp add: objBits_defs cte_level_bits_def) + apply (clarsimp simp:of_nat_shiftR word_le_nat_alt) + apply (frule_tac n = "unat (args ! 5)" + and bits = "(APIType_capBits (toEnum (unat (args ! 0))) (unat (args ! 
1)))" + in range_cover_stuff[where rv = 0,rotated -1]) + apply (simp add:unat_1_0) + apply simp + apply (simp add:word_sub_le_iff word_of_nat_le) + apply simp+ + apply (clarsimp simp:getFreeRef_def) + apply (frule alignUp_idem[OF is_aligned_weaken,where a = w]) + apply (erule range_cover.sz) + apply (simp add:range_cover_def) + apply (simp add:empty_descendants_range_in' untypedBits_defs) + apply (clarsimp simp: image_def isCap_simps nullPointer_def word_size field_simps) + apply (intro conjI) + apply (clarsimp simp: image_def isCap_simps nullPointer_def word_size field_simps) + apply (drule_tac x=x in spec)+ + apply simp + apply (clarsimp simp: APIType_capBits_def) + apply clarsimp + apply (clarsimp simp: image_def getFreeRef_def cte_level_bits_def objBits_simps' field_simps) + apply (clarsimp simp: of_nat_shiftR word_le_nat_alt) + apply (frule_tac n = "unat (args ! 5)" + and bits = "(APIType_capBits (toEnum (unat (args ! 0))) (unat (args ! 1)))" + in range_cover_stuff[where w=w and sz=sz and rv = idx,rotated -1]; simp?) + apply (intro conjI; clarsimp simp add: image_def word_size) + apply (clarsimp simp: image_def isCap_simps nullPointer_def word_size field_simps) + apply (drule_tac x=x in spec)+ + apply simp + apply (clarsimp simp: APIType_capBits_def) + done + +lemma corres_list_all2_mapM_': + assumes w: "suffix xs oxs" "suffix ys oys" + assumes y: "\x xs y ys. \ F x y; suffix (x # xs) oxs; suffix (y # ys) oys \ + \ corres dc (P (x # xs)) (P' (y # ys)) (f x) (g y)" + assumes z: "\x y xs. \ F x y; suffix (x # xs) oxs \ \ \P (x # xs)\ f x \\rv. P xs\" + "\x y ys. \ F x y; suffix (y # ys) oys \ \ \P' (y # ys)\ g y \\rv. P' ys\" + assumes x: "list_all2 F xs ys" + shows "corres dc (P xs) (P' ys) (mapM_x f xs) (mapM_x g ys)" + apply (insert x w) + apply (induct xs arbitrary: ys) + apply (simp add: mapM_x_def sequence_x_def) + apply (case_tac ys) + apply simp + apply (clarsimp simp add: mapM_x_def sequence_x_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF y]; assumption?) 
+ apply (clarsimp dest!: suffix_ConsD) + apply (erule meta_allE, (drule(1) meta_mp)+) + apply assumption + apply (erule(1) z)+ + apply simp+ + done + +lemmas suffix_refl = suffix_order.refl + +lemmas corres_list_all2_mapM_ + = corres_list_all2_mapM_' [OF suffix_refl suffix_refl] + +declare modify_map_id[simp] + +lemma valid_mdbD3': + "\ ctes_of s p = Some cte; valid_mdb' s \ \ p \ 0" + by (clarsimp simp add: valid_mdb'_def valid_mdb_ctes_def no_0_def) + +lemma capRange_sameRegionAs: + "\ sameRegionAs x y; s \' y; capClass x = PhysicalClass \ capClass y = PhysicalClass \ + \ capRange x \ capRange y \ {}" + apply (erule sameRegionAsE) + apply (subgoal_tac "capClass x = capClass y \ capRange x = capRange y") + apply simp + apply (drule valid_capAligned) + apply (drule(1) capAligned_capUntypedPtr) + apply clarsimp + apply (rule conjI) + apply (rule master_eqI, rule capClass_Master, simp) + apply (rule master_eqI, rule capRange_Master, simp) + apply blast + apply blast + apply (clarsimp simp: isCap_simps)+ + done +end + +locale mdb_insert_again = + mdb_ptr_parent?: mdb_ptr m _ _ parent parent_cap parent_node + + mdb_ptr_site?: mdb_ptr m _ _ site site_cap site_node + for m parent parent_cap parent_node site site_cap site_node + + + fixes c' + + assumes site_cap: "site_cap = NullCap" + assumes site_prev: "mdbPrev site_node = 0" + assumes site_next: "mdbNext site_node = 0" + + assumes is_untyped: "isUntypedCap parent_cap" + assumes same_region: "sameRegionAs parent_cap c'" + + assumes range: "descendants_range' c' parent m" + assumes phys: "capClass c' = PhysicalClass" + + fixes s + assumes valid_capI': "m p = Some (CTE cap node) \ s \' cap" + + assumes ut_rev: "ut_revocable' m" + + fixes n + defines "n \ + (modify_map + (\x. if x = site + then Some (CTE c' (MDB (mdbNext parent_node) parent True True)) + else m x) + parent (cteMDBNode_update (mdbNext_update (\x. site))))" + + assumes neq: "parent \ site" + +context mdb_insert_again +begin +interpretation Arch . (*FIXME: arch_split*) +lemmas parent = mdb_ptr_parent.m_p +lemmas site = mdb_ptr_site.m_p + +lemma next_wont_bite: + "\ mdbNext parent_node \ 0; m (mdbNext parent_node) = Some cte \ + \ \ sameRegionAs c' (cteCap cte)" + using range ut_rev + apply (cases cte) + apply clarsimp + apply (cases "m \ parent \ mdbNext parent_node") + apply (drule (2) descendants_rangeD') + apply (drule capRange_sameRegionAs) + apply (erule valid_capI') + apply (simp add: phys) + apply blast + apply (erule notE, rule direct_parent) + apply (clarsimp simp: mdb_next_unfold parent) + apply assumption + apply (simp add: parentOf_def parent) + apply (insert is_untyped same_region) + apply (clarsimp simp: isMDBParentOf_CTE) + apply (rule conjI) + apply (erule (1) sameRegionAs_trans) + apply (simp add: ut_revocable'_def) + apply (insert parent) + apply simp + apply (clarsimp simp: isCap_simps) + done + +lemma no_0_helper: "no_0 m \ no_0 n" + by (simp add: n_def, simp add: no_0_def) + +lemma no_0_n [intro!]: "no_0 n" by (auto intro: no_0_helper) + +lemmas n_0_simps [iff] = no_0_simps [OF no_0_n] + +lemmas neqs [simp] = neq neq [symmetric] + +definition + "new_site \ CTE c' (MDB (mdbNext parent_node) parent True True)" + +definition + "new_parent \ CTE parent_cap (mdbNext_update (\a. 
site) parent_node)" + +lemma n: "n = m (site \ new_site, parent \ new_parent)" + using parent site + by (simp add: n_def modify_map_apply new_site_def new_parent_def + fun_upd_def[symmetric]) + +lemma site_no_parent [iff]: + "m \ site \ x = False" using site site_next + by (auto dest: subtree_next_0) + +lemma site_no_child [iff]: + "m \ x \ site = False" using site site_prev + by (auto dest: subtree_prev_0) + +lemma parent_next: "m \ parent \ mdbNext parent_node" + by (simp add: parent mdb_next_unfold) + +lemma parent_next_rtrancl_conv [simp]: + "m \ mdbNext parent_node \\<^sup>* site = m \ parent \\<^sup>+ site" + apply (rule iffI) + apply (insert parent_next) + apply (fastforce dest: rtranclD) + apply (drule tranclD) + apply (clarsimp simp: mdb_next_unfold) + done + +lemma site_no_next [iff]: + "m \ x \ site = False" using site site_prev dlist + apply clarsimp + apply (simp add: mdb_next_unfold) + apply (elim exE conjE) + apply (case_tac z) + apply simp + apply (rule dlistEn [where p=x], assumption) + apply clarsimp + apply clarsimp + done + +lemma site_no_next_trans [iff]: + "m \ x \\<^sup>+ site = False" + by (clarsimp dest!: tranclD2) + +lemma site_no_prev [iff]: + "m \ site \ p = (p = 0)" using site site_next + by (simp add: mdb_next_unfold) + +lemma site_no_prev_trancl [iff]: + "m \ site \\<^sup>+ p = (p = 0)" + apply (rule iffI) + apply (drule tranclD) + apply clarsimp + apply simp + apply (insert chain site) + apply (simp add: mdb_chain_0_def) + apply auto + done + +lemma chain_n: + "mdb_chain_0 n" +proof - + from chain + have "m \ mdbNext parent_node \\<^sup>* 0" using dlist parent + apply (cases "mdbNext parent_node = 0") + apply simp + apply (erule dlistEn, simp) + apply (auto simp: mdb_chain_0_def) + done + moreover + have "\m \ mdbNext parent_node \\<^sup>* parent" + using parent_next + apply clarsimp + apply (drule (1) rtrancl_into_trancl2) + apply simp + done + moreover + have "\ m \ 0 \\<^sup>* site" using no_0 site + by (auto elim!: next_rtrancl_tranclE dest!: no_0_lhs_trancl) + moreover + have "\ m \ 0 \\<^sup>* parent" using no_0 parent + by (auto elim!: next_rtrancl_tranclE dest!: no_0_lhs_trancl) + moreover + note chain + ultimately show "mdb_chain_0 n" using no_0 parent site + apply (simp add: n new_parent_def new_site_def) + apply (auto intro!: mdb_chain_0_update no_0_update simp: next_update_lhs_rtrancl) + done +qed + +lemma no_loops_n: "no_loops n" using chain_n no_0_n + by (rule mdb_chain_0_no_loops) + +lemma n_direct_eq: + "n \ p \ p' = (if p = parent then p' = site else + if p = site then m \ parent \ p' + else m \ p \ p')" + using parent site site_prev + by (auto simp: mdb_next_update n new_parent_def new_site_def + parent_next mdb_next_unfold) + +lemma next_not_parent: + "\ mdbNext parent_node \ 0; m (mdbNext parent_node) = Some cte \ + \ \ isMDBParentOf new_site cte" + apply (drule(1) next_wont_bite) + apply (cases cte) + apply (simp add: isMDBParentOf_def new_site_def) + done + +(* The newly inserted cap should never have children. 
*) +lemma site_no_parent_n: + "n \ site \ p = False" using parent valid_badges + apply clarsimp + apply (erule subtree.induct) + prefer 2 + apply simp + apply (clarsimp simp: parentOf_def mdb_next_unfold new_site_def n) + apply (cases "mdbNext parent_node = site") + apply (subgoal_tac "m \ parent \ site") + apply simp + apply (subst mdb_next_unfold) + apply (simp add: parent) + apply clarsimp + apply (erule notE[rotated], erule(1) next_not_parent[unfolded new_site_def]) + done + +end + +locale mdb_insert_again_child = mdb_insert_again + + assumes child: + "isMDBParentOf + (CTE parent_cap parent_node) + (CTE c' (MDB (mdbNext parent_node) parent True True))" + +context mdb_insert_again_child +begin + +lemma new_child [simp]: + "isMDBParentOf new_parent new_site" + by (simp add: new_parent_def new_site_def) (rule child) + +lemma n_site_child: + "n \ parent \ site" + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def parent site n) + done + +lemma parent_m_n: + assumes "m \ p \ p'" + shows "if p' = parent then n \ p \ site \ n \ p \ p' else n \ p \ p'" using assms +proof induct + case (direct_parent c) + thus ?case + apply (cases "p = parent") + apply simp + apply (rule conjI, clarsimp) + apply clarsimp + apply (rule subtree.trans_parent [where c'=site]) + apply (rule n_site_child) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (clarsimp simp: new_parent_def parent) + apply simp + apply (subgoal_tac "n \ p \ c") + prefer 2 + apply (rule subtree.direct_parent) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: new_parent_def parent) + apply clarsimp + apply (erule subtree_trans) + apply (rule n_site_child) + done +next + case (trans_parent c d) + thus ?case + apply - + apply (cases "c = site", simp) + apply (cases "d = site", simp) + apply (cases "c = parent") + apply clarsimp + apply (erule subtree.trans_parent [where c'=site]) + apply (clarsimp simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (rule conjI, clarsimp) + apply (clarsimp simp: new_parent_def parent) + apply clarsimp + apply (subgoal_tac "n \ p \ d") + apply clarsimp + apply (erule subtree_trans, rule n_site_child) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq) + apply simp + apply (clarsimp simp: parentOf_def n) + apply (fastforce simp: parent new_parent_def) + done +qed + +lemma n_to_site [simp]: + "n \ p \ site = (p = parent)" + by (simp add: n_direct_eq) + +lemma parent_n_m: + assumes "n \ p \ p'" + shows "if p' = site then p \ parent \ m \ p \ parent else m \ p \ p'" +proof - + from assms have [simp]: "p \ site" by (clarsimp simp: site_no_parent_n) + from assms + show ?thesis + proof induct + case (direct_parent c) + thus ?case + apply simp + apply (rule conjI) + apply clarsimp + apply clarsimp + apply (rule subtree.direct_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply simp + apply (clarsimp simp: parentOf_def n parent new_parent_def split: if_split_asm) + done + next + case (trans_parent c d) + thus ?case + apply clarsimp + apply (rule conjI, clarsimp) + apply (clarsimp split: if_split_asm) + apply (simp add: n_direct_eq) + apply (cases "p=parent") + apply simp + apply (rule subtree.direct_parent, assumption, assumption) + apply (clarsimp simp: parentOf_def n parent new_parent_def split: if_split_asm) + apply clarsimp + apply (erule subtree.trans_parent, assumption, 
assumption) + apply (clarsimp simp: parentOf_def n parent new_parent_def split: if_split_asm) + apply (erule subtree.trans_parent) + apply (simp add: n_direct_eq split: if_split_asm) + apply assumption + apply (clarsimp simp: parentOf_def n parent new_parent_def split: if_split_asm) + done + qed +qed + +lemma descendants: + "descendants_of' p n = + (if parent \ descendants_of' p m \ p = parent + then descendants_of' p m \ {site} else descendants_of' p m)" + apply (rule set_eqI) + apply (simp add: descendants_of'_def) + apply (fastforce dest!: parent_n_m dest: parent_m_n simp: n_site_child split: if_split_asm) + done + +end + +lemma blarg_descendants_of': + "descendants_of' x (modify_map m p (if P then id else cteMDBNode_update (mdbPrev_update f))) + = descendants_of' x m" + by (simp add: descendants_of'_def) + +lemma bluhr_descendants_of': + "mdb_insert_again_child (ctes_of s') parent parent_cap pmdb site site_cap site_node cap s + \ + descendants_of' x + (modify_map + (modify_map + (\c. if c = site + then Some (CTE cap (MDB (mdbNext pmdb) parent True True)) + else ctes_of s' c) + (mdbNext pmdb) + (if mdbNext pmdb = 0 then id + else cteMDBNode_update (mdbPrev_update (\x. site)))) + parent (cteMDBNode_update (mdbNext_update (\x. site)))) + = (if parent \ descendants_of' x (ctes_of s') \ x = parent + then descendants_of' x (ctes_of s') \ {site} + else descendants_of' x (ctes_of s'))" + apply (subst modify_map_com) + apply (case_tac x, rename_tac node, case_tac node, clarsimp) + apply (subst blarg_descendants_of') + apply (erule mdb_insert_again_child.descendants) + done + +lemma mdb_relation_simp: + "\ (s, s') \ state_relation; cte_at p s \ + \ descendants_of' (cte_map p) (ctes_of s') = cte_map ` descendants_of p (cdt s)" + by (cases p, clarsimp simp: state_relation_def cdt_relation_def) + +lemma in_getCTE2: + "((cte, s') \ fst (getCTE p s)) = (s' = s \ cte_wp_at' ((=) cte) p s)" + apply (safe dest!: in_getCTE) + apply (clarsimp simp: cte_wp_at'_def getCTE_def) + done + +declare wrap_ext_op_det_ext_ext_def[simp] + +lemma do_ext_op_update_cdt_list_symb_exec_l': + "corres_underlying {(s::det_state, s'). f (kheap s) (ekheap s) s'} nf nf' dc P P' (create_cap_ext p z a) (return x)" + apply (simp add: corres_underlying_def create_cap_ext_def + update_cdt_list_def set_cdt_list_def bind_def put_def get_def gets_def return_def) + done + +crunches updateMDB, updateNewFreeIndex + for it'[wp]: "\s. P (ksIdleThread s)" + and ups'[wp]: "\s. P (gsUserPages s)" + and cns'[wp]: "\s. P (gsCNodes s)" + and ksDomainTime[wp]: "\s. P (ksDomainTime s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and ksWorkUnitsCompleted[wp]: "\s. P (ksWorkUnitsCompleted s)" + and ksMachineState[wp]: "\s. P (ksMachineState s)" + and ksArchState[wp]: "\s. P (ksArchState s)" +crunches insertNewCap + for ksInterrupt[wp]: "\s. P (ksInterruptState s)" + and norq[wp]: "\s. P (ksReadyQueues s)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksCurThread[wp]: "\s. P (ksCurThread s)" + (wp: crunch_wps) +crunch nosch[wp]: insertNewCaps "\s. P (ksSchedulerAction s)" + (simp: crunch_simps zipWithM_x_mapM wp: crunch_wps) + + +crunch exst[wp]: set_cdt "\s. 
P (exst s)" + +(*FIXME: Move to StateRelation*) +lemma state_relation_schact[elim!]: + "(s,s') \ state_relation \ sched_act_relation (scheduler_action s) (ksSchedulerAction s')" + apply (simp add: state_relation_def) + done + +lemma state_relation_queues[elim!]: "(s,s') \ state_relation \ ready_queues_relation (ready_queues s) (ksReadyQueues s')" + apply (simp add: state_relation_def) + done + +lemma set_original_symb_exec_l: + "corres_underlying {(s, s'). f (kheap s) (exst s) s'} nf nf' dc P P' (set_original p b) (return x)" + by (simp add: corres_underlying_def return_def set_original_def in_monad Bex_def) + +lemma set_cdt_symb_exec_l: + "corres_underlying {(s, s'). f (kheap s) (exst s) s'} nf nf' dc P P' (set_cdt g) (return x)" + by (simp add: corres_underlying_def return_def set_cdt_def in_monad Bex_def) + +crunch domain_index[wp]: create_cap_ext "\s. P (domain_index s)" +crunch work_units_completed[wp]: create_cap_ext "\s. P (work_units_completed s)" + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma updateNewFreeIndex_noop_psp_corres: + "corres_underlying {(s, s'). pspace_relations (ekheap s) (kheap s) (ksPSpace s')} False True + dc \ (cte_at' slot) + (return ()) (updateNewFreeIndex slot)" + apply (simp add: updateNewFreeIndex_def) + apply (rule corres_guard_imp) + apply (rule corres_bind_return2) + apply (rule corres_symb_exec_r_conj[where P'="cte_at' slot"]) + apply (rule corres_trivial, simp) + apply (wp getCTE_wp' | wpc + | simp add: updateTrackedFreeIndex_def getSlotCap_def)+ + done + +lemma insertNewCap_corres: +notes if_cong[cong del] if_weak_cong[cong] +shows + "\ cref' = cte_map (fst tup) + \ cap_relation (default_cap tp (snd tup) sz d) cap \ \ + corres dc + (cte_wp_at ((=) cap.NullCap) (fst tup) and pspace_aligned + and pspace_distinct and valid_objs and valid_mdb and valid_list + and cte_wp_at ((\) cap.NullCap) p) + (cte_wp_at' (\c. cteCap c = NullCap) cref' and + cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ sameRegionAs (cteCap cte) cap) (cte_map p) + and valid_mdb' and pspace_aligned' and pspace_distinct' and valid_objs' + and (\s. 
descendants_range' cap (cte_map p) (ctes_of s))) + (create_cap tp sz p d tup) + (insertNewCap (cte_map p) cref' cap)" + apply (cases tup, + clarsimp simp add: create_cap_def insertNewCap_def + liftM_def) + apply (rule corres_symb_exec_r [OF _ getCTE_sp])+ + prefer 3 + apply (rule no_fail_pre, wp) + apply (clarsimp elim!: cte_wp_at_weakenE') + prefer 4 + apply (rule no_fail_pre, wp) + apply (clarsimp elim!: cte_wp_at_weakenE') + apply (rule corres_assert_assume) + prefer 2 + apply (case_tac oldCTE) + apply (clarsimp simp: cte_wp_at_ctes_of valid_mdb'_def valid_mdb_ctes_def + valid_nullcaps_def) + apply (erule allE)+ + apply (erule (1) impE) + apply (simp add: initMDBNode_def) + apply clarsimp + apply (rule_tac F="capClass cap = PhysicalClass" in corres_req) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (drule sameRegionAs_classes, simp) + apply (rule corres_caps_decomposition) + prefer 3 + apply wp+ + apply (rule hoare_post_imp, simp) + apply (wp | assumption)+ + defer + apply ((wp | simp)+)[1] + apply (simp add: create_cap_ext_def set_cdt_list_def update_cdt_list_def bind_assoc) + apply ((wp | simp)+)[1] + apply (wp updateMDB_ctes_of_cases + | simp add: o_def split del: if_split)+ + apply (clarsimp simp: cdt_relation_def cte_wp_at_ctes_of + split del: if_split cong: if_cong simp del: id_apply) + apply (subst if_not_P, erule(1) valid_mdbD3') + apply (case_tac x, case_tac oldCTE) + apply (subst bluhr_descendants_of') + apply (rule mdb_insert_again_child.intro) + apply (rule mdb_insert_again.intro) + apply (rule mdb_ptr.intro) + apply (simp add: valid_mdb'_def vmdb_def) + apply (rule mdb_ptr_axioms.intro) + apply simp + apply (rule mdb_ptr.intro) + apply (simp add: valid_mdb'_def vmdb_def) + apply (rule mdb_ptr_axioms.intro) + apply fastforce + apply (rule mdb_insert_again_axioms.intro) + apply (clarsimp simp: nullPointer_def)+ + apply (erule (1) ctes_of_valid_cap') + apply (simp add: valid_mdb'_def valid_mdb_ctes_def) + apply clarsimp + apply (rule mdb_insert_again_child_axioms.intro) + apply (clarsimp simp: isMDBParentOf_def) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def + ut_revocable'_def) + apply (fold fun_upd_def) + apply (subst descendants_of_insert_child') + apply (erule(1) mdb_Null_descendants) + apply (clarsimp simp: cte_wp_at_def) + apply (erule(1) mdb_Null_None) + apply (subgoal_tac "cte_at (aa, bb) s") + prefer 2 + apply (drule not_sym, clarsimp simp: cte_wp_at_caps_of_state split: if_split_asm) + apply (subst descendants_of_eq' [OF _ cte_wp_at_cte_at], assumption+) + apply (clarsimp simp: state_relation_def) + apply assumption+ + apply (subst cte_map_eq_subst [OF _ cte_wp_at_cte_at], assumption+) + apply (simp add: mdb_relation_simp) + defer + apply (clarsimp split del: if_split)+ + apply (clarsimp simp add: revokable_relation_def cte_wp_at_ctes_of + split del: if_split) + apply simp + apply (rule conjI) + apply clarsimp + apply (elim modify_map_casesE) + apply ((clarsimp split: if_split_asm cong: conj_cong + simp: cte_map_eq_subst cte_wp_at_cte_at + revokable_relation_simp)+)[4] + apply clarsimp + apply (subgoal_tac "null_filter (caps_of_state s) (aa, bb) \ None") + prefer 2 + apply (clarsimp simp: null_filter_def cte_wp_at_caps_of_state split: if_split_asm) + apply (subgoal_tac "cte_at (aa,bb) s") + prefer 2 + apply clarsimp + apply (drule null_filter_caps_of_stateD) + apply (erule cte_wp_cte_at) + apply (elim modify_map_casesE) + apply (clarsimp split: if_split_asm cong: conj_cong + simp: cte_map_eq_subst 
cte_wp_at_cte_at revokable_relation_simp)+ + apply (clarsimp simp: state_relation_def ghost_relation_of_heap pt_types_of_heap_eq o_def)+ + apply wp+ + apply (rule corres_guard_imp) + apply (rule corres_underlying_symb_exec_l [OF gets_symb_exec_l]) + apply (rule corres_underlying_symb_exec_l [OF gets_symb_exec_l]) + apply (rule corres_underlying_symb_exec_l [OF set_cdt_symb_exec_l]) + apply (rule corres_underlying_symb_exec_l [OF do_ext_op_update_cdt_list_symb_exec_l']) + apply (rule corres_underlying_symb_exec_l [OF set_original_symb_exec_l]) + apply (rule corres_cong[OF refl refl _ refl refl, THEN iffD1]) + apply (rule bind_return[THEN fun_cong]) + apply (rule corres_split) + apply (rule setCTE_corres; simp) + apply (subst bind_return[symmetric], + rule corres_split) + apply (simp add: dc_def[symmetric]) + apply (rule updateMDB_symb_exec_r) + apply (simp add: dc_def[symmetric]) + apply (rule corres_split_noop_rhs[OF updateMDB_symb_exec_r]) + apply (rule updateNewFreeIndex_noop_psp_corres) + apply (wp getCTE_wp set_cdt_valid_objs set_cdt_cte_at + hoare_weak_lift_imp | simp add: o_def)+ + apply (clarsimp simp: cte_wp_at_cte_at) + apply (clarsimp simp: cte_wp_at_ctes_of no_0_def valid_mdb'_def + valid_mdb_ctes_def) + apply (rule conjI, clarsimp) + apply clarsimp + apply (erule (2) valid_dlistEn) + apply simp + apply(simp only: cdt_list_relation_def valid_mdb_def2) + apply(subgoal_tac "finite_depth (cdt s)") + prefer 2 + apply(simp add: finite_depth valid_mdb_def2[symmetric]) + apply(intro impI allI) + apply(subgoal_tac "mdb_insert_abs (cdt s) p (a, b)") + prefer 2 + apply(clarsimp simp: cte_wp_at_caps_of_state) + apply(rule mdb_insert_abs.intro) + apply(clarsimp) + apply(erule (1) mdb_cte_at_Null_None) + apply (erule (1) mdb_cte_at_Null_descendants) + apply(subgoal_tac "no_0 (ctes_of s')") + prefer 2 + apply(simp add: valid_mdb_ctes_def valid_mdb'_def) + apply simp + apply (elim conjE) + apply (case_tac "cdt s (a,b)") + prefer 2 + apply (simp add: mdb_insert_abs_def) + apply simp + apply(case_tac x) + apply(simp add: cte_wp_at_ctes_of) + apply(simp add: mdb_insert_abs.next_slot split del: if_split) + apply(case_tac "c=p") + apply(simp) + apply(clarsimp simp: modify_map_def) + apply(case_tac z) + apply(fastforce split: if_split_asm) + apply(case_tac "c = (a, b)") + apply(simp) + apply(case_tac "next_slot p (cdt_list s) (cdt s)") + apply(simp) + apply(simp) + apply(clarsimp simp: modify_map_def const_def) + apply(clarsimp split: if_split_asm) + apply(drule_tac p="cte_map p" in valid_mdbD1') + apply(simp) + apply(simp add: valid_mdb'_def valid_mdb_ctes_def) + apply(clarsimp simp: nullPointer_def no_0_def) + apply(clarsimp simp: state_relation_def) + apply(clarsimp simp: cte_wp_at_caps_of_state) + apply(drule_tac slot=p in pspace_relation_ctes_ofI) + apply(simp add: cte_wp_at_caps_of_state) + apply(simp) + apply(simp) + apply(simp) + apply(clarsimp simp: state_relation_def cdt_list_relation_def) + apply(erule_tac x="fst p" in allE, erule_tac x="snd p" in allE) + apply(fastforce) + apply(simp) + apply(case_tac "next_slot c (cdt_list s) (cdt s)") + apply(simp) + apply(simp) + apply(subgoal_tac "cte_at c s") + prefer 2 + apply(rule cte_at_next_slot) + apply(simp_all add: valid_mdb_def2)[4] + apply(clarsimp simp: modify_map_def const_def) + apply(simp split: if_split_asm) + apply(simp add: valid_mdb'_def) + apply(drule_tac ptr="cte_map p" in no_self_loop_next) + apply(simp) + apply(simp) + apply(drule_tac p="(aa, bb)" in cte_map_inj) + apply(simp_all add: cte_wp_at_caps_of_state)[5] + 
apply(clarsimp) + apply(simp) + apply(clarsimp) + apply(drule cte_map_inj_eq; simp add: cte_wp_at_caps_of_state) + apply(clarsimp) + apply(case_tac z) + apply(clarsimp simp: state_relation_def cdt_list_relation_def) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE) + apply(fastforce) + apply(clarsimp) + apply(drule cte_map_inj_eq) + apply(simp_all add: cte_wp_at_caps_of_state)[6] + apply(clarsimp simp: state_relation_def cdt_list_relation_def) + apply(erule_tac x=aa in allE, erule_tac x=bb in allE, fastforce) + done + +definition apitype_of :: "cap \ apiobject_type option" +where + "apitype_of c \ case c of + Structures_A.UntypedCap d p b idx \ Some ArchTypes_H.Untyped + | Structures_A.EndpointCap r badge rights \ Some EndpointObject + | Structures_A.NotificationCap r badge rights \ Some NotificationObject + | Structures_A.CNodeCap r bits guard \ Some ArchTypes_H.CapTableObject + | Structures_A.ThreadCap r \ Some TCBObject + | _ \ None" + +lemma cte_wp_at_cteCaps_of: + "cte_wp_at' (\cte. P (cteCap cte)) p s + = (\cap. cteCaps_of s p = Some cap \ P cap)" + apply (subst tree_cte_cteCap_eq[unfolded o_def]) + apply (clarsimp split: option.splits) + done + +lemma caps_contained_modify_mdb_helper[simp]: + "(\n. modify_map m p (cteMDBNode_update f) x = Some (CTE c n)) + = (\n. m x = Some (CTE c n))" + apply (cases "m p", simp_all add: modify_map_def) + apply (case_tac a, simp_all) + done + +lemma sameRegionAs_capRange_subset: + "\ sameRegionAs c c'; capClass c = PhysicalClass \ \ capRange c' \ capRange c" + apply (erule sameRegionAsE) + apply (rule equalityD1) + apply (rule master_eqI, rule capRange_Master) + apply simp + apply assumption+ + apply (clarsimp simp: isCap_simps)+ + done + + +definition + is_end_chunk :: "cte_heap \ capability \ machine_word \ bool" +where + "is_end_chunk ctes cap p \ \p'. ctes \ p \ p' + \ (\cte. ctes p = Some cte \ sameRegionAs cap (cteCap cte)) + \ (\cte'. ctes p' = Some cte' \ \ sameRegionAs cap (cteCap cte'))" + +definition + mdb_chunked2 :: "cte_heap \ bool" +where + "mdb_chunked2 ctes \ (\x p p' cte. ctes x = Some cte + \ is_end_chunk ctes (cteCap cte) p \ is_end_chunk ctes (cteCap cte) p' + \ p = p') + \ (\p p' cte cte'. ctes p = Some cte \ ctes p' = Some cte' + \ ctes \ p \ p' \ sameRegionAs (cteCap cte') (cteCap cte) + \ sameRegionAs (cteCap cte) (cteCap cte'))" + +lemma mdb_chunked2_revD: + "\ ctes p = Some cte; ctes p' = Some cte'; ctes \ p \ p'; + mdb_chunked2 ctes; sameRegionAs (cteCap cte') (cteCap cte) \ + \ sameRegionAs (cteCap cte) (cteCap cte')" + by (fastforce simp add: mdb_chunked2_def) + +lemma valid_dlist_step_back: + "\ ctes \ p \ p''; ctes \ p' \ p''; valid_dlist ctes; p'' \ 0 \ + \ p = p'" + apply (simp add: mdb_next_unfold valid_dlist_def) + apply (frule_tac x=p in spec) + apply (drule_tac x=p' in spec) + apply (clarsimp simp: Let_def) + done + +lemma chunk_sameRegionAs_step1: + "\ ctes \ p' \\<^sup>* p''; ctes p'' = Some cte; + is_chunk ctes (cteCap cte) p p''; + mdb_chunked2 ctes; valid_dlist ctes \ \ + \cte'. 
ctes p' = Some cte' + \ ctes \ p \\<^sup>+ p' + \ sameRegionAs (cteCap cte') (cteCap cte)" + apply (erule converse_rtrancl_induct) + apply (clarsimp simp: is_chunk_def) + apply (drule_tac x=p'' in spec, clarsimp) + apply (clarsimp simp: is_chunk_def) + apply (frule_tac x=y in spec) + apply (drule_tac x=z in spec) + apply ((drule mp, erule(1) transitive_closure_trans) + | clarsimp)+ + apply (rule sameRegionAs_trans[rotated], assumption) + apply (drule(3) mdb_chunked2_revD) + apply simp + apply (erule(1) sameRegionAs_trans) + apply simp + done + +end +locale mdb_insert_again_all = mdb_insert_again_child + + assumes valid_c': "s \' c'" + + fixes n' + defines "n' \ modify_map n (mdbNext parent_node) (cteMDBNode_update (mdbPrev_update (\a. site)))" +begin +interpretation Arch . (*FIXME: arch_split*) +lemma no_0_n' [simp]: "no_0 n'" + using no_0_n by (simp add: n'_def) + +lemma dom_n' [simp]: "dom n' = dom n" + apply (simp add: n'_def) + apply (simp add: modify_map_if dom_def) + apply (rule set_eqI) + apply simp + apply (rule iffI) + apply auto[1] + apply clarsimp + apply (case_tac y) + apply (case_tac "mdbNext parent_node = x") + apply auto + done + +lemma mdb_chain_0_n' [simp]: "mdb_chain_0 n'" + using chain_n + apply (simp add: mdb_chain_0_def) + apply (simp add: n'_def trancl_prev_update) + done + +lemma parency_n': + "n' \ p \ p' = (if m \ p \ parent \ p = parent + then m \ p \ p' \ p' = site + else m \ p \ p')" + using descendants [of p] + unfolding descendants_of'_def + by (auto simp add: set_eq_iff n'_def) + +lemma n'_direct_eq: + "n' \ p \ p' = (if p = parent then p' = site else + if p = site then m \ parent \ p' + else m \ p \ p')" + by (simp add: n'_def n_direct_eq) + +lemma n'_tranclD: + "n' \ p \\<^sup>+ p' \ + (if p = site then m \ parent \\<^sup>+ p' + else if m \ p \\<^sup>+ parent \ p = parent then m \ p \\<^sup>+ p' \ p' = site + else m \ p \\<^sup>+ p')" + apply (erule trancl_induct) + apply (fastforce simp: n'_direct_eq split: if_split_asm) + apply (fastforce simp: n'_direct_eq split: if_split_asm elim: trancl_trans) + done + +lemma site_in_dom: "site \ dom n" + by (simp add: n) + +lemma m_tranclD: + assumes m: "m \ p \\<^sup>+ p'" + shows "p' \ site \ n' \ p \\<^sup>+ p'" +proof - + from m have "p = site \ p' = 0" by clarsimp + with mdb_chain_0_n' m + show ?thesis + apply - + apply (erule trancl_induct) + apply (rule context_conjI) + apply clarsimp + apply (cases "p = site") + apply (simp add: mdb_chain_0_def site_in_dom) + apply (cases "p = parent") + apply simp + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule context_conjI) + apply clarsimp + apply clarsimp + apply (erule trancl_trans) + apply (case_tac "y = parent") + apply simp + apply (rule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + done +qed + +lemma n'_trancl_eq: + "n' \ p \\<^sup>+ p' = + (if p = site then m \ parent \\<^sup>+ p' + else if m \ p \\<^sup>+ parent \ p = parent then m \ p \\<^sup>+ p' \ p' = site + else m \ p \\<^sup>+ p')" + apply simp + apply (intro conjI impI iffI) + apply (drule n'_tranclD) + apply simp + apply simp + apply (drule n'_tranclD) + apply simp + apply (erule disjE) + apply (drule m_tranclD)+ + apply simp + apply (drule m_tranclD) + apply simp + apply 
(erule trancl_trans) + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (drule n'_tranclD, simp) + apply (erule disjE) + apply (drule m_tranclD) + apply simp + apply simp + apply (rule r_into_trancl) + apply (simp add: n'_direct_eq) + apply (drule n'_tranclD, simp) + apply simp + apply (cases "p' = site", simp) + apply (drule m_tranclD) + apply clarsimp + apply (drule tranclD) + apply (clarsimp simp: n'_direct_eq) + apply (simp add: rtrancl_eq_or_trancl) + apply (drule n'_tranclD, simp) + apply clarsimp + apply (drule m_tranclD, simp) + done + +lemma n'_rtrancl_eq: + "n' \ p \\<^sup>* p' = + (if p = site then p' \ site \ m \ parent \\<^sup>+ p' \ p' = site + else if m \ p \\<^sup>* parent then m \ p \\<^sup>* p' \ p' = site + else m \ p \\<^sup>* p')" + by (auto simp: rtrancl_eq_or_trancl n'_trancl_eq) + +lemma mdbNext_parent_site [simp]: + "mdbNext parent_node \ site" +proof + assume "mdbNext parent_node = site" + hence "m \ parent \ site" + using parent + by (unfold mdb_next_unfold) simp + thus False by simp +qed + +lemma mdbPrev_parent_site [simp]: + "site \ mdbPrev parent_node" +proof + assume "site = mdbPrev parent_node" + with parent site + have "m \ site \ parent" + apply (unfold mdb_next_unfold) + apply simp + apply (erule dlistEp) + apply clarsimp + apply clarsimp + done + with p_0 show False by simp +qed + +lemma parent_prev: + "(m \ parent \ p) = (p = mdbNext parent_node \ p \ 0)" + apply (rule iffI) + apply (frule dlist_prevD, rule parent) + apply (simp add: mdb_next_unfold parent) + apply (clarsimp simp: mdb_prev_def) + apply clarsimp + apply (rule dlist_nextD0) + apply (rule parent_next) + apply assumption + done + +lemma parent_next_prev: + "(m \ p \ mdbNext parent_node) = (p = parent \ mdbNext parent_node \ 0)" + using parent + apply - + apply (rule iffI) + apply (clarsimp simp add: mdb_prev_def) + apply (rule conjI) + apply (erule dlistEn) + apply clarsimp + apply simp + apply clarsimp + apply clarsimp + apply (rule dlist_nextD0) + apply (rule parent_next) + apply assumption + done + + +lemma n'_prev_eq: + notes if_cong[cong del] if_weak_cong[cong] + shows "n' \ p \ p' = (if p' = site then p = parent + else if p = site then m \ parent \ p' + else if p = parent then p' = site + else m \ p \ p')" + using parent site site_prev + apply (simp add: n'_def n mdb_prev_def new_parent_def new_site_def split del: if_split) + apply (clarsimp simp add: modify_map_if cong: if_cong split del: if_split) + apply (cases "p' = site", simp) + apply (simp cong: if_cong split del: if_split) + apply (cases "p' = parent") + apply clarsimp + apply (rule conjI, clarsimp simp: mdb_prev_def) + apply (clarsimp simp: mdb_prev_def) + apply (simp cong: if_cong split del: if_split) + apply (cases "p = site") + apply (simp add: parent_prev) + apply (cases "mdbNext parent_node = p'") + apply simp + apply (rule iffI) + prefer 2 + apply clarsimp + apply (erule dlistEn) + apply simp + apply clarsimp + apply (case_tac cte') + apply clarsimp + apply clarsimp + apply clarsimp + apply (insert site_next)[1] + apply (rule valid_dlistEp [OF dlist, where p=p'], assumption) + apply clarsimp + apply clarsimp + apply (simp cong: if_cong split del: if_split) + apply (cases "p = parent") + apply clarsimp + apply (insert site_next) + apply (cases "mdbNext parent_node = p'", clarsimp) + apply clarsimp + apply (rule valid_dlistEp [OF dlist, where p=p'], assumption) + apply clarsimp + apply clarsimp + apply simp + apply (cases "mdbNext parent_node = p'") + prefer 2 + apply (clarsimp simp: mdb_prev_def) + 
apply (rule iffI, clarsimp) + apply clarsimp + apply (case_tac z) + apply simp + apply (rule iffI) + apply (clarsimp simp: mdb_prev_def) + apply (drule sym [where t=p']) + apply (simp add: parent_next_prev) + done + +lemma dlist_n' [simp]: + notes if_cong[cong del] if_weak_cong[cong] + shows "valid_dlist n'" + using no_0_n' + by (clarsimp simp: valid_dlist_def2 n'_direct_eq + n'_prev_eq Invariants_H.valid_dlist_prevD [OF dlist]) + +lemma n'_cap: + "n' p = Some (CTE c node) \ + if p = site then c = c' \ m p = Some (CTE NullCap site_node) + else \node'. m p = Some (CTE c node')" + by (auto simp: n'_def n modify_map_if new_parent_def parent + new_site_def site site_cap split: if_split_asm) + +lemma m_cap: + "m p = Some (CTE c node) \ + if p = site + then \node'. n' site = Some (CTE c' node') + else \node'. n' p = Some (CTE c node')" + by (clarsimp simp: n n'_def new_parent_def new_site_def parent) + +lemma n'_badged: + "n' p = Some (CTE c node) \ + if p = site then c = c' \ mdbFirstBadged node + else \node'. m p = Some (CTE c node') \ mdbFirstBadged node = mdbFirstBadged node'" + by (auto simp: n'_def n modify_map_if new_parent_def parent + new_site_def site site_cap split: if_split_asm) + +lemma no_next_region: + "\ m \ parent \ p'; m p' = Some (CTE cap' node) \ \ \sameRegionAs c' cap'" + apply (clarsimp simp: mdb_next_unfold parent) + apply (frule next_wont_bite [rotated], clarsimp) + apply simp + done + +lemma valid_badges_n' [simp]: "valid_badges n'" + using valid_badges + apply (clarsimp simp: valid_badges_def) + apply (simp add: n'_direct_eq) + apply (drule n'_badged)+ + apply (clarsimp split: if_split_asm) + apply (drule (1) no_next_region) + apply simp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply simp + done + +lemma c'_not_Null: "c' \ NullCap" + using same_region by clarsimp + +lemma valid_nullcaps_n' [simp]: + "valid_nullcaps n'" + using nullcaps is_untyped c'_not_Null + apply (clarsimp simp: valid_nullcaps_def n'_def n modify_map_if new_site_def + new_parent_def isCap_simps) + apply (erule allE)+ + apply (erule (1) impE) + apply (simp add: nullMDBNode_def) + apply (insert parent) + apply (rule dlistEn, rule parent) + apply clarsimp + apply (clarsimp simp: nullPointer_def) + done + +lemma phys': "capClass parent_cap = PhysicalClass" + using sameRegionAs_classes [OF same_region] phys + by simp + +lemma capRange_c': "capRange c' \ capRange parent_cap" + apply (rule sameRegionAs_capRange_subset) + apply (rule same_region) + apply (rule phys') + done + +lemma untypedRange_c': + assumes ut: "isUntypedCap c'" + shows "untypedRange c' \ untypedRange parent_cap" + using ut is_untyped capRange_c' + by (auto simp: isCap_simps) + +lemma sameRegion_parentI: + "sameRegionAs c' cap \ sameRegionAs parent_cap cap" + using same_region + apply - + apply (erule (1) sameRegionAs_trans) + done + +lemma no_loops_n': "no_loops n'" + using mdb_chain_0_n' no_0_n' + by (rule mdb_chain_0_no_loops) + +lemmas no_loops_simps' [simp]= + no_loops_trancl_simp [OF no_loops_n'] + no_loops_direct_simp [OF no_loops_n'] + +lemma rangeD: + "\ m \ parent \ p; m p = Some (CTE cap node) \ \ + capRange cap \ capRange c' = {}" + using range by (rule descendants_rangeD') + +lemma capAligned_c': "capAligned c'" + using valid_c' by (rule valid_capAligned) + +lemma capRange_ut: + "capRange c' \ untypedRange parent_cap" + using capRange_c' is_untyped + by (clarsimp simp: isCap_simps del: subsetI) + +lemma untyped_mdb_n' [simp]: "untyped_mdb' n'" + using untyped_mdb capRange_ut untyped_inc + apply 
(clarsimp simp: untyped_mdb'_def descendants_of'_def) + apply (drule n'_cap)+ + apply (simp add: parency_n') + apply (simp split: if_split_asm) + apply clarsimp + apply (erule_tac x=parent in allE) + apply (simp add: parent is_untyped) + apply (erule_tac x=p' in allE) + apply simp + apply (frule untypedCapRange) + apply (drule untypedRange_c') + apply (erule impE, blast) + apply (drule (1) rangeD) + apply simp + apply clarsimp + apply (thin_tac "All P" for P) + apply (simp add: untyped_inc'_def) + apply (erule_tac x=parent in allE) + apply (erule_tac x=p in allE) + apply (simp add: parent is_untyped) + apply (clarsimp simp: descendants_of'_def) + apply (case_tac "untypedRange parent_cap = untypedRange c") + apply simp + apply (elim disjE conjE) + apply (drule (1) rangeD) + apply (drule untypedCapRange) + apply simp + apply blast + apply simp + apply (erule disjE) + apply clarsimp + apply (erule disjE) + apply (simp add: psubsetI) + apply (elim conjE) + apply (drule (1) rangeD) + apply (drule untypedCapRange) + apply simp + apply blast + apply blast + apply clarsimp + done + +lemma site': + "n' site = Some new_site" + by (simp add: n n'_def modify_map_if new_site_def) + +lemma loopE: "m \ x \\<^sup>+ x \ P" + by simp + +lemma m_loop_trancl_rtrancl: + "m \ y \\<^sup>* x \ \ m \ x \\<^sup>+ y" + apply clarsimp + apply (drule(1) transitive_closure_trans) + apply (erule loopE) + done + +lemma m_rtrancl_to_site: + "m \ p \\<^sup>* site = (p = site)" + apply (rule iffI) + apply (erule rtranclE) + apply assumption + apply simp + apply simp + done + +lemma descendants_of'_D: "p' \ descendants_of' p ctes \ ctes \ p \ p' " + by (clarsimp simp:descendants_of'_def) + +lemma untyped_inc_mdbD: + "\ sameRegionAs cap cap'; isUntypedCap cap; + ctes p = Some (CTE cap node); ctes p' = Some (CTE cap' node'); + untyped_inc' ctes; untyped_mdb' ctes; no_loops ctes \ + \ ctes \ p \ p' \ p = p' \ + (isUntypedCap cap' \ untypedRange cap \ untypedRange cap' + \ sameRegionAs cap' cap + \ ctes \ p' \ p)" + apply (subgoal_tac "untypedRange cap \ untypedRange cap' \ sameRegionAs cap' cap") + apply (cases "isUntypedCap cap'") + apply (drule(4) untyped_incD'[where p=p and p'=p']) + apply (erule sameRegionAsE, simp_all add: untypedCapRange)[1] + apply (cases "untypedRange cap = untypedRange cap'") + apply simp + apply (elim disjE conjE) + apply (simp only: simp_thms descendants_of'_D)+ + apply (elim disjE conjE) + apply (simp add: subset_iff_psubset_eq) + apply (elim disjE) + apply (simp add:descendants_of'_D)+ + apply (clarsimp simp:descendants_of'_def) + apply (clarsimp simp: isCap_simps) + apply clarsimp + apply (erule sameRegionAsE) + apply simp + apply (drule(1) untyped_mdbD',simp) + apply (simp add:untypedCapRange) + apply blast + apply simp + apply assumption + apply (simp add:descendants_of'_def) + apply (clarsimp simp:isCap_simps) + apply (clarsimp simp:isCap_simps) + apply (clarsimp simp add: sameRegionAs_def3 del: disjCI) + apply (rule disjI1) + apply (erule disjE) + apply (intro conjI) + apply blast + apply (simp add:untypedCapRange) + apply (erule subset_trans[OF _ untypedRange_in_capRange]) + apply clarsimp + apply (rule untypedRange_not_emptyD) + apply (simp add:untypedCapRange) + apply blast + apply (clarsimp simp:isCap_simps) + done + +lemma parent_chunk: + "is_chunk n' parent_cap parent site" + by (clarsimp simp: is_chunk_def + n'_trancl_eq n'_rtrancl_eq site' new_site_def same_region + m_loop_trancl_rtrancl m_rtrancl_to_site) + +lemma mdb_chunked_n' [simp]: + notes if_cong[cong del] if_weak_cong[cong] + 
shows "mdb_chunked n'" + using chunked untyped_mdb untyped_inc + apply (clarsimp simp: mdb_chunked_def) + apply (drule n'_cap)+ + apply (simp add: n'_trancl_eq split del: if_split) + apply (simp split: if_split_asm) + apply clarsimp + apply (frule sameRegion_parentI) + apply (frule(1) untyped_inc_mdbD [OF _ is_untyped _ _ untyped_inc untyped_mdb no_loops, OF _ parent]) + apply (elim disjE) + apply (frule sameRegionAs_capRange_Int) + apply (simp add: phys) + apply (rule valid_capAligned [OF valid_c']) + apply (rule valid_capAligned) + apply (erule valid_capI') + apply (erule notE, erule(1) descendants_rangeD' [OF range, rotated]) + apply (clarsimp simp: parent parent_chunk) + apply clarsimp + apply (frule subtree_mdb_next) + apply (simp add: m_loop_trancl_rtrancl [OF trancl_into_rtrancl, where x=parent]) + apply (case_tac "p' = parent") + apply (clarsimp simp: parent) + apply (drule_tac x=p' in spec) + apply (drule_tac x=parent in spec) + apply (frule sameRegionAs_trans [OF _ same_region]) + apply (clarsimp simp: parent is_chunk_def n'_trancl_eq n'_rtrancl_eq + m_rtrancl_to_site site' new_site_def) + apply (drule_tac x=p'' in spec) + apply clarsimp + apply (drule_tac p=p'' in m_cap, clarsimp) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=parent in allE) + apply (insert parent is_untyped)[1] + apply simp + apply (case_tac "p = parent") + apply (simp add: parent) + apply (clarsimp simp add: is_chunk_def) + apply (simp add: rtrancl_eq_or_trancl) + apply (erule disjE) + apply (clarsimp simp: site' new_site_def) + apply clarsimp + apply (drule tranclD) + apply (clarsimp simp: n'_direct_eq) + apply (drule (1) transitive_closure_trans) + apply simp + apply simp + apply (case_tac "isUntypedCap cap") + prefer 2 + apply (simp add: untyped_mdb'_def) + apply (erule_tac x=parent in allE) + apply simp + apply (erule_tac x=p in allE) + apply (simp add: descendants_of'_def) + apply (drule mp[where P="S \ T \ {}" for S T]) + apply (frule sameRegionAs_capRange_Int, simp add: phys) + apply (rule valid_capAligned, erule valid_capI') + apply (rule valid_capAligned, rule valid_c') + apply (insert capRange_ut)[1] + apply blast + apply (drule (1) rangeD) + apply (drule capRange_sameRegionAs, rule valid_c') + apply (simp add: phys) + apply simp + apply (case_tac "untypedRange parent_cap \ untypedRange cap") + apply (erule impE) + apply (clarsimp simp only: isCap_simps untypedRange.simps) + apply (subst (asm) range_subset_eq) + apply (drule valid_capI')+ + apply (drule valid_capAligned)+ + apply (clarsimp simp: capAligned_def) + apply (erule is_aligned_no_overflow) + apply (simp(no_asm) add: sameRegionAs_def3 isCap_simps) + apply (drule valid_capI')+ + apply (drule valid_capAligned)+ + apply (clarsimp simp: capAligned_def is_aligned_no_overflow) + apply clarsimp + apply (erule disjE) + apply simp + apply (rule conjI) + prefer 2 + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (thin_tac "P \ Q" for P Q) + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n'_rtrancl_eq split: if_split_asm) + apply (simp add: site' new_site_def) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (simp add: rtrancl_eq_or_trancl) + apply simp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n'_rtrancl_eq split: if_split_asm) + apply (drule (1) transitive_closure_trans, erule loopE) + apply 
(subgoal_tac "m \ p \ parent") + apply (drule subtree_mdb_next) + apply (drule (1) trancl_trans, erule loopE) + apply (thin_tac "All P" for P) + apply (drule_tac p=parent and p'=p in untyped_incD'[rotated], assumption+) + apply simp + apply (subgoal_tac "\ m \ parent \ p") + prefer 2 + apply clarsimp + apply (drule (1) rangeD) + apply (drule capRange_sameRegionAs, rule valid_c') + apply (simp add: phys) + apply simp + apply (clarsimp simp: descendants_of'_def subset_iff_psubset_eq) + apply (erule disjE,simp,simp) + apply (drule_tac p=parent and p'=p in untyped_incD'[rotated], assumption+) + apply (simp add:subset_iff_psubset_eq descendants_of'_def) + apply (elim disjE conjE| simp )+ + apply (drule(1) rangeD) + apply (drule capRange_sameRegionAs[OF _ valid_c']) + apply (simp add:phys)+ + apply (insert capRange_c' is_untyped)[1] + apply (simp add: untypedCapRange [symmetric]) + apply (drule(1) disjoint_subset) + apply (drule capRange_sameRegionAs[OF _ valid_c']) + apply (simp add:phys) + apply (simp add:Int_ac) + apply clarsimp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (erule disjE) + apply simp + apply (thin_tac "P \ Q" for P Q) + apply (subgoal_tac "is_chunk n' cap p p'") + prefer 2 + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n'_rtrancl_eq split: if_split_asm) + apply (erule disjE) + apply (erule_tac x=parent in allE) + apply clarsimp + apply (erule impE, fastforce) + apply (clarsimp simp: parent) + apply (simp add: site' new_site_def) + apply (erule sameRegionAs_trans, rule same_region) + apply (clarsimp simp add: parent) + apply (simp add: site' new_site_def) + apply (rule same_region) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply simp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI, clarsimp) + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI, clarsimp) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply simp + apply (thin_tac "P \ Q" for P Q) + apply (subgoal_tac "is_chunk n' cap' p' p") + prefer 2 + apply (clarsimp simp: is_chunk_def) + apply (simp add: n'_trancl_eq n'_rtrancl_eq split: if_split_asm) + apply (erule disjE) + apply (erule_tac x=parent in allE) + apply clarsimp + apply (erule impE, fastforce) + apply (clarsimp simp: parent) + apply (simp add: site' new_site_def) + apply (erule sameRegionAs_trans, rule same_region) + apply (clarsimp simp add: parent) + apply (simp add: site' new_site_def) + apply (rule same_region) + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply (erule_tac x=p'' in allE) + apply 
clarsimp + apply (drule_tac p=p'' in m_cap) + apply clarsimp + apply simp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI, clarsimp) + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + apply (rule conjI, clarsimp) + apply clarsimp + apply (drule (1) trancl_trans, erule loopE) + done + +lemma caps_contained_n' [simp]: "caps_contained' n'" + using caps_contained untyped_mdb untyped_inc + apply (clarsimp simp: caps_contained'_def) + apply (drule n'_cap)+ + apply (clarsimp split: if_split_asm) + apply (drule capRange_untyped) + apply simp + apply (frule capRange_untyped) + apply (frule untypedRange_c') + apply (erule_tac x=parent in allE) + apply (erule_tac x=p' in allE) + apply (simp add: parent) + apply (erule impE, blast) + apply (simp add: untyped_mdb'_def) + apply (erule_tac x=parent in allE) + apply (erule_tac x=p' in allE) + apply (simp add: parent is_untyped descendants_of'_def) + apply (erule impE) + apply (thin_tac "m site = t" for t) + apply (drule valid_capI') + apply (frule valid_capAligned) + apply blast + apply (drule (1) rangeD) + apply (frule capRange_untyped) + apply (drule untypedCapRange) + apply simp + apply (thin_tac "All P" for P) + apply (insert capRange_c')[1] + apply (simp add: untypedCapRange is_untyped) + apply (subgoal_tac "untypedRange parent_cap \ untypedRange c \ {}") + prefer 2 + apply blast + apply (frule untyped_incD'[OF _ capRange_untyped _ is_untyped]) + apply (case_tac c) + apply simp_all + apply (simp add:isCap_simps) + apply (rule parent) + apply clarsimp + apply (case_tac "untypedRange c = untypedRange parent_cap") + apply blast + apply simp + apply (elim disjE) + apply (drule_tac A = "untypedRange c" in psubsetI) + apply simp+ + apply (thin_tac "P\Q" for P Q) + apply (elim conjE) + apply (simp add:descendants_of'_def) + apply (drule(1) rangeD) + apply (frule capRange_untyped) + apply (simp add:untypedCapRange Int_ac) + apply blast + apply (simp add:descendants_of'_def) + apply blast + apply blast + done + +lemma untyped_inc_n' [simp]: "untypedRange c' \ usableUntypedRange parent_cap = {} \ untyped_inc' n'" + using untyped_inc + apply (clarsimp simp: untyped_inc'_def) + apply (drule n'_cap)+ + apply (clarsimp simp: descendants_of'_def parency_n' split: if_split_asm) + apply (frule untypedRange_c') + apply (insert parent is_untyped)[1] + apply (erule_tac x=parent in allE) + apply (erule_tac x=p' in allE) + apply clarsimp + apply (case_tac "untypedRange parent_cap = untypedRange c'a") + apply simp + apply (intro conjI) + apply (intro impI) + apply (elim disjE conjE) + apply (drule(1) subtree_trans,simp) + apply (simp add:subset_not_psubset) + apply simp + apply (clarsimp simp:subset_not_psubset) + apply (drule valid_capI')+ + apply (drule(2) disjoint_subset[OF usableRange_subseteq[OF valid_capAligned],rotated -1]) + apply simp + apply (clarsimp) + apply (rule int_not_empty_subsetD) + apply (drule(1) rangeD) + apply (simp add:untypedCapRange Int_ac) + apply (erule aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_c']]) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply simp + apply (erule subset_splitE) + apply (simp|elim conjE)+ + apply (thin_tac "P \ Q" for P Q)+ + 
apply blast + apply (simp|elim conjE)+ + apply (thin_tac "P \ Q" for P Q)+ + apply (intro conjI,intro impI,drule(1) subtree_trans,simp) + apply clarsimp + apply (intro impI) + apply (drule(1) rangeD) + apply (simp add:untypedCapRange Int_ac) + apply (rule int_not_empty_subsetD) + apply (simp add:Int_ac) + apply (erule aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_c']]) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply simp + apply (thin_tac "P \ Q" for P Q)+ + apply (drule(1) disjoint_subset[rotated]) + apply simp + apply (drule_tac B = "untypedRange c'a" in int_not_empty_subsetD) + apply (erule aligned_untypedRange_non_empty[OF capAligned_c']) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply simp + apply (frule untypedRange_c') + apply (insert parent is_untyped)[1] + apply (erule_tac x=p in allE) + apply (erule_tac x=parent in allE) + apply clarsimp + apply (case_tac "untypedRange parent_cap = untypedRange c") + apply simp + apply (intro conjI) + apply (intro impI) + apply (elim disjE conjE) + apply (clarsimp simp:subset_not_psubset )+ + apply (drule(1) subtree_trans,simp) + apply simp + apply (clarsimp simp:subset_not_psubset) + apply (drule disjoint_subset[OF usableRange_subseteq[OF valid_capAligned[OF valid_capI']],rotated]) + apply simp + apply assumption + apply simp + apply clarsimp + apply (rule int_not_empty_subsetD) + apply (drule(1) rangeD) + apply (simp add:untypedCapRange Int_ac) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply (erule aligned_untypedRange_non_empty[OF capAligned_c']) + apply simp + apply (erule subset_splitE) + apply (simp|elim conjE)+ + apply (thin_tac "P \ Q" for P Q)+ + apply (intro conjI,intro impI,drule(1) subtree_trans,simp) + apply clarsimp + apply (intro impI) + apply (drule(1) rangeD) + apply (simp add:untypedCapRange Int_ac) + apply (rule int_not_empty_subsetD) + apply (simp add:Int_ac) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply (erule aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_c']]) + apply simp + apply (thin_tac "P\Q" for P Q)+ + apply blast + apply (thin_tac "P\Q" for P Q)+ + apply simp + apply (drule(1) disjoint_subset2[rotated]) + apply simp + apply (drule_tac B = "untypedRange c'" in int_not_empty_subsetD) + apply (erule(1) aligned_untypedRange_non_empty[OF valid_capAligned[OF valid_capI']]) + apply (erule aligned_untypedRange_non_empty[OF capAligned_c']) + apply simp + apply (erule_tac x=p in allE) + apply (erule_tac x=p' in allE) + apply simp + apply blast + done + +lemma ut_rev_n' [simp]: "ut_revocable' n'" + using ut_rev + apply (clarsimp simp: ut_revocable'_def n'_def n_def) + apply (clarsimp simp: modify_map_if split: if_split_asm) + done + +lemma class_links_m: "class_links m" + using valid + by (simp add: valid_mdb_ctes_def) + +lemma parent_phys: "capClass parent_cap = PhysicalClass" + using is_untyped + by (clarsimp simp: isCap_simps) + +lemma class_links [simp]: "class_links n'" + using class_links_m + apply (clarsimp simp add: class_links_def) + apply (simp add: n'_direct_eq + split: if_split_asm) + apply (case_tac cte, + clarsimp dest!: n'_cap simp: site' parent new_site_def phys parent_phys) + apply (drule_tac x=parent in spec) + apply (drule_tac x=p' in spec) + apply (case_tac cte') + apply (clarsimp simp: site' new_site_def parent parent_phys phys dest!: n'_cap + split: if_split_asm) + apply (case_tac cte, 
case_tac cte') + apply (clarsimp dest!: n'_cap split: if_split_asm) + apply fastforce + done + +lemma irq_control_n' [simp]: + "irq_control n'" + using irq_control phys + apply (clarsimp simp: irq_control_def) + apply (clarsimp simp: n'_def n_def) + apply (clarsimp simp: modify_map_if split: if_split_asm) + done + +lemma dist_z_m: + "distinct_zombies m" + using valid by auto + +lemma dist_z [simp]: + "distinct_zombies n'" + using dist_z_m + apply (simp add: n'_def distinct_zombies_nonCTE_modify_map) + apply (simp add: n_def distinct_zombies_nonCTE_modify_map + fun_upd_def[symmetric]) + apply (erule distinct_zombies_seperateE, simp) + apply (case_tac cte, clarsimp) + apply (rename_tac cap node) + apply (subgoal_tac "capRange cap \ capRange c' \ {}") + apply (frule untyped_mdbD' [OF _ _ _ _ _ untyped_mdb, OF parent]) + apply (simp add: is_untyped) + apply (clarsimp simp add: untypedCapRange[OF is_untyped, symmetric]) + apply (drule disjoint_subset2 [OF capRange_c']) + apply simp + apply simp + apply (simp add: descendants_of'_def) + apply (drule(1) rangeD) + apply simp + apply (drule capAligned_capUntypedPtr [OF capAligned_c']) + apply (frule valid_capAligned [OF valid_capI']) + apply (drule(1) capAligned_capUntypedPtr) + apply auto + done + +lemma reply_masters_rvk_fb_m: + "reply_masters_rvk_fb m" + using valid by auto + +lemma reply_masters_rvk_fb_n[simp]: + "reply_masters_rvk_fb n'" + using reply_masters_rvk_fb_m + apply (simp add: reply_masters_rvk_fb_def n'_def ball_ran_modify_map_eq + n_def fun_upd_def[symmetric]) + apply (rule ball_ran_fun_updI, assumption) + apply clarsimp + done + +lemma valid_n': + "untypedRange c' \ usableUntypedRange parent_cap = {} \ valid_mdb_ctes n'" + by auto + +end + +lemma caps_overlap_reserved'_D: + "\caps_overlap_reserved' S s; ctes_of s p = Some cte;isUntypedCap (cteCap cte)\ \ usableUntypedRange (cteCap cte) \ S = {}" + apply (simp add:caps_overlap_reserved'_def) + apply (erule ballE) + apply (erule(2) impE) + apply fastforce + done + +context begin interpretation Arch . (*FIXME: arch_split*) +lemma insertNewCap_valid_mdb: + "\valid_mdb' and valid_objs' and K (slot \ p) and + caps_overlap_reserved' (untypedRange cap) and + cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) p and + K (\isZombie cap) and valid_cap' cap and + (\s. descendants_range' cap p (ctes_of s))\ + insertNewCap p slot cap + \\rv. 
valid_mdb'\" + apply (clarsimp simp: insertNewCap_def valid_mdb'_def) + apply (wp getCTE_ctes_of | simp add: o_def)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp simp: no_0_def valid_mdb_ctes_def) + apply (case_tac cte) + apply (rename_tac p_cap p_node) + apply (clarsimp cong: if_cong) + apply (case_tac ya) + apply (rename_tac node) + apply (clarsimp simp: nullPointer_def) + apply (rule mdb_insert_again_all.valid_n') + apply unfold_locales[1] + apply (assumption|rule refl)+ + apply (frule sameRegionAs_classes, clarsimp simp: isCap_simps) + apply (erule (1) ctes_of_valid_cap') + apply (simp add: valid_mdb_ctes_def) + apply simp + apply (clarsimp simp: isMDBParentOf_CTE) + apply (clarsimp simp: isCap_simps valid_mdb_ctes_def ut_revocable'_def) + apply assumption + apply (drule(1) caps_overlap_reserved'_D) + apply simp + apply (simp add:Int_ac) + done + +(* FIXME: move *) +lemma no_default_zombie: + "cap_relation (default_cap tp p sz d) cap \ \isZombie cap" + by (cases tp, auto simp: isCap_simps) + +lemmas updateNewFreeIndex_typ_ats[wp] = typ_at_lifts[OF updateNewFreeIndex_typ_at'] + +lemma updateNewFreeIndex_valid_objs[wp]: + "\valid_objs'\ updateNewFreeIndex slot \\_. valid_objs'\" + apply (simp add: updateNewFreeIndex_def getSlotCap_def) + apply (wp getCTE_wp' | wpc | simp add: updateTrackedFreeIndex_def)+ + done + +lemma insertNewCap_valid_objs [wp]: + "\ valid_objs' and valid_cap' cap and pspace_aligned' and pspace_distinct'\ + insertNewCap parent slot cap + \\_. valid_objs'\" + apply (simp add: insertNewCap_def) + apply (wp setCTE_valid_objs getCTE_wp') + apply clarsimp + done + +lemma insertNewCap_valid_cap [wp]: + "\ valid_cap' c \ + insertNewCap parent slot cap + \\_. valid_cap' c\" + apply (simp add: insertNewCap_def) + apply (wp getCTE_wp') + apply clarsimp + done + +lemmas descendants_of'_mdbPrev = descendants_of_prev_update + +lemma insertNewCap_ranges: + "\\s. descendants_range' c p (ctes_of s) \ + descendants_range' cap p (ctes_of s) \ + capRange c \ capRange cap = {} \ + cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) p s \ + valid_mdb' s \ valid_objs' s\ + insertNewCap p slot cap + \\_ s. descendants_range' c p (ctes_of s)\" + apply (simp add: insertNewCap_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def no_0_def) + apply (case_tac ctea) + apply (case_tac cteb) + apply (clarsimp simp: nullPointer_def cong: if_cong) + apply (simp (no_asm) add: descendants_range'_def descendants_of'_mdbPrev) + apply (subst mdb_insert_again_child.descendants) + apply unfold_locales[1] + apply (simp add: valid_mdb'_def) + apply (assumption|rule refl)+ + apply (frule sameRegionAs_classes, clarsimp simp: isCap_simps) + apply (erule (1) ctes_of_valid_cap') + apply (simp add: valid_mdb'_def valid_mdb_ctes_def) + apply clarsimp + apply (clarsimp simp: isMDBParentOf_def) + apply (clarsimp simp: isCap_simps valid_mdb'_def + valid_mdb_ctes_def ut_revocable'_def) + apply clarsimp + apply (rule context_conjI, blast) + apply (clarsimp simp: descendants_range'_def) + done + +lemma insertNewCap_overlap_reserved'[wp]: + "\\s. caps_overlap_reserved' (capRange c) s\ + capRange c \ capRange cap = {} \ capAligned cap \ + cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) p s \ + valid_mdb' s \ valid_objs' s\ + insertNewCap p slot cap + \\_ s. 
caps_overlap_reserved' (capRange c) s\" + apply (simp add: insertNewCap_def caps_overlap_reserved'_def) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule conjI) + apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def no_0_def) + apply (case_tac ctea) + apply (case_tac cteb) + apply (clarsimp simp: nullPointer_def ball_ran_modify_map_eq + caps_overlap_reserved'_def[symmetric]) + apply (clarsimp simp: ran_def split: if_splits) + apply (case_tac "slot = a") + apply clarsimp + apply (rule disjoint_subset) + apply (erule(1) usableRange_subseteq) + apply (simp add:untypedCapRange Int_ac)+ + apply (subst Int_commute) + apply (erule(2) caps_overlap_reserved'_D) + done + +crunch ksArch[wp]: insertNewCap "\s. P (ksArchState s)" + (wp: crunch_wps) + +lemma inv_untyped_corres_helper1: + "list_all2 cap_relation (map (\ref. default_cap tp ref sz d) orefs) cps + \ + corres dc + (\s. pspace_aligned s \ pspace_distinct s + \ valid_objs s \ valid_mdb s \ valid_list s + \ cte_wp_at is_untyped_cap p s + \ (\tup \ set (zip crefs orefs). + cte_wp_at (\c. cap_range (default_cap tp (snd tup) sz d) \ untyped_range c) p s) + \ (\tup \ set (zip crefs orefs). + descendants_range (default_cap tp (snd tup) sz d) p s) + \ (\tup \ set (zip crefs orefs). + caps_overlap_reserved (untyped_range (default_cap tp (snd tup) sz d)) s) + \ (\tup \ set (zip crefs orefs). real_cte_at (fst tup) s) + \ (\tup \ set (zip crefs orefs). + cte_wp_at ((=) cap.NullCap) (fst tup) s) + \ distinct (p # (map fst (zip crefs orefs))) + \ distinct_sets (map (\tup. cap_range (default_cap tp (snd tup) sz d)) (zip crefs orefs)) + \ (\tup \ set (zip crefs orefs). + valid_cap (default_cap tp (snd tup) sz d) s)) + (\s. (\tup \ set (zip (map cte_map crefs) cps). valid_cap' (snd tup) s) + \ (\tup \ set (zip (map cte_map crefs) cps). cte_wp_at' (\c. cteCap c = NullCap) (fst tup) s) + \ cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + (\tup \ set (zip (map cte_map crefs) cps). + sameRegionAs (cteCap cte) (snd tup))) + (cte_map p) s + \ distinct ((cte_map p) # (map fst (zip (map cte_map crefs) cps))) + \ valid_mdb' s \ valid_objs' s \ pspace_aligned' s \ pspace_distinct' s + \ (\tup \ set (zip (map cte_map crefs) cps). descendants_range' (snd tup) (cte_map p) (ctes_of s)) + \ (\tup \ set (zip (map cte_map crefs) cps). 
+ caps_overlap_reserved' (capRange (snd tup)) s) + \ distinct_sets (map capRange (map snd (zip (map cte_map crefs) cps)))) + (sequence_x (map (create_cap tp sz p d) (zip crefs orefs))) + (zipWithM_x (insertNewCap (cte_map p)) + ((map cte_map crefs)) cps)" + apply (simp add: zipWithM_x_def zipWith_def split_def) + apply (fold mapM_x_def) + apply (rule corres_list_all2_mapM_) + apply (rule corres_guard_imp) + apply (erule insertNewCap_corres) + apply (clarsimp simp: cte_wp_at_def is_cap_simps) + apply (clarsimp simp: fun_upd_def cte_wp_at_ctes_of) + apply clarsimp + apply (rule hoare_pre, wp hoare_vcg_const_Ball_lift) + apply clarsimp + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_caps_of_state + cap_range_def[where c="default_cap a b c d" for a b c d]) + apply (drule(2) caps_overlap_reservedD[rotated]) + apply (simp add:Int_ac) + apply (rule conjI) + apply (clarsimp simp: valid_cap_def) + apply (rule conjI) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (rule conjI) + apply (clarsimp simp:Int_ac) + apply (erule disjoint_subset2[rotated]) + apply fastforce + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule conjI) + subgoal by fastforce + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps valid_cap_def) + apply (fastforce simp: image_def) + apply (rule hoare_pre) + apply (wp + hoare_vcg_const_Ball_lift + insertNewCap_valid_mdb hoare_vcg_all_lift insertNewCap_ranges + | subst cte_wp_at_cteCaps_of)+ + apply (subst(asm) cte_wp_at_cteCaps_of)+ + apply (clarsimp simp only:) + apply simp + apply (rule conjI) + apply clarsimp + apply (thin_tac "cte_map p \ S" for S) + apply (erule notE, erule rev_image_eqI) + apply simp + apply (rule conjI,clarsimp+) + apply (rule conjI,erule caps_overlap_reserved'_subseteq) + apply (rule untypedRange_in_capRange) + apply (rule conjI,erule no_default_zombie) + apply (rule conjI, clarsimp simp:Int_ac) + apply fastforce + apply (clarsimp simp:Int_ac valid_capAligned ) + apply fastforce + apply (rule list_all2_zip_split) + apply (simp add: list_all2_map2 list_all2_refl) + apply (simp add: list_all2_map1) + done + +lemma createNewCaps_valid_pspace_extras: + "\(\s. n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. pspace_aligned'\" + "\(\s. n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. pspace_distinct'\" + "\(\s. n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. valid_mdb'\" + "\(\s. n \ 0 \ ptr \ 0 \ range_cover ptr sz (APIType_capBits ty us) n + \ sz \ maxUntypedSizeBits + \ pspace_no_overlap' ptr sz s + \ valid_pspace' s \ caps_no_overlap'' ptr sz s + \ caps_overlap_reserved' {ptr .. 
ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s + \ ksCurDomain s \ maxDomain + )\ + createNewCaps ty ptr n us d + \\rv. valid_objs'\" + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + apply (rule hoare_grab_asm)+ + apply (rule hoare_pre,rule hoare_strengthen_post[OF createNewCaps_valid_pspace]) + apply (simp add:valid_pspace'_def)+ + done + +declare map_fst_zip_prefix[simp] + +declare map_snd_zip_prefix[simp] + +declare word_unat_power [symmetric, simp del] + +lemma createNewCaps_range_helper: + "\\s. range_cover ptr sz (APIType_capBits tp us) n \ 0 < n\ + createNewCaps tp ptr n us d + \\rv s. \capfn. + rv = map capfn (map (\p. ptr_add ptr (p * 2 ^ (APIType_capBits tp us))) + [0 ..< n]) + \ (\p. capClass (capfn p) = PhysicalClass + \ capUntypedPtr (capfn p) = p + \ capBits (capfn p) = (APIType_capBits tp us))\" + apply (simp add: createNewCaps_def toAPIType_def Arch_createNewCaps_def + split del: if_split cong: option.case_cong) + apply (rule hoare_grab_asm)+ + apply (frule range_cover.range_cover_n_less) + apply (frule range_cover.unat_of_nat_n) + apply (cases tp, simp_all split del: if_split) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type, simp_all split del: if_split) + apply (rule hoare_pre, wp) + apply (frule range_cover_not_zero[rotated -1],simp) + apply (clarsimp simp: APIType_capBits_def objBits_simps ptr_add_def o_def) + apply (subst upto_enum_red') + apply unat_arith + apply (clarsimp simp: o_def fromIntegral_def toInteger_nat fromInteger_nat) + apply fastforce + apply (rule hoare_pre,wp createObjects_ret2) + apply (clarsimp simp: APIType_capBits_def word_bits_def bit_simps + objBits_simps ptr_add_def o_def) + apply (fastforce simp: objBitsKO_def objBits_def) + apply (rule hoare_pre,wp createObjects_ret2) + apply (clarsimp simp: APIType_capBits_def word_bits_def + objBits_simps ptr_add_def o_def) + apply (fastforce simp: objBitsKO_def objBits_def) + apply (rule hoare_pre,wp createObjects_ret2) + apply (clarsimp simp: APIType_capBits_def word_bits_def objBits_simps ptr_add_def o_def) + apply (fastforce simp: objBitsKO_def objBits_def) + apply (rule hoare_pre,wp createObjects_ret2) + apply (clarsimp simp: APIType_capBits_def word_bits_def objBits_simps ptr_add_def o_def) + apply (fastforce simp: objBitsKO_def objBits_def) + apply (wp createObjects_ret2 + | clarsimp simp: APIType_capBits_def objBits_if_dev archObjSize_def word_bits_def + split del: if_split + | simp add: objBits_simps + | (rule exI, (fastforce simp: bit_simps)))+ + done + +lemma createNewCaps_range_helper2: + "\\s. range_cover ptr sz (APIType_capBits tp us) n \ 0 < n\ + createNewCaps tp ptr n us d + \\rv s. \cp \ set rv. capRange cp \ {} \ capRange cp \ {ptr .. 
(ptr && ~~ mask sz) + 2 ^ sz - 1}\" + apply (rule hoare_assume_pre) + apply (rule hoare_strengthen_post) + apply (rule createNewCaps_range_helper) + apply (clarsimp simp: capRange_def ptr_add_def word_unat_power[symmetric] + simp del: atLeastatMost_subset_iff + dest!: less_two_pow_divD) + apply (rule conjI) + apply (rule is_aligned_no_overflow) + apply (rule is_aligned_add_multI [OF _ _ refl]) + apply (fastforce simp:range_cover_def) + apply simp + apply (rule range_subsetI) + apply (rule machine_word_plus_mono_right_split[OF range_cover.range_cover_compare]) + apply (assumption)+ + apply (simp add:range_cover_def word_bits_def) + apply (frule range_cover_cell_subset) + apply (erule of_nat_mono_maybe[rotated]) + apply (drule (1) range_cover.range_cover_n_less ) + apply (clarsimp) + apply (erule impE) + apply (simp add:range_cover_def) + apply (rule is_aligned_no_overflow) + apply (rule is_aligned_add_multI[OF _ le_refl refl]) + apply (fastforce simp:range_cover_def) + apply simp + done + +lemma createNewCaps_children: + "\\s. cap = UntypedCap d (ptr && ~~ mask sz) sz idx + \ range_cover ptr sz (APIType_capBits tp us) n \ 0 < n\ + createNewCaps tp ptr n us d + \\rv s. \y \ set rv. (sameRegionAs cap y)\" + apply (rule hoare_assume_pre) + apply (rule hoare_chain [OF createNewCaps_range_helper2]) + apply fastforce + apply clarsimp + apply (drule(1) bspec) + apply (clarsimp simp: sameRegionAs_def3 isCap_simps) + apply (drule(1) subsetD) + apply clarsimp + apply (erule order_trans[rotated]) + apply (rule word_and_le2) + done + +fun isDeviceCap :: "capability \ bool" +where + "isDeviceCap (UntypedCap d _ _ _) = d" +| "isDeviceCap (ArchObjectCap (FrameCap _ _ _ d _)) = d" +| "isDeviceCap _ = False" + +lemmas makeObjectKO_simp = makeObjectKO_def[split_simps AARCH64_H.object_type.split + Structures_H.kernel_object.split ArchTypes_H.apiobject_type.split + sum.split arch_kernel_object.split] + +lemma createNewCaps_descendants_range': + "\\s. descendants_range' p q (ctes_of s) \ + range_cover ptr sz (APIType_capBits ty us) n \ n \ 0 \ + pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n us d + \ \rv s. descendants_range' p q (ctes_of s)\" + apply (clarsimp simp:descendants_range'_def2 descendants_range_in'_def2) + apply (wp createNewCaps_null_filter') + apply fastforce + done + +lemma caps_overlap_reserved'_def2: + "caps_overlap_reserved' S = + (\s. (\cte \ ran (null_filter' (ctes_of s)). + isUntypedCap (cteCap cte) \ + usableUntypedRange (cteCap cte) \ S = {}))" + apply (rule ext) + apply (clarsimp simp:caps_overlap_reserved'_def) + apply (intro iffI ballI impI) + apply (elim ballE impE) + apply simp + apply simp + apply (simp add:ran_def null_filter'_def split:if_split_asm option.splits) + apply (elim ballE impE) + apply simp + apply simp + apply (clarsimp simp: ran_def null_filter'_def is_cap_simps + simp del: split_paired_All split_paired_Ex split: if_splits) + apply (drule_tac x = a in spec) + apply simp + done + +lemma createNewCaps_caps_overlap_reserved': + "\\s. caps_overlap_reserved' S s \ pspace_aligned' s \ pspace_distinct' s \ + pspace_no_overlap' ptr sz s \ 0 < n \ + range_cover ptr sz (APIType_capBits ty us) n\ + createNewCaps ty ptr n us d + \\rv s. caps_overlap_reserved' S s\" + apply (clarsimp simp: caps_overlap_reserved'_def2) + apply (wp createNewCaps_null_filter') + apply fastforce + done + +lemma createNewCaps_caps_overlap_reserved_ret': + "\\s. 
caps_overlap_reserved' + {ptr..ptr + of_nat n * 2 ^ APIType_capBits ty us - 1} s \ + pspace_aligned' s \ pspace_distinct' s \ pspace_no_overlap' ptr sz s \ + 0 < n \ range_cover ptr sz (APIType_capBits ty us) n\ + createNewCaps ty ptr n us d + \\rv s. \y\set rv. caps_overlap_reserved' (capRange y) s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp:valid_def) + apply (frule use_valid[OF _ createNewCaps_range_helper]) + apply fastforce + apply clarsimp + apply (erule use_valid[OF _ createNewCaps_caps_overlap_reserved']) + apply (intro conjI,simp_all) + apply (erule caps_overlap_reserved'_subseteq) + apply (drule(1) range_cover_subset) + apply simp + apply (clarsimp simp: ptr_add_def capRange_def + simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff) + done + +lemma createNewCaps_descendants_range_ret': + "\\s. (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n) + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s + \ descendants_range_in' {ptr..ptr + of_nat n * 2^(APIType_capBits ty us) - 1} cref (ctes_of s)\ + createNewCaps ty ptr n us d + \ \rv s. \y\set rv. descendants_range' y cref (ctes_of s)\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: valid_def) + apply (frule use_valid[OF _ createNewCaps_range_helper]) + apply simp + apply (erule use_valid[OF _ createNewCaps_descendants_range']) + apply (intro conjI,simp_all) + apply (clarsimp simp:descendants_range'_def descendants_range_in'_def) + apply (drule(1) bspec)+ + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (erule disjoint_subset2[rotated]) + apply (drule(1) range_cover_subset) + apply simp + apply (simp add:capRange_def ptr_add_def) + done + +lemma createNewCaps_parent_helper: + "\\s. cte_wp_at' (\cte. cteCap cte = UntypedCap d (ptr && ~~ mask sz) sz idx) p s + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s + \ (ty = APIObjectType ArchTypes_H.CapTableObject \ 0 < us) + \ range_cover ptr sz (APIType_capBits ty us) n \ 0 < n \ + createNewCaps ty ptr n us d + \\rv. cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + (\tup\set (zip (xs rv) rv). + sameRegionAs (cteCap cte) (snd tup))) + p\" + apply (rule hoare_post_imp [where Q="\rv s. \cte. cte_wp_at' ((=) cte) p s + \ isUntypedCap (cteCap cte) + \ (\tup\set (zip (xs rv) rv). + sameRegionAs (cteCap cte) (snd tup))"]) + apply (clarsimp elim!: cte_wp_at_weakenE') + apply (rule hoare_pre) + apply (wp hoare_vcg_ex_lift createNewCaps_cte_wp_at' + set_tuple_pick createNewCaps_children) + apply (auto simp:cte_wp_at'_def isCap_simps) + done + +lemma createNewCaps_valid_cap': + "\\s. pspace_no_overlap' ptr sz s \ + valid_pspace' s \ n \ 0 \ + range_cover ptr sz (APIType_capBits ty us) n \ + (ty = APIObjectType ArchTypes_H.CapTableObject \ 0 < us) \ + (ty = APIObjectType apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits) \ + ptr \ 0 \ sz \ maxUntypedSizeBits\ + createNewCaps ty ptr n us d + \\r s. \cap\set r. s \' cap\" + apply (rule hoare_assume_pre) + apply clarsimp + apply (erule createNewCaps_valid_cap) + apply simp+ + done + +lemma dmo_ctes_of[wp]: + "\\s. P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" + by (simp add: doMachineOp_def split_def | wp)+ + +lemma createNewCaps_ranges: + "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 + createNewCaps ty ptr n us d + \\rv s. 
distinct_sets (map capRange rv)\" + apply (rule hoare_assume_pre) + apply (rule hoare_chain) + apply (rule createNewCaps_range_helper) + apply fastforce + apply (clarsimp simp: distinct_sets_prop distinct_prop_map) + apply (rule distinct_prop_distinct) + apply simp + apply (clarsimp simp: capRange_def simp del: Int_atLeastAtMost + dest!: less_two_pow_divD) + apply (rule aligned_neq_into_no_overlap[simplified field_simps]) + apply (rule notI) + apply (erule(3) ptr_add_distinct_helper) + apply (simp add:range_cover_def word_bits_def) + apply (erule range_cover.range_cover_n_le(1)[where 'a=machine_word_len]) + apply (clarsimp simp: ptr_add_def word_unat_power[symmetric]) + apply (rule is_aligned_add_multI[OF _ le_refl refl]) + apply (simp add:range_cover_def)+ + apply (clarsimp simp: ptr_add_def word_unat_power[symmetric]) + apply (rule is_aligned_add_multI[OF _ le_refl refl]) + apply (simp add:range_cover_def)+ + done + +lemma createNewCaps_ranges': + "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 < n\ + createNewCaps ty ptr n us d + \\rv s. distinct_sets (map capRange (map snd (zip xs rv)))\" + apply (rule hoare_strengthen_post) + apply (rule createNewCaps_ranges) + apply (simp add: distinct_sets_prop del: map_map) + apply (erule distinct_prop_prefixE) + apply (rule Sublist.map_mono_prefix) + apply (rule map_snd_zip_prefix [unfolded less_eq_list_def]) + done + +declare split_paired_Ex[simp del] +lemmas corres_split_retype_createNewCaps + = corres_split[OF corres_retype_region_createNewCaps, + simplified bind_assoc, simplified ] +declare split_paired_Ex[simp add] + +lemma retype_region_caps_overlap_reserved: + "\valid_pspace and valid_mdb and + pspace_no_overlap_range_cover ptr sz and caps_no_overlap ptr sz and + caps_overlap_reserved + {ptr..ptr + of_nat n * 2^obj_bits_api (APIType_map2 (Inr ao')) us - 1} and + (\s. \slot. cte_wp_at (\c. up_aligned_area ptr sz \ cap_range c \ cap_is_device c = dev) slot s) and + K (APIType_map2 (Inr ao') = Structures_A.apiobject_type.CapTableObject \ 0 < us) and + K (range_cover ptr sz (obj_bits_api (APIType_map2 (Inr ao')) us) n) and + K (S \ {ptr..ptr + of_nat n * + 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us - 1})\ + retype_region ptr n us (APIType_map2 (Inr ao')) dev + \\rv s. caps_overlap_reserved S s\" + apply (rule hoare_gen_asm)+ + apply (simp (no_asm) add:caps_overlap_reserved_def2) + apply (rule hoare_pre) + apply (wp retype_region_caps_of) + apply simp+ + apply (simp add:caps_overlap_reserved_def2) + apply (intro conjI,simp+) + apply clarsimp + apply (drule bspec) + apply simp+ + apply (erule(1) disjoint_subset2) + done + +lemma retype_region_caps_overlap_reserved_ret: + "\valid_pspace and valid_mdb and caps_no_overlap ptr sz and + pspace_no_overlap_range_cover ptr sz and + caps_overlap_reserved + {ptr..ptr + of_nat n * 2^obj_bits_api (APIType_map2 (Inr ao')) us - 1} and + (\s. \slot. cte_wp_at (\c. up_aligned_area ptr sz \ cap_range c \ cap_is_device c = dev) slot s) and + K (APIType_map2 (Inr ao') = Structures_A.apiobject_type.CapTableObject \ 0 < us) and + K (range_cover ptr sz (obj_bits_api (APIType_map2 (Inr ao')) us) n)\ + retype_region ptr n us (APIType_map2 (Inr ao')) dev + \\rv s. \y\set rv. 
caps_overlap_reserved (untyped_range (default_cap + (APIType_map2 (Inr ao')) y us d)) s\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp:valid_def) + apply (frule retype_region_ret[unfolded valid_def,simplified,THEN spec,THEN bspec]) + apply clarsimp + apply (erule use_valid[OF _ retype_region_caps_overlap_reserved]) + apply clarsimp + apply (intro conjI,simp_all) + apply fastforce + apply (case_tac ao') + apply (simp_all add:APIType_map2_def) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (simp_all add:obj_bits_api_def ptr_add_def) + apply (drule(1) range_cover_subset) + apply (clarsimp)+ + done + +lemma updateFreeIndex_pspace_no_overlap': + "\\s. pspace_no_overlap' ptr sz s \ + valid_pspace' s \ cte_wp_at' (isUntypedCap o cteCap) src s\ + updateFreeIndex src index + \\r s. pspace_no_overlap' ptr sz s\" + apply (simp add: updateFreeIndex_def getSlotCap_def updateTrackedFreeIndex_def) + apply (rule hoare_pre) + apply (wp getCTE_wp' | wp (once) pspace_no_overlap'_lift + | simp)+ + apply (clarsimp simp:valid_pspace'_def pspace_no_overlap'_def) + done + +lemma updateFreeIndex_updateCap_caps_overlap_reserved: + "\\s. valid_mdb' s \ valid_objs' s \ S \ untypedRange cap \ + usableUntypedRange (capFreeIndex_update (\_. index) cap) \ S = {} \ + isUntypedCap cap \ descendants_range_in' S src (ctes_of s) \ + cte_wp_at' (\c. cteCap c = cap) src s\ + updateCap src (capFreeIndex_update (\_. index) cap) + \\r s. caps_overlap_reserved' S s\" + apply (clarsimp simp:caps_overlap_reserved'_def) + apply (wp updateCap_ctes_of_wp) + apply (clarsimp simp:modify_map_def cte_wp_at_ctes_of) + apply (erule ranE) + apply (clarsimp split:if_split_asm simp:valid_mdb'_def valid_mdb_ctes_def) + apply (case_tac cte) + apply (case_tac ctea) + apply simp + apply (drule untyped_incD') + apply (simp+)[4] + apply clarify + apply (erule subset_splitE) + apply (simp del:usable_untyped_range.simps) + apply (thin_tac "P \ Q" for P Q)+ + apply (elim conjE) + apply blast + apply (simp) + apply (thin_tac "P\Q" for P Q)+ + apply (elim conjE) + apply (drule(2) descendants_range_inD') + apply simp + apply (rule disjoint_subset[OF usableRange_subseteq]) + apply (rule valid_capAligned) + apply (erule(1) ctes_of_valid_cap') + apply (simp add:untypedCapRange)+ + apply (elim disjE) + apply clarsimp + apply (drule(2) descendants_range_inD') + apply simp + apply (rule disjoint_subset[OF usableRange_subseteq]) + apply (rule valid_capAligned) + apply (erule(1) ctes_of_valid_cap') + apply (simp add:untypedCapRange)+ + apply (thin_tac "P\Q" for P Q)+ + apply (rule disjoint_subset[OF usableRange_subseteq]) + apply (rule valid_capAligned) + apply (erule(1) ctes_of_valid_cap') + apply simp+ + apply blast + done + +lemma updateFreeIndex_caps_overlap_reserved: + "\\s. valid_pspace' s \ descendants_range_in' S src (ctes_of s) + \ cte_wp_at' ((\cap. S \ untypedRange cap \ + usableUntypedRange (capFreeIndex_update (\_. index) cap) \ S = {} \ + isUntypedCap cap) o cteCap) src s\ + updateFreeIndex src index + \\r s. caps_overlap_reserved' S s\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateFreeIndex_updateCap_caps_overlap_reserved getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) + apply (clarsimp simp: valid_mdb'_def split: option.split) + done + +lemma updateFreeIndex_updateCap_caps_no_overlap'': + "\\s. isUntypedCap cap \ caps_no_overlap'' ptr sz s \ + cte_wp_at' (\c. cteCap c = cap) src s\ + updateCap src (capFreeIndex_update (\_. 
index) cap) + \\r s. caps_no_overlap'' ptr sz s\" + apply (clarsimp simp:caps_no_overlap''_def) + apply (wp updateCap_ctes_of_wp) + apply (clarsimp simp: modify_map_def ran_def cte_wp_at_ctes_of + simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex) + apply (case_tac "a = src") + apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex) + apply (erule subsetD[rotated]) + apply (elim allE impE) + apply fastforce + apply (clarsimp simp:isCap_simps) + apply (erule subset_trans) + apply (clarsimp simp:isCap_simps) + apply (clarsimp simp del: atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex) + apply (erule subsetD[rotated]) + apply (elim allE impE) + prefer 2 + apply assumption + apply fastforce+ + done + +lemma updateFreeIndex_caps_no_overlap'': + "\\s. caps_no_overlap'' ptr sz s \ + cte_wp_at' (isUntypedCap o cteCap) src s\ + updateFreeIndex src index + \\r s. caps_no_overlap'' ptr sz s\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateFreeIndex_updateCap_caps_no_overlap'' getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: caps_no_overlap''_def split: option.split) + done + +lemma updateFreeIndex_descendants_of': + "\\s. cte_wp_at' (\c. \idx'. cteCap c = capFreeIndex_update (K idx') cap) ptr s \ isUntypedCap cap \ + P ((swp descendants_of') (null_filter' (ctes_of s)))\ + updateCap ptr (capFreeIndex_update (\_. index) cap) + \\r s. P ((swp descendants_of') (null_filter' (ctes_of s)))\" + apply (wp updateCap_ctes_of_wp) + apply clarsimp + apply (erule subst[rotated,where P = P]) + apply (rule ext) + apply (clarsimp simp:null_filter_descendants_of'[OF null_filter_simp']) + apply (rule mdb_inv_preserve.descendants_of) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (frule_tac m="ctes_of s" and index=index in mdb_inv_preserve_updateCap) + apply (clarsimp simp: isCap_simps) + apply (clarsimp simp: isCap_simps) + done + +lemma updateFreeIndex_updateCap_descendants_range_in': + "\\s. cte_wp_at' (\c. cteCap c = cap) slot s \ isUntypedCap cap \ + descendants_range_in' S slot (ctes_of s)\ + updateCap slot (capFreeIndex_update (\_. index) cap) + \\r s. descendants_range_in' S slot (ctes_of s)\" + apply (rule hoare_pre) + apply (wp descendants_range_in_lift' + [where Q'="\s. cte_wp_at' (\c. cteCap c = cap) slot s \ isUntypedCap cap" and + Q = "\s. cte_wp_at' (\c. cteCap c = cap) slot s \ isUntypedCap cap "] ) + apply (wp updateFreeIndex_descendants_of') + apply (clarsimp simp: cte_wp_at_ctes_of swp_def isCap_simps) + apply (simp add:updateCap_def) + apply (wp setCTE_weak_cte_wp_at getCTE_wp) + apply (fastforce simp:cte_wp_at_ctes_of isCap_simps) + apply (clarsimp) + done + +lemma updateFreeIndex_descendants_range_in': + "\\s. cte_wp_at' (isUntypedCap o cteCap) slot s + \ descendants_range_in' S slot (ctes_of s)\ + updateFreeIndex slot index + \\r s. descendants_range_in' S slot (ctes_of s)\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateFreeIndex_updateCap_descendants_range_in' getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma caps_no_overlap''_def2: + "caps_no_overlap'' ptr sz = + (\s. \cte\ran (null_filter' (ctes_of s)). 
+ untypedRange (cteCap cte) \ + {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1} \ {} \ + {ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1} \ + untypedRange (cteCap cte))" + apply (intro ext iffI) + apply (clarsimp simp:caps_no_overlap''_def null_filter'_def ran_def) + apply (drule_tac x = cte in spec) + apply fastforce + apply (clarsimp simp:caps_no_overlap''_def null_filter'_def) + apply (case_tac "cte = CTE capability.NullCap nullMDBNode") + apply clarsimp + apply (drule_tac x = cte in bspec) + apply (clarsimp simp:ran_def) + apply (rule_tac x= a in exI) + apply clarsimp + apply clarsimp + apply (erule subsetD) + apply simp + done + +lemma deleteObjects_caps_no_overlap'': + "\\s. invs' s \ ct_active' s \ sch_act_simple s \ + cte_wp_at' (\c. cteCap c = capability.UntypedCap d ptr sz idx) slot s \ + caps_no_overlap'' ptr sz s \ + descendants_range' (capability.UntypedCap d ptr sz idx) slot (ctes_of s)\ + deleteObjects ptr sz + \\rv s. caps_no_overlap'' ptr sz s\" + apply (rule hoare_name_pre_state) + apply (clarsimp split:if_splits) + apply (clarsimp simp:caps_no_overlap''_def2 deleteObjects_def2 capAligned_def valid_cap'_def + dest!:ctes_of_valid_cap') + apply (wp deleteObjects_null_filter[where idx = idx and p = slot]) + apply (clarsimp simp:cte_wp_at_ctes_of invs_def) + apply (case_tac cte) + apply clarsimp + apply (frule ctes_of_valid_cap') + apply (simp add:invs_valid_objs') + apply (simp add:valid_cap'_def capAligned_def) + done + +lemma descendants_range_in_subseteq': + "\descendants_range_in' A p ms ;B\ A\ \ descendants_range_in' B p ms" + by (auto simp:descendants_range_in'_def cte_wp_at_ctes_of dest!:bspec) + +lemma updateFreeIndex_mdb_simple': + "\\s. descendants_of' src (ctes_of s) = {} \ + pspace_no_overlap' (capPtr cap) (capBlockSize cap) s \ + valid_pspace' s \ cte_wp_at' (\c. \idx'. cteCap c = capFreeIndex_update (\_. idx') cap) src s \ + isUntypedCap cap\ + updateCap src (capFreeIndex_update (\_. idx) cap) + \\rv. valid_mdb'\" + apply (clarsimp simp:valid_mdb'_def updateCap_def valid_pspace'_def) + apply (wp getCTE_wp) + apply (clarsimp simp:cte_wp_at_ctes_of isCap_simps simp del:fun_upd_apply) + + apply (frule mdb_inv_preserve_updateCap[where index=idx and m="ctes_of s" and slot=src for s]) + apply (simp add: isCap_simps) + apply (simp add: modify_map_def) + apply (clarsimp simp add: mdb_inv_preserve.preserve_stuff mdb_inv_preserve.by_products valid_mdb_ctes_def) + + proof - + fix s cte ptr sz idx' d + assume descendants: "descendants_of' src (ctes_of s) = {}" + and cte_wp_at' :"ctes_of s src = Some cte" "cteCap cte = capability.UntypedCap d ptr sz idx'" + and unt_inc' :"untyped_inc' (ctes_of s)" + and valid_objs' :"valid_objs' s" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. 
UntypedCap d ptr sz idx) cte))" + (is "mdb_inv_preserve (ctes_of s) ?ctes") + + show "untyped_inc' ?ctes" + using cte_wp_at' + apply (clarsimp simp:untyped_inc'_def mdb_inv_preserve.descendants_of[OF invp, symmetric] + descendants + split del: if_split) + apply (case_tac "ctes_of s p") + apply (simp split: if_split_asm) + apply (case_tac "ctes_of s p'") + apply (simp split: if_split_asm) + apply (case_tac "the (ctes_of s p)", case_tac "the (ctes_of s p')") + apply clarsimp + apply (cut_tac p=p and p'=p' in untyped_incD'[OF _ _ _ _ unt_inc']) + apply assumption + apply (clarsimp simp: isCap_simps split: if_split_asm) + apply assumption + apply (clarsimp simp: isCap_simps split: if_split_asm) + apply (clarsimp simp: descendants split: if_split_asm) + done +qed + +lemma pspace_no_overlap_valid_untyped': + "\ pspace_no_overlap' ptr bits s; is_aligned ptr bits; bits < word_bits; + pspace_aligned' s \ + \ valid_untyped' d ptr bits idx s" + apply (clarsimp simp: valid_untyped'_def ko_wp_at'_def split del: if_split) + apply (frule(1) pspace_no_overlapD') + apply (simp add: obj_range'_def[symmetric] Int_commute add_mask_fold) + apply (erule disjE) + apply (drule base_member_set[simplified field_simps add_mask_fold]) + apply (simp add: word_bits_def) + apply blast + apply (simp split: if_split_asm) + apply (erule notE, erule disjoint_subset2[rotated]) + apply (clarsimp simp: is_aligned_no_wrap'[OF _ word_of_nat_less]) + done + +lemma updateFreeIndex_valid_pspace_no_overlap': + "\\s. valid_pspace' s \ + (\ptr sz. pspace_no_overlap' ptr sz s \ idx \ 2 ^ sz \ + cte_wp_at' ((\c. isUntypedCap c \ capPtr c = ptr \ capBlockSize c = sz) o cteCap) src s) + \ is_aligned (of_nat idx :: machine_word) minUntypedSizeBits \ + descendants_of' src (ctes_of s) = {}\ + updateFreeIndex src idx + \\r s. valid_pspace' s\" + apply (clarsimp simp:valid_pspace'_def updateFreeIndex_def updateTrackedFreeIndex_def) + apply (rule hoare_pre) + apply (rule hoare_vcg_conj_lift) + apply (clarsimp simp:updateCap_def getSlotCap_def) + apply (wp getCTE_wp | simp)+ + apply (wp updateFreeIndex_mdb_simple' getCTE_wp' | simp add: getSlotCap_def)+ + apply (clarsimp simp:cte_wp_at_ctes_of valid_pspace'_def) + apply (case_tac cte,simp add:isCap_simps) + apply (frule(1) ctes_of_valid_cap') + apply (clarsimp simp: valid_cap_simps' capAligned_def pspace_no_overlap_valid_untyped') + done + +crunch vms'[wp]: updateFreeIndex "valid_machine_state'" + +(* FIXME: move *) +lemma setCTE_tcbDomain_inv[wp]: + "\obj_at' (\tcb. P (tcbState tcb)) t\ setCTE ptr v \\_. obj_at' (\tcb. P (tcbState tcb)) t\" + apply (simp add: setCTE_def) + apply (rule setObject_cte_obj_at_tcb', simp_all) + done + +(* FIXME: move *) +crunch tcbState_inv[wp]: cteInsert "obj_at' (\tcb. P (tcbState tcb)) t" + (wp: crunch_simps hoare_drop_imps) + +lemma updateFreeIndex_clear_invs': + "\\s. invs' s \ + (\ptr sz. pspace_no_overlap' ptr sz s \ idx \ 2 ^ sz \ + cte_wp_at' ((\c. isUntypedCap c \ capPtr c = ptr \ capBlockSize c = sz) o cteCap) src s) + \ is_aligned (of_nat idx :: machine_word) minUntypedSizeBits + \ descendants_of' src (ctes_of s) = {}\ + updateFreeIndex src idx + \\r s. 
invs' s\" + apply (clarsimp simp:invs'_def valid_state'_def) + apply (wp updateFreeIndex_valid_pspace_no_overlap') + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def) + apply (wp updateFreeIndex_valid_pspace_no_overlap' sch_act_wf_lift valid_queues_lift + updateCap_iflive' tcb_in_cur_domain'_lift + | simp add: pred_tcb_at'_def)+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: ifunsafe'_def3 cteInsert_def setUntypedCapAsFull_def + split del: if_split) + apply wp+ + apply (rule hoare_vcg_conj_lift) + apply (simp add: updateCap_def) + apply (wp valid_irq_node_lift setCTE_typ_at')+ + apply (rule hoare_vcg_conj_lift) + apply (simp add:updateCap_def) + apply (wp setCTE_irq_handlers' getCTE_wp) + apply (simp add:updateCap_def) + apply (wp irqs_masked_lift valid_queues_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift + hoare_vcg_disj_lift untyped_ranges_zero_lift getCTE_wp + | wp (once) hoare_use_eq[where f="gsUntypedZeroRanges"] + | simp add: getSlotCap_def + | simp add: cte_wp_at_ctes_of)+ + apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) + apply (clarsimp simp: isCap_simps) + apply (frule(1) valid_global_refsD_with_objSize) + apply clarsimp + apply (intro conjI allI impI) + apply (clarsimp simp: modify_map_def cteCaps_of_def ifunsafe'_def3 split:if_splits) + apply (drule_tac x=src in spec) + apply (clarsimp simp:isCap_simps) + apply (rule_tac x = cref' in exI) + apply clarsimp + apply (drule_tac x = cref in spec) + apply clarsimp + apply (rule_tac x = cref' in exI) + apply clarsimp + apply (clarsimp simp: valid_pspace'_def) + apply (erule untyped_ranges_zero_fun_upd, simp_all) + apply (clarsimp simp: untypedZeroRange_def cteCaps_of_def isCap_simps) + done + +lemma cte_wp_at_pspace_no_overlapI': + "\invs' s; cte_wp_at' (\c. cteCap c = capability.UntypedCap + d (ptr && ~~ mask sz) sz idx) cref s; + idx \ unat (ptr && mask sz); sz < word_bits\ + \ pspace_no_overlap' ptr sz s" + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (case_tac cte,clarsimp) + apply (frule ctes_of_valid_cap') + apply (simp add:invs_valid_objs') + apply (clarsimp simp:valid_cap'_def invs'_def valid_state'_def valid_pspace'_def + valid_untyped'_def simp del:usableUntypedRange.simps) + apply (unfold pspace_no_overlap'_def) + apply (intro allI impI) + apply (unfold ko_wp_at'_def) + apply (clarsimp simp del: atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff usableUntypedRange.simps) + apply (drule spec)+ + apply (frule(1) pspace_distinctD') + apply (frule(1) pspace_alignedD') + apply (erule(1) impE)+ + apply (clarsimp simp: obj_range'_def simp del: atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff usableUntypedRange.simps) + apply (erule disjoint_subset2[rotated]) + apply (frule(1) le_mask_le_2p) + apply (clarsimp simp:p_assoc_help) + apply (rule le_plus'[OF word_and_le2]) + apply simp + apply (erule word_of_nat_le) + done + +lemma descendants_range_caps_no_overlapI': + "\invs' s; cte_wp_at' (\c. cteCap c = capability.UntypedCap + d (ptr && ~~ mask sz) sz idx) cref s; + descendants_range_in' {ptr .. 
(ptr && ~~ mask sz) + mask sz} cref (ctes_of s)\ + \ caps_no_overlap'' ptr sz s" + apply (frule invs_mdb') + apply (clarsimp simp:valid_mdb'_def valid_mdb_ctes_def cte_wp_at_ctes_of + simp del:usableUntypedRange.simps untypedRange.simps) + apply (unfold caps_no_overlap''_def add_mask_fold) + apply (intro ballI impI) + apply (erule ranE) + apply (subgoal_tac "isUntypedCap (cteCap ctea)") + prefer 2 + apply (rule untypedRange_not_emptyD) + apply blast + apply (case_tac ctea,case_tac cte) + apply simp + apply (drule untyped_incD') + apply ((simp add:isCap_simps del:usableUntypedRange.simps untypedRange.simps)+)[4] + apply (elim conjE subset_splitE) + apply (erule subset_trans[OF _ psubset_imp_subset,rotated]) + apply (clarsimp simp:word_and_le2 add_mask_fold) + apply simp + apply (elim conjE) + apply (thin_tac "P\Q" for P Q)+ + apply (drule(2) descendants_range_inD') + apply (simp add:untypedCapRange)+ + apply (erule subset_trans[OF _ equalityD1,rotated]) + apply (clarsimp simp:word_and_le2 add_mask_fold) + apply (thin_tac "P\Q" for P Q)+ + apply (drule disjoint_subset[rotated, where A' = "{ptr..(ptr && ~~ mask sz) + mask sz}"]) + apply (clarsimp simp:word_and_le2 Int_ac add_mask_fold)+ + done + +lemma cte_wp_at_caps_no_overlapI': + "\invs' s; cte_wp_at' (\c. (cteCap c) = UntypedCap d (ptr && ~~ mask sz) sz idx) cref s; + idx \ unat (ptr && mask sz); sz < word_bits\ + \ caps_no_overlap'' ptr sz s" + apply (frule invs_mdb') + apply (frule(1) le_mask_le_2p) + apply (clarsimp simp:valid_mdb'_def valid_mdb_ctes_def cte_wp_at_ctes_of) + apply (case_tac cte) + apply simp + apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) + apply (unfold caps_no_overlap''_def) + apply (intro ballI impI) + apply (erule ranE) + apply (subgoal_tac "isUntypedCap (cteCap ctea)") + prefer 2 + apply (rule untypedRange_not_emptyD) + apply blast + apply (case_tac ctea) + apply simp + apply (drule untyped_incD') + apply (simp add:isCap_simps)+ + apply (elim conjE) + apply (erule subset_splitE) + apply (erule subset_trans[OF _ psubset_imp_subset,rotated]) + apply (clarsimp simp: word_and_le2) + apply simp + apply (thin_tac "P\Q" for P Q)+ + apply (elim conjE) + apply (drule disjoint_subset2[rotated, where B' = "{ptr..(ptr && ~~ mask sz) + mask sz}"]) + apply clarsimp + apply (rule le_plus'[OF word_and_le2]) + apply simp + apply (erule word_of_nat_le) + apply (simp add: add_mask_fold) + apply (erule subset_trans[OF _ equalityD1,rotated]) + apply (clarsimp simp:word_and_le2) + apply (thin_tac "P\Q" for P Q)+ + apply (drule disjoint_subset[rotated, where A' = "{ptr..(ptr && ~~ mask sz) + 2 ^ sz - 1}"]) + apply (clarsimp simp:word_and_le2 Int_ac)+ + done + + +lemma descendants_range_ex_cte': + "\descendants_range_in' S p (ctes_of s'); ex_cte_cap_wp_to' P q s'; S \ capRange (cteCap cte); + invs' s'; ctes_of s' p = Some cte; isUntypedCap (cteCap cte)\ \ q \ S" + apply (frule invs_valid_objs') + apply (frule invs_mdb') + apply (clarsimp simp:invs'_def valid_state'_def) + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of) + apply (frule_tac cte = "cte" in valid_global_refsD') + apply simp + apply (case_tac "\irq. 
cteCap ctea = IRQHandlerCap irq") + apply clarsimp + apply (erule(1) in_empty_interE[OF _ _ subsetD,rotated -1]) + apply (clarsimp simp:global_refs'_def) + apply (erule_tac A = "range P" for P in subsetD) + apply (simp add:range_eqI field_simps) + apply (case_tac ctea) + apply clarsimp + apply (case_tac ctea) + apply (drule_tac cte = "cte" and cte' = ctea in untyped_mdbD') + apply assumption + apply (clarsimp simp:isCap_simps) + apply (drule_tac B = "untypedRange (cteCap cte)" in subsetD[rotated]) + apply (clarsimp simp:untypedCapRange) + apply clarsimp + apply (drule_tac x = " (irq_node' s')" in cte_refs_capRange[rotated]) + apply (erule(1) ctes_of_valid_cap') + apply blast + apply (clarsimp simp:isCap_simps) + apply (simp add:valid_mdb'_def valid_mdb_ctes_def) + apply (drule(2) descendants_range_inD') + apply clarsimp + apply (drule_tac x = " (irq_node' s')" in cte_refs_capRange[rotated]) + apply (erule(1) ctes_of_valid_cap') + apply blast + done + +lemma updateCap_isUntypedCap_corres: + "\is_untyped_cap cap; isUntypedCap cap'; cap_relation cap cap'\ + \ corres dc + (cte_wp_at (\c. is_untyped_cap c \ obj_ref_of c = obj_ref_of cap \ + cap_bits c = cap_bits cap \ cap_is_device c = cap_is_device cap) src and valid_objs and + pspace_aligned and pspace_distinct) + (cte_at' (cte_map src) and pspace_distinct' and pspace_aligned') + (set_cap cap src) (updateCap (cte_map src) cap')" + apply (rule corres_name_pre) + apply (simp add: updateCap_def) + apply (frule state_relation_pspace_relation) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule pspace_relation_cte_wp_atI) + apply (fastforce simp: cte_wp_at_ctes_of) + apply simp + apply clarify + apply (frule cte_map_inj_eq) + apply (fastforce simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state)+ + apply (clarsimp simp: is_cap_simps isCap_simps) + apply (rule corres_guard_imp) + apply (rule corres_symb_exec_r) + apply (rule_tac F = "cteCap_update (\_. capability.UntypedCap dev r bits f) ctea + = cteCap_update (\cap. capFreeIndex_update (\_. f) (cteCap cte)) cte" + in corres_gen_asm2) + apply (rule_tac F = " (cap.UntypedCap dev r bits f) = free_index_update (\_. f) c" + in corres_gen_asm) + apply simp + apply (rule setCTE_UntypedCap_corres) + apply ((clarsimp simp: cte_wp_at_caps_of_state cte_wp_at_ctes_of)+)[3] + apply (subst identity_eq) + apply (wp getCTE_sp getCTE_get no_fail_getCTE)+ + apply (clarsimp simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state)+ + done + +end + +lemma updateFreeIndex_corres: + "\is_untyped_cap cap; free_index_of cap = idx \ + \ corres dc + (cte_wp_at (\c. is_untyped_cap c \ obj_ref_of c = obj_ref_of cap \ + cap_bits c = cap_bits cap \ cap_is_device c = cap_is_device cap) src and valid_objs + and pspace_aligned and pspace_distinct) + (cte_at' (cte_map src) + and pspace_distinct' and pspace_aligned') + (set_cap cap src) (updateFreeIndex (cte_map src) idx)" + apply (rule corres_name_pre) + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def) + apply (rule corres_guard_imp) + apply (rule corres_symb_exec_r_conj[where P'="cte_at' (cte_map src)"])+ + apply (rule_tac F="isUntypedCap capa + \ cap_relation cap (capFreeIndex_update (\_. 
idx) capa)" + in corres_gen_asm2) + apply (rule updateCap_isUntypedCap_corres, simp+) + apply (clarsimp simp: isCap_simps) + apply simp + apply (wp getSlotCap_wp)+ + apply (clarsimp simp: state_relation_def cte_wp_at_ctes_of) + apply (rule no_fail_pre, wp no_fail_getSlotCap) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (wp getSlotCap_wp)+ + apply (clarsimp simp: state_relation_def cte_wp_at_ctes_of) + apply (rule no_fail_pre, wp no_fail_getSlotCap) + apply simp + apply clarsimp + apply (clarsimp simp: cte_wp_at_ctes_of cte_wp_at_caps_of_state) + apply (frule state_relation_pspace_relation) + apply (frule(1) pspace_relation_ctes_ofI[OF _ caps_of_state_cteD], simp+) + apply (clarsimp simp: isCap_simps is_cap_simps + cte_wp_at_caps_of_state free_index_of_def) + done + + +locale invokeUntyped_proofs = + fixes s cref reset ptr_base ptr tp us slots sz idx dev + assumes vui: "valid_untyped_inv_wcap' + (Invocations_H.Retype cref reset ptr_base ptr tp us slots dev) + (Some (UntypedCap dev (ptr && ~~ mask sz) sz idx)) s" + and misc: "ct_active' s" "invs' s" + +begin + +lemma cte_wp_at': "cte_wp_at' (\cte. cteCap cte = capability.UntypedCap + dev (ptr && ~~ mask sz) sz idx) cref s" + and cover: "range_cover ptr sz (APIType_capBits tp us) (length (slots::machine_word list))" + and misc2: "distinct slots" + "slots \ []" + "\slot\set slots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s" + "\x\set slots. ex_cte_cap_wp_to' (\_. True) x s" + using vui + by (auto simp: cte_wp_at_ctes_of) + +interpretation Arch . (*FIXME: arch_split*) + +lemma idx_cases: + "((\ reset \ idx \ unat (ptr - (ptr && ~~ mask sz))) \ reset \ ptr = ptr && ~~ mask sz)" + using vui + by (clarsimp simp: cte_wp_at_ctes_of) + +lemma desc_range: + "reset \ descendants_range_in' (mask_range ptr sz) (cref) (ctes_of s)" + using vui by (clarsimp simp: empty_descendants_range_in') + +abbreviation(input) + "retype_range == {ptr..ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us - 1}" + +abbreviation(input) + "usable_range == {ptr..(ptr && ~~ mask sz) + mask sz}" + +lemma not_0_ptr[simp]: "ptr\ 0" + using misc cte_wp_at' + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte) + apply clarsimp + apply (drule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) + apply (simp add: valid_cap'_def) + done + +lemmas range_cover_subset'' = range_cover_subset'[simplified add_mask_fold] + +lemma subset_stuff[simp]: + "retype_range \ usable_range" + apply (rule range_cover_subset''[OF cover]) + apply (simp add:misc2) + done + +lemma descendants_range[simp]: + "descendants_range_in' usable_range cref (ctes_of s)" + "descendants_range_in' retype_range cref (ctes_of s)" +proof - + have "descendants_range_in' usable_range cref (ctes_of s)" + using misc idx_cases cte_wp_at' cover + apply - + apply (erule disjE) + apply (erule cte_wp_at_caps_descendants_range_inI' + [OF _ _ _ range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]]) + apply simp+ + using desc_range + apply simp + done + thus "descendants_range_in' usable_range cref (ctes_of s)" + by simp + thus "descendants_range_in' retype_range cref (ctes_of s)" + by (rule descendants_range_in_subseteq'[OF _ subset_stuff]) +qed + +lemma vc'[simp] : "s \' capability.UntypedCap dev (ptr && ~~ mask sz) sz idx" + using misc cte_wp_at' + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac cte) + apply clarsimp + apply (erule ctes_of_valid_cap') + apply (simp add: invs_valid_objs') + done + +lemma sz_limit[simp]: + "sz \ maxUntypedSizeBits" + using vc' unfolding 
valid_cap'_def by clarsimp + +lemma ps_no_overlap'[simp]: "\ reset \ pspace_no_overlap' ptr sz s" + using misc cte_wp_at' cover idx_cases + apply clarsimp + apply (erule cte_wp_at_pspace_no_overlapI' + [OF _ _ _ range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]]) + apply (simp add: cte_wp_at_ctes_of) + apply simp+ + done + +lemma caps_no_overlap'[simp]: "caps_no_overlap'' ptr sz s" + using cte_wp_at' misc cover desc_range idx_cases + apply - + apply (erule disjE) + apply (erule cte_wp_at_caps_no_overlapI' + [OF _ _ _ range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def]]) + apply simp+ + apply (erule descendants_range_caps_no_overlapI') + apply simp+ + done + +lemma idx_compare'[simp]: + "unat ((ptr && mask sz) + (of_nat (length slots)<< (APIType_capBits tp us))) \ 2 ^ sz" + apply (rule le_trans[OF unat_plus_gt]) + apply (simp add: range_cover.unat_of_nat_n_shift[OF cover] range_cover_unat) + apply (insert range_cover.range_cover_compare_bound[OF cover]) + apply simp + done + +lemma ex_cte_no_overlap': + "\P p. ex_cte_cap_wp_to' P p s \ p \ usable_range" + using cte_wp_at' misc + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule_tac cte = cte in descendants_range_ex_cte'[OF descendants_range(1)]) + apply (clarsimp simp: word_and_le2 isCap_simps add_mask_fold)+ + done + +lemma cref_inv: "cref \ usable_range" + apply (insert misc cte_wp_at') + apply (drule if_unsafe_then_capD') + apply (simp add: invs'_def valid_state'_def) + apply simp + apply (erule ex_cte_no_overlap') + done + +lemma slots_invD: + "\x. x \ set slots \ x \ cref \ x \ usable_range \ ex_cte_cap_wp_to' (\_. True) x s" + using misc cte_wp_at' vui + apply clarsimp + apply (drule(1) bspec)+ + apply (drule ex_cte_no_overlap') + apply simp + apply (clarsimp simp: cte_wp_at_ctes_of) + done + +lemma usableRange_disjoint: + "usableUntypedRange (capability.UntypedCap d (ptr && ~~ mask sz) sz + (unat ((ptr && mask sz) + of_nat (length slots) * 2 ^ APIType_capBits tp us))) \ + {ptr..ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us - 1} = {}" +proof - + have idx_compare''[simp]: + "unat ((ptr && mask sz) + (of_nat (length slots) * (2::machine_word) ^ APIType_capBits tp us)) < 2 ^ sz + \ ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us - 1 + < ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us" + apply (rule word_leq_le_minus_one,simp) + apply (rule neq_0_no_wrap) + apply (rule machine_word_plus_mono_right_split) + apply (simp add: shiftl_t2n range_cover_unat[OF cover] field_simps) + apply (simp add: range_cover.sz(1)[where 'a=machine_word_len, folded word_bits_def, OF cover])+ + done + show ?thesis + apply (clarsimp simp: mask_out_sub_mask) + apply (drule idx_compare'') + apply simp + done +qed + +lemma szw: "sz < word_bits" + using cte_wp_at_valid_objs_valid_cap'[OF cte_wp_at'] misc + by (clarsimp simp: valid_cap_simps' capAligned_def invs_valid_objs') + +lemma idx_le_new_offs: + "\ reset + \ idx \ unat ((ptr && mask sz) + (of_nat (length slots) * 2 ^ (APIType_capBits tp us)))" + using misc idx_cases range_cover.range_cover_base_le[OF cover] + apply (clarsimp simp only: simp_thms) + apply (erule order_trans) + apply (simp add: word_le_nat_alt[symmetric] + shiftl_t2n mult.commute) + done + +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma valid_sched_etcbs[elim!]: "valid_sched_2 queues ekh sa cdom kh ct it \ valid_etcbs_2 ekh kh" + by (simp add: valid_sched_def) + +crunch ksIdleThread[wp]: deleteObjects "\s. 
P (ksIdleThread s)" + (simp: crunch_simps wp: hoare_drop_imps unless_wp) +crunch ksCurDomain[wp]: deleteObjects "\s. P (ksCurDomain s)" + (simp: crunch_simps wp: hoare_drop_imps unless_wp) +crunch irq_node[wp]: deleteObjects "\s. P (irq_node' s)" + (simp: crunch_simps wp: hoare_drop_imps unless_wp) + +lemma deleteObjects_ksCurThread[wp]: + "\\s. P (ksCurThread s)\ deleteObjects ptr sz \\_ s. P (ksCurThread s)\" + apply (simp add: deleteObjects_def3) + apply (wp | simp add: doMachineOp_def split_def)+ + done + +lemma deleteObjects_ct_active': + "\invs' and sch_act_simple and ct_active' + and cte_wp_at' (\c. cteCap c = UntypedCap d ptr sz idx) cref + and (\s. descendants_range' (UntypedCap d ptr sz idx) cref (ctes_of s)) + and K (sz < word_bits \ is_aligned ptr sz)\ + deleteObjects ptr sz + \\_. ct_active'\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_pre) + apply wps + apply (wp deleteObjects_st_tcb_at') + apply (auto simp: ct_in_state'_def elim: pred_tcb'_weakenE) + done + +defs cNodeOverlap_def: + "cNodeOverlap \ \cns inRange. \p n. cns p = Some n \ (\ is_aligned p (cte_level_bits + n) + \ cte_level_bits + n \ word_bits + \ ({p .. p + 2 ^ (cte_level_bits + n) - 1} \ {p. inRange p} \ {}))" + +lemma cNodeNoOverlap: + notes Int_atLeastAtMost[simp del] + shows + "corres dc (\s. \cref. cte_wp_at (\cap. is_untyped_cap cap + \ Collect R \ usable_untyped_range cap) cref s + \ valid_objs s \ pspace_aligned s) + \ + (return x) (stateAssert (\s. \ cNodeOverlap (gsCNodes s) R) [])" + apply (simp add: stateAssert_def assert_def) + apply (rule corres_symb_exec_r[OF _ get_sp]) + apply (rule corres_req[rotated], subst if_P, assumption) + apply simp + apply (clarsimp simp: cNodeOverlap_def cte_wp_at_caps_of_state) + apply (frule(1) caps_of_state_valid_cap) + apply (frule usable_range_subseteq[rotated], simp add: valid_cap_def) + apply (clarsimp simp: valid_cap_def valid_untyped_def cap_table_at_gsCNodes_eq + obj_at_def is_cap_table is_cap_simps) + apply (frule(1) pspace_alignedD) + apply simp + apply (elim allE, drule(1) mp, simp add: obj_range_def valid_obj_def cap_aligned_def) + apply (erule is_aligned_get_word_bits[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp: is_aligned_no_overflow simp del: ) + apply blast + apply (simp add: is_aligned_no_overflow power_overflow word_bits_def + Int_atLeastAtMost) + apply wp+ + done + +lemma reset_ineq_eq_idx_0: + "\ idx \ 2 ^ sz; b \ sz; (ptr :: obj_ref) \ 0; is_aligned ptr sz; sz < word_bits \ + \ (ptr + of_nat idx - 1 < ptr) = (idx = 0)" + apply (cases "idx = 0") + apply (simp add: gt0_iff_gem1[symmetric] word_neq_0_conv) + apply simp + apply (subgoal_tac "ptr \ ptr + of_nat idx - 1", simp_all)[1] + apply (subst field_simps[symmetric], erule is_aligned_no_wrap') + apply (subst word_less_nat_alt) + apply simp + apply (subst unat_of_nat_minus_1) + apply (erule order_le_less_trans, rule power_strict_increasing) + apply (simp add: word_bits_def) + apply simp + apply (rule notI, simp) + apply (erule order_less_le_trans[rotated]) + apply simp + done + +lemma reset_addrs_same: + "\ idx \ 2 ^ sz; resetChunkBits \ sz; ptr \ 0; is_aligned ptr sz; sz < word_bits \ + \ [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] = + map (\i. getFreeRef ptr (i * 2 ^ resetChunkBits)) + [i\[0..<2 ^ (sz - resetChunkBits)]. 
i * 2 ^ resetChunkBits < idx]" + apply (simp add: upto_enum_step_def getFreeRef_def reset_ineq_eq_idx_0) + apply (clarsimp simp: upto_enum_word o_def unat_div simp del: upt.simps) + apply (subst unat_of_nat_minus_1) + apply (rule_tac y="2 ^ sz" in order_le_less_trans, simp) + apply (rule power_strict_increasing, simp_all add: word_bits_def)[1] + apply simp + apply (rule_tac f="map f" for f in arg_cong) + apply (rule filter_upt_eq[symmetric]) + apply clarsimp + apply (erule order_le_less_trans[rotated]) + apply simp + apply (rule notI) + apply (drule order_less_le_trans[where x="a * b" for a b], + rule_tac m="2 ^ resetChunkBits" and n=idx in alignUp_ge_nat) + apply simp+ + apply (simp add: field_simps) + apply (simp only: mult_Suc_right[symmetric]) + apply (subst(asm) div_add_self1[where 'a=nat, simplified, symmetric]) + apply simp + apply (simp only: field_simps) + apply simp + apply clarsimp + apply (rule order_le_less_trans, rule div_mult_le, simp) + apply (simp add: Suc_le_eq td_gal_lt[symmetric] power_add[symmetric]) + done + +lemmas descendants_of_null_filter' = null_filter_descendants_of'[OF null_filter_simp'] + +lemmas deleteObjects_descendants + = deleteObjects_null_filter[where P="\c. Q (descendants_of' p c)" for p Q, + simplified descendants_of_null_filter'] + +lemma updateFreeIndex_descendants_of2: + " \\s. cte_wp_at' (isUntypedCap o cteCap) ptr s \ + P (\y. descendants_of' y (ctes_of s))\ + updateFreeIndex ptr index + \\r s. P (\y. descendants_of' y (ctes_of s))\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateFreeIndex_descendants_of'[simplified swp_def descendants_of_null_filter'] + getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + done + +crunch typ_at'[wp]: updateFreeIndex "\s. P (typ_at' T p s)" + +lemma updateFreeIndex_cte_wp_at: + "\\s. cte_wp_at' (\c. P (cteCap_update (if p = slot + then capFreeIndex_update (\_. idx) else id) c)) p s\ + updateFreeIndex slot idx + \\rv. cte_wp_at' P p\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def split del: if_split) + apply (rule hoare_pre, wp updateCap_cte_wp_at' getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (case_tac "the (ctes_of s p)") + apply (auto split: if_split_asm) + done + +lemma ex_tupI: + "P (fst x) (snd x) \ \a b. P a b" + by blast + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma resetUntypedCap_corres: + "untypinv_relation ui ui' + \ corres (dc \ dc) + (invs and valid_untyped_inv_wcap ui + (Some (cap.UntypedCap dev ptr sz idx)) + and ct_active and einvs + and (\_. \ptr_base ptr' ty us slots dev'. ui = Invocations_A.Retype slot True + ptr_base ptr' ty us slots dev)) + (invs' and valid_untyped_inv_wcap' ui' (Some (UntypedCap dev ptr sz idx)) and ct_active') + (reset_untyped_cap slot) + (resetUntypedCap (cte_map slot))" + apply (rule corres_gen_asm, clarsimp) + apply (simp add: reset_untyped_cap_def resetUntypedCap_def liftE_bindE cong: if_cong) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getSlotCap_corres]) + apply simp + apply (rule_tac F="cap = cap.UntypedCap dev ptr sz idx \ (\s. 
s \ cap)" in corres_gen_asm) + apply (clarsimp simp: bits_of_def free_index_of_def unlessE_def + split del: if_split cong: if_cong) + apply (rule corres_if[OF refl]) + apply (rule corres_returnOk[where P=\ and P'=\], simp) + apply (rule corres_split[OF deleteObjects_corres]) + apply (clarsimp simp add: valid_cap_def cap_aligned_def) + apply (clarsimp simp add: valid_cap_def cap_aligned_def untyped_min_bits_def) + apply (rule corres_if) + apply simp + apply (simp add: bits_of_def shiftL_nat) + apply (rule corres_split_nor) + apply (simp add: unless_def) + apply (rule corres_when, simp) + apply (rule corres_machine_op) + apply (rule corres_Id, simp, simp, wp) + apply (rule updateFreeIndex_corres, simp) + apply (simp add: free_index_of_def) + apply (wp | simp only: unless_def)+ + apply (rule_tac F="sz < word_bits \ idx \ 2 ^ sz + \ ptr \ 0 \ is_aligned ptr sz + \ resetChunkBits \ sz" in corres_gen_asm) + apply (simp add: bits_of_def free_index_of_def mapME_x_map_simp liftE_bindE + reset_addrs_same[where ptr=ptr and idx=idx and sz=sz] + o_def rev_map + del: capFreeIndex_update.simps) + apply (rule_tac P="\x. valid_objs and pspace_aligned and pspace_distinct + and pspace_no_overlap {ptr .. ptr + 2 ^ sz - 1} + and cte_wp_at (\a. is_untyped_cap a \ obj_ref_of a = ptr \ cap_bits a = sz + \ cap_is_device a = dev) slot" + and P'="\_. valid_pspace' and (\s. descendants_of' (cte_map slot) (ctes_of s) = {}) + and pspace_no_overlap' ptr sz + and cte_wp_at' (\cte. \idx. cteCap cte = UntypedCap dev ptr sz idx) (cte_map slot)" + in mapME_x_corres_same_xs) + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (rule corres_machine_op) + apply (rule corres_Id) + apply (simp add: shiftL_nat getFreeRef_def shiftl_t2n mult.commute) + apply simp + apply wp + apply (rule corres_split_nor[OF updateFreeIndex_corres]) + apply simp + apply (simp add: getFreeRef_def getFreeIndex_def free_index_of_def) + apply clarify + apply (subst unat_mult_simple) + apply (subst unat_of_nat_eq) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (erule order_less_le_trans; simp) + apply (subst unat_p2) + apply (simp add: Kernel_Config.resetChunkBits_def) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (subst unat_of_nat_eq) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (erule order_less_le_trans; simp) + apply simp + apply (rule preemptionPoint_corres) + apply wp+ + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (clarsimp simp: getFreeRef_def valid_pspace'_def cte_wp_at_ctes_of + valid_cap_def cap_aligned_def) + apply (erule aligned_add_aligned) + apply (rule is_aligned_weaken) + apply (rule is_aligned_mult_triv2) + apply (simp add: Kernel_Config.resetChunkBits_def) + apply (simp add: untyped_min_bits_def) + apply (rule hoare_pre) + apply simp + apply (strengthen imp_consequent) + apply (wp preemption_point_inv set_cap_cte_wp_at update_untyped_cap_valid_objs + set_cap_no_overlap | simp)+ + apply (clarsimp simp: exI cte_wp_at_caps_of_state) + apply (drule caps_of_state_valid_cap, simp+) + apply (clarsimp simp: is_cap_simps valid_cap_simps + cap_aligned_def + valid_untyped_pspace_no_overlap) + apply (rule hoare_pre) + apply (simp del: capFreeIndex_update.simps) + apply (strengthen imp_consequent) + apply (wp updateFreeIndex_valid_pspace_no_overlap' + updateFreeIndex_descendants_of2 + 
doMachineOp_psp_no_overlap + updateFreeIndex_cte_wp_at + pspace_no_overlap'_lift + preemptionPoint_inv + hoare_vcg_ex_lift + | simp)+ + apply (clarsimp simp add: cte_wp_at_ctes_of exI isCap_simps valid_pspace'_def) + apply (clarsimp simp: getFreeIndex_def getFreeRef_def) + apply (subst is_aligned_weaken[OF is_aligned_mult_triv2]) + apply (simp add: Kernel_Config.resetChunkBits_def minUntypedSizeBits_def) + apply (subst unat_mult_simple) + apply (subst unat_of_nat_eq) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (erule order_less_le_trans; simp) + apply (subst unat_p2) + apply (simp add: Kernel_Config.resetChunkBits_def) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (subst unat_of_nat_eq) + apply (rule order_less_trans[rotated], + rule_tac n=sz in power_strict_increasing; simp add: word_bits_def) + apply (erule order_less_le_trans; simp) + apply simp + apply simp + apply (simp add: if_apply_def2) + apply (strengthen invs_valid_objs invs_psp_aligned invs_distinct) + apply (wp hoare_vcg_const_imp_lift) + apply (simp add: if_apply_def2) + apply (strengthen invs_pspace_aligned' invs_pspace_distinct' + invs_valid_pspace') + apply (wp hoare_vcg_const_imp_lift deleteObjects_cte_wp_at'[where p="cte_map slot"] + deleteObjects_invs'[where p="cte_map slot"] + deleteObjects_descendants[where p="cte_map slot"] + | simp)+ + apply (wp get_cap_wp getCTE_wp' | simp add: getSlotCap_def)+ + apply (clarsimp simp: cte_wp_at_caps_of_state descendants_range_def2) + apply (cases slot) + apply (strengthen empty_descendants_range_in + ex_tupI[where x=slot])+ + apply (frule(1) caps_of_state_valid) + apply (clarsimp simp: valid_cap_simps cap_aligned_def) + apply (frule(1) caps_of_state_valid) + apply (frule if_unsafe_then_capD[OF caps_of_state_cteD], clarsimp+) + apply (drule(1) ex_cte_cap_protects[OF _ caps_of_state_cteD + empty_descendants_range_in _ order_refl]; clarsimp) + apply (intro conjI impI; auto)[1] + apply (clarsimp simp: cte_wp_at_ctes_of descendants_range'_def2 + empty_descendants_range_in') + apply (frule cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def is_aligned_weaken untypedBits_defs) + apply (frule if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) + apply (frule(1) descendants_range_ex_cte'[OF empty_descendants_range_in' _ order_refl], + (simp add: isCap_simps add_mask_fold)+) + by (intro conjI impI; clarsimp) + +end + +lemma deleteObjects_ex_cte_cap_wp_to': + "\invs' and ex_cte_cap_wp_to' P slot and (\s. descendants_of' p (ctes_of s) = {}) + and cte_wp_at' (\cte. \idx d. cteCap cte = UntypedCap d ptr sz idx) p\ + deleteObjects ptr sz + \\rv. 
ex_cte_cap_wp_to' P slot\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule hoare_pre) + apply (simp add: ex_cte_cap_wp_to'_def) + apply wps + apply (wp hoare_vcg_ex_lift) + apply (rule_tac idx=idx in deleteObjects_cte_wp_at') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: ex_cte_cap_wp_to'_def + cte_wp_at_ctes_of) + apply (rule_tac x=cref in exI, simp) + apply (frule_tac p=cref in if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) + apply (frule descendants_range_ex_cte'[rotated, OF _ order_refl, where p=p], + (simp add: isCap_simps empty_descendants_range_in')+) + apply (auto simp: add_mask_fold) + done + +lemma updateCap_cte_cap_wp_to': + "\\s. cte_wp_at' (\cte. p' \ cte_refs' (cteCap cte) (irq_node' s) \ P (cteCap cte) + \ p' \ cte_refs' cap (irq_node' s) \ P cap) p s + \ ex_cte_cap_wp_to' P p' s\ + updateCap p cap + \\rv. ex_cte_cap_wp_to' P p'\" + apply (simp add: ex_cte_cap_wp_to'_def cte_wp_at_ctes_of updateCap_def) + apply (rule hoare_pre, (wp getCTE_wp | wps)+) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (rule_tac x=cref in exI) + apply auto + done + +crunch ct_in_state'[wp]: doMachineOp "ct_in_state' P" + (simp: crunch_simps ct_in_state'_def) + +crunch st_tcb_at'[wp]: doMachineOp "st_tcb_at' P p" + (simp: crunch_simps ct_in_state'_def) + +lemma ex_cte_cap_wp_to_irq_state_independent_H[simp]: + "irq_state_independent_H (ex_cte_cap_wp_to' P slot)" + by (simp add: ex_cte_cap_wp_to'_def) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma updateFreeIndex_ctes_of: + "\\s. P (modify_map (ctes_of s) ptr (cteCap_update (capFreeIndex_update (\_. idx))))\ + updateFreeIndex ptr idx + \\r s. P (ctes_of s)\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateCap_ctes_of_wp getCTE_wp' | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule rsubst[where P=P]) + apply (case_tac cte) + apply (clarsimp simp: modify_map_def fun_eq_iff) + done + +lemma updateFreeIndex_cte_cap_wp_to'[wp]: + "\\s. cte_wp_at' (isUntypedCap o cteCap) p s + \ ex_cte_cap_wp_to' P p' s\ + updateFreeIndex p idx + \\rv. ex_cte_cap_wp_to' P p'\" + apply (simp add: updateFreeIndex_def updateTrackedFreeIndex_def getSlotCap_def) + apply (wp updateCap_cte_cap_wp_to' getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (clarsimp simp: isCap_simps ex_cte_cap_wp_to'_def split: option.split) + done + +lemma setCTE_ct_in_state: + "\ct_in_state' P\ setCTE p cte \\rv. ct_in_state' P\" + apply (rule hoare_name_pre_state) + apply (rule hoare_pre, wp ct_in_state'_decomp setCTE_pred_tcb_at') + apply (auto simp: ct_in_state'_def) + done + +crunch ct_in_state[wp]: updateFreeIndex "ct_in_state' P" +crunch nosch[wp]: updateFreeIndex "\s. P (ksSchedulerAction s)" + +lemma resetUntypedCap_invs_etc: + "\invs' and valid_untyped_inv_wcap' ui + (Some (UntypedCap dev ptr sz idx)) + and ct_active' + and K (\ptr_base ptr' ty us slots. ui = Retype slot True ptr_base ptr' ty us slots dev)\ + resetUntypedCap slot + \\_. invs' and valid_untyped_inv_wcap' ui (Some (UntypedCap dev ptr sz 0)) + and ct_active' + and pspace_no_overlap' ptr sz\, \\_. invs'\" + (is "\invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) and ct_active' and ?asm\ + ?f \\_. invs' and ?vu2 and ct_active' and ?psp\, \\_. 
invs'\") + apply (simp add: resetUntypedCap_def getSlotCap_def + liftE_bind_return_bindE_returnOk bindE_assoc) + apply (rule hoare_vcg_seqE[rotated]) + apply simp + apply (rule getCTE_sp) + apply (rule hoare_name_pre_stateE) + apply (clarsimp split del: if_split) + apply (subgoal_tac "capAligned ?cap") + prefer 2 + apply (frule cte_wp_at_valid_objs_valid_cap', clarsimp+) + apply (clarsimp simp: cte_wp_at_ctes_of capAligned_def valid_cap_simps') + apply (cases "idx = 0") + apply (clarsimp simp: cte_wp_at_ctes_of unlessE_def split del: if_split) + apply wp + apply (clarsimp simp: valid_cap_simps' capAligned_def) + apply (rule cte_wp_at_pspace_no_overlapI'[where cref=slot], + (simp_all add: cte_wp_at_ctes_of)+)[1] + apply (clarsimp simp: unlessE_def cte_wp_at_ctes_of + split del: if_split) + apply (rule_tac B="\_. invs' and valid_untyped_inv_wcap' ?ui (Some ?cap) + and ct_active' and ?psp" in hoare_vcg_seqE[rotated]) + apply clarsimp + apply (rule hoare_pre) + apply (simp add: sch_act_simple_def) + apply (wps ) + apply (wp deleteObject_no_overlap[where idx=idx] + deleteObjects_invs'[where idx=idx and p=slot] + hoare_vcg_ex_lift hoare_vcg_const_Ball_lift + deleteObjects_cte_wp_at'[where idx=idx] + deleteObjects_descendants[where p=slot] + deleteObjects_nosch + deleteObjects_ct_active'[where idx=idx and cref=slot] + deleteObjects_ex_cte_cap_wp_to'[where p=slot]) + apply (clarsimp simp: cte_wp_at_ctes_of descendants_range'_def2 + empty_descendants_range_in' + capAligned_def sch_act_simple_def) + apply (strengthen refl) + apply (frule ctes_of_valid[OF ctes_of_cte_wpD], clarsimp+) + apply (frule if_unsafe_then_capD'[OF ctes_of_cte_wpD], clarsimp+) + apply (erule rev_mp[where P="Ball S f" for S f] + rev_mp[where P="ex_cte_cap_wp_to' P p s" for P p s])+ + apply (strengthen descendants_range_ex_cte'[rotated, OF _ order_refl, mk_strg D _ E]) + apply (clarsimp simp: isCap_simps empty_descendants_range_in' add_mask_fold) + apply auto[1] + apply (cases "dev \ sz < resetChunkBits") + apply (simp add: pred_conj_def unless_def) + apply (rule hoare_pre) + apply (strengthen exI[where x=sz]) + apply (wp updateFreeIndex_clear_invs' + hoare_vcg_ex_lift + hoare_vcg_const_Ball_lift + updateFreeIndex_descendants_of2 + sch_act_simple_lift + pspace_no_overlap'_lift + doMachineOp_psp_no_overlap + updateFreeIndex_ctes_of + updateFreeIndex_cte_wp_at + | simp | wps | wp (once) ex_cte_cap_to'_pres)+ + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps + modify_map_def) + apply auto[1] + apply simp + apply (rule hoare_pre, rule hoare_post_impErr, + rule_tac P="\i. invs' and ?psp and ct_active' and valid_untyped_inv_wcap' ?ui + (Some (UntypedCap dev ptr sz (if i = 0 then idx + else (length [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] - i) * 2 ^ resetChunkBits)))" + and E="\_. 
invs'" + in mapME_x_validE_nth) + apply (rule hoare_pre) + apply simp + apply (wp preemptionPoint_invs + updateFreeIndex_clear_invs' + hoare_vcg_ex_lift + updateFreeIndex_descendants_of2 + updateFreeIndex_ctes_of + updateFreeIndex_cte_wp_at + doMachineOp_psp_no_overlap + hoare_vcg_ex_lift hoare_vcg_const_Ball_lift + pspace_no_overlap'_lift[OF preemptionPoint_inv] + pspace_no_overlap'_lift + updateFreeIndex_ct_in_state[unfolded ct_in_state'_def] + | strengthen invs_pspace_aligned' invs_pspace_distinct' + | simp add: ct_in_state'_def + sch_act_simple_def + | rule hoare_vcg_conj_lift_R + | wp (once) preemptionPoint_inv + | wps + | wp (once) ex_cte_cap_to'_pres)+ + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps + conj_comms) + apply (subgoal_tac "getFreeIndex ptr + (rev [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] ! i) + = (length [ptr , ptr + 2 ^ resetChunkBits .e. getFreeRef ptr idx - 1] - Suc i) * + 2 ^ resetChunkBits") + apply clarsimp + apply (frule ctes_of_valid[OF ctes_of_cte_wpD], clarsimp+) + apply (subgoal_tac "resetChunkBits < word_bits \ sz < word_bits") + apply (strengthen is_aligned_weaken[OF is_aligned_mult_triv2]) + apply (subst nat_less_power_trans2[THEN order_less_imp_le]) + apply (clarsimp simp add: upto_enum_step_def getFreeRef_def) + apply (rule less_imp_diff_less) + apply (simp add: unat_div td_gal_lt[symmetric] power_add[symmetric]) + apply (cases "idx = 0") + apply (simp add: gt0_iff_gem1[symmetric, folded word_neq_0_conv]) + apply (simp add: valid_cap_simps') + apply (subst unat_minus_one) + apply (clarsimp simp: valid_cap_simps') + apply (drule of_nat64_0) + apply (erule order_le_less_trans, simp) + apply simp + apply (clarsimp simp: unat_of_nat valid_cap_simps') + apply (erule order_less_le_trans[rotated], simp) + apply simp + apply (auto simp: Kernel_Config.resetChunkBits_def minUntypedSizeBits_def)[1] + apply (simp add: valid_cap_simps' Kernel_Config.resetChunkBits_def capAligned_def) + apply (simp add: nth_rev) + apply (simp add: upto_enum_step_def upto_enum_word getFreeIndex_def + getFreeRef_def + del: upt.simps) + apply (intro conjI impI, simp_all)[1] + apply (subgoal_tac "resetChunkBits < word_bits") + apply (rule word_unat.Abs_eqD[OF _ word_unat.Rep]) + apply (simp add: word_of_nat_plus Abs_fnat_hom_mult[symmetric]) + apply (simp only: unats_def word_bits_def[symmetric]) + apply (clarsimp simp: unat_div nat_mult_power_less_eq) + apply (rule less_imp_diff_less) + apply (simp add: td_gal_lt[symmetric] power_add[symmetric]) + apply (simp only: unat_lt2p word_bits_def) + apply (simp add: Kernel_Config.resetChunkBits_def word_bits_def) + apply (clarsimp simp: cte_wp_at_ctes_of getFreeRef_def + upto_enum_step_def upto_enum_word) + apply (frule cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def) + apply (simp add: reset_ineq_eq_idx_0) + apply simp + apply clarsimp + done + +end + +lemma whenE_reset_resetUntypedCap_invs_etc: + "\invs' and valid_untyped_inv_wcap' ui + (Some (UntypedCap dev ptr sz idx)) + and ct_active' + and K (\ptr_base ty us slots. ui = Retype slot reset ptr_base ptr' ty us slots dev)\ + whenE reset (resetUntypedCap slot) + \\_. invs' and valid_untyped_inv_wcap' ui (Some (UntypedCap dev ptr sz (if reset then 0 else idx))) + and ct_active' + and pspace_no_overlap' (if reset then ptr else ptr') sz\, \\_. 
invs'\" + apply (rule hoare_pre) + apply (wp whenE_wp resetUntypedCap_invs_etc[where idx=idx, + simplified pred_conj_def conj_assoc] + | simp)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def) + apply (drule_tac cref=slot in cte_wp_at_pspace_no_overlapI', + simp add: cte_wp_at_ctes_of, simp+) + done + +crunch ksCurDomain[wp]: updateFreeIndex "\s. P (ksCurDomain s)" + +end + +lemma (in range_cover) funky_aligned: + "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" + apply (rule aligned_add_aligned) + apply (rule is_aligned_andI1) + apply (rule aligned) + apply (rule is_aligned_mult_triv2) + apply simp + done + +defs canonicalAddressAssert_def: + "canonicalAddressAssert p \ True" (* FIXME AARCH64: (for CRefine) + might need this to be either canonical_user or something in the kernel region -- + both are liftable from AInvs via valid_vspace_uses, but + valid_vspace_uses will first have to be added to the state relation *) + +context begin interpretation Arch . (*FIXME: arch_split*) + +lemma inv_untyped_corres': + "\ untypinv_relation ui ui' \ \ + corres (dc \ (=)) + (einvs and valid_untyped_inv ui and ct_active) + (invs' and valid_untyped_inv' ui' and ct_active') + (invoke_untyped ui) (invokeUntyped ui')" + apply (cases ui) + apply (rule corres_name_pre) + apply (clarsimp simp only: valid_untyped_inv_wcap + valid_untyped_inv_wcap' + Invocations_A.untyped_invocation.simps + Invocations_H.untyped_invocation.simps + untypinv_relation.simps) + apply (rename_tac cref oref reset ptr ptr' dc us slots dev s s' ao' sz sz' idx idx') + proof - + fix cref reset ptr ptr_base us slots dev ao' sz sz' idx idx' s s' + + let ?ui = "Invocations_A.Retype cref reset ptr_base ptr (APIType_map2 (Inr ao')) us slots dev" + let ?ui' = "Invocations_H.untyped_invocation.Retype + (cte_map cref) reset ptr_base ptr ao' us (map cte_map slots) dev" + + assume invs: "invs (s :: det_state)" "ct_active s" "valid_list s" "valid_sched s" + and invs': "invs' s'" "ct_active' s'" + and sr: "(s, s') \ state_relation" + and vui: "valid_untyped_inv_wcap ?ui (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz idx)) s" + (is "valid_untyped_inv_wcap _ (Some ?cap) s") + and vui': "valid_untyped_inv_wcap' ?ui' (Some (UntypedCap dev (ptr && ~~ mask sz') sz' idx')) s'" + assume ui: "ui = ?ui" and ui': "ui' = ?ui'" + + have cte_at: "cte_wp_at ((=) ?cap) cref s" (is "?cte_cond s") + using vui by (simp add:cte_wp_at_caps_of_state) + + have ptr_sz_simp[simp]: "ptr_base = ptr && ~~ mask sz + \ sz' = sz \ idx' = idx \ 2 \ sz" + using cte_at vui vui' sr invs + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (drule pspace_relation_cte_wp_atI'[OF state_relation_pspace_relation]) + apply (simp add:cte_wp_at_ctes_of) + apply (simp add:invs_valid_objs) + apply (clarsimp simp:is_cap_simps isCap_simps) + apply (frule cte_map_inj_eq) + apply ((erule cte_wp_at_weakenE | simp + | clarsimp simp: cte_wp_at_caps_of_state)+)[5] + apply (clarsimp simp:cte_wp_at_caps_of_state cte_wp_at_ctes_of) + apply (drule caps_of_state_valid_cap,fastforce) + apply (clarsimp simp:valid_cap_def untyped_min_bits_def) + done + + have obj_bits_low_bound[simp]: + "minUntypedSizeBits \ obj_bits_api (APIType_map2 (Inr ao')) us" + using vui + apply clarsimp + apply (cases ao') + apply (simp_all add: obj_bits_api_def slot_bits_def arch_kobj_size_def default_arch_object_def + APIType_map2_def bit_simps untyped_min_bits_def minUntypedSizeBits_def + split: 
apiobject_type.splits) + done + + have cover: "range_cover ptr sz + (obj_bits_api (APIType_map2 (Inr ao')) us) (length slots)" + and vslot: "slots\ []" + using vui + by (auto simp: cte_wp_at_caps_of_state) + + have misc'[simp]: + "distinct (map cte_map slots)" + using vui' + by (auto simp: cte_wp_at_ctes_of) + + have intvl_eq[simp]: + "ptr && ~~ mask sz = ptr \ {ptr + of_nat k |k. k < 2 ^ sz} = {ptr..ptr + 2 ^ sz - 1}" + using cover + apply (subgoal_tac "is_aligned (ptr &&~~ mask sz) sz") + apply (rule intvl_range_conv) + apply (simp) + apply (drule range_cover.sz) + apply simp + apply (rule is_aligned_neg_mask,simp) + done + + have delete_objects_rewrite: + "ptr && ~~ mask sz = ptr \ delete_objects ptr sz = + do y \ modify (clear_um {ptr + of_nat k |k. k < 2 ^ sz}); + modify (detype {ptr && ~~ mask sz..ptr + 2 ^ sz - 1}) + od" + using cover + apply (clarsimp simp:delete_objects_def freeMemory_def word_size_def) + apply (subgoal_tac "is_aligned (ptr &&~~ mask sz) sz") + apply (subst mapM_storeWord_clear_um[simplified word_size_def word_size_bits_def]; + clarsimp simp: range_cover_def word_bits_def) + apply (drule_tac z=sz in order_trans[OF obj_bits_low_bound]; + simp add: minUntypedSizeBits_def) + apply (rule is_aligned_neg_mask) + apply simp + done + + have of_nat_length: "(of_nat (length slots)::machine_word) - (1::machine_word) < (of_nat (length slots)::machine_word)" + using vslot + using range_cover.range_cover_le_n_less(1)[OF cover,where p = "length slots"] + apply - + apply (case_tac slots) + apply clarsimp+ + apply (subst add.commute) + apply (subst word_le_make_less[symmetric]) + apply (rule less_imp_neq) + apply (simp add:word_bits_def minus_one_norm) + apply (rule word_of_nat_less) + apply auto + done + + have not_0_ptr[simp]: "ptr\ 0" + using cte_at invs + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (drule(1) caps_of_state_valid)+ + apply (simp add:valid_cap_def) + done + + have size_eq[simp]: "APIType_capBits ao' us = obj_bits_api (APIType_map2 (Inr ao')) us" + apply (case_tac ao') + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type) + apply (clarsimp simp: APIType_capBits_def objBits_simps' arch_kobj_size_def default_arch_object_def + obj_bits_api_def APIType_map2_def slot_bits_def pageBitsForSize_def bit_simps)+ + done + + have non_reset_idx_le[simp]: "\ reset \ idx < 2^sz" + using vui + apply (clarsimp simp: cte_wp_at_caps_of_state ) + apply (erule le_less_trans) + apply (rule unat_less_helper) + apply simp + apply (rule and_mask_less') + using cover + apply (clarsimp simp:range_cover_def) + done + + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex usableUntypedRange.simps + + have vc'[simp] : "s' \' capability.UntypedCap dev (ptr && ~~ mask sz) sz idx" + using vui' invs' + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (case_tac cte) + apply clarsimp + apply (erule ctes_of_valid_cap') + apply (simp add:invs_valid_objs') + done + + have nidx[simp]: "ptr + (of_nat (length slots) * 2^obj_bits_api (APIType_map2 (Inr ao')) us) - (ptr && ~~ mask sz) + = (ptr && mask sz) + (of_nat (length slots) * 2^obj_bits_api (APIType_map2 (Inr ao')) us)" + apply (subst word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr]) + apply simp + done + + have idx_compare'[simp]:"unat ((ptr && mask sz) + (of_nat (length slots)<< obj_bits_api (APIType_map2 (Inr ao')) us)) \ 2 ^ sz" + apply (rule le_trans[OF 
unat_plus_gt]) + apply (simp add:range_cover.unat_of_nat_n_shift[OF cover] range_cover_unat) + apply (insert range_cover.range_cover_compare_bound[OF cover]) + apply simp + done + + have idx_compare''[simp]: + "unat ((ptr && mask sz) + (of_nat (length slots) * (2::machine_word) ^ obj_bits_api (APIType_map2 (Inr ao')) us)) < 2 ^ sz + \ ptr + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us - 1 + < ptr + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us" + apply (rule word_leq_le_minus_one,simp) + apply (rule neq_0_no_wrap) + apply (rule machine_word_plus_mono_right_split) + apply (simp add:shiftl_t2n range_cover_unat[OF cover] field_simps) + apply (simp add:range_cover.sz[where 'a=machine_word_len, folded word_bits_def, OF cover])+ + done + + note neg_mask_add_mask = word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr,symmetric] + + have idx_compare'''[simp]: + "\unat (of_nat (length slots) * (2::machine_word) ^ obj_bits_api (APIType_map2 (Inr ao')) us) < 2 ^ sz; + ptr && ~~ mask sz = ptr\ + \ ptr + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us - 1 + < ptr + of_nat (length slots) * 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us " + apply (rule word_leq_le_minus_one,simp) + apply (simp add:is_aligned_neg_mask_eq'[symmetric]) + apply (rule neq_0_no_wrap) + apply (rule machine_word_plus_mono_right_split[where sz = sz]) + apply (simp add:is_aligned_mask)+ + apply (simp add:range_cover.sz[where 'a=machine_word_len, folded word_bits_def, OF cover])+ + done + + have maxDomain:"ksCurDomain s' \ maxDomain" + using invs' + by (simp add:invs'_def valid_state'_def) + + have sz_mask_less: + "unat (ptr && mask sz) < 2 ^ sz" + using range_cover.sz[OF cover] + by (simp add: unat_less_helper and_mask_less_size word_size) + + have overlap_ranges1: + "{x. ptr \ x \ x \ ptr + 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us + * of_nat (length slots) - 1} \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1}" + apply (rule order_trans[rotated]) + apply (rule range_cover_subset'[OF cover], simp add: vslot) + apply (clarsimp simp: atLeastAtMost_iff field_simps) + done + + have overlap_ranges2: + "idx \ unat (ptr && mask sz) + \ {x. ptr \ x \ x \ ptr + 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us + * of_nat (length slots) - 1} \ {(ptr && ~~ mask sz) + of_nat idx..(ptr && ~~ mask sz) + 2 ^ sz - 1}" + apply (rule order_trans[OF overlap_ranges1]) + apply (clarsimp simp add: atLeastatMost_subset_iff) + apply (rule order_trans, rule word_plus_mono_right) + apply (erule word_of_nat_le) + apply (simp add: add.commute word_plus_and_or_coroll2 word_and_le2) + apply (simp add: add.commute word_plus_and_or_coroll2) + done + + have overlap_ranges: + "{x. 
ptr \ x \ x \ ptr + 2 ^ obj_bits_api (APIType_map2 (Inr ao')) us * of_nat (length slots) - 1} + \ usable_untyped_range (cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))" + apply (cases reset, simp_all add: usable_untyped_range.simps) + apply (rule order_trans, rule overlap_ranges1) + apply (simp add: blah word_and_le2) + apply (rule overlap_ranges2) + apply (cut_tac vui) + apply (clarsimp simp: cte_wp_at_caps_of_state) + done + + have sz_limit[simp]: "sz \ maxUntypedSizeBits" + using vc' unfolding valid_cap'_def by clarsimp + + note set_cap_free_index_invs_spec = set_free_index_invs[where cap = "cap.UntypedCap + dev (ptr && ~~ mask sz) sz (if reset then 0 else idx)" + ,unfolded free_index_update_def free_index_of_def,simplified] + + note msimp[simp add] = neg_mask_add_mask + note if_split[split del] + show " corres (dc \ (=)) ((=) s) ((=) s') + (invoke_untyped ?ui) + (invokeUntyped ?ui')" + apply (clarsimp simp:invokeUntyped_def invoke_untyped_def getSlotCap_def bind_assoc) + apply (insert cover) + apply (rule corres_guard_imp) + apply (rule corres_split_norE) + apply (rule corres_whenE, simp) + apply (rule resetUntypedCap_corres[where ui=ui and ui'=ui']) + apply (simp add: ui ui') + apply simp + apply simp + apply (rule corres_symb_exec_l_Ex) + apply (rule_tac F = "cap = cap.UntypedCap dev (ptr && ~~ mask sz) + sz (if reset then 0 else idx)" in corres_gen_asm) + apply (rule corres_add_noop_lhs) + apply (rule corres_split_nor[OF cNodeNoOverlap _ return_wp stateAssert_wp]) + apply (clarsimp simp: canonicalAddressAssert_def) + apply (rule corres_split[OF updateFreeIndex_corres]) + apply (simp add:isCap_simps)+ + apply (clarsimp simp:getFreeIndex_def bits_of_def shiftL_nat shiftl_t2n + free_index_of_def) + apply (insert range_cover.range_cover_n_less[OF cover] vslot) + apply (rule createNewObjects_corres_helper) + apply simp+ + apply (simp add: insertNewCaps_def) + apply (rule corres_split_retype_createNewCaps[where sz = sz,OF corres_rel_imp]) + apply (rule inv_untyped_corres_helper1) + apply simp + apply simp + apply ((wp retype_region_invs_extras[where sz = sz] + retype_region_plain_invs [where sz = sz] + retype_region_descendants_range_ret[where sz = sz] + retype_region_caps_overlap_reserved_ret[where sz = sz] + retype_region_cte_at_other[where sz = sz] + retype_region_distinct_sets[where sz = sz] + retype_region_ranges[where p=cref and sz = sz] + retype_ret_valid_caps [where sz = sz] + retype_region_arch_objs [where sza = "\_. 
sz"] + hoare_vcg_const_Ball_lift + set_tuple_pick distinct_tuple_helper + retype_region_obj_at_other3[where sz = sz] + | assumption)+)[1] + apply (wp set_tuple_pick createNewCaps_cte_wp_at'[where sz= sz] + hoare_vcg_ex_lift distinct_tuple_helper + createNewCaps_parent_helper [where p="cte_map cref" and sz = sz] + createNewCaps_valid_pspace_extras [where ptr=ptr and sz = sz] + createNewCaps_ranges'[where sz = sz] + hoare_vcg_const_Ball_lift createNewCaps_valid_cap'[where sz = sz] + createNewCaps_descendants_range_ret'[where sz = sz] + createNewCaps_caps_overlap_reserved_ret'[where sz = sz]) + apply clarsimp + apply (erule cte_wp_at_weakenE') + apply (case_tac c, simp) + apply hypsubst + apply (case_tac c,clarsimp simp:isCap_simps) + apply (clarsimp simp: getFreeIndex_def is_cap_simps bits_of_def shiftL_nat) + apply (clarsimp simp:conj_comms) + apply (strengthen invs_mdb invs_valid_objs + invs_valid_pspace invs_arch_state invs_psp_aligned + caps_region_kernel_window_imp[where p=cref] + invs_cap_refs_in_kernel_window)+ + apply (clarsimp simp:conj_comms bits_of_def) + apply (wp set_cap_free_index_invs_spec set_cap_caps_no_overlap set_cap_no_overlap) + apply (rule hoare_vcg_conj_lift) + apply (rule hoare_strengthen_post[OF set_cap_sets]) + apply (clarsimp simp:cte_wp_at_caps_of_state) + apply (wp set_cap_no_overlap hoare_vcg_ball_lift + set_cap_free_index_invs_spec + set_cap_descendants_range_in + set_untyped_cap_caps_overlap_reserved[where + idx="if reset then 0 else idx"] + set_cap_cte_wp_at + | strengthen exI[where x=cref])+ + apply (clarsimp simp:conj_comms ball_conj_distrib simp del:capFreeIndex_update.simps) + apply (strengthen invs_pspace_aligned' invs_pspace_distinct' + invs_valid_pspace' invs_arch_state' + imp_consequent[where Q = "(\x. x \ cte_map ` set slots)"] + | clarsimp simp: conj_comms simp del: capFreeIndex_update.simps)+ + apply ((wp updateFreeIndex_forward_invs' updateFreeIndex_caps_overlap_reserved + updateFreeIndex_caps_no_overlap'' updateFreeIndex_pspace_no_overlap' + hoare_vcg_const_Ball_lift updateFreeIndex_cte_wp_at + updateFreeIndex_descendants_range_in')+)[1] + apply clarsimp + apply (clarsimp simp:conj_comms) + apply (strengthen invs_mdb invs_valid_objs + invs_valid_pspace invs_arch_state invs_psp_aligned + invs_distinct) + apply (clarsimp simp:conj_comms ball_conj_distrib ex_in_conv) + apply ((rule validE_R_validE)?, + rule_tac Q'="\_ s. valid_etcbs s \ valid_list s \ invs s \ ct_active s + \ valid_untyped_inv_wcap ui + (Some (cap.UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s + \ (reset \ pspace_no_overlap {ptr && ~~ mask sz..(ptr && ~~ mask sz) + 2 ^ sz - 1} s) + " in hoare_post_imp_R) + apply (simp add: whenE_def, wp) + apply (rule validE_validE_R, rule hoare_post_impErr, rule reset_untyped_cap_invs_etc, auto)[1] + apply wp + apply (clarsimp simp: ui cte_wp_at_caps_of_state + bits_of_def untyped_range.simps) + apply (frule(1) valid_global_refsD2[OF _ invs_valid_global_refs]) + apply (cut_tac cref="cref" and reset=reset + in invoke_untyped_proofs.intro, + simp_all add: cte_wp_at_caps_of_state)[1] + apply (rule conjI, (assumption | rule refl))+ + apply (simp split: if_split) + + apply (simp add: invoke_untyped_proofs.simps) + apply (strengthen if_split[where P="\v. v \ unat x" for x, THEN iffD2] + exI[where x=cref]) + apply (simp add: arg_cong[OF mask_out_sub_mask, where f="\y. x - y" for x] + field_simps invoke_untyped_proofs.idx_le_new_offs + if_split[where P="\v. 
v \ unat x" for x]) + apply (frule range_cover.sz(1), fold word_bits_def) + apply (frule cte_wp_at_pspace_no_overlapI, + simp add: cte_wp_at_caps_of_state, simp split: if_split, + simp add: invoke_untyped_proofs.szw) + apply (simp add: field_simps conj_comms ex_in_conv + cte_wp_at_caps_of_state + in_get_cap_cte_wp_at + atLeastatMost_subset_iff[where b=x and d=x for x] + word_and_le2) + apply (intro conjI impI) + + (* offs *) + apply (drule(1) invoke_untyped_proofs.idx_le_new_offs) + apply simp + + (* usable untyped range *) + apply (simp add: shiftL_nat shiftl_t2n overlap_ranges) + + apply (rule order_trans, erule invoke_untyped_proofs.subset_stuff) + apply (simp add: blah word_and_le2) + + apply (drule invoke_untyped_proofs.usable_range_disjoint) + apply (clarsimp simp: field_simps mask_out_sub_mask shiftl_t2n) + + apply ((rule validE_validE_R)?, rule hoare_post_impErr, + rule whenE_reset_resetUntypedCap_invs_etc[where ptr="ptr && ~~ mask sz" + and ptr'=ptr and sz=sz and idx=idx and ui=ui' and dev=dev]) + + prefer 2 + apply simp + apply clarsimp + apply (simp only: ui') + apply (frule(2) invokeUntyped_proofs.intro) + apply (clarsimp simp: cte_wp_at_ctes_of + invokeUntyped_proofs.caps_no_overlap' + invokeUntyped_proofs.ps_no_overlap' + invokeUntyped_proofs.descendants_range + if_split[where P="\v. v \ getFreeIndex x y" for x y] + empty_descendants_range_in' + invs_pspace_aligned' invs_pspace_distinct' + invs_ksCurDomain_maxDomain' + cong: if_cong) + apply (strengthen refl) + apply (frule invokeUntyped_proofs.idx_le_new_offs) + apply (frule invokeUntyped_proofs.szw) + apply (frule invokeUntyped_proofs.descendants_range(2), simp) + apply (clarsimp simp: getFreeIndex_def conj_comms shiftL_nat + is_aligned_weaken[OF range_cover.funky_aligned] + invs_valid_pspace' isCap_simps + arg_cong[OF mask_out_sub_mask, where f="\y. x - y" for x] + field_simps) + + apply (intro conjI) + (* pspace_no_overlap' *) + apply (cases reset, simp_all)[1] + apply (rule order_trans[rotated], + erule invokeUntyped_proofs.idx_compare') + apply (simp add: shiftl_t2n mult.commute) + apply (drule invokeUntyped_proofs.subset_stuff, simp, + erule order_trans, simp add: blah word_and_le2 add_mask_fold) + apply (auto simp: add_mask_fold split: if_split)[1] + apply (drule invokeUntyped_proofs.usableRange_disjoint, simp) + apply (clarsimp simp only: pred_conj_def invs ui) + apply (strengthen vui) + apply (cut_tac vui invs invs') + apply (clarsimp simp: cte_wp_at_caps_of_state valid_sched_etcbs) + apply (cut_tac vui' invs') + apply (clarsimp simp: ui cte_wp_at_ctes_of if_apply_def2 ui') + done +qed + +lemmas inv_untyped_corres = inv_untyped_corres' + +crunches insertNewCap, doMachineOp + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + (wp: crunch_wps) + +lemma sts_valid_untyped_inv': + "\valid_untyped_inv' ui\ setThreadState st t \\rv. valid_untyped_inv' ui\" + apply (cases ui, simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF setThreadState_ksInterruptState]) + apply (wp hoare_vcg_const_Ball_lift hoare_vcg_ex_lift | simp)+ + done + +crunch nosch[wp]: invokeUntyped "\s. P (ksSchedulerAction s)" + (simp: crunch_simps zipWithM_x_mapM + wp: crunch_wps unless_wp mapME_x_inv_wp preemptionPoint_inv) + +crunch no_0_obj'[wp]: insertNewCap no_0_obj' + (wp: crunch_wps) + +lemma insertNewCap_valid_pspace': + "\\s. valid_pspace' s \ s \' cap + \ slot \ parent \ caps_overlap_reserved' (untypedRange cap) s + \ cte_wp_at' (\cte. 
isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) parent s + \ \ isZombie cap \ descendants_range' cap parent (ctes_of s)\ + insertNewCap parent slot cap + \\rv. valid_pspace'\" + apply (simp add: valid_pspace'_def) + apply (wp insertNewCap_valid_mdb) + apply simp_all + done + +crunches insertNewCap + for tcb'[wp]: "tcb_at' t" + and inQ[wp]: "obj_at' (inQ d p) t" + and norqL1[wp]: "\s. P (ksReadyQueuesL1Bitmap s)" + and norqL2[wp]: "\s. P (ksReadyQueuesL2Bitmap s)" + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + and idle'[wp]: "valid_idle'" + and global_refs': "\s. P (global_refs' s)" + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + and irq_states' [wp]: valid_irq_states' + and vq'[wp]: valid_queues' + and irqs_masked' [wp]: irqs_masked' + and valid_machine_state'[wp]: valid_machine_state' + and pspace_domain_valid[wp]: pspace_domain_valid + and ct_not_inQ[wp]: "ct_not_inQ" + and tcbState_inv[wp]: "obj_at' (\tcb. P (tcbState tcb)) t" + and tcbDomain_inv[wp]: "obj_at' (\tcb. P (tcbDomain tcb)) t" + and tcbPriority_inv[wp]: "obj_at' (\tcb. P (tcbPriority tcb)) t" + (wp: crunch_wps) + +crunch if_unsafe_then_cap'[wp]: updateNewFreeIndex "if_unsafe_then_cap'" + +lemma insertNewCap_ifunsafe'[wp]: + "\if_unsafe_then_cap' and ex_cte_cap_to' slot\ + insertNewCap parent slot cap + \\rv s. if_unsafe_then_cap' s\" + apply (simp add: insertNewCap_def) + apply (rule hoare_pre) + apply (wp getCTE_wp' | clarsimp simp: ifunsafe'_def3)+ + apply (clarsimp simp: ex_cte_cap_to'_def cte_wp_at_ctes_of cteCaps_of_def) + apply (drule_tac x=cref in spec) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=crefa in exI, fastforce) + apply clarsimp + apply (rule_tac x=cref' in exI, fastforce) + done + +crunch if_live_then_nonz_cap'[wp]: updateNewFreeIndex "if_live_then_nonz_cap'" + +lemma insertNewCap_iflive'[wp]: + "\if_live_then_nonz_cap'\ insertNewCap parent slot cap \\rv. if_live_then_nonz_cap'\" + apply (simp add: insertNewCap_def) + apply (wp setCTE_iflive' getCTE_wp') + apply (clarsimp elim!: cte_wp_at_weakenE') + done + +lemma insertNewCap_cte_wp_at'': + "\cte_wp_at' (\cte. P (cteCap cte)) p and K (\ P NullCap)\ + insertNewCap parent slot cap + \\rv s. cte_wp_at' (P \ cteCap) p s\" + apply (simp add: insertNewCap_def tree_cte_cteCap_eq) + apply (wp getCTE_wp') + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def) + done + +lemmas insertNewCap_cte_wp_at' = insertNewCap_cte_wp_at''[unfolded o_def] + +lemma insertNewCap_cap_to'[wp]: + "\ex_cte_cap_to' p\ insertNewCap parent slot cap \\rv. ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node'[OF insertNewCap_ksInterrupt]) + apply (wp hoare_vcg_ex_lift insertNewCap_cte_wp_at') + apply clarsimp + done + +lemma insertNewCap_nullcap: + "\P and cte_wp_at' (\cte. cteCap cte = NullCap) slot\ insertNewCap parent slot cap \Q\ + \ \P\ insertNewCap parent slot cap \Q\" + apply (clarsimp simp: valid_def) + apply (subgoal_tac "cte_wp_at' (\cte. cteCap cte = NullCap) slot s") + apply fastforce + apply (clarsimp simp: insertNewCap_def in_monad cte_wp_at_ctes_of liftM_def + dest!: use_valid [OF _ getCTE_sp[where P="(=) s" for s], OF _ refl]) + done + +lemma insertNewCap_valid_global_refs': + "\valid_global_refs' and + cte_wp_at' (\cte. capRange cap \ capRange (cteCap cte) + \ capBits cap \ capBits (cteCap cte)) parent\ + insertNewCap parent slot cap + \\rv. 
valid_global_refs'\" + apply (simp add: valid_global_refs'_def valid_refs'_cteCaps valid_cap_sizes_cteCaps) + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=global_refs', OF insertNewCap_global_refs']) + apply (rule hoare_use_eq [where f=gsMaxObjectSize]) + apply wp+ + apply (clarsimp simp: cte_wp_at_ctes_of cteCaps_of_def ball_ran_eq) + apply (frule power_increasing[where a=2], simp) + apply (blast intro: order_trans) + done + +lemma insertNewCap_valid_irq_handlers: + "\valid_irq_handlers' and (\s. \irq. cap = IRQHandlerCap irq \ irq_issued' irq s)\ + insertNewCap parent slot cap + \\rv. valid_irq_handlers'\" + apply (simp add: insertNewCap_def valid_irq_handlers'_def irq_issued'_def) + apply (wp | wp (once) hoare_use_eq[where f=ksInterruptState, OF updateNewFreeIndex_ksInterrupt])+ + apply (simp add: cteCaps_of_def) + apply (wp | wp (once) hoare_use_eq[where f=ksInterruptState, OF setCTE_ksInterruptState] + getCTE_wp)+ + apply (clarsimp simp: cteCaps_of_def cte_wp_at_ctes_of ran_def) + apply auto + done + +lemma insertNewCap_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain' and ct_active'\ insertNewCap parent slot cap \\_. ct_idle_or_in_cur_domain'\" +apply (wp ct_idle_or_in_cur_domain'_lift_futz[where Q=\]) +apply (rule_tac Q="\_. obj_at' (\tcb. tcbState tcb \ Structures_H.thread_state.Inactive) t and obj_at' (\tcb. d = tcbDomain tcb) t" + in hoare_strengthen_post) +apply (wp | clarsimp elim: obj_at'_weakenE)+ +apply (auto simp: obj_at'_def) +done + +crunch ksDomScheduleIdx[wp]: insertNewCap "\s. P (ksDomScheduleIdx s)" + (wp: crunch_simps hoare_drop_imps) + +lemma capRange_subset_capBits: + "capAligned cap \ capAligned cap' + \ capRange cap \ capRange cap' + \ capRange cap \ {} + \ capBits cap \ capBits cap'" + supply + is_aligned_neg_mask_eq[simp del] + is_aligned_neg_mask_weaken[simp del] + apply (simp add: capRange_def capAligned_def is_aligned_no_overflow + split: if_split_asm del: atLeastatMost_subset_iff) + apply (frule_tac c="capUntypedPtr cap" in subsetD) + apply (simp only: mask_in_range[symmetric]) + apply (simp add: is_aligned_neg_mask_eq) + apply (drule_tac c="(capUntypedPtr cap && ~~ mask (capBits cap)) + || (~~ capUntypedPtr cap' && mask (capBits cap))" in subsetD) + apply (simp_all only: mask_in_range[symmetric]) + apply (simp add: word_ao_dist is_aligned_neg_mask_eq) + apply (simp add: word_ao_dist) + apply (cases "capBits cap = 0") + apply simp + apply (drule_tac f="\x. x !! (capBits cap - 1)" + and x="a || b" for a b in arg_cong) + apply (simp add: word_ops_nth_size word_bits_def word_size) + apply auto + done + +lemma insertNewCap_urz[wp]: + "\untyped_ranges_zero' and valid_objs' and valid_mdb'\ + insertNewCap parent slot cap \\rv. untyped_ranges_zero'\" + apply (simp add: insertNewCap_def updateNewFreeIndex_def) + apply (wp getCTE_cteCap_wp + | simp add: updateTrackedFreeIndex_def getSlotCap_def case_eq_if_isUntypedCap + split: option.split split del: if_split + | wps | wp (once) getCTE_wp')+ + apply (clarsimp simp: cte_wp_at_ctes_of fun_upd_def[symmetric]) + apply (strengthen untyped_ranges_zero_fun_upd[mk_strg I E]) + apply (intro conjI impI; clarsimp simp: isCap_simps) + apply (auto simp add: cteCaps_of_def untypedZeroRange_def isCap_simps) + done + +crunches insertNewCap + for valid_arch'[wp]: valid_arch_state' + (wp: crunch_wps) + +lemma insertNewCap_invs': + "\invs' and ct_active' + and valid_cap' cap + and cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + sameRegionAs (cteCap cte) cap) parent + and K (\ isZombie cap) and (\s. 
descendants_range' cap parent (ctes_of s)) + and caps_overlap_reserved' (untypedRange cap) + and ex_cte_cap_to' slot + and (\s. ksIdleThread s \ capRange cap) + and (\s. \irq. cap = IRQHandlerCap irq \ irq_issued' irq s)\ + insertNewCap parent slot cap + \\rv. invs'\" + apply (rule insertNewCap_nullcap) + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp insertNewCap_valid_pspace' sch_act_wf_lift + valid_queues_lift cur_tcb_lift tcb_in_cur_domain'_lift + insertNewCap_valid_global_refs' + valid_irq_node_lift insertNewCap_valid_irq_handlers) + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (frule ctes_of_valid[rotated, where p=parent, OF valid_pspace_valid_objs']) + apply (fastforce simp: cte_wp_at_ctes_of) + apply (auto simp: isCap_simps sameRegionAs_def3 + intro!: capRange_subset_capBits + elim: valid_capAligned) + done + +lemma insertNewCap_irq_issued'[wp]: + "\\s. P (irq_issued' irq s)\ insertNewCap parent slot cap \\rv s. P (irq_issued' irq s)\" + by (simp add: irq_issued'_def, wp) + +lemma insertNewCap_ct_in_state'[wp]: + "\ct_in_state' p\insertNewCap parent slot cap \\rv. ct_in_state' p\" + unfolding ct_in_state'_def + apply (rule hoare_pre) + apply wps + apply wp + apply simp + done + +lemma zipWithM_x_insertNewCap_invs'': + "\\s. invs' s \ ct_active' s \ (\tup \ set ls. s \' snd tup) + \ cte_wp_at' (\cte. isUntypedCap (cteCap cte) \ + (\tup \ set ls. sameRegionAs (cteCap cte) (snd tup))) parent s + \ (\tup \ set ls. \ isZombie (snd tup)) + \ (\tup \ set ls. ex_cte_cap_to' (fst tup) s) + \ (\tup \ set ls. descendants_range' (snd tup) parent (ctes_of s)) + \ (\tup \ set ls. ksIdleThread s \ capRange (snd tup)) + \ (\tup \ set ls. caps_overlap_reserved' (capRange (snd tup)) s) + \ distinct_sets (map capRange (map snd ls)) + \ (\irq. IRQHandlerCap irq \ set (map snd ls) \ irq_issued' irq s) + \ distinct (map fst ls)\ + mapM (\(x, y). insertNewCap parent x y) ls + \\rv. invs'\" + apply (induct ls) + apply (simp add: mapM_def sequence_def) + apply (wp, simp) + apply (simp add: mapM_Cons) + including no_pre apply wp + apply (thin_tac "valid P f Q" for P f Q) + apply clarsimp + apply (rule hoare_pre) + apply (wp insertNewCap_invs' + hoare_vcg_const_Ball_lift + insertNewCap_cte_wp_at' insertNewCap_ranges + hoare_vcg_all_lift insertNewCap_pred_tcb_at')+ + apply (clarsimp simp: cte_wp_at_ctes_of invs_mdb' invs_valid_objs' dest!:valid_capAligned) + apply (drule caps_overlap_reserved'_subseteq[OF _ untypedRange_in_capRange]) + apply (auto simp:comp_def) + done + +lemma createNewCaps_not_isZombie[wp]: + "\\\ createNewCaps ty ptr bits sz d \\rv s. (\cap \ set rv. \ isZombie cap)\" + apply (simp add: createNewCaps_def toAPIType_def + cong: option.case_cong if_cong apiobject_type.case_cong) + apply (wpsimp wp: undefined_valid simp: isCap_simps) + done + +lemma createNewCaps_cap_to': + "\\s. ex_cte_cap_to' p s \ 0 < n + \ range_cover ptr sz (APIType_capBits ty us) n + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s\ + createNewCaps ty ptr n us d + \\rv. ex_cte_cap_to' p\" + apply (simp add: ex_cte_cap_to'_def) + apply (wp hoare_vcg_ex_lift + hoare_use_eq_irq_node' [OF createNewCaps_ksInterrupt + createNewCaps_cte_wp_at']) + apply fastforce + done + +lemma createNewCaps_idlethread_ranges[wp]: + "\\s. 0 < n \ range_cover ptr sz (APIType_capBits tp us) n + \ ksIdleThread s \ {ptr .. (ptr && ~~ mask sz) + 2 ^ sz - 1}\ + createNewCaps tp ptr n us d + \\rv s. \cap\set rv. 
ksIdleThread s \ capRange cap\" + apply (rule hoare_as_subst [OF createNewCaps_it]) + apply (rule hoare_assume_pre) + apply (rule hoare_chain, rule createNewCaps_range_helper2) + apply fastforce + apply blast + done + +lemma createNewCaps_IRQHandler[wp]: + "\\\ + createNewCaps tp ptr sz us d + \\rv s. IRQHandlerCap irq \ set rv \ P rv s\" + apply (simp add: createNewCaps_def split del: if_split) + apply (rule hoare_pre) + apply (wp | wpc | simp add: image_def | rule hoare_pre_cont)+ + done + +lemma createNewCaps_ct_active': + "\ct_active' and pspace_aligned' and pspace_distinct' and pspace_no_overlap' ptr sz and K (range_cover ptr sz (APIType_capBits ty us) n \ 0 < n)\ + createNewCaps ty ptr n us d + \\_. ct_active'\" + apply (simp add: ct_in_state'_def) + apply (rule hoare_pre) + apply wps + apply (wp createNewCaps_pred_tcb_at'[where sz=sz]) + apply simp + done + +crunch gsMaxObjectSize[wp]: deleteObjects "\s. P (gsMaxObjectSize s)" + (simp: unless_def wp: crunch_wps) + +crunch gsMaxObjectSize[wp]: updateFreeIndex "\s. P (gsMaxObjectSize s)" + +crunch ksIdleThread[wp]: updateFreeIndex "\s. P (ksIdleThread s)" + +lemma invokeUntyped_invs'': + assumes insertNew_Q[wp]: "\p cref cap. + \Q\ insertNewCap p cref cap \\_. Q\" + assumes createNew_Q: "\tp ptr n us sz dev. \\s. Q s + \ range_cover ptr sz (APIType_capBits tp us) n + \ (tp = APIObjectType ArchTypes_H.apiobject_type.CapTableObject \ 0 < us) + \ 0 < n \ valid_pspace' s \ pspace_no_overlap' ptr sz s\ + createNewCaps tp ptr n us dev \\_. Q\" + assumes set_free_Q[wp]: "\slot idx. \invs' and Q\ updateFreeIndex slot idx \\_.Q\" + assumes reset_Q: "\Q'\ resetUntypedCap (case ui of Invocations_H.Retype src_slot _ _ _ _ _ _ _ \ src_slot) \\_. Q\" + shows "\invs' and valid_untyped_inv' ui + and (\s. (case ui of Invocations_H.Retype _ reset _ _ _ _ _ _ \ reset) \ Q' s) + and Q and ct_active'\ + invokeUntyped ui + \\rv. 
invs' and Q\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp only: pred_conj_def valid_untyped_inv_wcap') + proof - + fix s sz idx + assume vui1: "valid_untyped_inv_wcap' ui + (Some (case ui of + Invocations_H.untyped_invocation.Retype slot reset ptr_base ptr ty us slots d \ + capability.UntypedCap d (ptr && ~~ mask sz) sz idx)) s" + assume misc: "invs' s" "Q s" "ct_active' s" + "(case ui of + Invocations_H.untyped_invocation.Retype x reset _ _ _ _ _ _ \ reset) \ + Q' s" + + obtain cref reset ptr tp us slots dev + where pf: "invokeUntyped_proofs s cref reset (ptr && ~~ mask sz) ptr tp us slots sz idx dev" + and ui: "ui = Invocations_H.Retype cref reset (ptr && ~~ mask sz) ptr tp us slots dev" + using vui1 misc + apply (cases ui, simp only: Invocations_H.untyped_invocation.simps) + apply (frule(2) invokeUntyped_proofs.intro) + apply clarsimp + apply (unfold cte_wp_at_ctes_of) + apply (drule meta_mp; clarsimp) + done + + note vui = vui1[simplified ui Invocations_H.untyped_invocation.simps] + + have cover: "range_cover ptr sz (APIType_capBits tp us) (length slots)" + and slots: "cref \ set slots" "distinct slots" "slots \ []" + and tps: "tp = APIObjectType ArchTypes_H.apiobject_type.CapTableObject \ 0 < us" + "tp = APIObjectType ArchTypes_H.apiobject_type.Untyped \ minUntypedSizeBits \ us \ us \ maxUntypedSizeBits" + using vui + by (clarsimp simp: ui cte_wp_at_ctes_of)+ + + note not_0_ptr[simp] = invokeUntyped_proofs.not_0_ptr [OF pf] + note subset_stuff[simp] = invokeUntyped_proofs.subset_stuff[OF pf] + + have non_detype_idx_le[simp]: "~ reset \ idx < 2^sz" + using vui ui + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (erule le_less_trans) + apply (rule unat_less_helper) + apply simp + apply (rule le_less_trans) + apply (rule word_and_le1) + apply (simp add:mask_def) + apply (rule word_leq_le_minus_one) + apply simp + apply (clarsimp simp:range_cover_def) + done + + note blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff Int_atLeastAtMost + atLeastatMost_empty_iff split_paired_Ex usableUntypedRange.simps + note descendants_range[simp] = invokeUntyped_proofs.descendants_range[OF pf] + note vc'[simp] = invokeUntyped_proofs.vc'[OF pf] + note ps_no_overlap'[simp] = invokeUntyped_proofs.ps_no_overlap'[OF pf] + note caps_no_overlap'[simp] = invokeUntyped_proofs.caps_no_overlap'[OF pf] + note ex_cte_no_overlap' = invokeUntyped_proofs.ex_cte_no_overlap'[OF pf] + note cref_inv = invokeUntyped_proofs.cref_inv[OF pf] + note slots_invD = invokeUntyped_proofs.slots_invD[OF pf] + note nidx[simp] = add_minus_neg_mask[where ptr = ptr] + note idx_compare' = invokeUntyped_proofs.idx_compare'[OF pf] + note sz_limit[simp] = invokeUntyped_proofs.sz_limit[OF pf] + + have valid_global_refs': "valid_global_refs' s" + using misc by auto + + have mapM_insertNewCap_Q: + "\caps. \Q\ mapM (\(x, y). insertNewCap cref x y) (zip slots caps) \\rv. Q\" + by (wp mapM_wp' | clarsimp)+ + + note reset_Q' = reset_Q[simplified ui, simplified] + + note neg_mask_add_mask = word_plus_and_or_coroll2[symmetric,where w = "mask sz" and t = ptr,symmetric] + note msimp[simp add] = misc neg_mask_add_mask + show "\(=) s\ invokeUntyped ui \\rv s. invs' s \ Q s\" + including no_pre + apply (clarsimp simp:invokeUntyped_def getSlotCap_def ui) + apply (rule validE_valid) + apply (rule hoare_pre) + apply (rule_tac B="\_ s. 
invs' s \ Q s \ ct_active' s + \ valid_untyped_inv_wcap' ui + (Some (UntypedCap dev (ptr && ~~ mask sz) sz (if reset then 0 else idx))) s + \ (reset \ pspace_no_overlap' (ptr && ~~ mask sz) sz s) + " in hoare_vcg_seqE[rotated]) + apply (simp only: whenE_def) + apply wp + apply (rule hoare_post_impErr, rule combine_validE, + rule resetUntypedCap_invs_etc, rule valid_validE, rule reset_Q') + apply (clarsimp simp only: if_True) + apply auto[1] + apply simp + apply wp[1] + prefer 2 + apply (cut_tac vui1 misc) + apply (clarsimp simp: ui cte_wp_at_ctes_of simp del: misc) + apply auto[1] + apply (rule hoare_pre) + apply (wp createNewObjects_wp_helper[where sz = sz]) + apply (simp add: slots)+ + apply (rule cover) + apply (simp add: slots)+ + apply (clarsimp simp:insertNewCaps_def) + apply (wp zipWithM_x_insertNewCap_invs'' + set_tuple_pick distinct_tuple_helper + hoare_vcg_const_Ball_lift + createNewCaps_invs'[where sz = sz] + createNewCaps_valid_cap[where sz = sz,OF cover] + createNewCaps_parent_helper[where sz = sz] + createNewCaps_cap_to'[where sz = sz] + createNewCaps_descendants_range_ret'[where sz = sz] + createNewCaps_caps_overlap_reserved_ret'[where sz = sz] + createNewCaps_ranges[where sz = sz] + createNewCaps_ranges'[where sz = sz] + createNewCaps_IRQHandler + createNewCaps_ct_active'[where sz=sz] + mapM_insertNewCap_Q + | simp add: zipWithM_x_mapM slots tps)+ + apply (wp hoare_vcg_all_lift) + apply (wp hoare_strengthen_post[OF createNewCaps_IRQHandler]) + apply (intro impI) + apply (erule impE) + apply (erule(1) snd_set_zip_in_set) + apply (simp add: conj_comms, wp createNew_Q[where sz=sz]) + apply (wp hoare_strengthen_post[OF createNewCaps_range_helper[where sz = sz]]) + apply (clarsimp simp: slots) + apply (clarsimp simp:conj_comms ball_conj_distrib pred_conj_def + simp del:capFreeIndex_update.simps) + apply (strengthen invs_pspace_aligned' invs_pspace_distinct' + invs_valid_pspace' invs_arch_state' + imp_consequent[where Q = "(\x. x \ set slots)"] + | clarsimp simp: conj_comms simp del: capFreeIndex_update.simps)+ + apply (wp updateFreeIndex_forward_invs' updateFreeIndex_caps_overlap_reserved + updateFreeIndex_caps_no_overlap'' updateFreeIndex_pspace_no_overlap' + hoare_vcg_const_Ball_lift + updateFreeIndex_cte_wp_at + updateCap_cte_cap_wp_to') + apply (wp updateFreeIndex_caps_overlap_reserved + updateFreeIndex_descendants_range_in' getCTE_wp | simp)+ + apply (clarsimp simp only: ui) + apply (frule(2) invokeUntyped_proofs.intro) + apply (frule invokeUntyped_proofs.idx_le_new_offs) + apply (frule invokeUntyped_proofs.szw) + apply (frule invokeUntyped_proofs.descendants_range(2), simp) + apply (frule invokeUntyped_proofs.idx_compare') + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps getFreeIndex_def + shiftL_nat shiftl_t2n mult.commute + if_split[where P="\x. x \ unat v" for v] + invs_valid_pspace' invs_ksCurDomain_maxDomain' + invokeUntyped_proofs.caps_no_overlap' + invokeUntyped_proofs.usableRange_disjoint + split del: if_split) + apply (strengthen refl) + apply simp + apply (intro conjI; assumption?) 
+ apply (erule is_aligned_weaken[OF range_cover.funky_aligned]) + apply (simp add: APIType_capBits_def objBits_simps' bit_simps untypedBits_defs + split: object_type.split apiobject_type.split)[1] + apply (cases reset) + apply (clarsimp simp: bit_simps) + apply (clarsimp simp: invokeUntyped_proofs.ps_no_overlap') + apply (drule invs_valid_global') + apply (clarsimp simp: valid_global_refs'_def cte_at_valid_cap_sizes_0) + apply (auto)[1] + apply (frule valid_global_refsD', clarsimp) + apply (clarsimp simp: Int_commute) + apply (erule disjoint_subset2[rotated]) + apply (simp add: blah word_and_le2) + apply (rule order_trans, erule invokeUntyped_proofs.subset_stuff) + apply (simp add: blah word_and_le2 add_mask_fold) + apply (frule valid_global_refsD2', clarsimp) + apply (clarsimp simp: global_refs'_def) + apply (erule notE, erule subsetD[rotated], simp add: blah word_and_le2) + done +qed + +lemma invokeUntyped_invs'[wp]: + "\invs' and valid_untyped_inv' ui and ct_active'\ + invokeUntyped ui + \\rv. invs'\" + apply (wp invokeUntyped_invs''[where Q=\, simplified hoare_post_taut, simplified]) + apply auto + done + +crunch pred_tcb_at'[wp]: updateFreeIndex "pred_tcb_at' pr P p" + +lemma resetUntypedCap_st_tcb_at': + "\invs' and st_tcb_at' (P and ((\) Inactive) and ((\) IdleThreadState)) t + and cte_wp_at' (\cp. isUntypedCap (cteCap cp)) slot + and ct_active' and sch_act_simple and (\s. descendants_of' slot (ctes_of s) = {})\ + resetUntypedCap slot + \\_. st_tcb_at' P t\" + apply (rule hoare_name_pre_state) + apply (clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (simp add: resetUntypedCap_def) + apply (rule hoare_pre) + apply (wp mapME_x_inv_wp preemptionPoint_inv + deleteObjects_st_tcb_at'[where p=slot] getSlotCap_wp + | simp add: unless_def + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: cte_wp_at_ctes_of) + apply (strengthen refl) + apply (rule exI, strengthen refl) + apply (frule cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD], clarsimp+) + apply (clarsimp simp: valid_cap_simps' capAligned_def empty_descendants_range_in' + descendants_range'_def2 + elim!: pred_tcb'_weakenE) + done + +lemma inv_untyp_st_tcb_at'[wp]: + "\invs' and st_tcb_at' (P and ((\) Inactive) and ((\) IdleThreadState)) tptr + and valid_untyped_inv' ui and ct_active'\ + invokeUntyped ui + \\rv. st_tcb_at' P tptr\" + apply (rule hoare_pre) + apply (rule hoare_strengthen_post) + apply (rule invokeUntyped_invs''[where Q="st_tcb_at' P tptr"]; + wp createNewCaps_pred_tcb_at') + apply (auto simp: valid_pspace'_def)[1] + apply (wp resetUntypedCap_st_tcb_at' | simp)+ + apply (cases ui, clarsimp simp: cte_wp_at_ctes_of isCap_simps) + apply (clarsimp elim!: pred_tcb'_weakenE) + done + +lemma inv_untyp_tcb'[wp]: + "\invs' and st_tcb_at' active' tptr + and valid_untyped_inv' ui and ct_active'\ + invokeUntyped ui + \\rv. tcb_at' tptr\" + apply (rule hoare_chain [OF inv_untyp_st_tcb_at'[where tptr=tptr and P="\"]]) + apply (clarsimp elim!: pred_tcb'_weakenE) + apply fastforce + apply (clarsimp simp: pred_tcb_at'_def) + done + +crunch ksInterruptState_eq[wp]: invokeUntyped "\s. P (ksInterruptState s)" + (wp: crunch_wps mapME_x_inv_wp preemptionPoint_inv + simp: crunch_simps unless_def) + +crunches deleteObjects, updateFreeIndex + for valid_irq_states'[wp]: "valid_irq_states'" + (wp: doMachineOp_irq_states' crunch_wps + simp: freeMemory_def no_irq_storeWord unless_def) + +lemma resetUntypedCap_IRQInactive: + "\valid_irq_states'\ + resetUntypedCap slot + \\_ _. True\, \\rv s. 
intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + (is "\?P\ resetUntypedCap slot \?Q\,\?E\") + apply (simp add: resetUntypedCap_def) + apply (rule hoare_pre) + apply (wp mapME_x_inv_wp[where P=valid_irq_states' and E="?E", THEN hoare_post_impErr] + doMachineOp_irq_states' preemptionPoint_inv hoare_drop_imps + | simp add: no_irq_clearMemory if_apply_def2)+ + done + +lemma inv_untyped_IRQInactive: + "\valid_irq_states'\ + invokeUntyped ui + -, \\rv s. intStateIRQTable (ksInterruptState s) rv \ irqstate.IRQInactive\" + unfolding invokeUntyped_def + by (wpsimp wp: whenE_wp resetUntypedCap_IRQInactive) + +end +end diff --git a/proof/refine/AARCH64/VSpace_R.thy b/proof/refine/AARCH64/VSpace_R.thy new file mode 100644 index 0000000000..6dca8c3363 --- /dev/null +++ b/proof/refine/AARCH64/VSpace_R.thy @@ -0,0 +1,2970 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* + AARCH64 VSpace refinement +*) + +theory VSpace_R +imports TcbAcc_R +begin + +lemma cteCaps_of_ctes_of_lift: + "(\P. \\s. P (ctes_of s)\ f \\_ s. P (ctes_of s)\) \ \\s. P (cteCaps_of s) \ f \\_ s. P (cteCaps_of s)\" + unfolding cteCaps_of_def . + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + "vspace_at_asid' vs asid \ \s. \ap pool entry. + armKSASIDTable (ksArchState s) (ucast (asid_high_bits_of (ucast asid))) = Some ap \ + ko_at' (ASIDPool pool) ap s \ + pool (ucast (asid_low_bits_of (ucast asid))) = Some entry \ + apVSpace entry = vs \ + (\pt_t. page_table_at' pt_t vs s)" +(* FIXME AARCH64: checkPTAt currently used in both contexts (Normal and VSRoot) -- if we later + (in CRefine) need more precision here (= VSRootPT_T), we'll need to split + checkPTAt into separate functions *) + +lemma findVSpaceForASID_vs_at_wp: + "\\s. \pm. asid \ 0 \ asid_wf asid \ vspace_at_asid' pm asid s \ P pm s\ + findVSpaceForASID asid + \P\,-" + unfolding findVSpaceForASID_def + apply (wpsimp wp: getASID_wp simp: checkPTAt_def getASIDPoolEntry_def getPoolPtr_def) + apply (fastforce simp: asid_low_bits_of_def ucast_ucast_a is_down ucast_ucast_mask + asid_low_bits_def asidRange_def mask_2pm1[symmetric] + le_mask_asidBits_asid_wf vspace_at_asid'_def page_table_at'_def) + done + +crunches findVSpaceForASID, haskell_fail + for inv[wp]: "P" + (simp: const_def crunch_simps wp: loadObject_default_inv crunch_wps ignore_del: getObject) + +lemma asidBits_asid_bits[simp]: + "asidBits = asid_bits" + by (simp add: bit_simps' asid_bits_def asidBits_def) + +(* FIXME AARCH64: Added to crunch_param_rules in Crunch_Instances_NonDet as + trans[OF liftE_bindE return_bind]; move to monad equations instead and give it the name below *) +lemma liftE_return_bindE: + "liftE (return x) >>=E f = f x" + by (rule Crunch.crunch_param_rules(8)) + +crunches getIRQState + for inv[wp]: P + +lemma dmo_invs_no_cicd_lift': (* FIXME AARCH64: move up *) + assumes "\P. f \\s. P (irq_masks s)\" + assumes "\P p. f \\s. P (underlying_memory s p)\" + shows "doMachineOp f \all_invs_but_ct_idle_or_in_cur_domain'\" + apply (wp dmo_invs_no_cicd' assms) + apply clarsimp + apply (drule_tac Q="\_ m'. underlying_memory m' p = underlying_memory m p" in use_valid, + rule assms, rule refl) + apply simp + done + +lemma dmo_invs_lift': (* FIXME AARCH64: move up *) + assumes "\P. f \\s. P (irq_masks s)\" + assumes "\P p. f \\s. P (underlying_memory s p)\" + shows "doMachineOp f \invs'\" + apply (wp dmo_invs' assms) + apply clarsimp + apply (drule_tac Q="\_ m'. 
underlying_memory m' p = underlying_memory m p" in use_valid, + rule assms, rule refl) + apply simp + done + +lemma dmos_invs_no_cicd'[wp]: + "doMachineOp isb \invs_no_cicd'\" + "doMachineOp dsb \invs_no_cicd'\" + "\w. doMachineOp (setSCTLR w) \invs_no_cicd'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_hcr w) \invs_no_cicd'\" + "\w x. doMachineOp (set_gic_vcpu_ctrl_lr w x) \invs_no_cicd'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_apr w) \invs_no_cicd'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_vmcr w) \invs_no_cicd'\" + "\w. doMachineOp (setHCR w) \invs_no_cicd'\" + "doMachineOp get_gic_vcpu_ctrl_hcr \invs_no_cicd'\" + "\w. doMachineOp (get_gic_vcpu_ctrl_lr w) \invs_no_cicd'\" + "doMachineOp get_gic_vcpu_ctrl_apr \invs_no_cicd'\" + "doMachineOp get_gic_vcpu_ctrl_vmcr \invs_no_cicd'\" + "doMachineOp enableFpuEL01 \invs_no_cicd'\" + "\r. doMachineOp (readVCPUHardwareReg r) \invs_no_cicd'\" + "\r v. doMachineOp (writeVCPUHardwareReg r v) \invs_no_cicd'\" + "doMachineOp check_export_arch_timer \invs_no_cicd'\" + by (wp dmo_invs_no_cicd_lift')+ + +lemma dmos_invs'[wp]: + "doMachineOp isb \invs'\" + "doMachineOp dsb \invs'\" + "\w. doMachineOp (setSCTLR w) \invs'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_hcr w) \invs'\" + "\w x. doMachineOp (set_gic_vcpu_ctrl_lr w x) \invs'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_apr w) \invs'\" + "\w. doMachineOp (set_gic_vcpu_ctrl_vmcr w) \invs'\" + "\w. doMachineOp (setHCR w) \invs'\" + "doMachineOp get_gic_vcpu_ctrl_hcr \invs'\" + "\w. doMachineOp (get_gic_vcpu_ctrl_lr w) \invs'\" + "doMachineOp get_gic_vcpu_ctrl_apr \invs'\" + "doMachineOp get_gic_vcpu_ctrl_vmcr \invs'\" + "doMachineOp enableFpuEL01 \invs'\" + "\r. doMachineOp (readVCPUHardwareReg r) \invs'\" + "\r v. doMachineOp (writeVCPUHardwareReg r v) \invs'\" + "doMachineOp check_export_arch_timer \invs'\" + by (wp dmo_invs_lift')+ + +lemma valid_irq_node_lift_asm: + assumes x: "\P. \\s. P (irq_node' s)\ f \\rv s. P (irq_node' s)\" + assumes y: "\p. \real_cte_at' p and Q\ f \\rv. real_cte_at' p\" + shows "\\s. valid_irq_node' (irq_node' s) s \ Q s\ f \\rv s. valid_irq_node' (irq_node' s) s\" + apply (simp add: valid_irq_node'_def) + apply (rule hoare_pre) + apply (rule hoare_use_eq_irq_node' [OF x]) + apply (wp hoare_vcg_all_lift y) + apply simp + done + +lemma isIRQActive_corres: + "corres (=) \ \ (is_irq_active irqVTimerEvent) (isIRQActive irqVTimerEvent)" + apply (clarsimp simp: isIRQActive_def getIRQState_def is_irq_active_def get_irq_state_def) + apply (clarsimp simp: is_irq_active_def isIRQActive_def + get_irq_state_def irq_state_relation_def + getIRQState_def getInterruptState_def + state_relation_def interrupt_state_relation_def) + apply (fastforce split: irq_state.splits irqstate.splits) + done + +lemma getIRQState_wp: + "\\s. P (intStateIRQTable (ksInterruptState s) irq) s \ getIRQState irq \\rv s. P rv s\" + unfolding getIRQState_def getInterruptState_def + by (wpsimp simp: comp_def) + +lemma maskInterrupt_irq_states': + "\valid_irq_states' + and (\s. \b \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive)\ + doMachineOp (maskInterrupt b irq) + \\rv. valid_irq_states'\" + by (wpsimp wp: dmo_maskInterrupt) + (auto simp add: valid_irq_states_def valid_irq_masks'_def) + +crunch ksIdleThread[wp]: storeWordUser "\s. P (ksIdleThread s)" +crunch ksIdleThread[wp]: asUser "\s. P (ksIdleThread s)" + (wp: crunch_wps simp: crunch_simps) +crunch ksQ[wp]: asUser "\s. P (ksReadyQueues s)" + (wp: crunch_wps simp: crunch_simps) + +lemma maskInterrupt_invs': + "\invs' + and (\s. 
\b \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive)\ + doMachineOp (maskInterrupt b irq) + \\rv. invs'\" + by (wpsimp wp: maskInterrupt_irq_states' dmo_maskInterrupt simp: invs'_def valid_state'_def) + (auto simp: valid_irq_states_def valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + +lemma dmo_machine_op_lift_invs'[wp]: + "doMachineOp (machine_op_lift f) \invs'\" + by (wpsimp wp: dmo_invs' simp: machine_op_lift_def in_monad machine_rest_lift_def select_f_def) + +lemma dmo'_gets_wp: + "\\s. Q (f (ksMachineState s)) s\ doMachineOp (gets f) \Q\" + unfolding doMachineOp_def by (wpsimp simp: in_monad) + +lemma maskInterrupt_invs_no_cicd': + "\invs_no_cicd' + and (\s. \b \ intStateIRQTable (ksInterruptState s) irq \ irqstate.IRQInactive)\ + doMachineOp (maskInterrupt b irq) + \\rv. invs_no_cicd'\" + by (wpsimp wp: maskInterrupt_irq_states' dmo_maskInterrupt simp: invs_no_cicd'_def) + (auto simp: valid_irq_states_def valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def) + +(* FIXME AARCH64: this is a big block of VCPU-related lemmas in an attempt to consolidate them; + there may be an opportunity to factor most of these out into a separate theory *) +(* setObject for VCPU invariant preservation *) + +lemma setObject_vcpu_cur_domain[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksCurDomain s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ct[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksCurThread s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_it[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksIdleThread s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_sched[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksSchedulerAction s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_L1[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksReadyQueuesL1Bitmap s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_L2[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksReadyQueuesL2Bitmap s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ksInt[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksInterruptState s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ksArch[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksArchState s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_gs[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (gsMaxObjectSize s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_maschine_state[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksMachineState s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ksDomSchedule[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksDomSchedule s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_ksDomScheduleIdx[wp]: + "setObject ptr (vcpu::vcpu) \\s. P (ksDomScheduleIdx s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setObject_vcpu_gsUntypedZeroRanges[wp]: + "setObject ptr (vcpu::vcpu) \\s. 
P (gsUntypedZeroRanges s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + + +crunches vcpuEnable, vcpuSave, vcpuDisable, vcpuRestore + for pspace_aligned'[wp]: pspace_aligned' + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +lemma vcpuSwitch_aligned'[wp]: "\pspace_aligned'\ vcpuSwitch param_a \\_. pspace_aligned'\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +crunches vcpuEnable, vcpuSave, vcpuDisable, vcpuRestore + for pspace_distinct'[wp]: pspace_distinct' + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +lemma vcpuSwitch_distinct'[wp]: "\pspace_distinct'\ vcpuSwitch param_a \\_. pspace_distinct'\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma setObject_vcpu_ctes_of[wp]: + "\ \s. P (ctes_of s)\ setObject p (t :: vcpu) \\_ s. P (ctes_of s)\" + apply (rule ctes_of_from_cte_wp_at[where Q="\", simplified]) + apply (wp setObject_cte_wp_at2'[where Q="\"]) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply (rule equals0I) + apply (clarsimp simp: updateObject_default_def in_monad) + apply simp + done + +lemma setObject_vcpu_untyped_ranges_zero'[wp]: + "setObject ptr (vcpu::vcpu) \untyped_ranges_zero'\" + by (rule hoare_lift_Pf[where f=cteCaps_of]; wp cteCaps_of_ctes_of_lift) + +lemma setVCPU_if_live[wp]: + "\\s. if_live_then_nonz_cap' s \ (live' (injectKO vcpu) \ ex_nonz_cap_to' v s)\ + setObject v (vcpu::vcpu) \\_. if_live_then_nonz_cap'\" + apply (wpsimp wp: setObject_iflive' [where P=\] + | simp add: objBits_simps vcpuBits_def pageBits_def)+ + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def in_monad bind_def) + apply simp + done + +lemma setVCPU_if_unsafe[wp]: + "setObject v (vcpu::vcpu) \if_unsafe_then_cap'\" + apply (wp setObject_ifunsafe') + apply (clarsimp simp: updateObject_default_def in_monad) + apply (clarsimp simp: updateObject_default_def in_monad bind_def) + apply wp + apply simp + done + +lemma projectKO_opt_no_vcpu[simp]: + "projectKO_opt (KOArch (KOVCPU v)) = (None::'a::no_vcpu option)" + by (rule ccontr) (simp add: project_koType not_vcpu[symmetric]) + +lemma setObject_vcpu_obj_at'_no_vcpu[wp]: + "setObject ptr (v::vcpu) \\s. P (obj_at' (P'::'a::no_vcpu \ bool) t s)\" + apply (wp setObject_ko_wp_at[where + P'="\ko. \obj. projectKO_opt ko = Some obj \ P' (obj::'a::no_vcpu)" for P', + folded obj_at'_real_def]) + apply (clarsimp simp: updateObject_default_def in_monad not_vcpu[symmetric]) + apply (simp add: objBits_simps) + apply (simp add: vcpuBits_def pageBits_def) + apply (clarsimp split del: if_split) + apply (erule rsubst[where P=P]) + apply normalise_obj_at' + apply (clarsimp simp: obj_at'_real_def ko_wp_at'_def) + done + +lemmas setVCPU_pred_tcb'[wp] = + setObject_vcpu_obj_at'_no_vcpu + [where P'="\ko. P (proj (tcb_to_itcb' ko))" for P proj, folded pred_tcb_at'_def] + +lemma setVCPU_valid_idle'[wp]: + "setObject v (vcpu::vcpu) \valid_idle'\" + unfolding valid_idle'_def by (rule hoare_lift_Pf[where f=ksIdleThread]; wp) + +lemma setVCPU_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ setObject p (v::vcpu) \\rv s. 
P (ksReadyQueues s)\" + by (wp setObject_qs updateObject_default_inv | simp)+ + +lemma setVCPU_valid_queues'[wp]: + "setObject v (vcpu::vcpu) \valid_queues'\" + unfolding valid_queues'_def + by (rule hoare_lift_Pf[where f=ksReadyQueues]; wp hoare_vcg_all_lift updateObject_default_inv) + +lemma setVCPU_ct_not_inQ[wp]: + "setObject v (vcpu::vcpu) \ct_not_inQ\" + apply (wp ct_not_inQ_lift) + apply (rule hoare_lift_Pf[where f=ksCurThread]; wp) + apply assumption + done + +(* TODO: move *) +lemma getObject_ko_at_vcpu [wp]: + "\\\ getObject p \\rv::vcpu. ko_at' rv p\" + by (rule getObject_ko_at | simp add: objBits_simps vcpuBits_def pageBits_def)+ + +lemma corres_gets_gicvcpu_numlistregs: + "corres (=) \ \ (gets (arm_gicvcpu_numlistregs \ arch_state)) + (gets (armKSGICVCPUNumListRegs \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma corres_gets_current_vcpu[corres]: + "corres (=) \ \ (gets (arm_current_vcpu \ arch_state)) + (gets (armHSCurVCPU \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma setObject_VCPU_corres: + "vcpu_relation vcpuObj vcpuObj' + \ corres dc (vcpu_at vcpu) + (vcpu_at' vcpu) + (set_vcpu vcpu vcpuObj) + (setObject vcpu vcpuObj')" + apply (simp add: set_vcpu_def) + apply (rule corres_guard_imp) + apply (rule setObject_other_corres [where P="\ko::vcpu. True"], simp) + apply (clarsimp simp: obj_at'_def) + apply (erule map_to_ctes_upd_other, simp, simp) + apply (simp add: a_type_def is_other_obj_relation_type_def) + apply (simp add: objBits_simps) + apply simp + apply (simp add: objBits_simps vcpuBits_def pageBits_def) + apply (simp add: other_obj_relation_def asid_pool_relation_def) + apply (clarsimp simp: typ_at_to_obj_at'[symmetric] obj_at_def exs_valid_def + assert_def a_type_def return_def fail_def) + apply (fastforce split: Structures_A.kernel_object.split_asm if_split_asm) + apply (simp add: typ_at_to_obj_at_arches) + done + +lemma setObject_vcpu_cte_wp_at'[wp]: + "\\s. P (cte_wp_at' P' p s)\ + setObject ptr (vcpu::vcpu) + \\rv s. P (cte_wp_at' P' p s)\" + apply (wp setObject_cte_wp_at2'[where Q="\"]) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply (rule equals0I) + apply (clarsimp simp: updateObject_default_def in_monad projectKO_opts_defs) + apply simp + done + +crunches vcpuSave, vcpuRestore, vcpuDisable, vcpuEnable + for ctes[wp]: "\s. P (ctes_of s)" + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +lemma vcpuSwitch_ctes[wp]: "\\s. P (ctes_of s)\ vcpuSwitch vcpu \\_ s. P (ctes_of s)\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +crunches + vgicUpdate, vgicUpdateLR, vcpuWriteReg, vcpuReadReg, vcpuRestoreRegRange, vcpuSaveRegRange, + vcpuSave + for typ_at'[wp]: "\s. P (typ_at' T p s)" + and no_0_obj'[wp]: no_0_obj' + (wp: crunch_wps ignore_del: setObject) + +crunches vcpuSave, vcpuRestore, vcpuDisable, vcpuEnable + for cte_wp_at'[wp]: "\s. P (cte_wp_at' P' p s)" + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +crunches vcpuDisable, vcpuEnable, vcpuSave, vcpuRestore + for no_0_obj'[wp]: no_0_obj' + (simp: crunch_simps wp: crunch_wps getObject_inv_vcpu loadObject_default_inv) + +lemma vcpuSwitch_no_0_obj'[wp]: "\no_0_obj'\ vcpuSwitch v \\_. no_0_obj'\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma vcpuSwitch_cte_wp_at'[wp]: + "\\s. P (cte_wp_at' P' p s)\ vcpuSwitch param_a \\_ s. 
P (cte_wp_at' P' p s)\ " + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma vcpuUpdate_corres[corres]: + "\v1 v2. vcpu_relation v1 v2 \ vcpu_relation (f v1) (f' v2) \ + corres dc (vcpu_at v) (vcpu_at' v) + (vcpu_update v f) (vcpuUpdate v f')" + by (corresKsimp corres: getObject_vcpu_corres setObject_VCPU_corres + simp: vcpu_update_def vcpuUpdate_def vcpu_relation_def) + +lemma vgicUpdate_corres[corres]: + "\vgic vgic'. vgic_map vgic = vgic' \ vgic_map (f vgic) = (f' vgic') + \ corres dc (vcpu_at v) (vcpu_at' v) (vgic_update v f) (vgicUpdate v f')" + by (corresKsimp simp: vgic_update_def vgicUpdate_def vcpu_relation_def) + +lemma vgicUpdateLR_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v) + (vgic_update_lr v idx val) (vgicUpdateLR v idx val)" + by (corresKsimp simp: vgic_update_lr_def vgicUpdateLR_def vgic_map_def) + +lemma vcpuReadReg_corres[corres]: + "corres (=) (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_read_reg v r) (vcpuReadReg v r)" + apply (simp add: vcpu_read_reg_def vcpuReadReg_def) + apply (rule corres_guard_imp) + apply (rule corres_assert_gen_asm2) + apply (rule corres_underlying_split[OF getObject_vcpu_corres]) + apply (wpsimp simp: vcpu_relation_def)+ + done + +lemma vcpuWriteReg_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_write_reg v r val) (vcpuWriteReg v r val)" + apply (simp add: vcpu_write_reg_def vcpuWriteReg_def) + apply (rule corres_guard_imp) + apply (rule corres_assert_gen_asm2) + apply (rule vcpuUpdate_corres) + apply (fastforce simp: vcpu_relation_def)+ + done + +lemma vcpuSaveReg_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_save_reg v r) (vcpuSaveReg v r)" + apply (clarsimp simp: vcpu_save_reg_def vcpuSaveReg_def) + apply (rule corres_guard_imp) + apply (rule corres_assert_gen_asm2) + apply (rule corres_split[OF corres_machine_op[where r="(=)"]]) + apply (rule corres_Id; simp) + apply (rule vcpuUpdate_corres, fastforce simp: vcpu_relation_def vgic_map_def) + apply wpsimp+ + done + +lemma vcpuSaveRegRange_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_save_reg_range v rf rt) (vcpuSaveRegRange v rf rt)" + apply (clarsimp simp: vcpu_save_reg_range_def vcpuSaveRegRange_def) + apply (rule corres_mapM_x[OF _ _ _ _ subset_refl]) + apply (wpsimp wp: vcpuSaveReg_corres)+ + done + +lemma vcpuRestoreReg_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_restore_reg v r) (vcpuRestoreReg v r)" + apply (clarsimp simp: vcpu_restore_reg_def vcpuRestoreReg_def) + apply (rule corres_guard_imp) + apply (rule corres_assert_gen_asm2) + apply (rule corres_split[OF getObject_vcpu_corres]) + apply (rule corres_machine_op) + apply (rule corres_Id) + apply (fastforce simp: vcpu_relation_def) + apply (wpsimp wp: corres_Id simp: vcpu_relation_def vgic_map_def)+ + done + +lemma vcpuRestoreRegRange_corres[corres]: + "corres dc (vcpu_at v) (vcpu_at' v and no_0_obj') + (vcpu_restore_reg_range v rf rt) (vcpuRestoreRegRange v rf rt)" + apply (clarsimp simp: vcpu_restore_reg_range_def vcpuRestoreRegRange_def) + apply (rule corres_mapM_x[OF _ _ _ _ subset_refl]) + apply (wpsimp wp: vcpuRestoreReg_corres)+ + done + +lemma saveVirtTimer_corres[corres]: + "corres dc (vcpu_at vcpu_ptr) (vcpu_at' vcpu_ptr and no_0_obj') + (save_virt_timer vcpu_ptr) (saveVirtTimer vcpu_ptr)" + unfolding save_virt_timer_def saveVirtTimer_def + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF vcpuSaveReg_corres], simp) + apply (rule corres_split_dc[OF 
corres_machine_op], (rule corres_Id; simp)) + apply (rule corres_split_dc[OF vcpuSaveReg_corres], simp)+ + apply (rule corres_split_eqr[OF corres_machine_op], (rule corres_Id; simp))+ + apply (fold dc_def) + apply (rule vcpuUpdate_corres) + apply (simp add: vcpu_relation_def) + apply wpsimp+ + done + +lemma restoreVirtTimer_corres[corres]: + "corres dc (vcpu_at vcpu_ptr) (vcpu_at' vcpu_ptr and no_0_obj') + (restore_virt_timer vcpu_ptr) (restoreVirtTimer vcpu_ptr)" + unfolding restore_virt_timer_def restoreVirtTimer_def IRQ_def + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF vcpuRestoreReg_corres], simp)+ + apply (rule corres_split_eqr[OF corres_machine_op], (rule corres_Id; simp))+ + apply (rule corres_split[OF getObject_vcpu_corres]) + apply (rule corres_split_eqr[OF vcpuReadReg_corres]) + apply (clarsimp simp: vcpu_relation_def) + apply (rule corres_split_dc[OF vcpuWriteReg_corres])+ + apply (rule corres_split_dc[OF vcpuRestoreReg_corres], simp)+ + apply (rule corres_split_eqr[OF isIRQActive_corres]) + apply (rule corres_split_dc[OF corres_when], simp) + apply (simp add: irq_vppi_event_index_def irqVPPIEventIndex_def IRQ_def) + apply (rule corres_machine_op, simp) + apply (rule corres_Id; wpsimp) + apply (fold dc_def) + apply (rule vcpuRestoreReg_corres) + apply (wpsimp simp: if_apply_def2 isIRQActive_def)+ + done + +lemma vcpuSave_corres: + "corres dc (vcpu_at (fst cvcpu)) (vcpu_at' (fst cvcpu) and no_0_obj') + (vcpu_save (Some cvcpu)) (vcpuSave (Some cvcpu))" + apply (clarsimp simp add: vcpu_save_def vcpuSave_def armvVCPUSave_def) + apply (cases cvcpu, clarsimp, rename_tac v active) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split[where r'=dc]) + apply (rule corres_when, simp) + apply (rule corres_split[OF vcpuSaveReg_corres]) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split[OF vgicUpdate_corres]) + apply (clarsimp simp: vgic_map_def) + apply (rule saveVirtTimer_corres) + apply wpsimp+ + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split[OF vgicUpdate_corres]) + apply (clarsimp simp: vgic_map_def) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split[OF vgicUpdate_corres]) + apply (clarsimp simp: vgic_map_def) + apply (rule corres_split_eqr) + apply (rule corres_trivial) + apply (fastforce simp add: state_relation_def arch_state_relation_def) + apply (simp add: mapM_discarded) + apply (rule corres_split[OF corres_mapM_x[OF _ _ _ _ subset_refl]]) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (clarsimp, fold dc_def) + apply (rule vgicUpdateLR_corres) + apply wpsimp+ + apply (rule corres_split[OF vcpuSaveRegRange_corres]) + apply (rule corres_machine_op) + apply (rule corres_Id; wpsimp) + apply (wpsimp wp: mapM_x_wp_inv hoare_vcg_imp_lift' + simp: if_apply_def2)+ + done + +lemma vcpuDisable_corres: + "corres dc (\s. (\v. vcpuopt = Some v) \ vcpu_at (the vcpuopt) s) + (\s. ((\v. 
vcpuopt = Some v) \ vcpu_at' (the vcpuopt) s) \ no_0_obj' s) + (vcpu_disable vcpuopt) + (vcpuDisable vcpuopt)" + apply (cases vcpuopt; clarsimp simp: vcpu_disable_def vcpuDisable_def) + (* no current VCPU *) + subgoal + apply (clarsimp simp: doMachineOp_bind do_machine_op_bind empty_fail_cond) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF corres_machine_op] + | rule corres_machine_op corres_Id + | wpsimp)+ + done + (* have current VCPU *) + apply (rename_tac vcpu) + apply (clarsimp simp: doMachineOp_bind do_machine_op_bind bind_assoc IRQ_def) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_eqr[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_dc[OF vgicUpdate_corres]) + apply (clarsimp simp: vgic_map_def) + apply (rule corres_split_dc[OF vcpuSaveReg_corres]) + apply (rule corres_split_dc[OF vcpuSaveReg_corres]) + apply (rule corres_split_dc[OF corres_machine_op] + corres_split_dc[OF saveVirtTimer_corres] + | rule corres_machine_op corres_Id + | wpsimp)+ + done + +lemma vcpuEnable_corres: + "corres dc (vcpu_at vcpu) (vcpu_at' vcpu and no_0_obj') + (vcpu_enable vcpu) (vcpuEnable vcpu)" + apply (simp add: vcpu_enable_def vcpuEnable_def doMachineOp_bind do_machine_op_bind bind_assoc) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF vcpuRestoreReg_corres])+ + apply (rule corres_split[OF getObject_vcpu_corres], rename_tac vcpu') + apply (case_tac vcpu') + apply (rule corres_split_dc[OF corres_machine_op] + | rule corres_split_dc[OF vcpuRestoreReg_corres] + | rule corres_machine_op corres_Id restoreVirtTimer_corres + | wpsimp simp: vcpu_relation_def vgic_map_def)+ + done + +lemma vcpuRestore_corres: + "corres dc (vcpu_at vcpu) + (vcpu_at' vcpu and no_0_obj') + (vcpu_restore vcpu) + (vcpuRestore vcpu)" + apply (simp add: vcpu_restore_def vcpuRestore_def gicVCPUMaxNumLR_def) + apply (rule corres_guard_imp) + apply (rule corres_split_dc[OF corres_machine_op] + | (rule corres_machine_op corres_Id; wpsimp))+ + apply (rule corres_split[OF getObject_vcpu_corres], rename_tac vcpu') + apply (rule corres_split[OF corres_gets_gicvcpu_numlistregs]) + apply (case_tac vcpu' + , clarsimp simp: comp_def vcpu_relation_def vgic_map_def mapM_x_mapM + uncurry_def split_def mapM_map_simp) + apply (simp add: doMachineOp_bind do_machine_op_bind bind_assoc empty_fail_cond) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split_dc[OF corres_machine_op]) + apply (rule corres_Id; wpsimp) + apply (rule corres_split) + apply (rule corres_machine_op, rule corres_Id; wpsimp wp: no_fail_mapM) + apply (rule corres_split_dc[OF vcpuRestoreRegRange_corres]) + apply (rule vcpuEnable_corres) + apply wpsimp+ + done + +lemma vcpuSwitch_corres: + assumes "vcpu' = vcpu" + shows + "corres dc (\s. (vcpu \ None \ vcpu_at (the vcpu) s) \ + ((arm_current_vcpu \ arch_state) s \ None + \ vcpu_at ((fst \ the \ arm_current_vcpu \ arch_state) s) s)) + (\s. (vcpu' \ None \ vcpu_at' (the vcpu') s) \ + ((armHSCurVCPU \ ksArchState) s \ None + \ vcpu_at' ((fst \ the \ armHSCurVCPU \ ksArchState) s) s) \ + no_0_obj' s) + (vcpu_switch vcpu) + (vcpuSwitch vcpu')" + proof - + have modify_current_vcpu: + "\a b. corres dc \ \ (modify (\s. s\arch_state := arch_state s\arm_current_vcpu := Some (a, b)\\)) + (modifyArchState (armHSCurVCPU_update (\_. 
Some (a, b))))" + by (clarsimp simp add: modifyArchState_def state_relation_def arch_state_relation_def + intro!: corres_modify) + have get_current_vcpu: "corres (=) \ \ (gets (arm_current_vcpu \ arch_state)) + (gets (armHSCurVCPU \ ksArchState))" + apply clarsimp + apply (rule_tac P = "(arm_current_vcpu (arch_state s)) = (armHSCurVCPU (ksArchState s'))" + in TrueE; + simp add: state_relation_def arch_state_relation_def) + done + show ?thesis + apply (simp add: vcpu_switch_def vcpuSwitch_def assms) + apply (cases vcpu) + apply (all \simp, rule corres_underlying_split[OF _ _ gets_sp gets_sp], + rule corres_guard_imp[OF get_current_vcpu TrueI TrueI], + rename_tac rv rv', case_tac rv ; + clarsimp simp add: when_def\) + apply (rule corres_machine_op[OF corres_underlying_trivial[OF no_fail_isb]] TrueI TrueI + vcpuDisable_corres modify_current_vcpu + vcpuEnable_corres + vcpuRestore_corres + vcpuSave_corres + hoare_post_taut conjI + corres_underlying_split corres_guard_imp + | clarsimp simp add: when_def | wpsimp | assumption)+ + done + qed + +lemma aligned_distinct_relation_vcpu_atI'[elim]: + "\ vcpu_at p s; pspace_relation (kheap s) (ksPSpace s'); + pspace_aligned' s'; pspace_distinct' s' \ + \ vcpu_at' p s'" + apply (clarsimp simp add: obj_at_def a_type_def) + apply (simp split: Structures_A.kernel_object.split_asm + if_split_asm arch_kernel_obj.split_asm) + apply (drule(1) pspace_relation_absD) + apply (clarsimp simp: other_obj_relation_def) + apply (case_tac z ; simp) + apply (rename_tac vcpu) + apply (case_tac vcpu; simp) + apply (clarsimp simp: vcpu_relation_def obj_at'_def typ_at'_def ko_wp_at'_def) + apply (fastforce simp add: pspace_aligned'_def pspace_distinct'_def dom_def) + done + +lemma vcpuSwitch_corres': + assumes "vcpu' = vcpu" + shows + "corres dc (\s. (vcpu \ None \ vcpu_at (the vcpu) s) \ + ((arm_current_vcpu \ arch_state) s \ None + \ vcpu_at ((fst \ the \ arm_current_vcpu \ arch_state) s) s)) + (pspace_aligned' and pspace_distinct' and no_0_obj') + (vcpu_switch vcpu) + (vcpuSwitch vcpu')" + apply (rule stronger_corres_guard_imp, + rule vcpuSwitch_corres[OF assms]) + apply simp + apply (simp add: assms) + apply (rule conjI) + apply clarsimp + apply (rule aligned_distinct_relation_vcpu_atI' ; clarsimp simp add: state_relation_def, assumption?) + apply (clarsimp simp add: state_relation_def arch_state_relation_def) + apply (rule aligned_distinct_relation_vcpu_atI'; assumption) + done + +crunches + vgicUpdateLR, vcpuWriteReg, vcpuReadReg, vcpuRestoreRegRange, vcpuSaveRegRange, vcpuSave, + vcpuSwitch + for nosch[wp]: "\s. P (ksSchedulerAction s)" + and it'[wp]: "\s. P (ksIdleThread s)" + (ignore: doMachineOp wp: crunch_wps) + +lemma modifyArchState_hyp[wp]: + "modifyArchState x \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: modifyArchState_def wp: | subst doMachineOp_bind)+ + +abbreviation + "live_vcpu_at_tcb p s \ \x. ko_at' x p s \ + (case atcbVCPUPtr (tcbArch x) of None \ \_. True + | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x) s" + +lemma valid_case_option_post_wp': + "(\x. \P x\ f \\rv. Q x\) \ + \case ep of Some x \ P x | _ \ \_. True\ + f \\rv. case ep of Some x \ Q x | _ \ \_. True\" + by (cases ep, simp_all add: hoare_vcg_prop) + +crunches + vcpuDisable, vcpuRestore, vcpuEnable, vgicUpdateLR, vcpuWriteReg, vcpuReadReg, + vcpuRestoreRegRange, vcpuSaveRegRange + for ksQ[wp]: "\s. P (ksReadyQueues s)" + (wp: crunch_wps) + +lemma vcpuSave_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ vcpuSave param_a \\_ s. 
P (ksReadyQueues s)\" + supply option.case_cong_weak[cong] + apply (wpsimp simp: vcpuSave_def modifyArchState_def armvVCPUSave_def | simp)+ + apply (rule_tac S="set gicIndices" in mapM_x_wp) + apply wpsimp+ + done + +lemma vcpuSwitch_ksQ[wp]: + "\\s. P (ksReadyQueues s)\ vcpuSwitch param_a \\_ s. P (ksReadyQueues s)\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | simp)+ + +lemma hyp_live'_vcpu_regs[simp]: + "hyp_live' (KOArch (KOVCPU (vcpuRegs_update f vcpu))) = hyp_live' (KOArch (KOVCPU vcpu))" + by (simp add: hyp_live'_def arch_live'_def) + +lemma hyp_live'_vcpu_vgic[simp]: + "hyp_live' (KOArch (KOVCPU (vcpuVGIC_update f' vcpu))) = hyp_live' (KOArch (KOVCPU vcpu))" + by (simp add: hyp_live'_def arch_live'_def) + +lemma hyp_live'_vcpu_VPPIMasked[simp]: + "hyp_live' (KOArch (KOVCPU (vcpuVPPIMasked_update f' vcpu))) = hyp_live' (KOArch (KOVCPU vcpu))" + by (simp add: hyp_live'_def arch_live'_def) + +lemma hyp_live'_vcpu_VTimer[simp]: + "hyp_live' (KOArch (KOVCPU (vcpuVTimer_update f' vcpu))) = hyp_live' (KOArch (KOVCPU vcpu))" + by (simp add: hyp_live'_def arch_live'_def) + +lemma live'_vcpu_regs[simp]: + "live' (KOArch (KOVCPU (vcpuRegs_update f vcpu))) = live' (KOArch (KOVCPU vcpu))" + by (simp add: live'_def) + +lemma live'_vcpu_vgic[simp]: + "live' (KOArch (KOVCPU (vcpuVGIC_update f' vcpu))) = live' (KOArch (KOVCPU vcpu))" + by (simp add: live'_def) + +lemma live'_vcpu_VPPIMasked[simp]: + "live' (KOArch (KOVCPU (vcpuVPPIMasked_update f' vcpu))) = live' (KOArch (KOVCPU vcpu))" + by (simp add: live'_def) + +lemma live'_vcpu_VTimer[simp]: + "live' (KOArch (KOVCPU (vcpuVTimer_update f' vcpu))) = live' (KOArch (KOVCPU vcpu))" + by (simp add: live'_def) + +lemma setVCPU_regs_vcpu_live: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuRegs_update f vcpu) \\_. ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma setVCPU_vgic_vcpu_live[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuVGIC_update f vcpu) \\_. ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma setVCPU_VPPIMasked_vcpu_live[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuVPPIMasked_update f vcpu) \\_. ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma setVCPU_VTimer_vcpu_live[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuVTimer_update f vcpu) \\_. 
ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +lemma vgicUpdate_vcpu_live[wp]: + "vgicUpdate v f \ ko_wp_at' (is_vcpu' and hyp_live') p \" + by (wpsimp simp: vgicUpdate_def vcpuUpdate_def wp: setVCPU_vgic_vcpu_live) + +lemma setVCPU_regs_vgic_vcpu_live: + "\ko_wp_at' (is_vcpu' and hyp_live') p and ko_at' vcpu v\ + setObject v (vcpuRegs_update f (vcpuVGIC_update f' vcpu)) \\_. ko_wp_at' (is_vcpu' and hyp_live') p\" + apply (wp setObject_ko_wp_at, simp) + apply (simp add: objBits_simps) + apply (clarsimp simp: vcpuBits_def pageBits_def) + apply (clarsimp simp: pred_conj_def is_vcpu'_def ko_wp_at'_def obj_at'_real_def) + done + +(* FIXME: move *) +lemma setVCPU_regs_vgic_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuRegs_update f (vcpuVGIC_update f' vcpu)) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_regs_vgic_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState])+ + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma setVCPU_regs_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuRegs_update f vcpu) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_regs_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState]) + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma setVCPU_vgic_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuVGIC_update f vcpu) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_vgic_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState]) + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma setVCPU_VPPIMasked_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuVPPIMasked_update f vcpu) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_vgic_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState]) + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma setVCPU_VTimer_valid_arch': + "\valid_arch_state' and ko_at' vcpu v\ setObject v (vcpuVTimer_update f vcpu) \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv) + apply (wp hoare_vcg_imp_lift hoare_vcg_all_lift setVCPU_vgic_vcpu_live + | rule hoare_lift_Pf[where f=ksArchState]) + apply (clarsimp simp: pred_conj_def o_def) + done + +lemma state_refs_of'_vcpu_empty: + "ko_at' (vcpu::vcpu) v s \ (state_refs_of' s)(v := {}) = state_refs_of' s" + by (rule ext) (clarsimp simp: state_refs_of'_def obj_at'_def) + +lemma state_hyp_refs_of'_vcpu_absorb: + "ko_at' vcpu v s \ + (state_hyp_refs_of' s)(v := vcpu_tcb_refs' (vcpuTCBPtr vcpu)) = state_hyp_refs_of' s" + by (rule ext) (clarsimp simp: state_hyp_refs_of'_def obj_at'_def) + +lemma setObject_vcpu_valid_objs': + "\valid_objs'\ setObject v (vcpu::vcpu) \\_. valid_objs'\" + apply (wp setObject_valid_objs') + apply (clarsimp simp: in_monad updateObject_default_def valid_obj'_def) + apply simp + done + +lemma setVCPU_valid_arch': + "\valid_arch_state' and (\s. 
\a. armHSCurVCPU (ksArchState s) = Some (v,a) \ hyp_live' (KOArch (KOVCPU vcpu))) \ + setObject v (vcpu::vcpu) + \\_. valid_arch_state'\" + apply (simp add: valid_arch_state'_def valid_asid_table'_def option_case_all_conv pred_conj_def) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_imp_lift' setObject_ko_wp_at + | simp add: objBits_simps vcpuBits_def pageBits_def)+ + apply (clarsimp simp: is_vcpu'_def ko_wp_at'_def) + done + +lemma setVCPU_valid_queues [wp]: + "\valid_queues\ setObject p (v::vcpu) \\_. valid_queues\" + by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ + +crunches + vcpuDisable, vcpuRestore, vcpuEnable, vcpuUpdate, vcpuSaveRegRange, vgicUpdateLR + for valid_queues[wp]: valid_queues + (ignore: doMachineOp wp: mapM_x_wp) + +lemma vcpuSave_valid_queues[wp]: + "\Invariants_H.valid_queues\ vcpuSave param_a \\_. Invariants_H.valid_queues\" + by (wpsimp simp: vcpuSave_def armvVCPUSave_def wp: mapM_x_wp cong: option.case_cong_weak | simp)+ + +lemma vcpuSwitch_valid_queues[wp]: + "\Invariants_H.valid_queues\ vcpuSwitch param_a \\_. Invariants_H.valid_queues\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | simp)+ + +lemma setObject_vcpu_no_tcb_update: + "\ vcpuTCBPtr (f vcpu) = vcpuTCBPtr vcpu \ + \ \ valid_objs' and ko_at' (vcpu :: vcpu) p\ setObject p (f vcpu) \ \_. valid_objs' \" + apply (rule_tac Q="valid_objs' and (ko_at' vcpu p and valid_obj' (KOArch (KOVCPU vcpu)))" in hoare_pre_imp) + apply (clarsimp) + apply (simp add: valid_obj'_def) + apply (rule setObject_valid_objs') + apply (clarsimp simp add: obj_at'_def) + apply (frule updateObject_default_result) + apply (clarsimp simp add: valid_obj'_def) + done + +lemma vcpuUpdate_valid_objs'[wp]: + "\vcpu. vcpuTCBPtr (f vcpu) = vcpuTCBPtr vcpu \ + \valid_objs'\ vcpuUpdate vr f \\_. valid_objs'\" + apply (wpsimp simp: vcpuUpdate_def) + apply (rule_tac vcpu=vcpu in setObject_vcpu_no_tcb_update) + apply wpsimp+ + done + +crunches + vgicUpdate, vcpuSaveReg, vgicUpdateLR, vcpuSaveRegRange, vcpuSave, + vcpuDisable, vcpuEnable, vcpuRestore, vcpuSwitch + for valid_objs'[wp]: valid_objs' + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + (wp: mapM_wp_inv simp: mapM_x_mapM) + +lemma setVCPU_regs_r_invs_cicd': + "\invs_no_cicd' and ko_at' vcpu v\ + setObject v (vcpuRegs_update (\_. (vcpuRegs vcpu)(r:=rval)) vcpu) \\_. invs_no_cicd'\" + unfolding valid_state'_def valid_pspace'_def valid_mdb'_def invs_no_cicd'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. vcpuRegs_update (\_. (vcpuRegs vcpu)(r:=rval)) vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_regs_valid_arch' setVCPU_regs_vcpu_live + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_vgic_invs_cicd': + "\invs_no_cicd' and ko_at' vcpu v\ + setObject v (vcpuVGIC_update f vcpu) + \\_. 
invs_no_cicd'\" + unfolding valid_state'_def valid_pspace'_def valid_mdb'_def invs_no_cicd'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. (vcpuVGIC_update f vcpu)"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_vgic_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_VPPIMasked_invs_cicd': + "\invs_no_cicd' and ko_at' vcpu v\ + setObject v (vcpuVPPIMasked_update f vcpu) + \\_. invs_no_cicd'\" + unfolding valid_state'_def valid_pspace'_def valid_mdb'_def invs_no_cicd'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. (vcpuVPPIMasked_update f vcpu)"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_VPPIMasked_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_VTimer_invs_cicd': + "\invs_no_cicd' and ko_at' vcpu v\ + setObject v (vcpuVTimer_update f vcpu) + \\_. invs_no_cicd'\" + unfolding valid_state'_def valid_pspace'_def valid_mdb'_def invs_no_cicd'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. (vcpuVTimer_update f vcpu)"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_VTimer_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma vgicUpdate_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vgicUpdate f v \\_. invs_no_cicd'\" + by (wpsimp simp: vgicUpdate_def vcpuUpdate_def wp: setVCPU_vgic_invs_cicd') + +lemma vcpuRestoreReg_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuRestoreReg v r \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuRestoreReg_def | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuReadReg_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuReadReg v r \\_. 
invs_no_cicd'\" + by (wpsimp simp: vcpuReadReg_def | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuSaveReg_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuSaveReg v r \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuSaveReg_def vcpuUpdate_def wp: setVCPU_regs_r_invs_cicd' + | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuWriteReg_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuWriteReg vcpu_ptr r v \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuWriteReg_def vcpuUpdate_def wp: setVCPU_regs_r_invs_cicd' + | subst doMachineOp_bind | rule empty_fail_bind)+ + +crunches vcpuRestoreRegRange, vcpuSaveRegRange, vgicUpdateLR + for invs_no_cicd'[wp]: invs_no_cicd' + (wp: mapM_x_wp ignore: loadObject) + +lemma saveVirtTimer_invs_no_cicd'[wp]: + "\invs_no_cicd'\ saveVirtTimer vcpu_ptr \\_. invs_no_cicd'\" + by (wpsimp simp: saveVirtTimer_def vcpuUpdate_def read_cntpct_def + wp: setVCPU_VTimer_invs_cicd' dmo'_gets_wp) + +lemma restoreVirtTimer_invs_no_cicd'[wp]: + "\invs_no_cicd'\ restoreVirtTimer vcpu_ptr \\_. invs_no_cicd'\" + by (wpsimp simp: restoreVirtTimer_def vcpuUpdate_def read_cntpct_def if_apply_def2 + isIRQActive_def + wp: setVCPU_VTimer_invs_cicd' maskInterrupt_invs_no_cicd' getIRQState_wp dmo'_gets_wp) + +lemma vcpuEnable_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuEnable v \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuEnable_def | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma dmo_maskInterrupt_True_invs_no_cicd'[wp]: + "doMachineOp (maskInterrupt True irq) \invs_no_cicd'\" + apply (wp dmo_maskInterrupt) + apply (clarsimp simp: invs_no_cicd'_def valid_state'_def) + apply (simp add: valid_irq_masks'_def valid_machine_state'_def + ct_not_inQ_def ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) + done + +lemma vcpuDisable_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuDisable v \\_. invs_no_cicd'\" + unfolding vcpuDisable_def + by (wpsimp wp: doMachineOp_typ_ats + simp: vcpuDisable_def doMachineOp_typ_at' split: option.splits + | subst doMachineOp_bind | rule empty_fail_bind conjI)+ + +lemma vcpuRestore_invs_no_cicd'[wp]: + "\invs_no_cicd'\ vcpuRestore v \\_. invs_no_cicd'\" + including no_pre + apply (wpsimp simp: vcpuRestore_def uncurry_def split_def doMachineOp_mapM_x gets_wp + | subst doMachineOp_bind | rule empty_fail_bind)+ + apply (rule_tac S="(\i. (of_nat i, vgicLR (vcpuVGIC vcpu) i)) ` {0..invs_no_cicd'\ vcpuSave v \\_. invs_no_cicd'\" + by (wpsimp simp: vcpuSave_def armvVCPUSave_def wp: mapM_x_wp cong: option.case_cong_weak + | assumption)+ +lemma valid_arch_state'_armHSCurVCPU_update[simp]: + "ko_wp_at' (is_vcpu' and hyp_live') v s \ + valid_arch_state' s \ valid_arch_state' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" + by (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def + cur_tcb'_def) + +lemma dmo_vcpu_hyp: + "\ko_wp_at' (is_vcpu' and hyp_live') v\ doMachineOp f \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: doMachineOp_def) + +lemma vcpuSaveReg_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v \ vcpuSaveReg v' r \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuSaveReg_def vcpuUpdate_def wp: setVCPU_regs_vcpu_live dmo_vcpu_hyp) + +lemma vcpuWriteReg_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v \ vcpuWriteReg v' r val \\_. 
ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuWriteReg_def vcpuUpdate_def wp: setVCPU_regs_vcpu_live dmo_vcpu_hyp) + +crunches + vcpuRestoreRegRange, vcpuSaveRegRange, vgicUpdateLR, vcpuReadReg + for hyp[wp]: "ko_wp_at' (is_vcpu' and hyp_live') v" + (wp: crunch_wps setVCPU_regs_vcpu_live dmo_vcpu_hyp) + +lemma saveVirtTimer_hyp[wp]: + "saveVirtTimer vcpu_ptr \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: saveVirtTimer_def vcpuUpdate_def wp: dmo_vcpu_hyp vgicUpdate_vcpu_live) + +lemma restoreVirtTimer_hyp[wp]: + "restoreVirtTimer vcpu_ptr \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: restoreVirtTimer_def vcpuUpdate_def isIRQActive_def + wp: dmo_vcpu_hyp vgicUpdate_vcpu_live) + +lemma vcpuDisable_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v\ vcpuDisable (Some x) \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuDisable_def wp: dmo_vcpu_hyp vgicUpdate_vcpu_live | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuEnable_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v\ vcpuEnable x \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuEnable_def wp: dmo_vcpu_hyp | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuRestore_hyp[wp]: + "\ko_wp_at' (is_vcpu' and hyp_live') v\ vcpuRestore x \\_. ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuRestore_def wp: dmo_vcpu_hyp | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma armvVCPUSave_hyp[wp]: + "armvVCPUSave x y \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: armvVCPUSave_def wp: dmo_vcpu_hyp) + +lemma vcpuSave_hyp[wp]: + "vcpuSave x \ko_wp_at' (is_vcpu' and hyp_live') v\" + apply (wpsimp simp: vcpuSave_def wp: dmo_vcpu_hyp mapM_x_wp' + | subst doMachineOp_bind + | rule empty_fail_bind)+ + apply (simp add: pred_conj_def) + done + +lemma vcpuSwitch_hyp[wp]: + "vcpuSwitch x \ko_wp_at' (is_vcpu' and hyp_live') v\" + by (wpsimp simp: vcpuSwitch_def wp: dmo_vcpu_hyp) + +lemma getObject_vcpu_ko_at': + "(vcpu::vcpu, s') \ fst (getObject p s) \ s' = s \ ko_at' vcpu p s" + apply (rule context_conjI) + apply (drule use_valid, rule getObject_inv[where P="(=) s"]; simp add: loadObject_default_inv) + apply (drule use_valid, rule getObject_ko_at; clarsimp simp: obj_at_simps vcpuBits_def) + done + +lemma vcpuUpdate_valid_arch_state'[wp]: + "\vcpu. vcpuTCBPtr (f vcpu) = vcpuTCBPtr vcpu \ + \valid_arch_state'\ vcpuUpdate vr f \\_. valid_arch_state'\" + including no_pre + apply (wpsimp simp: vcpuUpdate_def + wp: setVCPU_valid_arch') + by (clarsimp simp: valid_def in_monad hyp_live'_def arch_live'_def valid_arch_state'_def + obj_at'_real_def ko_wp_at'_def is_vcpu'_def + dest!: getObject_vcpu_ko_at')+ + +crunches vcpuRestoreReg + for valid_arch_state'[wp]: valid_arch_state' + +crunches vgicUpdateLR, vcpuSave, vcpuDisable, vcpuEnable, vcpuRestore + for valid_arch_state'[wp]: valid_arch_state' + (wp: crunch_wps ignore: doMachineOp) + +lemma vcpuSwitch_valid_arch_state'[wp]: + "\valid_arch_state' and (case v of None \ \ | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x)\ + vcpuSwitch v \\_. 
valid_arch_state'\" + apply (wpsimp simp: vcpuSwitch_def modifyArchState_def + wp: vcpuDisable_hyp[simplified pred_conj_def] vcpuSave_hyp[unfolded pred_conj_def] + dmo_vcpu_hyp vcpuSave_valid_arch_state' + | strengthen valid_arch_state'_armHSCurVCPU_update | simp)+ + apply (auto simp: valid_arch_state'_def pred_conj_def) + done + +lemma invs_no_cicd'_armHSCurVCPU_update[simp]: + "ko_wp_at' (is_vcpu' and hyp_live') v s \ invs_no_cicd' s \ + invs_no_cicd' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" + by (clarsimp simp: invs_no_cicd'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def + cur_tcb'_def) + +lemma invs'_armHSCurVCPU_update[simp]: + "ko_wp_at' (is_vcpu' and hyp_live') v s \ + invs' s \ invs' (s\ksArchState := armHSCurVCPU_update (\_. Some (v, b)) (ksArchState s)\)" + apply (clarsimp simp: invs'_def valid_state'_def valid_queues_def valid_queues_no_bitmap_def + bitmapQ_defs valid_global_refs'_def valid_arch_state'_def global_refs'_def + valid_queues'_def valid_irq_node'_def valid_irq_handlers'_def + irq_issued'_def irqs_masked'_def valid_machine_state'_def + cur_tcb'_def) + done + +lemma armHSCurVCPU_None_invs'[wp]: + "modifyArchState (armHSCurVCPU_update Map.empty) \invs'\" + apply (wpsimp simp: modifyArchState_def) + by (clarsimp simp: invs'_def valid_state'_def valid_machine_state'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_arch_state'_def valid_global_refs'_def global_refs'_def) + +lemma setVCPU_vgic_invs': + "\invs' and ko_at' vcpu v\ + setObject v (vcpuVGIC_update f vcpu) \\_. invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. vcpuVGIC_update f vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_vgic_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_regs_invs': + "\invs' and ko_at' vcpu v\ setObject v (vcpuRegs_update f vcpu) \\_. invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. 
vcpuRegs_update f vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_regs_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_VPPIMasked_invs': + "\invs' and ko_at' vcpu v\ setObject v (vcpuVPPIMasked_update f vcpu) \\_. invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. vcpuVPPIMasked_update f vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_VPPIMasked_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma setVCPU_VTimer_invs': + "\invs' and ko_at' vcpu v\ setObject v (vcpuVTimer_update f vcpu) \\_. invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def valid_mdb'_def + valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def + supply fun_upd_apply[simp del] + apply (wpsimp wp: setObject_vcpu_no_tcb_update + [where f="\vcpu. 
vcpuVTimer_update f vcpu"] + sch_act_wf_lift tcb_in_cur_domain'_lift valid_queues_lift + setObject_state_refs_of' setObject_state_hyp_refs_of' valid_global_refs_lift' + valid_irq_node_lift_asm [where Q=\] valid_irq_handlers_lift' + cteCaps_of_ctes_of_lift irqs_masked_lift ct_idle_or_in_cur_domain'_lift + valid_irq_states_lift' hoare_vcg_all_lift hoare_vcg_disj_lift + setObject_typ_at' cur_tcb_lift + setVCPU_VTimer_valid_arch' + simp: objBits_simps archObjSize_def vcpuBits_def pageBits_def + state_refs_of'_vcpu_empty state_hyp_refs_of'_vcpu_absorb) + apply (clarsimp simp: if_live_then_nonz_cap'_def obj_at'_real_def) + apply (fastforce simp: ko_wp_at'_def) + done + +lemma vcpuWriteReg_invs'[wp]: + "vcpuWriteReg vcpu_ptr r v \invs'\" + by (wpsimp simp: vcpuWriteReg_def vcpuUpdate_def wp: setVCPU_regs_invs') + +lemma vcpuSaveReg_invs'[wp]: + "vcpuSaveReg v r \invs'\" + by (wpsimp simp: vcpuSaveReg_def vcpuUpdate_def wp: setVCPU_regs_invs') + +lemma saveVirtTimer_invs'[wp]: + "saveVirtTimer vcpu_ptr \invs'\" + unfolding saveVirtTimer_def + by (wpsimp wp: dmo'_gets_wp setVCPU_vgic_invs' setVCPU_regs_invs' dmo_maskInterrupt_True + setVCPU_VTimer_invs' + simp: doMachineOp_bind vcpuUpdate_def read_cntpct_def check_export_arch_timer_def) + +lemma vcpuDisable_invs'[wp]: + "vcpuDisable v \invs'\" + unfolding vcpuDisable_def isb_def setHCR_def setSCTLR_def set_gic_vcpu_ctrl_hcr_def + getSCTLR_def get_gic_vcpu_ctrl_hcr_def dsb_def vgicUpdate_def vcpuUpdate_def + vcpuSaveReg_def enableFpuEL01_def + by (wpsimp wp: dmo'_gets_wp setVCPU_vgic_invs' setVCPU_regs_invs' dmo_maskInterrupt_True + simp: doMachineOp_bind empty_fail_cond) + +lemma vcpuInvalidateActive_invs'[wp]: + "vcpuInvalidateActive \invs'\" + unfolding vcpuInvalidateActive_def by wpsimp + +crunches + vcpuRestoreReg, vcpuRestoreRegRange, vcpuSaveReg, vcpuSaveRegRange, vgicUpdateLR, vcpuReadReg + for invs'[wp]: invs' + (wp: crunch_wps setVCPU_regs_invs' setVCPU_vgic_invs' simp: vcpuUpdate_def + ignore: doMachineOp vcpuUpdate) + +lemma restoreVirtTimer_invs'[wp]: + "restoreVirtTimer vcpu_ptr \ invs'\" + unfolding restoreVirtTimer_def + by (wpsimp wp: maskInterrupt_invs' getIRQState_wp dmo'_gets_wp dmo_machine_op_lift_invs' + simp: IRQ_def if_apply_def2 read_cntpct_def isIRQActive_def) + +lemma vcpuEnable_invs'[wp]: + "vcpuEnable v \ invs'\" + unfolding vcpuEnable_def + by (wpsimp | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuRestore_invs'[wp]: + "\invs'\ vcpuRestore v \\_. invs'\" + unfolding vcpuRestore_def + by (wpsimp simp: vcpuRestore_def uncurry_def split_def doMachineOp_mapM_x + wp: mapM_x_wp[OF _ subset_refl] + | subst doMachineOp_bind | rule empty_fail_bind)+ + +lemma vcpuSave_invs': + "\invs'\ vcpuSave v \\_. invs'\" + by (wpsimp simp: vcpuSave_def doMachineOp_mapM armvVCPUSave_def + get_gic_vcpu_ctrl_apr_def get_gic_vcpu_ctrl_vmcr_def + get_gic_vcpu_ctrl_hcr_def getSCTLR_def + wp: dmo'_gets_wp vgicUpdate_invs' mapM_x_wp[OF _ subset_refl]) + +lemma vcpuSwitch_invs'[wp]: + "\invs' and (case v of None \ \ | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x)\ + vcpuSwitch v \\_. 
invs'\" + apply (wpsimp simp: vcpuSwitch_def modifyArchState_def + wp: vcpuDisable_hyp[simplified pred_conj_def] vcpuSave_hyp[unfolded pred_conj_def] + dmo_vcpu_hyp vcpuSave_invs' + | strengthen invs'_armHSCurVCPU_update | simp)+ + apply (auto simp: invs'_def valid_state'_def valid_arch_state'_def pred_conj_def) + done + +lemma vcpuSwitch_invs_no_cicd'[wp]: + "\invs_no_cicd' and (case v of None \ \ | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x)\ + vcpuSwitch v \\_. invs_no_cicd'\" + apply (wpsimp simp: vcpuSwitch_def modifyArchState_def + wp: vcpuDisable_hyp[simplified pred_conj_def] vcpuSave_hyp[unfolded pred_conj_def] + gets_wp vcpuSave_invs_no_cicd' dmo_vcpu_hyp + | strengthen invs_no_cicd'_armHSCurVCPU_update | simp)+ + apply (auto simp: invs_no_cicd'_def valid_state'_def valid_arch_state'_def pred_conj_def) + done + +crunches loadVMID + for inv: P + +lemma updateASIDPoolEntry_valid_arch_state'[wp]: + "updateASIDPoolEntry f asid \valid_arch_state'\" + unfolding updateASIDPoolEntry_def + by (wpsimp wp: getObject_inv simp: loadObject_default_def) + +crunches armContextSwitch, setGlobalUserVSpace + for valid_arch_state'[wp]: valid_arch_state' + +(* FIXME AARCH64 consolidated VCPU block ends here *) + +lemma setVMRoot_valid_arch_state'[wp]: + "\valid_arch_state' and live_vcpu_at_tcb p\ + setVMRoot p + \\rv. valid_arch_state'\" + apply (simp add: setVMRoot_def getThreadVSpaceRoot_def) + apply ((wpsimp wp: hoare_vcg_ex_lift hoare_drop_imps + getObject_tcb_wp valid_case_option_post_wp' + simp: if_apply_def2 + | wp hoare_vcg_all_lift)+) + done + +crunches setVMRoot + for ksQ[wp]: "\s. P (ksReadyQueues s)" + (simp: updateObject_default_def o_def loadObject_default_def if_apply_def2 + wp: crunch_wps getObject_inv) + +lemma handleVMFault_corres: + "corres (fr \ dc) (tcb_at thread and pspace_aligned and pspace_distinct) \ + (handle_vm_fault thread fault) (handleVMFault thread fault)" + supply if_split[split del] + apply (rule corres_cross_over_guard[where Q="tcb_at' thread"]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (simp add: AARCH64_H.handleVMFault_def handle_vm_fault_def) + apply (cases fault) + (* ARMDataAbort *) + apply (simp add: curVCPUActive_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE, simp, + rule corres_machine_op[where r="(=)"], + rule corres_Id refl, rule refl, simp, simp)+ + (* only do S1 translation if current VCPU active *) + apply (simp add: bind_liftE_distrib bindE_assoc) + apply (rule corres_splitEE[OF corres_liftE_lift[OF corres_gets_current_vcpu]]) + apply (clarsimp simp: liftE_return_bindE bindE_assoc) + apply (rule corres_split_eqrE[OF corres_if]) + apply fastforce + apply (rule corres_split_eqrE, simp) + apply (rule corres_returnOkTT, simp) + apply simp + apply (rule corres_splitEE, simp, + rule corres_machine_op[where r="(=)"], + rule corres_Id refl, rule refl, simp, simp)+ + apply (rule corres_returnOkTT, simp) + apply wpsimp+ + apply (rule corres_returnOkTT, simp) + apply (rule corres_trivial) + apply simp + apply (wpsimp simp: if_apply_def2)+ + (* ARMPrefetchAbort *) + apply (simp add: curVCPUActive_def) + apply (rule corres_guard_imp) + apply (rule corres_splitEE,simp) + apply (rule asUser_corres') + apply (rule corres_no_failI [where R="(=)"]) + apply (rule no_fail_getRestartPC) + apply fastforce + apply (rule corres_splitEE,simp, + rule corres_machine_op [where r="(=)"], + rule corres_Id refl, rule refl, simp, simp)+ + (* only do S1 translation if current VCPU active *) + apply (simp add: bind_liftE_distrib bindE_assoc) 
+ apply (rule corres_splitEE[OF corres_liftE_lift[OF corres_gets_current_vcpu]]) + apply (clarsimp simp: liftE_return_bindE bindE_assoc) + apply (rule corres_split_eqrE[OF corres_if]) + apply fastforce + apply (rule corres_split_eqrE, simp) + apply (rule corres_returnOkTT, simp) + apply simp + apply (rule corres_splitEE, simp, + rule corres_machine_op[where r="(=)"], + rule corres_Id refl, rule refl, simp, simp)+ + apply (rule corres_returnOkTT, simp) + apply wpsimp+ + apply (rule corres_returnOkTT, simp) + apply (rule corres_trivial, simp) + apply (wpsimp simp: if_apply_def2)+ + done + +crunches findFreeVMID, loadVMID + for no_0_obj'[wp]: no_0_obj' + (wp: crunch_wps getObject_inv simp: o_def loadObject_default_def) + +lemma mask_is_asid_low_bits_of[simp]: + "(ucast asid :: machine_word) && mask asid_low_bits = ucast (asid_low_bits_of asid)" + by (word_eqI_solve simp: asid_low_bits_of_def asid_low_bits_def) + +declare corres_gets_asid[corres] (* FIXME AARCH64: declare at origin *) +declare getPoolPtr_corres[corres] (* FIXME AARCH64: declare at origin *) +declare getObject_ASIDPool_corres[corres] (* FIXME AARCH64: declare at origin *) + +lemma getASIDPoolEntry_corres'[corres]: + "asid' = ucast asid \ + corres (\r r'. r = map_option abs_asid_entry r') + (\s. pspace_aligned s \ pspace_distinct s \ + (\p. pool_for_asid asid s = Some p \ asid_pool_at p s) \ 0 < asid) + \ + (gets (entry_for_asid asid)) + (getASIDPoolEntry asid')" + unfolding entry_for_asid_def getASIDPoolEntry_def + apply (clarsimp simp: gets_obind_bind_eq entry_for_pool_def obind_comp_dist + cong: option.case_cong) + apply (corres corres: getPoolPtr_corres | corres_cases_both)+ + apply (rule monadic_rewrite_corres_l) + apply (monadic_rewrite_l gets_oapply_liftM_rewrite) + apply (rule monadic_rewrite_refl) + apply (corres simp: liftM_def asid_pool_relation_def asid_pools_at_eq corres: corres_returnTT + | corres_cases)+ + done + +lemma getASIDPoolEntry_get_the_corres[corres]: + "asid' = ucast asid \ + corres (\r r'. map_option abs_asid_entry r' = Some r) + (\s. pspace_aligned s \ pspace_distinct s \ entry_for_asid asid s \ None \ 0 < asid) + \ + (gets_the (entry_for_asid asid)) + (getASIDPoolEntry asid')" + apply (simp add: gets_the_def cong: corres_weak_cong) + apply (rule corres_bind_return2) + apply (corres simp: entry_for_asid_def entry_for_pool_def in_omonad obj_at_def) + done + +lemma loadVMID_corres[corres]: + "asid' = ucast asid \ + corres (=) + (pspace_aligned and pspace_distinct and (\s. vspace_for_asid asid s \ None)) + \ + (load_vmid asid) (loadVMID asid')" + unfolding load_vmid_def loadVMID_def + apply corres + apply (corres_cases, rule corres_inst[where P=\ and P'=\], clarsimp) + apply (corres_cases, rule corres_returnTT, clarsimp simp: abs_asid_entry_def) + apply wpsimp+ + apply (clarsimp simp: vspace_for_asid_def) + apply clarsimp + done + +lemma updateASIDPoolEntry_corres[corres]: + assumes eq: "asid' = ucast asid" + assumes abs: "\e. map_option abs_asid_entry (f' e) = f (abs_asid_entry e)" + shows "corres dc + ((\s. 
entry_for_asid asid s \ None \ 0 < asid) + and pspace_aligned and pspace_distinct) + \ + (update_asid_pool_entry f asid) + (updateASIDPoolEntry f' asid')" + unfolding update_asid_pool_entry_def updateASIDPoolEntry_def + apply (simp add: gets_the_def bind_assoc eq) + apply (corres simp: liftM_def + term_simp: asid_pool_relation_def asid_low_bits_of_def + mask_asid_low_bits_ucast_ucast ucast_ucast_mask2 + is_down ucast_and_mask) + apply (rule ext) + apply (clarsimp simp: asid_pool_relation_def asid_low_bits_of_def + mask_asid_low_bits_ucast_ucast ucast_ucast_mask2 + is_down ucast_and_mask abs) + apply (erule notE) + apply word_eqI_solve + apply wpsimp+ + apply (clarsimp simp: entry_for_asid_def entry_for_pool_def asid_pools_at_eq) + apply simp + done + +lemma gets_armKSVMIDTable_corres[corres]: + "corres (\t t'. t' = map_option UCAST(16 \ 64) \ t) + \ \ + (gets (arm_vmid_table \ arch_state)) (gets (armKSVMIDTable \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma storeVMID_corres[corres]: + "\ asid' = ucast asid; vmid' = vmid \ \ + corres dc + (pspace_aligned and pspace_distinct and (\s. vspace_for_asid asid s \ None)) + \ + (store_vmid asid vmid) (storeVMID asid' vmid')" + unfolding store_vmid_def storeVMID_def + apply (corres simp: abs_asid_entry_def corres: corres_modify_tivial) + apply (fastforce simp: state_relation_def arch_state_relation_def) + apply wpsimp+ + apply (clarsimp simp: vspace_for_asid_def) + apply simp + done + +lemma invalidateASID_corres[corres]: + "asid' = ucast asid \ + corres dc + ((\s. entry_for_asid asid s \ None \ 0 < asid) and pspace_aligned and pspace_distinct) + \ + (invalidate_asid asid) (invalidateASID asid')" + unfolding invalidate_asid_def invalidateASID_def + by (corres simp: abs_asid_entry_def entry_for_asid_def) + +lemma gets_armKSNextVMID_corres[corres]: + "corres (=) \ \ + (gets (arm_next_vmid \ arch_state)) (gets (armKSNextVMID \ ksArchState))" + by (simp add: state_relation_def arch_state_relation_def) + +lemma take_vmid_minBound_maxBound: + "take (length [minBound .e. maxBound :: vmid]) + ([next_vmid .e. maxBound] @ [minBound .e. next_vmid]) + = [next_vmid .e. maxBound] @ init [minBound .e. next_vmid]" + for next_vmid :: vmid + using leq_maxBound[where x=next_vmid] + by (simp add: word_le_nat_alt init_def upto_enum_word minBound_word) + +(* FIXME AARCH64: move to SubMonad *) +lemmas corres_machine_op_Id = corres_machine_op[OF corres_Id] +lemmas corres_machine_op_Id_eq[corres_term] = corres_machine_op_Id[where r="(=)"] +lemmas corres_machine_op_Id_dc[corres_term] = corres_machine_op_Id[where r="dc::unit \ unit \ bool"] + +lemma invalidateVMIDEntry_corres[corres]: + "vmid' = vmid \ + corres dc \ \ (invalidate_vmid_entry vmid) (invalidateVMIDEntry vmid')" + unfolding invalidate_vmid_entry_def invalidateVMIDEntry_def + by (corres' \fastforce simp: state_relation_def arch_state_relation_def\ + corres: corres_modify_tivial) + +lemma valid_vmid_tableD: + "\ valid_vmid_table s; vmid_table s vmid = Some asid \ \ 0 < asid" + apply (subgoal_tac "asid \ 0") + apply (simp add: word_neq_0_conv) + apply (fastforce simp: valid_vmid_table_def) + done + +lemma findFreeVMID_corres[corres]: + "corres (=) + (vmid_inv and valid_vmid_table and pspace_aligned and pspace_distinct) + \ + find_free_vmid findFreeVMID" + unfolding find_free_vmid_def findFreeVMID_def + apply (simp only: take_vmid_minBound_maxBound) + apply corres + apply corres_cases_both (* case find .. 
of *) + (* Only None case left over *) + apply corres + apply (clarsimp dest!: findNoneD) + apply (drule bspec, rule UnI1, simp, rule order_refl) + apply clarsimp + apply (corres corres: corres_modify_tivial (* FIXME AARCH64: fix typo *) + simp: state_relation_def arch_state_relation_def maxBound_word minBound_word) + apply wpsimp+ + apply (clarsimp dest!: findNoneD) + apply (drule bspec, rule UnI1, simp, rule order_refl) + apply (clarsimp simp: vmid_inv_def) + apply (frule (1) valid_vmid_tableD) + apply (drule (1) is_inv_SomeD) + apply (clarsimp simp: entry_for_asid_def) + apply (clarsimp simp: vmid_for_asid_2_def in_omonad entry_for_pool_def pool_for_asid_def + if_option_eq) + apply simp + done + +lemma getVMID_corres[corres]: + "asid' = ucast asid \ + corres (=) + (vmid_inv and valid_vmid_table and pspace_aligned and pspace_distinct + and (\s. vspace_for_asid asid s \ None)) + \ + (get_vmid asid) (getVMID asid')" + unfolding get_vmid_def getVMID_def + by (corres wp: hoare_drop_imps simp: vspace_for_asid_def entry_for_asid_def | corres_cases_both)+ + +lemma armContextSwitch_corres[corres]: + "asid' = ucast asid \ + corres dc + (vmid_inv and valid_vmid_table and pspace_aligned and pspace_distinct + and (\s. vspace_for_asid asid s \ None)) + \ + (arm_context_switch pt asid) (armContextSwitch pt asid')" + unfolding arm_context_switch_def armContextSwitch_def + by corres + +lemma setVMRoot_corres [corres]: + assumes "t' = t" + shows "corres dc (tcb_at t and valid_vspace_objs and valid_asid_table and + vmid_inv and valid_vmid_table and pspace_aligned and pspace_distinct and + valid_objs and valid_global_arch_objs) + (no_0_obj') + (set_vm_root t) (setVMRoot t')" +proof - + have global: + "(\s. P s \ valid_global_arch_objs s) \ + corres dc P Q set_global_user_vspace setGlobalUserVSpace" for P Q + unfolding set_global_user_vspace_def setGlobalUserVSpace_def o_def[where g=arch_state] + by (corresKsimp corres: corres_gets_global_pt corres_machine_op) + + show ?thesis + unfolding set_vm_root_def setVMRoot_def catchFailure_def withoutFailure_def throw_def + apply (rule corres_cross_over_guard[where Q="no_0_obj' and pspace_distinct' and pspace_aligned'"]) + apply (clarsimp simp add: pspace_distinct_cross pspace_aligned_cross state_relation_def) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="(=) \ cte_map" and P=\ and P'=\]) + apply (simp add: getThreadVSpaceRoot_def locateSlotTCB_def locateSlotBasic_def + tcbVTableSlot_def cte_map_def objBits_def cte_level_bits_def + objBitsKO_def tcb_cnode_index_def to_bl_1 assms cteSizeBits_def) + apply (rule_tac R="\thread_root. valid_vspace_objs and valid_asid_table and vmid_inv and + valid_vmid_table and pspace_aligned and pspace_distinct and + valid_objs and valid_global_arch_objs and + cte_wp_at ((=) thread_root) thread_root_slot and + tcb_at (fst thread_root_slot) and + K (snd thread_root_slot = tcb_cnode_index 1)" + and R'="\thread_root. no_0_obj'" + in corres_split[OF getSlotCap_corres]) + apply simp + apply simp + apply (rename_tac cap cap') + apply (rule_tac Q="no_0_obj' and (\_. isValidVTableRoot cap' \ cap' = NullCap)" + in corres_cross_over_guard) + apply clarsimp + apply (drule (1) tcb_cap_wp_at[where ref="tcb_cnode_index 1" and + Q="\cap. is_valid_vtable_root cap \ cap=Structures_A.NullCap"]) + apply (simp add: tcb_cap_cases_def) + apply clarsimp + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (erule disjE; simp?) 
+ apply (clarsimp simp: is_valid_vtable_root_def + split: cap.splits arch_cap.splits option.splits pt_type.splits) + apply (simp add: isValidVTableRoot_def isVTableRoot_def) + apply (rule corres_guard_imp) + apply (rule_tac P="valid_vspace_objs and valid_asid_table and pspace_aligned and + valid_vmid_table and vmid_inv and pspace_distinct and valid_objs and + valid_global_arch_objs and cte_wp_at ((=) cap) thread_root_slot" + in corres_assert_gen_asm2) + prefer 3 + apply assumption + apply (case_tac cap; clarsimp simp: isCap_simps catch_throwError intro!: global) + apply (rename_tac acap acap') + apply (case_tac acap; clarsimp simp: isCap_simps catch_throwError intro!: global) + apply (rename_tac pt_t m) + apply (case_tac pt_t; clarsimp simp: isCap_simps catch_throwError intro!: global) + apply (case_tac m; clarsimp simp: isCap_simps catch_throwError intro!: global) + apply (rule corres_guard_imp) + apply (rule corres_split_catch [where f=lfr and E'="\_. \"]) + apply (rule corres_split_eqrE[OF findVSpaceForASID_corres[OF refl]]) + apply (rule whenE_throwError_corres; simp add: lookup_failure_map_def) + apply (rule armContextSwitch_corres) + apply (wpsimp wp: find_vspace_for_asid_wp findVSpaceForASID_inv hoare_drop_imps)+ + apply (rule global, assumption) + apply wpsimp+ + apply (frule (1) cte_wp_at_valid_objs_valid_cap) + apply (clarsimp simp: valid_cap_def mask_def wellformed_mapdata_def) + apply (wpsimp wp: get_cap_wp simp: getThreadVSpaceRoot_def)+ + apply (auto dest!: tcb_at_cte_at_1) + done +qed + +lemma dMo_no_0_obj'[wp]: + "doMachineOp f \no_0_obj'\" + apply (simp add: doMachineOp_def split_def) + apply wp + by (simp add: no_0_obj'_def) + +lemma dMo_riscvKSASIDTable_inv[wp]: + "doMachineOp f \\s. P (armKSASIDTable (ksArchState s))\" + apply (simp add: doMachineOp_def split_def) + apply wp + by (clarsimp) + +lemma dMo_valid_arch_state'[wp]: + "\\s. P (valid_arch_state' s)\ doMachineOp f \\_ s. P (valid_arch_state' s)\" + apply (simp add: doMachineOp_def split_def) + apply wp + by (clarsimp) + +crunches vcpuDisable, vcpuEnable, vcpuSave, vcpuRestore, deleteASID + for no_0_obj'[wp]: no_0_obj' + (simp: crunch_simps wp: crunch_wps getObject_inv getObject_inv_vcpu loadObject_default_inv) + +lemma asid_high_bits_of_ucast_ucast[simp]: + "asid_high_bits_of (ucast (ucast asid :: machine_word)) = asid_high_bits_of asid" + by (simp add: ucast_down_ucast_id is_down) + +lemma invalidateTLBByASID_corres[corres]: + "asid' = ucast asid \ + corres dc + (pspace_aligned and pspace_distinct and (\s. vspace_for_asid asid s \ None)) + \ + (invalidate_tlb_by_asid asid) (invalidateTLBByASID asid')" + unfolding invalidate_tlb_by_asid_def invalidateTLBByASID_def + apply corres + (* when vs case .. of *) + apply (corres_cases; (solves \rule corres_inst[where P=\ and P'=\], clarsimp\)?) + (* when-True case *) + apply (clarsimp, corres) + apply wpsimp+ + done + +lemma invalidate_vmid_entry_entry_for_asid[wp]: + "invalidate_vmid_entry vmid \\s. P (entry_for_asid asid s)\" + unfolding invalidate_vmid_entry_def + by wpsimp + +lemma invalidateASIDEntry_corres[corres]: + "asid' = ucast asid \ + corres dc + (pspace_aligned and pspace_distinct and (\s. 
vspace_for_asid asid s \ None)) + \ + (invalidate_asid_entry asid) (invalidateASIDEntry asid')" + unfolding invalidate_asid_entry_def invalidateASIDEntry_def + by (corres simp: vspace_for_asid_def) + +lemma deleteASID_corres [corres]: + assumes "asid' = ucast asid" "pm' = pm" + shows "corres dc (invs and K (asid \ 0)) no_0_obj' + (delete_asid asid pm) (deleteASID asid' pm')" + unfolding delete_asid_def deleteASID_def using assms + apply simp + apply (corres simp: liftM_def | corres_cases_both)+ + apply (simp add: mask_asid_low_bits_ucast_ucast asid_low_bits_of_def ucast_ucast_a is_down + asid_pool_relation_def abs_asid_entry_def split: option.splits) + apply corres + apply (rule ext) + apply (clarsimp simp: mask_asid_low_bits_ucast_ucast asid_low_bits_of_def + ucast_ucast_a is_down asid_pool_relation_def) + apply (erule notE) + apply word_eqI_solve + apply (corres corres: getCurThread_corres) + apply (wpsimp simp: cur_tcb_def[symmetric] + wp: set_asid_pool_None_vmid_inv set_asid_pool_vspace_objs_unmap_single) + apply (wp getASID_wp)+ + apply (rename_tac p pool pool' a b) + apply (rule_tac Q="\_ s. invs s \ + (\high. asid_table s high = Some p \ + vmid_for_asid s (asid_of high (asid_low_bits_of asid)) = + None)" in hoare_strengthen_post) + apply (wp hoare_vcg_ex_lift invalidate_asid_entry_vmid_for_asid) + apply (fastforce simp: asid_pools_at_eq ako_asid_pools_of) + apply (wp hoare_drop_imp hoare_vcg_all_lift) + apply (wp invalidate_tlb_by_asid_invs hoare_vcg_ex_lift) + apply wp + apply (clarsimp, wp) + apply (wp getASID_wp) + apply wp + apply (wp hoare_vcg_all_lift hoare_drop_imp) + apply (fastforce simp: pool_for_asid_def vspace_for_asid_def entry_for_asid_def word_neq_0_conv + entry_for_pool_def in_omonad + intro!: pool_for_asid_ap_at) + apply simp + done + +lemma valid_arch_state_unmap_strg': + "valid_arch_state' s \ + valid_arch_state' (s\ksArchState := + armKSASIDTable_update (\_. (armKSASIDTable (ksArchState s))(ptr := None)) + (ksArchState s)\)" + apply (simp add: valid_arch_state'_def valid_asid_table'_def) + apply (auto simp: ran_def split: if_split_asm option.splits) + done + +crunch armKSASIDTable_inv[wp]: invalidateASIDEntry + "\s. P (armKSASIDTable (ksArchState s))" + (wp: getObject_inv crunch_wps simp: loadObject_default_def) + +lemma is_aligned_asid_low_bits_of_zero: + "is_aligned asid asid_low_bits \ asid_low_bits_of asid = 0" + apply (simp add: is_aligned_mask word_eq_iff word_size asid_bits_defs asid_bits_of_defs nth_ucast) + apply (intro iffI allI; drule_tac x=n in spec; fastforce) + done + +lemma asid_high_bits_of_0[simp]: + "asid_high_bits_of 0 = 0" + by (simp add: asid_high_bits_of_def) + +lemma asid_low_bits_of_0[simp]: + "asid_low_bits_of 0 = 0" + by (simp add: asid_low_bits_of_def) + +lemma invalidate_asid_entry_asid_pool_doms[wp]: + "invalidate_asid_entry asid \\s. 
P (asid_pools_of s ||> dom)\" + unfolding invalidate_asid_entry_def invalidate_asid_def invalidate_vmid_entry_def + apply wpsimp + apply (fastforce simp: opt_map_def split: option.splits elim!: rsubst[where P=P]) + done + +declare getCurThread_corres[corres] (* FIXME AARCH64: declare at origin *) + +lemma valid_asid_table_None_upd: + "valid_asid_table_2 table pools \ valid_asid_table_2 (table(idx := None)) pools" + unfolding valid_asid_table_2_def + by (auto simp: ran_def inj_on_def) + +lemma asid_low_le_mask_asidBits[simp]: + "UCAST(asid_low_len \ asid_len) asid_low \ mask asid_low_bits" + by (rule ucast_leq_mask, simp add: asid_low_bits_def) + +lemma ucast_eq_from_zip_asid_low_bits: + "\(x, y) \ set (zip [0 .e. mask asid_low_bits] [0 .e. mask asid_low_bits]); + is_aligned asid asid_low_bits\ + \ ucast asid + y = ucast (asid + x)" for asid :: AARCH64_A.asid + apply (clarsimp simp: in_set_zip upto_enum_word_nth) + apply (subst add.commute[where a=asid]) + apply (drule nat_le_Suc_less_imp)+ + apply (simp add: ucast_add_mask_aligned[where n=asid_low_bits] mask_def word_le_nat_alt + asid_low_bits_def unat_of_nat_eq ucast_of_nat is_down ucast_of_nat_small) + done + +lemma deleteASIDPool_corres: + assumes "base' = ucast base" "ptr' = ptr" + shows "corres dc (invs and K (is_aligned base asid_low_bits) and asid_pool_at ptr) + (no_0_obj') + (delete_asid_pool base ptr) (deleteASIDPool base' ptr)" + using assms + apply (simp add: delete_asid_pool_def deleteASIDPool_def) + apply (corres simp: liftM_def mapM_discarded) + apply corres_split (* deal with mapM_x manually *) + apply (rule_tac P="\s. invs s \ pool_for_asid base s = Some ptr \ + (asid_pools_of s ||> dom) ptr = Some (dom pool) \ + is_aligned base asid_low_bits" + and P'="no_0_obj'" in corres_mapM_x') + (* mapM_x body *) + apply corres + (* "when" condition *) + apply (clarsimp simp: asid_pool_relation_def in_set_zip upto_enum_word_nth) + apply (simp add: ucast_of_nat is_down asid_low_bits_def ucast_of_nat_small) + apply (rule corres_gen_asm[where F="is_aligned base asid_low_bits"]) + apply (corres term_simp: ucast_eq_from_zip_asid_low_bits mask_def) + apply clarsimp + apply (rename_tac low low' s s' entry) + apply (clarsimp simp: vspace_for_asid_def entry_for_asid_def pool_for_asid_def + in_omonad asid_high_bits_of_add asid_low_bits_of_add + mask_def entry_for_pool_def + dest!: set_zip_leftD) + apply (rule conjI, fastforce) + apply (clarsimp simp flip: word_neq_0_conv mask_2pm1) + apply (prop_tac "valid_asid_map s", fastforce) + apply (prop_tac "base = 0 \ low = 0") + apply (simp add: asid_low_bits_def) + apply (subst (asm) word_plus_and_or_coroll, word_eqI, force) + apply (fastforce simp: word_or_zero) + apply (clarsimp simp: valid_asid_map_def entry_for_asid_def obind_None_eq + pool_for_asid_def entry_for_pool_def in_omonad) + apply blast + apply fastforce + apply (wpsimp wp: invalidate_tlb_by_asid_invs)+ + apply (simp add: mask_def asid_low_bits_def) + apply (corres' \fastforce simp: asid_high_bits_of_def asid_low_bits_def up_ucast_inj_eq + state_relation_def arch_state_relation_def\ + corres: corres_modify_tivial) + (* mapM_x wp conditions *) + apply (rename_tac table table' pool pool') + apply (rule hoare_strengthen_post) + apply (rule_tac I="\s. invs s \ is_aligned base asid_low_bits \ table = asid_table s \ + pool_for_asid base s = Some ptr \ + (asid_pools_of s ||> dom) ptr = Some (dom pool)" and + V="\xs s. \asid_low \ set xs. 
+ vmid_for_asid s (asid_of (asid_high_bits_of base) + (ucast asid_low)) = None" + in mapM_x_inv_wp3) + apply (wpsimp wp: invalidate_asid_entry_vmid_for_asid_add hoare_vcg_op_lift + invalidate_tlb_by_asid_invs) + apply (rule conjI; clarsimp) + apply (drule arg_cong[where f=set], drule sym[where t="set xs" for xs]) + apply fastforce + apply (clarsimp simp: vmid_for_asid_def obind_None_eq) + apply (rule ccontr) + apply (clarsimp simp: entry_for_pool_def in_omonad pool_for_asid_def) + apply (fastforce dest: dom_eq_All) + (* mapM_x invariant implies post condition; + some manual massaging to avoid massive duplication *) + apply (simp (no_asm) del: fun_upd_apply) + apply (strengthen invs_vmid_inv invs_valid_global_arch_objs invs_implies + invs_valid_vmid_table valid_asid_table_None_upd) + (* can't move these into previous strengthen, otherwise will be applied too early *) + apply (strengthen invs_arm_asid_table_unmap invs_valid_asid_table) + apply (clarsimp simp: o_def) + apply (rename_tac asid_low) + apply (erule_tac x="ucast asid_low" in allE) + apply (fastforce simp: ucast_up_ucast_id is_up) + apply (wpsimp wp: mapM_x_wp' getASID_wp)+ + apply (fastforce simp: is_aligned_asid_low_bits_of_zero pool_for_asid_def in_omonad) + apply (clarsimp simp: is_aligned_asid_low_bits_of_zero) + done + +crunch typ_at' [wp]: setVMRoot "\s. P (typ_at' T p s)" + (simp: crunch_simps loadObject_default_def wp: crunch_wps getObject_inv) + +lemmas setVMRoot_typ_ats [wp] = typ_at_lifts [OF setVMRoot_typ_at'] + +lemma getObject_PTE_corres'': + assumes "p' = p" + shows "corres pte_relation' (pte_at pt_t p and pspace_aligned and pspace_distinct) \ + (get_pte pt_t p) (getObject p')" + using assms getObject_PTE_corres by simp + +crunches unmapPageTable, unmapPage + for aligned'[wp]: "pspace_aligned'" + and distinct'[wp]: "pspace_distinct'" + and ctes [wp]: "\s. P (ctes_of s)" + and typ_at'[wp]: "\s. P (typ_at' T p s)" + (simp: crunch_simps + wp: crunch_wps getObject_inv loadObject_default_inv) + +crunches storePTE + for no_0_obj'[wp]: no_0_obj' + and valid_arch'[wp]: valid_arch_state' + and cur_tcb'[wp]: cur_tcb' + +lemma unmapPageTable_corres: + assumes "asid' = ucast asid" "vptr' = vptr" "pt' = pt" + shows "corres dc + (invs and (\s. vspace_for_asid asid s \ Some pt) and K (0 < asid \ vptr \ user_region)) + no_0_obj' + (unmap_page_table asid vptr pt) + (unmapPageTable asid' vptr' pt')" + apply (clarsimp simp: assms unmap_page_table_def unmapPageTable_def ignoreFailure_def const_def) + apply (corres corres: findVSpaceForASID_corres lookupPTFromLevel_corres storePTE_corres' + corres_returnTT + wp: pt_lookup_from_level_wp + | corres_cases_left)+ + apply (fastforce simp: pte_at_def dest: vspace_for_asid_vs_lookup) + apply simp + done + +(* FIXME AARCH64: move (all arches) *) +lemma corres_split_strengthen_ftE: + "\ corres (ftr \ r') P P' f j; + \rv rv'. r' rv rv' \ corres (ftr' \ r) (R rv) (R' rv') (g rv) (k rv'); + \Q\ f \R\,-; \Q'\ j \R'\,- \ + \ corres (dc \ r) (P and Q) (P' and Q') (f >>=E (\rv. g rv)) (j >>=E (\rv'. 
k rv'))" + apply (rule_tac r'=r' in corres_splitEE) + apply (erule corres_rel_imp) + apply (case_tac x, auto)[1] + apply (rule corres_rel_imp, assumption) + apply (case_tac x, auto)[1] + apply (simp add: validE_R_def)+ + done + +lemma checkMappingPPtr_corres: + "\ pte_relation' pte pte'; pptr' = pptr \ \ + corres (lfr \ dc) \ \ + (whenE (AARCH64_A.is_PagePTE pte \ pptr_from_pte pte \ pptr) + (throwError ExceptionTypes_A.InvalidRoot)) + (checkMappingPPtr pptr' pte')" + apply (simp add: liftE_bindE checkMappingPPtr_def) + apply (cases pte; simp add: pte_base_addr_def pptr_from_pte_def) + apply (auto simp: whenE_def unlessE_def corres_returnOk lookup_failure_map_def) + done + +crunch inv[wp]: checkMappingPPtr "P" + (wp: crunch_wps loadObject_default_inv simp: crunch_simps) + +lemmas liftE_get_pte_corres = getObject_PTE_corres[THEN corres_liftE_rel_sum[THEN iffD2]] + +lemma invalidateTLBByASIDVA_corres[corres]: + "\ asid' = ucast asid; vptr' = vptr \ \ + corres dc + (pspace_aligned and pspace_distinct and (\s. vspace_for_asid asid s \ None)) + \ + (invalidate_tlb_by_asid_va asid vptr) (invalidateTLBByASIDVA asid' vptr')" + unfolding invalidate_tlb_by_asid_va_def invalidateTLBByASIDVA_def + by (corres term_simp: wordBits_def word_bits_def word_size + | corres_cases_left + | rule corres_inst[where P=\ and P'=\], clarsimp)+ + +crunches lookupPTSlot + for inv: "P" + +lemma unmapPage_corres[corres]: + assumes "sz' = sz" "asid' = ucast asid" "vptr' = vptr" "pptr' = pptr" + shows "corres dc (invs and K (valid_unmap sz (asid,vptr) \ vptr \ user_region)) + (no_0_obj') + (unmap_page sz asid vptr pptr) + (unmapPage sz' asid' vptr' pptr')" + apply (clarsimp simp: assms unmap_page_def unmapPage_def ignoreFailure_def const_def) + apply (corres corres: findVSpaceForASID_corres lookupPTSlot_corres[@lift_corres_args] + getObject_PTE_corres' checkMappingPPtr_corres corres_returnTT + simp: lookup_failure_map_def + wp: hoare_drop_imp lookupPTSlot_inv + | corres_cases_both)+ + apply (clarsimp simp: valid_unmap_def cong: conj_cong) + apply (fastforce dest: vspace_for_asid_vs_lookup pt_lookup_slot_vs_lookup_slotI + intro: vs_lookup_slot_pte_at) + apply simp + done + +definition + "mapping_map \ \(pte, r, level) (pte', r'). pte_relation' pte pte' \ r' = r" + +definition + "page_invocation_map pgi pgi' \ case pgi of + AARCH64_A.PageMap c slot m \ + \c' m'. pgi' = PageMap c' (cte_map slot) m' \ + acap_relation c c' \ + mapping_map m m' + | AARCH64_A.PageUnmap c ptr \ + \c'. 
pgi' = PageUnmap c' (cte_map ptr) \ + acap_relation c c' + | AARCH64_A.PageGetAddr ptr \ + pgi' = PageGetAddr ptr + | AARCH64_A.PageFlush type vstart vend pstart vs asid \ + pgi' = PageFlush type vstart vend pstart vs (ucast asid)" + +definition + "valid_page_inv' pgi \ + case pgi of + PageMap cap ptr m \ + cte_wp_at' (is_arch_update' (ArchObjectCap cap)) ptr and valid_cap' (ArchObjectCap cap) + | PageUnmap cap ptr \ + K (isFrameCap cap) and + cte_wp_at' (is_arch_update' (ArchObjectCap cap)) ptr and valid_cap' (ArchObjectCap cap) + | PageGetAddr ptr \ \ + | PageFlush ty start end pstart space asid \ \" + +lemma message_info_to_data_eqv: + "wordFromMessageInfo (message_info_map mi) = message_info_to_data mi" + apply (cases mi) + apply (simp add: wordFromMessageInfo_def msgLengthBits_def msgExtraCapBits_def msgMaxExtraCaps_def shiftL_nat) + done + +lemma message_info_from_data_eqv: + "message_info_map (data_to_message_info rv) = messageInfoFromWord rv" + using shiftr_mask_eq[where 'a=64 and n=12] + by (auto simp: data_to_message_info_def messageInfoFromWord_def Let_def not_less + msgLengthBits_def msgExtraCapBits_def msgMaxExtraCaps_def mask_def + shiftL_nat msgMaxLength_def msgLabelBits_def) + +lemma setMessageInfo_corres: + "mi' = message_info_map mi \ + corres dc (tcb_at t and pspace_aligned and pspace_distinct) \ + (set_message_info t mi) (setMessageInfo t mi')" + apply (simp add: setMessageInfo_def set_message_info_def) + apply (subgoal_tac "wordFromMessageInfo (message_info_map mi) = + message_info_to_data mi") + apply (simp add: asUser_setRegister_corres msg_info_register_def + msgInfoRegister_def) + apply (simp add: message_info_to_data_eqv) + done + + +lemma set_mi_invs'[wp]: "\invs' and tcb_at' t\ setMessageInfo t a \\x. invs'\" + by (simp add: setMessageInfo_def) wp + +lemma set_mi_tcb' [wp]: + "\ tcb_at' t \ setMessageInfo receiver msg \\rv. tcb_at' t\" + by (simp add: setMessageInfo_def) wp + + +lemma setMRs_typ_at': + "\\s. P (typ_at' T p s)\ setMRs receiver recv_buf mrs \\rv s. P (typ_at' T p s)\" + by (simp add: setMRs_def zipWithM_x_mapM split_def, wp crunch_wps) + +lemmas setMRs_typ_at_lifts[wp] = typ_at_lifts [OF setMRs_typ_at'] + +lemma set_mrs_invs'[wp]: + "\ invs' and tcb_at' receiver \ setMRs receiver recv_buf mrs \\rv. 
invs' \" + apply (simp add: setMRs_def) + apply (wp dmo_invs' no_irq_mapM no_irq_storeWord crunch_wps| + simp add: zipWithM_x_mapM split_def)+ + done + +crunches unmapPage + for cte_at'[wp]: "cte_at' p" + (wp: crunch_wps simp: crunch_simps) + +lemma vs_lookup_slot_vspace_for_asidD: + "\ vs_lookup_slot level asid vref s = Some (level, slot); level \ max_pt_level; valid_asid_map s \ + \ vspace_for_asid asid s \ None" + by (fastforce simp: vs_lookup_slot_def vs_lookup_table_def vspace_for_asid_def in_omonad + valid_asid_map_def entry_for_asid_def vspace_for_pool_def obind_None_eq + simp flip: word_neq_0_conv + split: if_split_asm) + +lemma performPageInvocation_corres: + assumes "page_invocation_map pgi pgi'" + shows "corres dc (invs and valid_page_inv pgi) (no_0_obj' and valid_page_inv' pgi') + (perform_page_invocation pgi) (performPageInvocation pgi')" + apply (rule corres_cross_over_guard [where Q="no_0_obj' and valid_page_inv' pgi' and + pspace_aligned' and pspace_distinct'"]) + apply (fastforce intro!: pspace_aligned_cross pspace_distinct_cross) + using assms + unfolding perform_page_invocation_def performPageInvocation_def page_invocation_map_def + apply (cases pgi; clarsimp simp: valid_page_inv_def mapping_map_def) + apply (rename_tac cap ct_slot_ref ct_slot_idx pte slot level cap' pte') + apply (simp add: perform_pg_inv_map_def) + apply (corres corres: updateCap_same_master | fastforce | corres_cases)+ + apply (rule_tac F="arch_cap.is_FrameCap cap" in corres_gen_asm) + apply ((corres corres: corres_assert_opt_l simp: arch_cap.is_FrameCap_def + | corres_cases)+)[1] + apply clarsimp + apply (wp get_pte_wp hoare_drop_imp hoare_vcg_op_lift)+ + apply (clarsimp simp: invs_valid_objs invs_distinct invs_psp_aligned) + apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def is_cap_simps same_ref_def) + apply (frule (3) vs_lookup_slot_pte_at) + apply (clarsimp simp: cap_master_cap_def split: arch_cap.splits) + apply (fastforce dest!: vs_lookup_slot_vspace_for_asidD) + apply (clarsimp simp: valid_page_inv'_def cte_wp_at_ctes_of) + apply (simp add: perform_pg_inv_unmap_def) + apply (corres corres: corres_assert_gen_asm_l simp: liftM_def) + apply (corres_cases_both; (solves \rule corres_trivial, clarsimp simp: arch_cap.is_FrameCap_def\)?) 
+ apply (corres corres: getSlotCap_corres) + apply (rule_tac F="is_frame_cap old_cap" in corres_gen_asm) + apply (corres corres: updateCap_same_master + simp: is_frame_cap_def arch_cap.is_FrameCap_def update_map_data_def) + apply (wp get_cap_wp)+ + apply corres_cases_both + apply (corres simp: arch_cap.is_FrameCap_def corres: getSlotCap_corres) + apply (rule_tac F="is_frame_cap old_cap" in corres_gen_asm) + apply (corres corres: updateCap_same_master + simp: is_frame_cap_def arch_cap.is_FrameCap_def update_map_data_def) + apply (wpsimp wp: get_cap_wp hoare_vcg_op_lift)+ + apply (clarsimp simp: invs_valid_objs invs_psp_aligned invs_distinct) + apply (clarsimp simp: cte_wp_at_caps_of_state wellformed_pte_def + cap_master_cap_simps is_cap_simps update_map_data_def mdata_map_def + wellformed_mapdata_def valid_arch_cap_def) + apply (clarsimp simp: valid_page_inv'_def cte_wp_at_ctes_of) + apply (clarsimp simp: perform_pg_inv_get_addr_def fromPAddr_def) + apply (corres corres: setMRs_corres[@lift_corres_args] setMessageInfo_corres[@lift_corres_args] + simp: invs_valid_objs invs_psp_aligned invs_distinct) + apply (clarsimp simp: perform_flush_def) + apply (rename_tac type vstart vend pstart vs asid) + apply (case_tac type; + simp add: do_flush_def doFlush_def; + corres simp: doMachineOp_bind do_machine_op_bind empty_fail_bind) + done + +definition + "page_table_invocation_map pti pti' \ + case pti of + AARCH64_A.PageTableMap cap ptr pte pt_slot level \ + \cap' pte'. pti' = PageTableMap cap' (cte_map ptr) pte' pt_slot \ + cap_relation (Structures_A.ArchObjectCap cap) cap' \ + pte_relation' pte pte' + | AARCH64_A.PageTableUnmap cap ptr \ + \cap'. pti' = PageTableUnmap cap' (cte_map ptr) \ acap_relation cap cap'" + +definition + "valid_pti' pti \ + case pti of + PageTableMap cap slot pte pteSlot \ + cte_wp_at' (is_arch_update' cap) slot and valid_cap' cap + | PageTableUnmap cap slot \ + cte_wp_at' (is_arch_update' (ArchObjectCap cap)) slot and valid_cap' (ArchObjectCap cap) + and K (isPageTableCap cap)" + +(* extend with arch rules *) +lemmas store_pte_typ_ats[wp] = store_pte_typ_ats abs_atyp_at_lifts[OF store_pte_typ_at] + +lemma pte_bits_leq_pt_bits[simp, intro!]: + "pte_bits \ pt_bits pt_t" + by (simp add: bit_simps) + +lemma pt_bits_le_word_len[simplified, simp, intro!]: + "pt_bits pt_t < LENGTH(machine_word_len)" + by (simp add: bit_simps) + +lemma clear_page_table_corres: + "corres dc (pspace_aligned and pspace_distinct and pt_at pt_t p) + \ + (mapM_x (swp (store_pte pt_t) AARCH64_A.InvalidPTE) [p , p + 2^pte_bits .e. p + mask (pt_bits pt_t)]) + (mapM_x (swp storePTE AARCH64_H.InvalidPTE) [p , p + 2^pte_bits .e. p + mask (pt_bits pt_t)])" + apply (rule_tac F="is_aligned p (pt_bits pt_t)" in corres_req) + apply (clarsimp simp: obj_at_def a_type_def) + apply (clarsimp split: Structures_A.kernel_object.split_asm if_split_asm + arch_kernel_obj.split_asm) + apply (drule(1) pspace_alignedD) + apply (simp add: table_size_def pt_bits_def) + apply (simp add: mask_def flip: p_assoc_help) + apply (simp add: upto_enum_step_subtract[where x=p and y="p + 2^pte_bits"] + is_aligned_no_overflow + upto_enum_step_red[where us=pte_bits, simplified] + mapM_x_mapM liftM_def[symmetric]) + apply (rule corres_guard_imp, + rule_tac r'=dc and S="(=)" + and Q="\xs s. \x \ set xs. pte_at pt_t x s \ pspace_aligned s \ pspace_distinct s" + and Q'="\_. 
\" + in corres_mapM_list_all2, simp_all) + apply (rule corres_guard_imp, rule storePTE_corres) + apply (simp add:pte_relation_def)+ + apply (wp hoare_vcg_const_Ball_lift | simp)+ + apply (simp add: list_all2_refl) + apply (clarsimp simp: upto_enum_step_def pte_bits_def word_size_bits_def) + apply (erule page_table_pte_atI[simplified shiftl_t2n mult.commute bit_simps, simplified]) + apply (simp add: bit_simps word_less_nat_alt word_le_nat_alt unat_of_nat) + apply simp + done + +lemmas unmapPageTable_typ_ats[wp] = typ_at_lifts[OF unmapPageTable_typ_at'] + +lemma performPageTableInvocation_corres: + "page_table_invocation_map pti pti' \ + corres dc + (invs and valid_pti pti) (no_0_obj' and valid_pti' pti') + (perform_page_table_invocation pti) + (performPageTableInvocation pti')" + apply (rule corres_cross_over_guard [where Q="no_0_obj' and valid_pti' pti' and + pspace_aligned' and pspace_distinct'"]) + apply (fastforce intro!: pspace_aligned_cross pspace_distinct_cross) + apply (simp add: perform_page_table_invocation_def performPageTableInvocation_def + page_table_invocation_map_def) + apply (cases pti) + apply (rename_tac cap slot pte pte_slot) + apply (clarsimp simp: perform_pt_inv_map_def) + apply (rule corres_name_pre) + apply (clarsimp simp: valid_pti_def valid_pti'_def + split: arch_cap.splits capability.split_asm arch_capability.split_asm) + apply (rule corres_guard_imp) + apply (rule corres_split[OF updateCap_same_master]) + apply simp + apply (rule corres_split[OF storePTE_corres]) + apply assumption + apply (rule corres_machine_op, rule corres_Id; simp) + apply wpsimp+ + apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def + invs_valid_objs invs_psp_aligned invs_distinct) + apply (case_tac cap; simp add: is_cap_simps cap_master_cap_simps) + apply (clarsimp simp: cte_wp_at_ctes_of valid_pti'_def) + apply (clarsimp simp: perform_pt_inv_unmap_def) + apply (rename_tac acap a b acap') + apply (rule_tac F="AARCH64_A.is_PageTableCap acap" in corres_req; clarsimp) + apply (clarsimp simp: valid_pti_def) + apply (clarsimp simp: AARCH64_A.is_PageTableCap_def split_def cong: option.case_cong) + apply (simp add: case_option_If2 split del: if_split) + apply (rule corres_guard_imp) + apply (rule corres_split_nor) + apply (rule corres_if3) + apply (fastforce simp: acap_map_data_def mdata_map_def is_PageTableCap_def) + apply (rule corres_split[OF unmapPageTable_corres]) + apply (clarsimp simp: mdata_map_def) + apply (clarsimp simp: mdata_map_def) + apply (rule refl) + apply (simp (no_asm) add: p_assoc_help flip: mask_2pm1) + apply (rule clear_page_table_corres) + apply wp+ + apply (rule corres_trivial, simp) + apply (simp add: liftM_def) + apply (rule corres_split[OF getSlotCap_corres[OF refl]]) + apply (rule_tac F="is_pt_cap x" in corres_gen_asm) + apply (rule updateCap_same_master) + apply (clarsimp simp: is_cap_simps update_map_data_def) + apply (wpsimp wp: get_cap_wp mapM_x_wp' hoare_vcg_all_lift hoare_vcg_imp_lift' + simp: wellformed_pte_def)+ + apply (clarsimp simp: valid_pti_def valid_arch_cap_def cte_wp_at_caps_of_state + invs_valid_objs invs_psp_aligned invs_distinct + cap_master_cap_simps is_cap_simps update_map_data_def + wellformed_mapdata_def) + apply (clarsimp simp: valid_pti'_def cte_wp_at_ctes_of) + done + +definition + "asid_pool_invocation_map ap \ case ap of + asid_pool_invocation.Assign asid p slot \ Assign (ucast asid) p (cte_map slot)" + +definition + "valid_apinv' ap \ + case ap of Assign asid p slot \ + cte_wp_at' (isArchCap isPageTableCap o cteCap) slot and K 
(0 < asid \ asid_wf asid)" + +definition + "valid_vcpuinv' vi \ case vi of + VCPUSetTCB v t \ vcpu_at' v and ex_nonz_cap_to' v and ex_nonz_cap_to' t + | VCPUInjectIRQ v n q \ \ + | VCPUReadRegister v rg \ \ + | VCPUWriteRegister v _ _ \ \ + | VCPUAckVPPI v _ \ \" + +lemma performASIDPoolInvocation_corres[corres]: + "\ ap' = asid_pool_invocation_map ap \ \ + corres dc + (valid_objs and pspace_aligned and pspace_distinct and valid_arch_state and valid_apinv ap) + (no_0_obj' and valid_apinv' ap') + (perform_asid_pool_invocation ap) + (performASIDPoolInvocation ap')" + apply (clarsimp simp: perform_asid_pool_invocation_def performASIDPoolInvocation_def) + apply (cases ap, simp add: asid_pool_invocation_map_def) + apply (corres corres: getSlotCap_corres corres_assert_gen_asm_l updateCap_same_master + simp: liftM_def store_asid_pool_entry_def + term_simp: cap.is_ArchObjectCap_def arch_cap.is_PageTableCap_def + update_map_data_def) + apply (fastforce simp: asid_pool_relation_def abs_asid_entry_def cap.is_ArchObjectCap_def + arch_cap.is_PageTableCap_def inv_def ucast_up_inj) + apply (wpsimp wp: set_cap_typ_at hoare_drop_imp get_cap_wp)+ + apply (clarsimp simp: valid_apinv_def cte_wp_at_caps_of_state cap_master_cap_simps is_cap_simps + arch_cap.is_PageTableCap_def is_vsroot_cap_def update_map_data_def in_omonad) + apply (drule (1) caps_of_state_valid_cap) + apply (simp add: valid_cap_def obj_at_def) + apply (clarsimp simp: valid_apinv'_def cte_wp_at_ctes_of) + apply (fastforce intro!: pspace_aligned_cross pspace_distinct_cross) + done + +crunches doMachineOp + for arch[wp]: "\s. P (ksArchState s)" + and irq_node'[wp]: "\s. P (irq_node' s)" + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + and ksInterruptState[wp]: "\s. P (ksInterruptState s)" + and cur'[wp]: "\s. P (ksCurThread s)" + and cteCaps_of[wp]: "\s. P (cteCaps_of s)" + and dmo_global_refs'[wp]: "\s. P (global_refs' s)" + and ksPSpace[wp]: "\s. P (ksPSpace s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and ksDomSchedule[wp]: "\s. P (ksDomSchedule s)" + and ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + +crunches vcpuSave, vcpuDisable, vcpuEnable, vcpuRestore + for obj_at'_no_vcpu[wp]: "\s. P (obj_at' (P' :: ('a :: no_vcpu) \ bool) t s)" + (simp: crunch_simps wp: crunch_wps) + +lemma vcpuSwitch_obj_at'_no_vcpu[wp]: + "vcpuSwitch param_a \\s. P (obj_at' (P' :: ('a :: no_vcpu) \ bool) t s)\" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma dmo_setVSpaceRoot_invs'[wp]: + "doMachineOp (setVSpaceRoot r a) \invs'\" + by (wp dmo_invs_lift') + +lemma dmo_setVSpaceRoot_irq_masks[wp]: + "doMachineOp (setVSpaceRoot r a) \\s. P (irq_masks (ksMachineState s))\" + unfolding doMachineOp_def + apply wpsimp + apply (drule use_valid, rule setVSpaceRoot_irq_masks; assumption) + done + +lemma dmo_setVSpaceRoot_memory[wp]: + "doMachineOp (setVSpaceRoot r a) \\s. P (underlying_memory (ksMachineState s))\" + unfolding doMachineOp_def + apply wpsimp + apply (drule use_valid, rule setVSpaceRoot_underlying_memory_inv; assumption) + done + +lemma dmo_setVSpaceRoot_invs_no_cicd'[wp]: + "doMachineOp (setVSpaceRoot r a) \invs_no_cicd'\" + by (wp dmo_invs_no_cicd_lift') + +lemma getObject_tcb_hyp_sym_refs: + "\\s. sym_refs (state_hyp_refs_of' s)\ getObject p + \\rv. case atcbVCPUPtr (tcbArch rv) of None \ \_. 
True + | Some x \ ko_wp_at' (is_vcpu' and hyp_live') x\" + apply (wpsimp wp: getObject_tcb_wp) + apply (clarsimp simp: typ_at_tcb'[symmetric] typ_at'_def ko_wp_at'_def[of _ p] + split: option.splits) + apply (case_tac ko; simp) + apply (rename_tac tcb) + apply (rule_tac x=tcb in exI; rule conjI, clarsimp simp: obj_at'_def) + apply (clarsimp, rule context_conjI, clarsimp simp: obj_at'_def) + apply (drule ko_at_state_hyp_refs_ofD') + apply (simp add: hyp_refs_of'_def sym_refs_def) + apply (erule_tac x=p in allE, simp) + apply (drule state_hyp_refs_of'_elemD) + apply (clarsimp simp: hyp_refs_of_rev') + apply (simp add: ko_wp_at'_def, erule exE, + clarsimp simp: is_vcpu'_def hyp_live'_def arch_live'_def) + done + +lemma setASIDPool_valid_objs[wp]: + "setObject p (ap::asidpool) \valid_objs'\" + apply (wp setObject_valid_objs'[where P=\]) + apply (clarsimp simp: updateObject_default_def in_monad valid_obj'_def) + apply simp + done + +lemma setASIDPool_valid_mdb[wp]: + "setObject p (ap::asidpool) \valid_mdb'\" + by (simp add: valid_mdb'_def) wp + +lemma setASIDPool_nosch[wp]: + "setObject p (ap::asidpool) \\s. P (ksSchedulerAction s)\" + by (wp setObject_nosch updateObject_default_inv|simp)+ + +lemma setASIDPool_ksQ[wp]: + "setObject p (ap::asidpool) \\s. P (ksReadyQueues s)\" + by (wp setObject_qs updateObject_default_inv|simp)+ + +lemma setASIDPool_inQ[wp]: + "setObject ptr (ap::asidpool) \\s. P (obj_at' (inQ d p) t s)\" + apply (simp add: obj_at'_real_def) + apply (wpsimp wp: setObject_ko_wp_at simp: objBits_simps) + apply (simp add: pageBits_def) + apply simp + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + done + +lemma setASIDPool_qsL1[wp]: + "setObject p (ap::asidpool) \\s. P (ksReadyQueuesL1Bitmap s)\" + by (wp setObject_qs updateObject_default_inv|simp)+ + +lemma setASIDPool_qsL2[wp]: + "setObject p (ap::asidpool) \\s. P (ksReadyQueuesL2Bitmap s)\" + by (wp setObject_qs updateObject_default_inv|simp)+ + +lemma setASIDPool_tcb_obj_at'[wp]: + "\obj_at' (P::tcb \ bool) t\ setObject p (ap::asidpool) \\_. obj_at' P t\" + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma setASIDPool_valid_queues[wp]: + "setObject p (ap::asidpool) \valid_queues\" + by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ + +lemma setASIDPool_valid_queues'[wp]: + "setObject p (ap::asidpool) \valid_queues'\" + by (wp valid_queues_lift') + +lemma setASIDPool_state_refs'[wp]: + "setObject p (ap::asidpool) \\s. P (state_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def objBits_simps + in_magnitude_check state_refs_of'_def ps_clear_upd + elim!: rsubst[where P=P] del: ext intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (simp split: option.split) + done + +lemma setASIDPool_state_hyp_refs'[wp]: + "setObject p (ap::asidpool) \\s. 
P (state_hyp_refs_of' s)\" + apply (clarsimp simp: setObject_def valid_def in_monad split_def + updateObject_default_def objBits_simps + in_magnitude_check state_hyp_refs_of'_def ps_clear_upd + elim!: rsubst[where P=P] del: ext intro!: ext + split del: if_split cong: option.case_cong if_cong) + apply (simp split: option.split) + done + +lemma setASIDPool_iflive[wp]: + "setObject p (ap::asidpool) \if_live_then_nonz_cap'\" + apply (rule hoare_pre) + apply (rule setObject_iflive' [where P=\], simp) + apply (simp add: objBits_simps) + apply (auto simp: updateObject_default_def in_monad bit_simps live'_def hyp_live'_def + arch_live'_def) + done + +lemma setASIDPool_ksInt[wp]: + "setObject p (ap::asidpool) \\s. P (ksInterruptState s)\" + by (wp setObject_ksInterrupt updateObject_default_inv|simp)+ + +lemma setASIDPool_ifunsafe[wp]: + "setObject p (ap::asidpool) \if_unsafe_then_cap'\" + apply (rule hoare_pre) + apply (rule setObject_ifunsafe' [where P=\], simp) + apply (auto simp: updateObject_default_def in_monad)[2] + apply wp + apply simp + done + +lemma setASIDPool_it'[wp]: + "setObject p (ap::asidpool) \\s. P (ksIdleThread s)\" + by (wp setObject_it updateObject_default_inv|simp)+ + +lemma setASIDPool_pred_tcb_at'[wp]: + "setObject p (ap::asidpool) \pred_tcb_at' proj P t\" + apply (simp add: pred_tcb_at'_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma setASIDPool_idle[wp]: + "setObject p (ap::asidpool) \valid_idle'\" + unfolding valid_idle'_def + by (rule hoare_lift_Pf [where f="ksIdleThread"]; wp) + +lemma setASIDPool_irq_states'[wp]: + "setObject p (ap::asidpool) \valid_irq_states'\" + apply (rule hoare_pre) + apply (rule hoare_use_eq [where f=ksInterruptState, OF setObject_ksInterrupt]) + apply (simp, rule updateObject_default_inv) + apply (rule hoare_use_eq [where f=ksMachineState, OF setObject_ksMachine]) + apply (simp, rule updateObject_default_inv) + apply wp + apply assumption + done + +lemma setASIDPool_vms'[wp]: + "setObject p (ap::asidpool) \valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp setObject_typ_at_inv setObject_ksMachine updateObject_default_inv + hoare_vcg_all_lift hoare_vcg_disj_lift | simp)+ + done + +lemma setASIDPool_ct_not_inQ[wp]: + "setObject p (ap::asidpool) \ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF setObject_nosch]) + apply (simp add: updateObject_default_def | wp)+ + apply (rule hoare_weaken_pre) + apply (wps setObject_ASID_ct) + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad)+ + done + +lemma setObject_asidpool_cur_domain[wp]: + "setObject p (ap::asidpool) \\s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_asidpool_ksDomSchedule[wp]: + "setObject p (ap::asidpool) \\s. P (ksDomSchedule s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_asidpool_tcb_in_cur_domain'[wp]: + "setObject p (ap::asidpool) \tcb_in_cur_domain' t\" + by (wp tcb_in_cur_domain'_lift) + +lemma setObject_asidpool_ct_idle_or_in_cur_domain'[wp]: + "setObject p (ap::asidpool) \ct_idle_or_in_cur_domain'\" + by (wp hoare_vcg_disj_lift ct_idle_or_in_cur_domain'_lift) + +lemma setObject_ap_ksDomScheduleIdx[wp]: + "setObject p (ap::asidpool) \\s. 
P (ksDomScheduleIdx s)\" + by (wpsimp wp: updateObject_default_inv simp: setObject_def) + +lemma setASIDPool_invs[wp]: + "setObject p (ap::asidpool) \invs'\" + apply (simp add: invs'_def valid_state'_def valid_pspace'_def) + apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift + valid_irq_node_lift + cur_tcb_lift valid_irq_handlers_lift'' + untyped_ranges_zero_lift + updateObject_default_inv + | simp add: cteCaps_of_def + | rule setObject_ksPSpace_only)+ + apply (clarsimp simp: o_def) + done + +lemma doMachineOp_invalidateTranslationASID_invs'[wp]: + "doMachineOp (invalidateTranslationASID vmid) \invs'\" + unfolding invalidateTranslationASID_def + by (wp dmo_machine_op_lift_invs') + +crunches getVMID, armContextSwitch, setGlobalUserVSpace + for invs'[wp]: invs' + (ignore: doMachineOp wp: getASID_wp crunch_wps) + +lemma setVMRoot_invs'[wp]: + "setVMRoot p \invs'\" + unfolding setVMRoot_def getThreadVSpaceRoot_def + by (wpsimp wp: whenE_wp findVSpaceForASID_vs_at_wp hoare_drop_imps hoare_vcg_ex_lift + hoare_vcg_all_lift) + +lemma setASIDPool_invs_no_cicd'[wp]: + "setObject p (ap::asidpool) \invs_no_cicd'\" + apply (simp add: invs_no_cicd'_def valid_state'_def valid_pspace'_def) + apply (wp sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift + valid_irq_node_lift + cur_tcb_lift valid_irq_handlers_lift'' + untyped_ranges_zero_lift + updateObject_default_inv + | simp add: cteCaps_of_def + | rule setObject_ksPSpace_only)+ + apply (clarsimp simp: o_def) + done + +lemma invalidateTranslationASID_invs_no_cicd'[wp]: + "doMachineOp (invalidateTranslationASID asid) \invs_no_cicd'\" + by (wp dmo_invs_no_cicd_lift') + +crunches getVMID, armContextSwitch, setGlobalUserVSpace + for invs_no_cicd'[wp]: "invs_no_cicd'" + (ignore: doMachineOp wp: getASID_wp crunch_wps) + +lemma setVMRoot_invs_no_cicd': + "setVMRoot p \invs_no_cicd'\" + unfolding setVMRoot_def getThreadVSpaceRoot_def + by (wpsimp wp: whenE_wp findVSpaceForASID_vs_at_wp hoare_drop_imps hoare_vcg_ex_lift + hoare_vcg_all_lift) + +crunch nosch [wp]: setVMRoot "\s. P (ksSchedulerAction s)" + (wp: crunch_wps getObject_inv setObject_nosch simp: crunch_simps loadObject_default_def updateObject_default_def) + +crunch it' [wp]: deleteASIDPool "\s. P (ksIdleThread s)" + (simp: crunch_simps loadObject_default_def updateObject_default_def wp: getObject_inv mapM_wp' crunch_wps) + +crunch it' [wp]: storePTE "\s. P (ksIdleThread s)" + (simp: crunch_simps updateObject_default_def wp: setObject_idle') + +crunch it' [wp]: deleteASID "\s. P (ksIdleThread s)" + (simp: crunch_simps loadObject_default_def updateObject_default_def + wp: getObject_inv) + +crunch typ_at' [wp]: performPageTableInvocation "\s. P (typ_at' T p s)" + (wp: crunch_wps) + +crunch typ_at' [wp]: performPageInvocation "\s. P (typ_at' T p s)" + (wp: crunch_wps simp: crunch_simps) + +lemma performASIDPoolInvocation_typ_at' [wp]: + "\\s. P (typ_at' T p s)\ performASIDPoolInvocation api \\_ s. 
P (typ_at' T p s)\" + by (wpsimp simp: performASIDPoolInvocation_def + wp: getASID_wp hoare_vcg_imp_lift[where P'=\, simplified]) + +lemmas performPageTableInvocation_typ_ats' [wp] = + typ_at_lifts [OF performPageTableInvocation_typ_at'] + +lemmas performPageInvocation_typ_ats' [wp] = + typ_at_lifts [OF performPageInvocation_typ_at'] + +lemmas performASIDPoolInvocation_typ_ats' [wp] = + typ_at_lifts [OF performASIDPoolInvocation_typ_at'] + +lemma storePTE_pred_tcb_at' [wp]: + "storePTE p pte \pred_tcb_at' proj P t\" + apply (simp add: storePTE_def pred_tcb_at'_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma storePTE_valid_mdb [wp]: + "\valid_mdb'\ storePTE p pte \\rv. valid_mdb'\" + by (simp add: valid_mdb'_def) wp + +crunch nosch [wp]: storePTE "\s. P (ksSchedulerAction s)" + (simp: updateObject_default_def ignore_del: setObject) + +crunch ksQ [wp]: storePTE "\s. P (ksReadyQueues s)" + (simp: updateObject_default_def) + +lemma storePTE_inQ[wp]: + "\\s. P (obj_at' (inQ d p) t s)\ storePTE ptr pte \\rv s. P (obj_at' (inQ d p) t s)\" + apply (simp add: obj_at'_real_def storePTE_def) + apply (wp setObject_ko_wp_at | simp add: objBits_simps)+ + apply (clarsimp simp: obj_at'_def ko_wp_at'_def) + done + +crunch norqL1[wp]: storePTE "\s. P (ksReadyQueuesL1Bitmap s)" + (simp: updateObject_default_def) + +crunch norqL2[wp]: storePTE "\s. P (ksReadyQueuesL2Bitmap s)" + (simp: updateObject_default_def) + +lemma storePTE_valid_queues' [wp]: + "\valid_queues'\ storePTE p pte \\_. valid_queues'\" + by (wp valid_queues_lift') + +lemma storePTE_iflive [wp]: + "\if_live_then_nonz_cap'\ storePTE p pte \\rv. if_live_then_nonz_cap'\" + apply (simp add: storePTE_def) + apply (rule hoare_pre) + apply (rule setObject_iflive' [where P=\], simp) + apply (simp add: objBits_simps) + apply (auto simp: updateObject_default_def in_monad live'_def hyp_live'_def arch_live'_def) + done + +lemma setObject_pte_ksInt [wp]: + "\\s. P (ksInterruptState s)\ setObject p (pte::pte) \\_. \s. P (ksInterruptState s)\" + by (wp setObject_ksInterrupt updateObject_default_inv|simp)+ + +crunch ksInterruptState [wp]: storePTE "\s. P (ksInterruptState s)" + +lemma storePTE_ifunsafe [wp]: + "\if_unsafe_then_cap'\ storePTE p pte \\rv. if_unsafe_then_cap'\" + apply (simp add: storePTE_def) + apply (rule hoare_pre) + apply (rule setObject_ifunsafe' [where P=\], simp) + apply (auto simp: updateObject_default_def in_monad)[2] + apply wp + apply simp + done + +method valid_idle'_setObject uses simp = + simp add: valid_idle'_def, rule hoare_lift_Pf [where f="ksIdleThread"]; wpsimp?; + (wpsimp wp: obj_at_setObject2[where P="idle_tcb'", simplified] hoare_drop_imp + simp: simp + | clarsimp dest!: updateObject_default_result)+ + + +lemma storePTE_idle [wp]: + "\valid_idle'\ storePTE p pte \\rv. valid_idle'\" by (valid_idle'_setObject simp: storePTE_def) + +crunch arch' [wp]: storePTE "\s. P (ksArchState s)" + +crunch cur' [wp]: storePTE "\s. P (ksCurThread s)" + +lemma storePTE_irq_states' [wp]: + "\valid_irq_states'\ storePTE pte p \\_. valid_irq_states'\" + apply (simp add: storePTE_def) + apply (wpsimp wp: valid_irq_states_lift' dmo_lift' no_irq_storeWord setObject_ksMachine + updateObject_default_inv) + done + +lemma storePTE_vms'[wp]: + "\valid_machine_state'\ storePTE p pte \\_. 
valid_machine_state'\" + apply (simp add: storePTE_def valid_machine_state'_def pointerInUserData_def + pointerInDeviceData_def) + apply (wp setObject_typ_at_inv setObject_ksMachine updateObject_default_inv + hoare_vcg_all_lift hoare_vcg_disj_lift | simp)+ + done + +crunch pspace_domain_valid[wp]: storePTE "pspace_domain_valid" + +lemma storePTE_ct_not_inQ[wp]: + "\ct_not_inQ\ storePTE p pte \\_. ct_not_inQ\" + apply (rule ct_not_inQ_lift [OF storePTE_nosch]) + apply (simp add: storePTE_def) + apply (wp_pre, wps) + apply (rule obj_at_setObject2) + apply (clarsimp simp: updateObject_default_def in_monad)+ + done + +lemma setObject_pte_cur_domain[wp]: + "\\s. P (ksCurDomain s)\ setObject t (v::pte) \\rv s. P (ksCurDomain s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma setObject_pte_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ setObject t (v::pte) \\rv s. P (ksDomSchedule s)\" + apply (simp add: setObject_def split_def) + apply (wp updateObject_default_inv | simp)+ + done + +lemma storePTE_cur_domain[wp]: + "\\s. P (ksCurDomain s)\ storePTE p pte \\rv s. P (ksCurDomain s)\" + by (simp add: storePTE_def) wp + +lemma storePTE_ksDomSchedule[wp]: + "\\s. P (ksDomSchedule s)\ storePTE p pte \\rv s. P (ksDomSchedule s)\" + by (simp add: storePTE_def) wp + +lemma storePTE_tcb_obj_at'[wp]: + "\obj_at' (P::tcb \ bool) t\ storePTE p pte \\_. obj_at' P t\" + apply (simp add: storePTE_def) + apply (rule obj_at_setObject2) + apply (clarsimp simp add: updateObject_default_def in_monad) + done + +lemma storePTE_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t\ storePTE p pte \\_. tcb_in_cur_domain' t\" + by (wp tcb_in_cur_domain'_lift) + +lemma storePTE_ct_idle_or_in_cur_domain'[wp]: + "\ct_idle_or_in_cur_domain'\ storePTE p pte \\_. ct_idle_or_in_cur_domain'\" + by (wp ct_idle_or_in_cur_domain'_lift hoare_vcg_disj_lift) + +lemma setObject_pte_ksDomScheduleIdx [wp]: + "\\s. P (ksDomScheduleIdx s)\ setObject p (pte::pte) \\_. \s. P (ksDomScheduleIdx s)\" + by (wp updateObject_default_inv|simp add:setObject_def | wpc)+ + +crunches storePTE + for ksDomScheduleIdx[wp]: "\s. P (ksDomScheduleIdx s)" + and gsMaxObjectSize[wp]: "\s. P (gsMaxObjectSize s)" + and gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + (wp: setObject_ksPSpace_only updateObject_default_inv) + +lemma storePTE_valid_objs[wp]: + "storePTE p pte \valid_objs'\" + apply (simp add: storePTE_def doMachineOp_def split_def) + apply (rule hoare_pre, rule setObject_valid_objs'[where P=\]) + apply (clarsimp simp: updateObject_default_def in_monad valid_obj'_def) + apply simp + done + +lemma storePTE_valid_queues [wp]: + "\Invariants_H.valid_queues\ storePTE p pde \\_. Invariants_H.valid_queues\" + by (wp valid_queues_lift | simp add: pred_tcb_at'_def)+ + +lemma storePTE_ko_wp_vcpu_at'[wp]: + "storePTE p pde \\s. P (ko_wp_at' (is_vcpu' and hyp_live') p' s)\" + apply (clarsimp simp: storePTE_def) + apply (wpsimp wp: hoare_drop_imps setObject_ko_wp_at simp: objBits_simps archObjSize_def) + apply (auto simp: bit_simps ko_wp_at'_def obj_at'_def is_vcpu'_def)+ + done + +lemma storePTE_invs[wp]: + "storePTE p pte \invs'\" + unfolding invs'_def valid_state'_def valid_pspace'_def + by (wpsimp wp: sch_act_wf_lift valid_global_refs_lift' irqs_masked_lift valid_arch_state_lift' + valid_irq_node_lift cur_tcb_lift valid_irq_handlers_lift'' untyped_ranges_zero_lift + simp: cteCaps_of_def o_def) + +crunch cte_wp_at'[wp]: unmapPageTable "\s. 
P (cte_wp_at' P' p s)" + (wp: crunch_wps simp: crunch_simps) + +lemmas storePTE_Invalid_invs = storePTE_invs[where pte=InvalidPTE, simplified] + +crunches unmapPageTable, invalidateTLBByASIDVA + for invs'[wp]: "invs'" + (ignore: doMachineOp + wp: storePTE_Invalid_invs mapM_wp' crunch_wps dmo_invs_lift' + simp: crunch_simps if_apply_def2) + +lemma perform_pti_invs [wp]: + "\invs' and valid_pti' pti\ performPageTableInvocation pti \\_. invs'\" + apply (clarsimp simp: performPageTableInvocation_def getSlotCap_def valid_pti'_def + split: page_table_invocation.splits) + apply (intro conjI allI impI; + wpsimp wp: arch_update_updateCap_invs getCTE_wp' mapM_x_wp' + hoare_vcg_all_lift hoare_vcg_imp_lift' dmo_invs_lift') + apply (auto simp: cte_wp_at_ctes_of is_arch_update'_def isCap_simps valid_cap'_def capAligned_def) + done + +crunches unmapPage + for cte_wp_at': "\s. P (cte_wp_at' P' p s)" + (wp: crunch_wps lookupPTSlotFromLevel_inv simp: crunch_simps) + +lemmas unmapPage_typ_ats [wp] = typ_at_lifts [OF unmapPage_typ_at'] + +lemma unmapPage_invs' [wp]: + "unmapPage sz asid vptr pptr \invs'\" + unfolding unmapPage_def + by (wpsimp wp: lookupPTSlot_inv hoare_drop_imp hoare_vcg_all_lift dmo_invs_lift') + +lemma dmo_doFlush_invs'[wp]: + "doMachineOp (doFlush flushOp vstart vend pstart) \invs'\" + unfolding doFlush_def cleanCacheRange_RAM_def invalidateCacheRange_RAM_def branchFlushRange_def + cleanInvalidateCacheRange_RAM_def cleanCacheRange_PoU_def invalidateCacheRange_I_def + by (cases flushOp; wpsimp wp: dmo_machine_op_lift_invs' simp: doMachineOp_bind empty_fail_bind) + +lemma perform_page_invs [wp]: + "\invs' and valid_page_inv' pt\ performPageInvocation pt \\_. invs'\" + supply if_split[split del] + apply (simp add: performPageInvocation_def) + apply (cases pt) + (* FIXME AARCH64: clean up this proof, not clear why all, fwd_all or solve_emerging don't work *) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_const_imp_lift + arch_update_updateCap_invs unmapPage_cte_wp_at' getSlotCap_wp dmo_invs_lift' + simp: valid_page_inv'_def is_arch_update'_def if_apply_def2) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_const_imp_lift + arch_update_updateCap_invs unmapPage_cte_wp_at' getSlotCap_wp dmo_invs_lift' + simp: valid_page_inv'_def is_arch_update'_def if_apply_def2) + prefer 2 + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_const_imp_lift + arch_update_updateCap_invs unmapPage_cte_wp_at' getSlotCap_wp + simp: valid_page_inv'_def is_arch_update'_def if_apply_def2) + apply (wpsimp wp: hoare_vcg_all_lift hoare_vcg_ex_lift hoare_vcg_const_imp_lift + arch_update_updateCap_invs unmapPage_cte_wp_at' getSlotCap_wp dmo_invs_lift' + simp: valid_page_inv'_def is_arch_update'_def if_apply_def2) + apply (clarsimp simp: cte_wp_at_ctes_of valid_page_inv'_def is_arch_update'_def isCap_simps valid_cap'_def capAligned_def + split: option.splits)+ + done + +lemma setObject_cte_obj_at_ap': + shows + "\\s. P' (obj_at' (P :: asidpool \ bool) p s)\ + setObject c (cte::cte) + \\_ s. P' (obj_at' P p s)\" + apply (clarsimp simp: setObject_def in_monad split_def + valid_def lookupAround2_char1 + obj_at'_def ps_clear_upd + split del: if_split) + apply (clarsimp elim!: rsubst[where P=P']) + apply (clarsimp simp: updateObject_cte in_monad objBits_simps + tcbCTableSlot_def tcbVTableSlot_def + typeError_def + split: if_split_asm + Structures_H.kernel_object.split_asm) + done + +lemma updateCap_ko_at_ap_inv'[wp]: + "\\s. P (ko_at' (ko::asidpool) p s )\ updateCap a b \\rv s. 
P ( ko_at' ko p s)\" + by (wpsimp simp: updateCap_def setCTE_def wp: setObject_cte_obj_at_ap') + +lemma storePTE_asid_pool_obj_at'[wp]: + "storePTE p pte \\s. P (obj_at' (P'::asidpool \ bool) t s)\" + apply (simp add: storePTE_def) + apply (clarsimp simp: setObject_def in_monad split_def + valid_def lookupAround2_char1 + obj_at'_def ps_clear_upd + split del: if_split) + apply (clarsimp elim!: rsubst[where P=P]) + apply (clarsimp simp: updateObject_default_def in_monad) + done + +lemma perform_aci_invs [wp]: + "\invs' and valid_apinv' api\ performASIDPoolInvocation api \\_. invs'\" + apply (clarsimp simp: performASIDPoolInvocation_def split: asidpool_invocation.splits) + apply (wpsimp wp: arch_update_updateCap_invs getASID_wp getSlotCap_wp hoare_vcg_all_lift + hoare_vcg_imp_lift') + apply (clarsimp simp: valid_apinv'_def cte_wp_at_ctes_of) + apply (case_tac cte, clarsimp) + apply (drule ctes_of_valid_cap', fastforce) + apply (clarsimp simp: valid_cap'_def capAligned_def is_arch_update'_def isCap_simps + wellformed_mapdata'_def) + done + +end + +end diff --git a/proof/refine/AARCH64/orphanage/Orphanage.thy b/proof/refine/AARCH64/orphanage/Orphanage.thy new file mode 100644 index 0000000000..ec109368d7 --- /dev/null +++ b/proof/refine/AARCH64/orphanage/Orphanage.thy @@ -0,0 +1,2465 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Orphanage +imports Refine.Refine +begin + +text \ + Proof that calling the kernel never leaves threads orphaned. + More specifically, every active thread must be the current thread, + or about to be switched to, or be in a scheduling queue. +\ + +(*FIXME: arch_split: move up? *) +context Arch begin + +requalify_facts + switchToIdleThread_def + switchToThread_def + +lemmas [crunch_def] = switchToIdleThread_def switchToThread_def + +context begin global_naming global +requalify_facts + Thread_H.switchToIdleThread_def + Thread_H.switchToThread_def +end +end + +context begin interpretation Arch . (*FIXME: arch_split*) + +definition + is_active_thread_state :: "thread_state \ bool" +where + "is_active_thread_state ts \ + isRunning ts \ isRestart ts" + +definition + is_active_tcb_ptr :: "machine_word \ kernel_state \ bool" +where + "is_active_tcb_ptr tcb_ptr s \ + st_tcb_at' is_active_thread_state tcb_ptr s" + +lemma is_active_tcb_ptr_runnable': + "is_active_tcb_ptr t s = st_tcb_at' runnable' t s" + by (auto simp: is_active_tcb_ptr_def pred_tcb_at'_def obj_at'_def + is_active_thread_state_def isRunning_def isRestart_def + split: Structures_H.thread_state.split_asm) + +definition + all_active_tcb_ptrs :: "kernel_state \ machine_word set" +where + "all_active_tcb_ptrs s \ + { tcb_ptr. is_active_tcb_ptr tcb_ptr s }" + +definition + all_queued_tcb_ptrs :: "kernel_state \ machine_word set" +where + "all_queued_tcb_ptrs s \ + { tcb_ptr. \ priority. tcb_ptr : set ((ksReadyQueues s) priority) }" + +lemma st_tcb_at_neg': + "(st_tcb_at' (\ ts. \ P ts) t s) = (tcb_at' t s \ \ st_tcb_at' P t s)" + by (auto simp: pred_tcb_at'_def obj_at'_def) + +lemma st_tcb_at_neg2: + "(\ st_tcb_at' P t s) = (st_tcb_at' (\ ts. \ P ts) t s \ \ tcb_at' t s)" + by (auto simp: pred_tcb_at'_def obj_at'_def) + +lemma st_tcb_at_double_neg': + "(st_tcb_at' (\ ts. \ P ts \ \ Q ts) t s) = + ((st_tcb_at' (\ ts. \ P ts) t s) \ (st_tcb_at' (\ ts. \ Q ts) t s))" + apply (auto simp: pred_tcb_at'_def obj_at'_def) + done + +definition + no_orphans :: " kernel_state \ bool" +where + "no_orphans s \ + \ tcb_ptr. 
+ (tcb_ptr : all_active_tcb_ptrs s + \ + tcb_ptr = ksCurThread s \ tcb_ptr : all_queued_tcb_ptrs s \ + ksSchedulerAction s = SwitchToThread tcb_ptr)" + +lemma no_orphans_disj: + "no_orphans = (\ s. + \ tcb_ptr. tcb_ptr = ksCurThread s \ + tcb_ptr : all_queued_tcb_ptrs s \ + \ typ_at' TCBT tcb_ptr s \ + st_tcb_at' (\ state. \ is_active_thread_state state) tcb_ptr s \ + ksSchedulerAction s = SwitchToThread tcb_ptr)" + apply clarsimp + apply (rule ext) + apply (unfold no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def st_tcb_at_neg' typ_at_tcb') + apply (auto intro: pred_tcb_at') + done + +lemma no_orphans_lift: + assumes typ_at'_is_lifted: + "\ tcb_ptr. \ \s. \ typ_at' TCBT tcb_ptr s\ f \ \_ s. \ typ_at' TCBT tcb_ptr s \" + assumes ksCurThread_is_lifted: + "\ tcb_ptr. \ \s. tcb_ptr = ksCurThread s \ f \ \_ s. tcb_ptr = ksCurThread s \" + assumes st_tcb_at'_is_lifted: + "\P p. \ \s. st_tcb_at' P p s\ f \ \_ s. st_tcb_at' P p s \" + assumes ksReadyQueues_is_lifted: + "\P. \ \s. P (ksReadyQueues s)\ f \ \_ s. P (ksReadyQueues s) \" + assumes ksSchedulerAction_is_lifted: + "\P. \ \s. P (ksSchedulerAction s)\ f \ \_ s. P (ksSchedulerAction s) \" + shows + "\ \s. no_orphans s \ f \ \_ s. no_orphans s \" + apply (unfold no_orphans_disj + all_active_tcb_ptrs_def + all_queued_tcb_ptrs_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (rule ksCurThread_is_lifted) + apply (wp hoare_vcg_disj_lift) + apply (rule ksReadyQueues_is_lifted) + apply (wp hoare_vcg_disj_lift) + apply (rule typ_at'_is_lifted) + apply (wp hoare_vcg_disj_lift) + apply (rule st_tcb_at'_is_lifted) + apply (rule ksSchedulerAction_is_lifted) + apply simp + done + +lemma st_tcb_at'_is_active_tcb_ptr_lift: + assumes "\P P' t. \\s. P (st_tcb_at' P' t s)\ f \\rv s. P (st_tcb_at' P' t s)\" + shows "\\s. P (is_active_tcb_ptr t s)\ f \\_ s. P (is_active_tcb_ptr t s)\" + by (clarsimp simp: is_active_tcb_ptr_def) (rule assms) + +lemma st_tcb_at'_all_active_tcb_ptrs_lift: + assumes "\P P' t. \\s. P (st_tcb_at' P' t s)\ f \\rv s. P (st_tcb_at' P' t s)\" + shows "\\s. P (t \ all_active_tcb_ptrs s)\ f \\_ s. P (t \ all_active_tcb_ptrs s)\" + by (clarsimp simp: all_active_tcb_ptrs_def) + (rule st_tcb_at'_is_active_tcb_ptr_lift [OF assms]) + +lemma ksQ_all_queued_tcb_ptrs_lift: + assumes "\P p. \\s. P (ksReadyQueues s p)\ f \\rv s. P (ksReadyQueues s p)\" + shows "\\s. P (t \ all_queued_tcb_ptrs s)\ f \\_ s. P (t \ all_queued_tcb_ptrs s)\" + apply (clarsimp simp: all_queued_tcb_ptrs_def) + apply (rule_tac P=P in P_bool_lift) + apply (wp hoare_vcg_ex_lift assms) + apply (clarsimp) + apply (wp hoare_vcg_all_lift assms) + done + +definition + almost_no_orphans :: "obj_ref \ kernel_state \ bool" +where + "almost_no_orphans tcb_ptr s \ + \ptr. ptr = tcb_ptr \ + (ptr : all_active_tcb_ptrs s + \ + ptr = ksCurThread s \ ptr : all_queued_tcb_ptrs s \ + ksSchedulerAction s = SwitchToThread ptr)" + +lemma no_orphans_strg_almost: + "no_orphans s \ almost_no_orphans tcb_ptr s" + unfolding no_orphans_def almost_no_orphans_def + apply simp + done + +lemma almost_no_orphans_disj: + "almost_no_orphans tcb_ptr = (\ s. + \ ptr. ptr = ksCurThread s \ + ptr : all_queued_tcb_ptrs s \ + \ typ_at' TCBT ptr s \ + st_tcb_at' (\ thread_state. 
\ is_active_thread_state thread_state) ptr s \ + ptr = tcb_ptr \ + ksSchedulerAction s = SwitchToThread ptr)" + apply clarsimp + apply (rule ext) + apply (unfold almost_no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def st_tcb_at_neg' typ_at_tcb') + apply (auto intro: pred_tcb_at') + done + +lemma no_orphans_update_simps[simp]: + "no_orphans (gsCNodes_update f s) = no_orphans s" + "no_orphans (gsUserPages_update g s) = no_orphans s" + "no_orphans (gsUntypedZeroRanges_update h s) = no_orphans s" + by (simp_all add: no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def all_queued_tcb_ptrs_def) + +lemma no_orphans_ksReadyQueuesL1Bitmap_update[simp]: + "no_orphans (ksReadyQueuesL1Bitmap_update f s) = no_orphans s" + unfolding no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + by auto + +lemma no_orphans_ksReadyQueuesL2Bitmap_update[simp]: + "no_orphans (ksReadyQueuesL2Bitmap_update f s) = no_orphans s" + unfolding no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + by auto + +lemma no_orphans_ksIdle[simp]: + "no_orphans (ksIdleThread_update f s) = no_orphans s" + unfolding no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + apply auto + done + +lemma no_orphans_ksWorkUnits [simp]: + "no_orphans (ksWorkUnitsCompleted_update f s) = no_orphans s" + unfolding no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + apply auto + done + +lemma no_orphans_irq_state_independent[intro!, simp]: + "no_orphans (s \ksMachineState := ksMachineState s \ irq_state := f (irq_state (ksMachineState s)) \ \) + = no_orphans s" + by (simp add: no_orphans_def all_active_tcb_ptrs_def + all_queued_tcb_ptrs_def is_active_tcb_ptr_def) + +add_upd_simps "no_orphans (gsUntypedZeroRanges_update f s)" +declare upd_simps[simp] + +lemma almost_no_orphans_ksReadyQueuesL1Bitmap_update[simp]: + "almost_no_orphans t (ksReadyQueuesL1Bitmap_update f s) = almost_no_orphans t s" + unfolding almost_no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + by auto + +lemma almost_no_orphans_ksReadyQueuesL2Bitmap_update[simp]: + "almost_no_orphans t (ksReadyQueuesL2Bitmap_update f s) = almost_no_orphans t s" + unfolding almost_no_orphans_def all_active_tcb_ptrs_def all_queued_tcb_ptrs_def is_active_tcb_ptr_def + by auto + +lemma all_active_tcb_ptrs_queue [simp]: + "all_active_tcb_ptrs (ksReadyQueues_update f s) = all_active_tcb_ptrs s" + by (clarsimp simp: all_active_tcb_ptrs_def is_active_tcb_ptr_def) + +(****************************************************************************************************) + +crunch no_orphans [wp]: addToBitmap "no_orphans" +crunch no_orphans [wp]: removeFromBitmap "no_orphans" + +crunch almost_no_orphans [wp]: addToBitmap "almost_no_orphans x" +crunch almost_no_orphans [wp]: removeFromBitmap "almost_no_orphans x" + +lemma setCTE_no_orphans [wp]: + "\ \s. no_orphans s \ + setCTE p cte + \ \rv s. no_orphans s \" + apply (rule no_orphans_lift) + apply (wp setCTE_typ_at' setCTE_pred_tcb_at')+ + done + +lemma setCTE_almost_no_orphans [wp]: + "\ \s. almost_no_orphans tcb_ptr s \ + setCTE p cte + \ \rv s. almost_no_orphans tcb_ptr s \" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift setCTE_typ_at' setCTE_pred_tcb_at') + done + +crunch no_orphans [wp]: activateIdleThread "no_orphans" + +lemma asUser_no_orphans [wp]: + "\ \s. no_orphans s \ + asUser thread f + \ \rv s. 
no_orphans s \" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +lemma threadSet_no_orphans: + "\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)) \ + \ \s. no_orphans s \ + threadSet F tptr + \ \rv s. no_orphans s \" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 | clarsimp)+ + done + +lemma threadSet_almost_no_orphans: + "\tcb. \ is_active_thread_state (tcbState tcb) \ \ is_active_thread_state (tcbState (F tcb)) \ + \ \s. almost_no_orphans ptr s \ + threadSet F tptr + \ \rv s. almost_no_orphans ptr s \" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_st_tcb_at2 | clarsimp)+ + done + +lemma setQueue_no_orphans_enq: + "\ \s. no_orphans s \ set (ksReadyQueues s (d, prio)) \ set qs \ + setQueue d prio qs + \ \_ s. no_orphans s \" + unfolding setQueue_def + apply wp + apply (clarsimp simp: no_orphans_def all_queued_tcb_ptrs_def + split: if_split_asm) + apply fastforce + done + +lemma setQueue_almost_no_orphans_enq: + "\ \s. almost_no_orphans tcb_ptr s \ set (ksReadyQueues s (d, prio)) \ set qs \ tcb_ptr \ set qs \ + setQueue d prio qs + \ \_ s. no_orphans s \" + unfolding setQueue_def + apply wp + apply (clarsimp simp: no_orphans_def almost_no_orphans_def all_queued_tcb_ptrs_def + split: if_split_asm) + apply fastforce + done + +lemma setQueue_almost_no_orphans_enq_lift: + "\ \s. almost_no_orphans tcb_ptr s \ set (ksReadyQueues s (d, prio)) \ set qs \ + setQueue d prio qs + \ \_ s. almost_no_orphans tcb_ptr s \" + unfolding setQueue_def + apply wp + apply (clarsimp simp: almost_no_orphans_def all_queued_tcb_ptrs_def + split: if_split_asm) + apply fastforce + done + +lemma tcbSchedEnqueue_no_orphans[wp]: + "\ \s. no_orphans s \ + tcbSchedEnqueue tcb_ptr + \ \rv s. no_orphans s \" + unfolding tcbSchedEnqueue_def + apply (wp setQueue_no_orphans_enq threadSet_no_orphans | clarsimp simp: unless_def)+ + apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ + apply (drule obj_at_ko_at') + apply auto + done + +lemma tcbSchedAppend_no_orphans[wp]: + "\ \s. no_orphans s \ + tcbSchedAppend tcb_ptr + \ \rv s. no_orphans s \" + unfolding tcbSchedAppend_def + apply (wp setQueue_no_orphans_enq threadSet_no_orphans | clarsimp simp: unless_def)+ + apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ + apply (drule obj_at_ko_at') + apply auto + done + +lemma ko_at_obj_at': + "ko_at' ko p s \ P ko \ obj_at' P p s" + unfolding obj_at'_def + apply clarsimp + done + +lemma queued_in_queue: + "\valid_queues' s; ko_at' tcb tcb_ptr s; tcbQueued tcb\ \ + \ p. tcb_ptr \ set (ksReadyQueues s p)" + unfolding valid_queues'_def + apply (drule_tac x="tcbDomain tcb" in spec) + apply (drule_tac x="tcbPriority tcb" in spec) + apply (drule_tac x="tcb_ptr" in spec) + apply (drule mp) + apply (rule ko_at_obj_at') + apply (auto simp: inQ_def) + done + +lemma tcbSchedEnqueue_almost_no_orphans: + "\ \s. almost_no_orphans tcb_ptr s \ valid_queues' s \ + tcbSchedEnqueue tcb_ptr + \ \rv s. 
no_orphans s \" + unfolding tcbSchedEnqueue_def + apply simp + apply (wp setQueue_almost_no_orphans_enq[where tcb_ptr=tcb_ptr] threadSet_no_orphans + | clarsimp)+ + apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ + apply normalise_obj_at' + apply (rule_tac x=ko in exI) + apply (clarsimp simp: subset_insertI) + apply (unfold no_orphans_def almost_no_orphans_def) + apply clarsimp + apply (drule(2) queued_in_queue) + apply (fastforce simp: all_queued_tcb_ptrs_def) + done + +lemma tcbSchedEnqueue_almost_no_orphans_lift: + "\ \s. almost_no_orphans ptr s \ + tcbSchedEnqueue tcb_ptr + \ \rv s. almost_no_orphans ptr s \" + unfolding tcbSchedEnqueue_def + apply (wp setQueue_almost_no_orphans_enq_lift threadSet_almost_no_orphans | clarsimp simp: unless_def)+ + apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ + apply (drule obj_at_ko_at') + apply auto + done + +lemma ssa_no_orphans: + "\ \s. no_orphans s \ + (\t. sch_act_not t s \ t : all_queued_tcb_ptrs s \ ksCurThread s = t) \ + setSchedulerAction sa + \ \rv s. no_orphans s \" + unfolding setSchedulerAction_def no_orphans_disj all_queued_tcb_ptrs_def + apply wp + apply auto + done + +lemma ssa_almost_no_orphans: + "\ \s. almost_no_orphans tcb_ptr s \ + (\t. sch_act_not t s \ t : all_queued_tcb_ptrs s \ ksCurThread s = t) \ + setSchedulerAction (SwitchToThread tcb_ptr) + \ \rv s. no_orphans s \" + unfolding setSchedulerAction_def no_orphans_disj almost_no_orphans_disj all_queued_tcb_ptrs_def + apply wp + apply auto + done + +lemma ssa_almost_no_orphans_lift [wp]: + "\ \s. almost_no_orphans tcb_ptr s \ + (\t. sch_act_not t s \ t : all_queued_tcb_ptrs s \ ksCurThread s = t) \ + setSchedulerAction sa + \ \rv s. almost_no_orphans tcb_ptr s \" + unfolding setSchedulerAction_def almost_no_orphans_disj all_queued_tcb_ptrs_def + apply wp + apply auto + done + +lemma tcbSchedEnqueue_inQueue [wp]: + "\ \s. valid_queues' s \ + tcbSchedEnqueue tcb_ptr + \ \rv s. tcb_ptr \ all_queued_tcb_ptrs s \" + unfolding tcbSchedEnqueue_def all_queued_tcb_ptrs_def + apply (wp | clarsimp simp: unless_def)+ + apply (rule_tac Q="\rv. \" in hoare_post_imp) + apply fastforce + apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ + apply (fastforce simp: obj_at'_def valid_queues'_def inQ_def) + done + +lemma tcbSchedAppend_inQueue [wp]: + "\ \s. valid_queues' s \ + tcbSchedAppend tcb_ptr + \ \rv s. tcb_ptr \ all_queued_tcb_ptrs s \" + unfolding tcbSchedAppend_def all_queued_tcb_ptrs_def + apply (wp | clarsimp simp: unless_def)+ + apply (rule_tac Q="\rv. \" in hoare_post_imp) + apply fastforce + apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ + apply (fastforce simp: obj_at'_def valid_queues'_def inQ_def) + done + +lemma rescheduleRequired_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ + rescheduleRequired + \ \rv s. no_orphans s \" + unfolding rescheduleRequired_def + apply (wp tcbSchedEnqueue_no_orphans hoare_vcg_all_lift ssa_no_orphans | wpc | clarsimp)+ + apply (wps tcbSchedEnqueue_nosch, wp hoare_weak_lift_imp) + apply (rename_tac word t p) + apply (rule_tac P="word = t" in hoare_gen_asm) + apply (wp hoare_disjI1 | clarsimp)+ + done + +lemma rescheduleRequired_almost_no_orphans [wp]: + "\ \s. almost_no_orphans tcb_ptr s \ valid_queues' s \ + rescheduleRequired + \ \rv s. 
almost_no_orphans tcb_ptr s \" + unfolding rescheduleRequired_def + apply (wp tcbSchedEnqueue_almost_no_orphans_lift hoare_vcg_all_lift | wpc | clarsimp)+ + apply (wps tcbSchedEnqueue_nosch, wp hoare_weak_lift_imp) + apply (rename_tac word t p) + apply (rule_tac P="word = t" in hoare_gen_asm) + apply (wp hoare_disjI1 | clarsimp)+ + done + +lemma setThreadState_current_no_orphans: + "\ \s. no_orphans s \ ksCurThread s = tcb_ptr \ valid_queues' s \ + setThreadState state tcb_ptr + \ \rv s. no_orphans s \" + unfolding setThreadState_def + apply (wp | clarsimp)+ + apply (rule_tac Q="\rv s. valid_queues' s \ no_orphans s" in hoare_post_imp) + apply clarsimp + apply (wp threadSet_valid_queues') + apply (unfold no_orphans_disj all_queued_tcb_ptrs_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) + apply (auto simp: inQ_def) + done + +lemma setThreadState_isRestart_no_orphans: + "\ \s. no_orphans s \ st_tcb_at' isRestart tcb_ptr s \ valid_queues' s\ + setThreadState state tcb_ptr + \ \rv s. no_orphans s \" + unfolding setThreadState_def + apply (wp | clarsimp)+ + apply (rule_tac Q="\rv s. valid_queues' s \ no_orphans s" in hoare_post_imp) + apply clarsimp + apply (wp threadSet_valid_queues') + apply (unfold no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) + apply (auto simp: st_tcb_at_double_neg' st_tcb_at_neg' inQ_def) + done + +lemma setThreadState_almost_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s\ + setThreadState state tcb_ptr + \ \rv s. almost_no_orphans tcb_ptr s \" + unfolding setThreadState_def + apply (wp | clarsimp)+ + apply (rule_tac Q="\rv s. valid_queues' s \ almost_no_orphans tcb_ptr s" in hoare_post_imp) + apply clarsimp + apply (wp threadSet_valid_queues') + apply (unfold no_orphans_disj almost_no_orphans_disj all_queued_tcb_ptrs_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) + apply (auto simp: inQ_def) + done + +lemma setThreadState_not_active_no_orphans: + "\ is_active_thread_state state \ + \ \s. no_orphans s \ valid_queues' s \ + setThreadState state tcb_ptr + \ \rv s. no_orphans s \" + unfolding setThreadState_def + apply (wp | clarsimp)+ + apply (rule_tac Q="\rv s. valid_queues' s \ no_orphans s" in hoare_post_imp) + apply clarsimp + apply (wp threadSet_valid_queues') + apply (unfold no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) + apply (auto simp: isRunning_def isRestart_def inQ_def) + done + +lemma setThreadState_not_active_almost_no_orphans: + "\ is_active_thread_state state \ + \ \s. almost_no_orphans thread s \ valid_queues' s \ + setThreadState state tcb_ptr + \ \rv s. almost_no_orphans thread s \" + unfolding setThreadState_def + apply (wp | clarsimp)+ + apply (rule_tac Q="\rv s. valid_queues' s \ almost_no_orphans thread s" in hoare_post_imp) + apply clarsimp + apply (wp threadSet_valid_queues') + apply (unfold almost_no_orphans_disj all_queued_tcb_ptrs_def is_active_thread_state_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift threadSet_pred_tcb_at_state) + apply (auto simp: isRunning_def isRestart_def inQ_def) + done + +lemma activateThread_no_orphans [wp]: + "\ \s. no_orphans s \ ct_in_state' activatable' s \ invs' s \ + activateThread + \ \rv s. 
no_orphans s \" + unfolding activateThread_def + apply (wp gts_wp' setThreadState_isRestart_no_orphans | wpc | clarsimp)+ + apply (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def isRestart_def) + done + +lemma setQueue_no_orphans_deq: + "\ \s. \ tcb_ptr. no_orphans s \ \ is_active_tcb_ptr tcb_ptr s \ + queue = [x\((ksReadyQueues s) (d, priority)). x \ tcb_ptr] \ + setQueue d priority queue + \ \rv s. no_orphans s \" + unfolding setQueue_def + apply (wp | clarsimp)+ + apply (fastforce simp: no_orphans_def all_queued_tcb_ptrs_def + all_active_tcb_ptrs_def is_active_tcb_ptr_def) + done + +lemma setQueue_almost_no_orphans_deq [wp]: + "\ \s. almost_no_orphans tcb_ptr s \ + queue = [x\((ksReadyQueues s) (d, priority)). x \ tcb_ptr] \ + setQueue d priority queue + \ \rv s. almost_no_orphans tcb_ptr s \" + unfolding setQueue_def + apply (wp | clarsimp)+ + apply (fastforce simp: almost_no_orphans_def all_queued_tcb_ptrs_def + all_active_tcb_ptrs_def is_active_tcb_ptr_def) + done + +lemma tcbSchedDequeue_almost_no_orphans [wp]: + "\ \s. no_orphans s \ + tcbSchedDequeue thread + \ \rv s. almost_no_orphans thread s \" + unfolding tcbSchedDequeue_def + apply (wp threadSet_almost_no_orphans | simp cong: if_cong)+ + apply (simp add:no_orphans_strg_almost cong: if_cong) + done + +lemma tcbSchedDequeue_no_orphans [wp]: + "\ \s. no_orphans s \ \ is_active_tcb_ptr tcb_ptr s \ + tcbSchedDequeue tcb_ptr + \ \rv s. no_orphans s \" + unfolding tcbSchedDequeue_def + apply (wp setQueue_no_orphans_deq threadSet_no_orphans | clarsimp)+ + apply (wp getObject_tcb_wp | clarsimp simp: threadGet_def)+ + apply (drule obj_at_ko_at') + apply auto + done + +crunches setGlobalUserVSpace + for ksReadyQueues[wp]: "\s. P (ksReadyQueues s)" + (wp: crunch_wps) + +lemma switchToIdleThread_no_orphans' [wp]: + "\ \s. no_orphans s \ + (is_active_tcb_ptr (ksCurThread s) s + \ ksCurThread s \ all_queued_tcb_ptrs s) \ + switchToIdleThread + \ \rv s. no_orphans s \" + unfolding switchToIdleThread_def setCurThread_def AARCH64_H.switchToIdleThread_def + apply (simp add: no_orphans_disj all_queued_tcb_ptrs_def) + apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift hoare_vcg_disj_lift + | clarsimp)+ + apply (auto simp: no_orphans_disj all_queued_tcb_ptrs_def is_active_tcb_ptr_def + st_tcb_at_neg' tcb_at_typ_at') + done + +crunches getVMID, Arch.switchToThread + for ksCurThread[wp]: "\ s. P (ksCurThread s)" + (wp: crunch_wps getObject_inv loadObject_default_inv) + +crunches updateASIDPoolEntry, Arch.switchToThread + for no_orphans[wp]: "no_orphans" + (wp: no_orphans_lift crunch_wps) + +lemma ArchThreadDecls_H_switchToThread_all_queued_tcb_ptrs [wp]: + "\ \s. P (all_queued_tcb_ptrs s) \ + Arch.switchToThread tcb_ptr + \ \rv s. P (all_queued_tcb_ptrs s) \" + unfolding AARCH64_H.switchToThread_def all_queued_tcb_ptrs_def + apply (wp | clarsimp)+ + done + +crunch ksSchedulerAction [wp]: "Arch.switchToThread" "\s. P (ksSchedulerAction s)" + +lemma setCurThread_no_orphans [wp]: + "\ \s. no_orphans s \ + (is_active_tcb_ptr (ksCurThread s) s \ ksCurThread s : all_queued_tcb_ptrs s) \ + setCurThread newThread + \ \rv s. no_orphans s \" + unfolding setCurThread_def + apply (wp | clarsimp)+ + apply (unfold no_orphans_def all_queued_tcb_ptrs_def + all_active_tcb_ptrs_def is_active_tcb_ptr_def) + apply auto + done + +lemma tcbSchedDequeue_all_queued_tcb_ptrs: + "\\s. x \ all_queued_tcb_ptrs s \ x \ t \ + tcbSchedDequeue t \\_ s. x \ all_queued_tcb_ptrs s\" + apply (rule_tac Q="(\s. 
x \ all_queued_tcb_ptrs s) and K (x \ t)" + in hoare_pre_imp, clarsimp) + apply (rule hoare_gen_asm) + apply (clarsimp simp: tcbSchedDequeue_def all_queued_tcb_ptrs_def) + apply (rule hoare_pre) + apply (wp, clarsimp) + apply (wp hoare_vcg_ex_lift)+ + apply (rename_tac d p) + apply (rule_tac Q="\_ s. x \ set (ksReadyQueues s (d, p))" + in hoare_post_imp, clarsimp) + apply (wp hoare_vcg_all_lift | simp)+ + done + +lemma tcbSchedDequeue_all_active_tcb_ptrs[wp]: + "\\s. P (t' \ all_active_tcb_ptrs s)\ tcbSchedDequeue t \\_ s. P (t' \ all_active_tcb_ptrs s)\" + by (clarsimp simp: all_active_tcb_ptrs_def is_active_tcb_ptr_def) wp + +lemma setCurThread_almost_no_orphans: + "\\s. almost_no_orphans t s \ + (ksCurThread s \ t \ + ksCurThread s \ all_active_tcb_ptrs s \ + ksCurThread s \ all_queued_tcb_ptrs s)\ + setCurThread t \\_. no_orphans\" + unfolding setCurThread_def + apply wp + apply (fastforce simp: almost_no_orphans_def + no_orphans_def + all_queued_tcb_ptrs_def + all_active_tcb_ptrs_def + is_active_tcb_ptr_def) + done + +lemmas ArchThreadDecls_H_switchToThread_all_active_tcb_ptrs[wp] = + st_tcb_at'_all_active_tcb_ptrs_lift [OF Arch_switchToThread_pred_tcb'] + +lemmas ArchThreadDecls_H_switchToThread_all_queued_tcb_ptrs_lift[wp] = + ksQ_all_queued_tcb_ptrs_lift [OF ArchThreadDecls_H_AARCH64_H_switchToThread_ksQ] + +lemma ThreadDecls_H_switchToThread_no_orphans: + "\ \s. no_orphans s \ + st_tcb_at' runnable' tcb_ptr s \ + (ksCurThread s \ all_active_tcb_ptrs s + \ ksCurThread s \ all_queued_tcb_ptrs s)\ + ThreadDecls_H.switchToThread tcb_ptr + \ \rv s. no_orphans s \" + unfolding Thread_H.switchToThread_def + apply (wp setCurThread_almost_no_orphans + tcbSchedDequeue_almost_no_orphans) + apply (wps tcbSchedDequeue_ct') + apply (wp tcbSchedDequeue_all_queued_tcb_ptrs hoare_convert_imp)+ + apply (wps) + apply (wp)+ + apply (wps) + apply (wp) + apply (clarsimp) + done + +lemma findM_failure': + "\ \x S. \ \s. P S s \ f x \ \rv s. \ rv \ P (insert x S) s \ \ \ + \ \s. P S s \ findM f xs \ \rv s. rv = None \ P (S \ set xs) s \" + apply (induct xs arbitrary: S) + apply (clarsimp, wp, clarsimp) + apply clarsimp + apply (rule hoare_seq_ext[rotated], assumption) + apply (case_tac r) + apply (clarsimp, wp, clarsimp) + apply clarsimp + apply (rule hoare_strengthen_post, assumption) + apply clarsimp + done + +lemmas findM_failure = findM_failure'[where S="{}", simplified] + +lemma tcbSchedEnqueue_inQueue_eq: + "\ valid_queues' and K (tcb_ptr = tcb_ptr') \ + tcbSchedEnqueue tcb_ptr + \ \rv s. tcb_ptr' \ all_queued_tcb_ptrs s \" + apply (rule hoare_gen_asm, simp) + apply wp + done + +lemma tcbSchedAppend_inQueue_eq: + "\ valid_queues' and K (tcb_ptr = tcb_ptr') \ + tcbSchedAppend tcb_ptr + \ \rv s. tcb_ptr' \ all_queued_tcb_ptrs s \" + apply (rule hoare_gen_asm, simp) + apply wp + done + +lemma findM_on_success: + "\ \x. \ P x \ f x \ \rv s. rv \; \x y. \ P x \ f y \ \rv. P x \ \ \ + \ \s. \x \ set xs. P x s \ findM f xs \ \rv s. \ y. rv = Some y \" + apply (induct xs; clarsimp) + apply wp+ + apply (clarsimp simp: imp_conv_disj Bex_def) + apply (wp hoare_vcg_disj_lift hoare_vcg_ex_lift | clarsimp | assumption)+ + done + +crunch st_tcb' [wp]: switchToThread "\s. P' (st_tcb_at' P t s)" + +lemma setQueue_deq_not_empty: + "\ \s. (\tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s) \ + (\tcb_ptr. \ st_tcb_at' P tcb_ptr s \ + queue = [x\((ksReadyQueues s) (d, priority)). x \ tcb_ptr]) \ + setQueue d priority queue + \ \rv s. \tcb. 
tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s \" + unfolding setQueue_def + apply wp + apply auto + done + +lemma tcbSchedDequeue_not_empty: + "\ \s. (\tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s) \ \ st_tcb_at' P thread s \ + tcbSchedDequeue thread + \ \rv s. \tcb. tcb \ set (ksReadyQueues s p) \ st_tcb_at' P tcb s \" + unfolding tcbSchedDequeue_def + apply wp + apply (wp hoare_vcg_ex_lift threadSet_pred_tcb_no_state) + apply clarsimp + apply (wp setQueue_deq_not_empty) + apply clarsimp + apply (rule hoare_pre_post, assumption) + apply (clarsimp simp: bitmap_fun_defs) + apply wp + apply clarsimp + apply clarsimp + apply (wp setQueue_deq_not_empty)+ + apply (rule_tac Q="\rv s. \ st_tcb_at' P thread s" in hoare_post_imp) + apply fastforce + apply (wp weak_if_wp | clarsimp)+ + done + +lemmas switchToThread_all_active_tcb_ptrs[wp] = + st_tcb_at'_all_active_tcb_ptrs_lift [OF switchToThread_st_tcb'] + +(* ksSchedulerAction s = ChooseNewThread *) +lemma chooseThread_no_orphans [wp]: + notes hoare_TrueI[simp] + shows + "\\s. no_orphans s \ all_invs_but_ct_idle_or_in_cur_domain' s \ + (is_active_tcb_ptr (ksCurThread s) s + \ ksCurThread s \ all_queued_tcb_ptrs s)\ + chooseThread + \ \rv s. no_orphans s \" + (is "\?PRE\ _ \_\") + unfolding chooseThread_def Let_def + supply if_split[split del] + apply (simp only: return_bind, simp) + apply (rule hoare_seq_ext[where B="\rv s. ?PRE s \ rv = ksCurDomain s"]) + apply (rule_tac B="\rv s. ?PRE s \ curdom = ksCurDomain s \ + rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext) + apply (rename_tac l1) + apply (case_tac "l1 = 0") + (* switch to idle thread *) + apply (simp, wp (once), simp) + (* we have a thread to switch to *) + apply (clarsimp simp: bitmap_fun_defs) + apply (wp assert_inv ThreadDecls_H_switchToThread_no_orphans) + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + valid_queues_def st_tcb_at'_def) + apply (fastforce dest!: lookupBitmapPriority_obj_at' elim: obj_at'_weaken + simp: all_active_tcb_ptrs_def) + apply (wpsimp simp: bitmap_fun_defs) + apply (wp curDomain_or_return_0[simplified]) + apply (wpsimp simp: curDomain_def simp: invs_no_cicd_ksCurDomain_maxDomain')+ + done + +lemma valid_queues'_ko_atD: + "valid_queues' s \ ko_at' tcb t s \ tcbQueued tcb + \ t \ set (ksReadyQueues s (tcbDomain tcb, tcbPriority tcb))" + apply (simp add: valid_queues'_def) + apply (elim allE, erule mp) + apply normalise_obj_at' + apply (simp add: inQ_def) + done + +lemma tcbSchedAppend_in_ksQ: + "\valid_queues' and tcb_at' t\ tcbSchedAppend t + \\r s. \domain priority. t \ set (ksReadyQueues s (domain, priority))\" + apply (rule_tac Q="\s. \d p. valid_queues' s \ + obj_at' (\tcb. tcbPriority tcb = p) t s \ + obj_at' (\tcb. tcbDomain tcb = d) t s" + in hoare_pre_imp) + apply (clarsimp simp: tcb_at'_has_tcbPriority tcb_at'_has_tcbDomain) + apply (rule hoare_vcg_ex_lift)+ + apply (simp add: tcbSchedAppend_def unless_def) + apply wpsimp + apply (rule_tac Q="\rv s. tdom = d \ rv = p \ obj_at' (\tcb. tcbPriority tcb = p) t s + \ obj_at' (\tcb. tcbDomain tcb = d) t s" + in hoare_post_imp, clarsimp) + apply (wp, (wp threadGet_const)+) + apply (rule_tac Q="\rv s. + obj_at' (\tcb. tcbPriority tcb = p) t s \ + obj_at' (\tcb. tcbDomain tcb = d) t s \ + obj_at' (\tcb. 
tcbQueued tcb = rv) t s \ + (rv \ t \ set (ksReadyQueues s (d, p)))" in hoare_post_imp) + apply (clarsimp simp: o_def elim!: obj_at'_weakenE) + apply (wp threadGet_obj_at' hoare_vcg_imp_lift threadGet_const) + apply clarsimp + apply normalise_obj_at' + apply (drule(1) valid_queues'_ko_atD, simp+) + done + +lemma hoare_neg_imps: + "\P\ f \\ rv s. \ R rv s\ \ \P\ f \\r s. R r s \ Q r s\" + by (auto simp: valid_def) + +lemma setCurThread_ct [wp]: + "\ \ \ + setCurThread tcb_ptr + \ \rv s. ksCurThread s = tcb_ptr \" + unfolding setCurThread_def + apply (wp | clarsimp)+ + done + +lemma ThreadDecls_H_switchToThread_ct [wp]: + "\ \ \ + switchToThread tcb_ptr + \ \rv s. ksCurThread s = tcb_ptr \" + unfolding switchToThread_def + apply (wp | clarsimp)+ + done + +crunch no_orphans [wp]: nextDomain no_orphans +(wp: no_orphans_lift simp: Let_def) + +crunch ksQ [wp]: nextDomain "\s. P (ksReadyQueues s p)" +(simp: Let_def) + +crunch st_tcb_at' [wp]: nextDomain "\s. P (st_tcb_at' P' p s)" +(simp: Let_def) + +crunch ct' [wp]: nextDomain "\s. P (ksCurThread s)" +(simp: Let_def) + +crunch sch_act_not [wp]: nextDomain "sch_act_not t" +(simp: Let_def) + +lemma tcbSchedEnqueue_in_ksQ: + "\valid_queues' and tcb_at' t\ tcbSchedEnqueue t + \\r s. \domain priority. t \ set (ksReadyQueues s (domain, priority))\" + apply (rule_tac Q="\s. \d p. valid_queues' s \ + obj_at' (\tcb. tcbPriority tcb = p) t s \ + obj_at' (\tcb. tcbDomain tcb = d) t s" + in hoare_pre_imp) + apply (clarsimp simp: tcb_at'_has_tcbPriority tcb_at'_has_tcbDomain) + apply (rule hoare_vcg_ex_lift)+ + apply (simp add: tcbSchedEnqueue_def unless_def) + apply (wpsimp simp: if_apply_def2) + apply (rule_tac Q="\rv s. tdom = d \ rv = p \ obj_at' (\tcb. tcbPriority tcb = p) t s + \ obj_at' (\tcb. tcbDomain tcb = d) t s" + in hoare_post_imp, clarsimp) + apply (wp, (wp threadGet_const)+) + apply (rule_tac Q="\rv s. + obj_at' (\tcb. tcbPriority tcb = p) t s \ + obj_at' (\tcb. tcbDomain tcb = d) t s \ + obj_at' (\tcb. tcbQueued tcb = rv) t s \ + (rv \ t \ set (ksReadyQueues s (d, p)))" in hoare_post_imp) + apply (clarsimp simp: o_def elim!: obj_at'_weakenE) + apply (wp threadGet_obj_at' hoare_vcg_imp_lift threadGet_const) + apply clarsimp + apply normalise_obj_at' + apply (frule(1) valid_queues'_ko_atD, simp+) + done + +lemma tcbSchedEnqueue_in_ksQ': + "\valid_queues' and tcb_at' t and K (t = t')\ + tcbSchedEnqueue t' + \\r s. \domain priority. t \ set (ksReadyQueues s (domain, priority))\" + apply (rule hoare_gen_asm) + apply (wp tcbSchedEnqueue_in_ksQ | clarsimp)+ + done + +lemma all_invs_but_ct_idle_or_in_cur_domain'_strg: + "invs' s \ all_invs_but_ct_idle_or_in_cur_domain' s" + by (clarsimp simp: invs'_to_invs_no_cicd'_def) + +lemma setSchedulerAction_cnt_sch_act_not[wp]: + "\ \ \ setSchedulerAction ChooseNewThread \\rv s. sch_act_not x s\" + by (rule hoare_pre, rule hoare_strengthen_post[OF setSchedulerAction_direct]) auto + +lemma tcbSchedEnqueue_in_ksQ_aqtp[wp]: + "\valid_queues' and tcb_at' t\ tcbSchedEnqueue t + \\r s. t \ all_queued_tcb_ptrs s\" + apply (clarsimp simp: all_queued_tcb_ptrs_def) + apply (rule tcbSchedEnqueue_in_ksQ) + done + +lemma tcbSchedEnqueue_in_ksQ_already_queued: + "\\s. valid_queues' s \ tcb_at' t s \ + (\domain priority. t' \ set (ksReadyQueues s (domain, priority))) \ + tcbSchedEnqueue t + \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" + apply (case_tac "t'=t", wpsimp wp: tcbSchedEnqueue_in_ksQ) + apply (wpsimp simp: tcbSchedEnqueue_def unless_def) + apply (rule_tac Q="\_ s. \domain priority. 
t' \ set (ksReadyQueues s (domain, priority))" + in hoare_post_imp) + apply metis + apply wpsimp+ + done + +lemma tcbSchedAppend_in_ksQ_already_queued: + "\\s. valid_queues' s \ tcb_at' t s \ + (\domain priority. t' \ set (ksReadyQueues s (domain, priority))) \ + tcbSchedAppend t + \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" + apply (case_tac "t'=t", wpsimp wp: tcbSchedAppend_in_ksQ) + apply (wpsimp simp: tcbSchedAppend_def unless_def) + apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" + in hoare_post_imp) + apply metis + apply wpsimp+ + done + +lemma tcbSchedEnqueue_in_ksQ'': + "\\s. valid_queues' s \ tcb_at' t s \ + (t' \ t \ (\domain priority. t' \ set (ksReadyQueues s (domain, priority)))) \ + tcbSchedEnqueue t + \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" + apply (case_tac "t'=t", wpsimp wp: tcbSchedEnqueue_in_ksQ) + apply clarsimp + apply (wpsimp simp: tcbSchedEnqueue_def unless_def) + apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" + in hoare_post_imp) + apply metis + apply wpsimp+ + done + +lemma tcbSchedAppend_in_ksQ'': + "\\s. valid_queues' s \ tcb_at' t s \ + (t' \ t \ (\domain priority. t' \ set (ksReadyQueues s (domain, priority)))) \ + tcbSchedAppend t + \\r s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))\" + apply (case_tac "t'=t", wpsimp wp: tcbSchedAppend_in_ksQ) + apply clarsimp + apply (wpsimp simp: tcbSchedAppend_def unless_def) + apply (rule_tac Q="\_ s. \domain priority. t' \ set (ksReadyQueues s (domain, priority))" + in hoare_post_imp) + apply metis + apply wpsimp+ + done + +crunches setSchedulerAction + for pred_tcb_at': "\s. P (pred_tcb_at' proj Q t s)" + and ct': "\s. P (ksCurThread s)" + (wp_del: ssa_wp) + +lemmas ssa_st_tcb_at'_ksCurThread[wp] = + hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_pred_tcb_at' setSchedulerAction_ct'] + +lemma ct_active_st_tcb_at': + "ct_active' s = st_tcb_at' runnable' (ksCurThread s) s" + apply (rule iffI) + apply (drule ct_active_runnable') + apply (simp add: ct_in_state'_def) + apply (clarsimp simp: ct_in_state'_def) + apply (erule pred_tcb'_weakenE) + apply (case_tac st, auto) + done + +lemma tcbSchedEnqueue_in_ksQ_already_queued_aqtp: + "\\s. valid_queues' s \ tcb_at' t s \ + t' \ all_queued_tcb_ptrs s \ tcbSchedEnqueue t + \\r s. t' \ all_queued_tcb_ptrs s \" + by (clarsimp simp: all_queued_tcb_ptrs_def tcbSchedEnqueue_in_ksQ_already_queued) + +(* FIXME move *) +lemma invs_switchToThread_runnable': + "\ invs' s ; ksSchedulerAction s = SwitchToThread t \ \ st_tcb_at' runnable' t s" + by (simp add: invs'_def valid_state'_def) + +(* for shoving pred_tcb_at' through hoare_vcg_imp_lift for tcbs we know are there *) +lemma not_pred_tcb_at'I: + "\ pred_tcb_at' f (Not \ P) t s ; tcb_at' t s \ \ \ pred_tcb_at' f P t s" + by (subst (asm) pred_tcb_at'_Not, blast) + +lemma in_all_active_tcb_ptrsD: + "t \ all_active_tcb_ptrs s \ st_tcb_at' runnable' t s" + unfolding all_active_tcb_ptrs_def is_active_tcb_ptr_def + is_active_thread_state_def isRunning_def isRestart_def + apply clarsimp + apply (erule pred_tcb'_weakenE) + apply (case_tac st; clarsimp) + done + +lemma chooseThread_nosch: + "\\s. P (ksSchedulerAction s)\ + chooseThread + \\rv s. 
P (ksSchedulerAction s)\" + unfolding chooseThread_def Let_def curDomain_def + supply if_split[split del] + apply (simp only: return_bind, simp) + apply (wp findM_inv | simp)+ + apply (case_tac queue) + apply (wp stt_nosch | simp add: curDomain_def bitmap_fun_defs)+ + done + +lemma scheduleChooseNewThread_no_orphans: + "\ invs' and no_orphans + and (\s. ksSchedulerAction s = ChooseNewThread + \ (st_tcb_at' runnable' (ksCurThread s) s + \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p))))) \ + scheduleChooseNewThread + \\_. no_orphans \" + unfolding scheduleChooseNewThread_def + apply (wp add: ssa_no_orphans hoare_vcg_all_lift) + apply (wp hoare_disjI1 chooseThread_nosch)+ + apply (wp nextDomain_invs_no_cicd' hoare_vcg_imp_lift + hoare_lift_Pf2 [OF ksQ_all_queued_tcb_ptrs_lift[OF nextDomain_ksQ] + nextDomain_ct'] + hoare_lift_Pf2 [OF st_tcb_at'_is_active_tcb_ptr_lift[OF nextDomain_st_tcb_at'] + nextDomain_ct'] + hoare_vcg_all_lift getDomainTime_wp)[2] + apply (wpsimp simp: if_apply_def2 invs'_invs_no_cicd all_queued_tcb_ptrs_def + is_active_tcb_ptr_runnable')+ + done + +lemma schedule_no_orphans[wp]: + notes ssa_wp[wp del] + shows + "\ \s. no_orphans s \ invs' s \ + schedule + \ \rv s. no_orphans s \" +proof - + + have do_switch_to: + "\candidate. + \\s. no_orphans s \ ksSchedulerAction s = SwitchToThread candidate + \ st_tcb_at' runnable' candidate s + \ (st_tcb_at' runnable' (ksCurThread s) s + \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p)))) \ + do ThreadDecls_H.switchToThread candidate; + setSchedulerAction ResumeCurrentThread + od + \\rv. no_orphans\" + apply (wpsimp wp: scheduleChooseNewThread_no_orphans ssa_no_orphans + hoare_vcg_all_lift ThreadDecls_H_switchToThread_no_orphans)+ + apply (rule_tac Q="\_ s. (t = candidate \ ksCurThread s = candidate) \ + (t \ candidate \ sch_act_not t s)" + in hoare_post_imp) + apply (wpsimp wp: stt_nosch hoare_weak_lift_imp)+ + apply (fastforce dest!: in_all_active_tcb_ptrsD simp: all_queued_tcb_ptrs_def comp_def) + done + + have abort_switch_to_enq: + "\candidate. + \\s. no_orphans s \ invs' s \ valid_queues' s + \ ksSchedulerAction s = SwitchToThread candidate + \ (st_tcb_at' runnable' (ksCurThread s) s + \ (\d p. ksCurThread s \ set (ksReadyQueues s (d, p)))) \ + do tcbSchedEnqueue candidate; + setSchedulerAction ChooseNewThread; + scheduleChooseNewThread + od + \\rv. no_orphans\" + apply (rule hoare_pre) + apply (wp scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) + apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift + simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def + | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_ksQ])+ + apply (wp tcbSchedEnqueue_in_ksQ' tcbSchedEnqueue_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift) + apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_pred_tcb_at'] + hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_in_ksQ_already_queued] + tcbSchedEnqueue_no_orphans + | strengthen not_pred_tcb_at'_strengthen + | wp (once) hoare_vcg_imp_lift')+ + apply (clarsimp) + apply (frule invs_sch_act_wf', clarsimp simp: pred_tcb_at') + apply (simp add: st_tcb_at_neg' tcb_at_invs') + done + + have abort_switch_to_app: + "\candidate. + \\s. no_orphans s \ invs' s \ valid_queues' s + \ ksSchedulerAction s = SwitchToThread candidate + \ (st_tcb_at' runnable' (ksCurThread s) s + \ (\d p. 
ksCurThread s \ set (ksReadyQueues s (d, p))) ) \ + do tcbSchedAppend candidate; + setSchedulerAction ChooseNewThread; + scheduleChooseNewThread + od + \\rv. no_orphans\" + apply (rule hoare_pre) + apply (wp scheduleChooseNewThread_no_orphans ssa_no_orphans setSchedulerAction_direct) + apply (wpsimp wp: hoare_vcg_imp_lift' hoare_vcg_ex_lift + simp: is_active_tcb_ptr_runnable' all_queued_tcb_ptrs_def + | rule hoare_lift_Pf2[where f=ksCurThread, OF setSchedulerAction_ksQ])+ + apply (wp tcbSchedAppend_in_ksQ'' tcbSchedAppend_no_orphans hoare_vcg_all_lift hoare_vcg_imp_lift' hoare_vcg_disj_lift) + apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedAppend_pred_tcb_at'] + hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedAppend_in_ksQ_already_queued] + tcbSchedAppend_no_orphans + | strengthen not_pred_tcb_at'_strengthen + | wp (once) hoare_vcg_imp_lift')+ + apply (clarsimp) + apply (frule invs_sch_act_wf', clarsimp simp: pred_tcb_at') + apply (simp add: st_tcb_at_neg' tcb_at_invs') + done + + show ?thesis + supply K_bind_def[simp del] + unfolding schedule_def + apply (wp, wpc) + \ \action = ResumeCurrentThread\ + apply (wp)[1] + \ \action = ChooseNewThread\ + apply (clarsimp simp: when_def scheduleChooseNewThread_def) + apply (wp ssa_no_orphans hoare_vcg_all_lift) + apply (wp hoare_disjI1 chooseThread_nosch) + apply (wp nextDomain_invs_no_cicd' hoare_vcg_imp_lift + hoare_lift_Pf2 [OF ksQ_all_queued_tcb_ptrs_lift + [OF nextDomain_ksQ] + nextDomain_ct'] + hoare_lift_Pf2 [OF st_tcb_at'_is_active_tcb_ptr_lift + [OF nextDomain_st_tcb_at'] + nextDomain_ct'] + hoare_vcg_all_lift getDomainTime_wp)[2] + apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_in_ksQ' + hoare_drop_imp + | clarsimp simp: all_queued_tcb_ptrs_def + | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg + | wps tcbSchedEnqueue_ct')+)[1] + apply ((wp tcbSchedEnqueue_no_orphans tcbSchedEnqueue_in_ksQ' + hoare_drop_imp + | clarsimp simp: all_queued_tcb_ptrs_def + | strengthen all_invs_but_ct_idle_or_in_cur_domain'_strg + | wps tcbSchedEnqueue_ct')+)[1] + apply wp[1] + \ \action = SwitchToThread candidate\ + apply (clarsimp) + apply (rename_tac candidate) + apply (wpsimp wp: do_switch_to abort_switch_to_enq abort_switch_to_app) + (* isHighestPrio *) + apply (wp hoare_drop_imps) + apply (wp add: tcbSchedEnqueue_no_orphans)+ + apply (clarsimp simp: conj_comms cong: conj_cong imp_cong split del: if_split) + apply (wp hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_pred_tcb_at'] + hoare_lift_Pf2[where f=ksCurThread, OF tcbSchedEnqueue_in_ksQ'] + hoare_vcg_imp_lift' + | strengthen not_pred_tcb_at'_strengthen)+ + apply (clarsimp simp: comp_def) + apply (frule invs_queues) + apply (clarsimp simp: invs_valid_queues' tcb_at_invs' st_tcb_at_neg' is_active_tcb_ptr_runnable') + apply (fastforce simp: all_invs_but_ct_idle_or_in_cur_domain'_strg invs_switchToThread_runnable') + done +qed + +lemma setNotification_no_orphans [wp]: + "\ \s. no_orphans s \ + setNotification p ntfn + \ \_ s. no_orphans s \" + apply (rule no_orphans_lift) + apply (wp | clarsimp simp: setNotification_def updateObject_default_def)+ + done + +crunch no_orphans [wp]: doMachineOp "no_orphans" +(wp: no_orphans_lift) + +crunch no_orphans [wp]: setMessageInfo "no_orphans" + +crunch no_orphans [wp]: completeSignal "no_orphans" +(simp: crunch_simps wp: crunch_wps) + +lemma possibleSwitchTo_almost_no_orphans [wp]: + "\ \s. 
almost_no_orphans target s \ valid_queues' s \ st_tcb_at' runnable' target s + \ weak_sch_act_wf (ksSchedulerAction s) s \ + possibleSwitchTo target + \ \rv s. no_orphans s \" + unfolding possibleSwitchTo_def + by (wp rescheduleRequired_valid_queues'_weak tcbSchedEnqueue_almost_no_orphans + ssa_almost_no_orphans hoare_weak_lift_imp + | wpc | clarsimp + | wp (once) hoare_drop_imp)+ + +lemma possibleSwitchTo_almost_no_orphans': + "\ \s. almost_no_orphans target s \ valid_queues' s \ st_tcb_at' runnable' target s + \ sch_act_wf (ksSchedulerAction s) s \ + possibleSwitchTo target + \ \rv s. no_orphans s \" + by wp (strengthen sch_act_wf_weak, assumption) + +lemma tcbSchedAppend_almost_no_orphans: + "\ \s. almost_no_orphans thread s \ valid_queues' s \ + tcbSchedAppend thread + \ \_ s. no_orphans s \" + unfolding tcbSchedAppend_def + apply (wp setQueue_almost_no_orphans_enq[where tcb_ptr=thread] threadSet_no_orphans + | clarsimp simp: unless_def | simp only: subset_insertI)+ + apply (unfold threadGet_def) + apply (wp getObject_tcb_wp | clarsimp)+ + apply (drule obj_at_ko_at', clarsimp) + apply (rule_tac x=ko in exI) + apply (clarsimp simp: almost_no_orphans_def no_orphans_def) + apply (drule queued_in_queue | simp)+ + apply (auto simp: all_queued_tcb_ptrs_def) + done + +lemma no_orphans_is_almost[simp]: + "no_orphans s \ almost_no_orphans t s" + by (clarsimp simp: no_orphans_def almost_no_orphans_def) + +crunch no_orphans [wp]: decDomainTime no_orphans +(wp: no_orphans_lift) + +crunch valid_queues' [wp]: decDomainTime valid_queues' + +lemma timerTick_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + timerTick + \ \_ s. no_orphans s \" + unfolding timerTick_def getDomainTime_def + supply if_split[split del] + apply (subst threadState_case_if) + apply (wpsimp wp: threadSet_no_orphans threadSet_valid_queues' + threadSet_valid_queues' tcbSchedAppend_almost_no_orphans threadSet_sch_act + threadSet_almost_no_orphans threadSet_no_orphans tcbSchedAppend_sch_act_wf + hoare_drop_imp + simp: if_apply_def2 + | strengthen sch_act_wf_weak)+ + apply (rule_tac Q="\rv s. no_orphans s \ valid_queues' s \ tcb_at' thread s + \ sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp) + apply (clarsimp simp: inQ_def) + apply (wp hoare_drop_imps | clarsimp)+ + apply (auto split: if_split) + done + + +lemma handleDoubleFault_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ + handleDoubleFault tptr ex1 ex2 + \ \rv s. no_orphans s \" + unfolding handleDoubleFault_def + apply (wp setThreadState_not_active_no_orphans + | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ + done + +crunch st_tcb' [wp]: getThreadCallerSlot "st_tcb_at' (\st. P st) t" + +crunch no_orphans [wp]: cteInsert "no_orphans" +(wp: crunch_wps) + +crunch no_orphans [wp]: getThreadCallerSlot "no_orphans" + +crunch no_orphans [wp]: getThreadReplySlot "no_orphans" + +lemma setupCallerCap_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ + setupCallerCap sender receiver gr + \ \rv s. no_orphans s \" + unfolding setupCallerCap_def + apply (wp setThreadState_not_active_no_orphans + | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ + done + +crunch almost_no_orphans [wp]: cteInsert "almost_no_orphans tcb_ptr" +(wp: crunch_wps) + +crunch almost_no_orphans [wp]: getThreadCallerSlot "almost_no_orphans tcb_ptr" + +crunch almost_no_orphans [wp]: getThreadReplySlot "almost_no_orphans tcb_ptr" + +lemma setupCallerCap_almost_no_orphans [wp]: + "\ \s. 
almost_no_orphans tcb_ptr s \ valid_queues' s \ + setupCallerCap sender receiver gr + \ \rv s. almost_no_orphans tcb_ptr s \" + unfolding setupCallerCap_def + apply (wp setThreadState_not_active_almost_no_orphans + | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ + done + +crunches doIPCTransfer, setMRs + for no_orphans [wp]: "no_orphans" + (wp: no_orphans_lift) + +crunch ksQ'[wp]: setEndpoint "\s. P (ksReadyQueues s)" + (wp: setObject_queues_unchanged_tcb updateObject_default_inv) + +crunch no_orphans [wp]: setEndpoint "no_orphans" + (wp: no_orphans_lift) + +lemma sendIPC_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + sendIPC blocking call badge canGrant canGrantReply thread epptr + \ \rv s. no_orphans s \" + unfolding sendIPC_def + apply (wp hoare_drop_imps setThreadState_not_active_no_orphans sts_st_tcb' + possibleSwitchTo_almost_no_orphans' + | wpc + | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ + + apply (rule_tac Q="\rv. no_orphans and valid_queues' and valid_objs' and ko_at' rv epptr + and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) + apply (fastforce simp: valid_objs'_def valid_obj'_def valid_ep'_def obj_at'_def) + apply (wp get_ep_sp' | clarsimp)+ + done + +lemma sendFaultIPC_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + sendFaultIPC tptr fault + \ \rv s. no_orphans s \" + unfolding sendFaultIPC_def + apply (rule hoare_pre) + apply (wp threadSet_valid_queues' threadSet_no_orphans threadSet_valid_objs' + threadSet_sch_act | wpc | clarsimp)+ + apply (rule_tac Q'="\handlerCap s. no_orphans s \ valid_queues' s + \ valid_objs' s + \ sch_act_wf (ksSchedulerAction s) s" + in hoare_post_imp_R) + apply (wp | clarsimp simp: inQ_def valid_tcb'_def tcb_cte_cases_def)+ + done + +lemma sendIPC_valid_queues' [wp]: + "\ \s. valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + sendIPC blocking call badge canGrant canGrantReply thread epptr + \ \rv s. valid_queues' s \" + unfolding sendIPC_def + apply (wpsimp wp: hoare_drop_imps) + apply (wpsimp | wp (once) sts_st_tcb')+ + apply (rule_tac Q="\rv. valid_queues' and valid_objs' and ko_at' rv epptr + and (\s. sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp) + apply (clarsimp) + apply (wp get_ep_sp' | clarsimp)+ + done + +lemma sendFaultIPC_valid_queues' [wp]: + "\ \s. valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + sendFaultIPC tptr fault + \ \rv s. valid_queues' s \" + unfolding sendFaultIPC_def + apply (rule hoare_pre) + apply (wp threadSet_valid_queues' threadSet_valid_objs' threadSet_sch_act + | wpc | clarsimp)+ + apply (rule_tac Q'="\handlerCap s. valid_queues' s \ valid_objs' s + \ sch_act_wf (ksSchedulerAction s) s" + in hoare_post_imp_R) + apply (wp | clarsimp simp: inQ_def valid_tcb'_def tcb_cte_cases_def)+ + done + +lemma handleFault_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + handleFault tptr ex1 + \ \rv s. no_orphans s \" + unfolding handleFault_def + apply (rule hoare_pre) + apply (wp | clarsimp)+ + done + +lemma replyFromKernel_no_orphans [wp]: + "\ \s. no_orphans s \ + replyFromKernel thread r + \ \rv s. no_orphans s \" + apply (cases r, simp_all add: replyFromKernel_def) + apply wp + done + +crunch inv [wp]: alignError "P" + +lemma createObjects_no_orphans [wp]: + "\ \s. 
no_orphans s \ pspace_aligned' s \ pspace_no_overlap' ptr sz s \ pspace_distinct' s + \ n \ 0 \ range_cover ptr sz (objBitsKO val + gbits) n + \ \ case_option False (is_active_thread_state \ tcbState) (projectKO_opt val) \ + createObjects ptr n val gbits + \ \rv s. no_orphans s \" + apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def all_queued_tcb_ptrs_def) + apply (simp only: imp_conv_disj pred_tcb_at'_def createObjects_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift createObjects_orig_obj_at2') + apply clarsimp + apply (erule(1) impE) + apply clarsimp + apply (drule_tac x = x in spec) + apply (erule impE) + apply (clarsimp simp: obj_at'_def split: option.splits) + apply simp + done + +crunch no_orphans [wp]: insertNewCap "no_orphans" +(wp: hoare_drop_imps) + +lemma no_orphans_ksArchState_idem[simp]: + "no_orphans (s\ksArchState := f (ksArchState s)\) = no_orphans s" + unfolding no_orphans_def all_queued_tcb_ptrs_def all_active_tcb_ptrs_def is_active_tcb_ptr_def + by clarsimp + +lemma createNewCaps_no_orphans: + "\ (\s. no_orphans s + \ pspace_aligned' s \ pspace_distinct' s + \ pspace_no_overlap' ptr sz s + \ (tp = APIObjectType CapTableObject \ us > 0)) + and K (range_cover ptr sz (APIType_capBits tp us) n \ 0 < n) \ + createNewCaps tp ptr n us d + \ \rv s. no_orphans s \" + supply if_split[split del] + apply (clarsimp simp: createNewCaps_def toAPIType_def cong: option.case_cong) + apply (cases tp; simp) + apply (rename_tac apiobject_type) + apply (case_tac apiobject_type; simp) + apply (wpsimp wp: mapM_x_wp' threadSet_no_orphans + | clarsimp simp: is_active_thread_state_def makeObject_tcb + projectKO_opt_tcb isRunning_def isRestart_def + APIType_capBits_def Arch_createNewCaps_def + objBits_if_dev + | simp add: objBits_simps mult_2 nat_arith.add1 split: if_split)+ + done + +crunches updatePTType + for no_orphans[wp]: "no_orphans" + (wp: no_orphans_lift) + +lemma createObject_no_orphans: + "\pspace_no_overlap' ptr sz and pspace_aligned' and pspace_distinct' and + cte_wp_at' (\cte. cteCap cte = (capability.UntypedCap d ptr sz idx)) cref and + K (range_cover ptr sz (APIType_capBits tp us) (Suc 0)) and no_orphans\ + RetypeDecls_H.createObject tp ptr us d + \\xa. no_orphans\" + apply (simp only: createObject_def AARCH64_H.createObject_def placeNewObject_def2) + apply (wpsimp wp: createObjects'_wp_subst threadSet_no_orphans + createObjects_no_orphans[where sz = sz] + simp: placeNewObject_def2 placeNewDataObject_def + projectKO_opt_tcb cte_wp_at_ctes_of projectKO_opt_ep + is_active_thread_state_def makeObject_tcb pageBits_def unless_def + projectKO_opt_tcb isRunning_def isRestart_def + APIType_capBits_def objBits_simps + split_del: if_split) + apply (clarsimp simp: toAPIType_def APIType_capBits_def objBits_simps + bit_simps + split: object_type.split_asm apiobject_type.split_asm if_splits) + done + +lemma createNewObjects_no_orphans: + "\\s. no_orphans s \ invs' s \ pspace_no_overlap' ptr sz s + \ (\slot\set slots. cte_wp_at' (\c. cteCap c = capability.NullCap) slot s) + \ cte_wp_at' (\cte. cteCap cte = UntypedCap d (ptr && ~~ mask sz) sz idx) cref s + \ caps_no_overlap'' ptr sz s + \ range_cover ptr sz (APIType_capBits tp us) (length slots) + \ (tp = APIObjectType ArchTypes_H.CapTableObject \ us > 0) + \ caps_overlap_reserved' {ptr..ptr + of_nat (length slots) * 2 ^ APIType_capBits tp us - 1} s + \ slots \ [] \ distinct slots \ ptr \ 0 + \ sz \ maxUntypedSizeBits\ + createNewObjects tp cref slots ptr us d + \ \rv s. 
no_orphans s \" + apply (rule hoare_name_pre_state) + apply clarsimp + apply (rule hoare_pre) + apply (rule createNewObjects_wp_helper) + apply (simp)+ + apply (simp add:insertNewCaps_def) + apply wp + apply (rule_tac P = "length caps = length slots" in hoare_gen_asm) + apply (wp zipWithM_x_inv) + apply simp + apply (wp createNewCaps_no_orphans[where sz = sz] | clarsimp)+ + apply (rule hoare_strengthen_post[OF createNewCaps_ret_len]) + apply simp + apply (clarsimp simp:invs_pspace_aligned' invs_valid_pspace' invs_pspace_distinct') + apply (intro conjI) + apply (erule range_cover.range_cover_n_less[where 'a=machine_word_len, folded word_bits_def]) + apply (clarsimp simp:cte_wp_at_ctes_of) + apply (simp add:invs'_def valid_state'_def) + apply (simp add: invs_ksCurDomain_maxDomain') + done + +lemma ksMachineState_ksPSpace_upd_comm: + "ksPSpace_update g (ksMachineState_update f s) = + ksMachineState_update f (ksPSpace_update g s)" +by simp + +lemma deleteObjects_no_orphans [wp]: + "\ (\s. no_orphans s \ pspace_distinct' s) and K (is_aligned ptr bits) \ + deleteObjects ptr bits + \ \rv s. no_orphans s \" + apply (rule hoare_gen_asm) + apply (unfold deleteObjects_def2 doMachineOp_def split_def) + apply (wp hoare_drop_imps | clarsimp)+ + apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def + all_queued_tcb_ptrs_def is_active_tcb_ptr_def + ksMachineState_ksPSpace_upd_comm) + apply (drule_tac x=tcb_ptr in spec) + apply (clarsimp simp: pred_tcb_at'_def obj_at_delete'[simplified field_simps] + cong: if_cong) + done + +crunch no_orphans[wp]: updateFreeIndex "no_orphans" + +lemma resetUntypedCap_no_orphans [wp]: + "\ (\s. no_orphans s \ pspace_distinct' s \ valid_objs' s) + and cte_wp_at' (isUntypedCap o cteCap) slot\ + resetUntypedCap slot + \ \rv s. no_orphans s \" + apply (simp add: resetUntypedCap_def) + apply (rule hoare_pre) + apply (wp mapME_x_inv_wp preemptionPoint_inv getSlotCap_wp hoare_drop_imps + | simp add: unless_def split del: if_split)+ + apply (clarsimp simp: cte_wp_at_ctes_of split del: if_split) + apply (frule(1) cte_wp_at_valid_objs_valid_cap'[OF ctes_of_cte_wpD]) + apply (clarsimp simp: isCap_simps valid_cap_simps' capAligned_def) + done + +lemma invokeUntyped_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ valid_untyped_inv' ui s \ ct_active' s \ + invokeUntyped ui + \ \reply s. no_orphans s \" + apply (rule hoare_pre, rule hoare_strengthen_post) + apply (rule invokeUntyped_invs''[where Q=no_orphans]) + apply (wp createNewCaps_no_orphans)+ + apply (clarsimp simp: valid_pspace'_def) + apply (intro conjI, simp_all)[1] + apply (wp | simp)+ + apply (cases ui, auto simp: cte_wp_at_ctes_of)[1] + done + +lemma setInterruptState_no_orphans [wp]: + "\ \s. no_orphans s \ + setInterruptState a + \ \rv s. no_orphans s \" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift | clarsimp)+ + done + +crunch no_orphans [wp]: emptySlot "no_orphans" + +lemma mapM_x_match: + "\I and V xs\ mapM_x m xs \\rv. Q\ \ \I and V xs\ mapM_x m xs \\rv. Q\" + by assumption + +lemma cancelAllIPC_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + cancelAllIPC epptr + \ \rv s. no_orphans s \" + unfolding cancelAllIPC_def + apply (wp sts_valid_objs' set_ep_valid_objs' sts_st_tcb' + hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans + | wpc + | rule mapM_x_match, + rename_tac list, + rule_tac V="\_. valid_queues' and valid_objs'" + and I="no_orphans and (\s. \t\set list. 
tcb_at' t s)" + in mapM_x_inv_wp2 + | clarsimp simp: valid_tcb_state'_def)+ + apply (rule_tac Q="\rv. no_orphans and valid_objs' and valid_queues' and ko_at' rv epptr" + in hoare_post_imp) + apply (fastforce simp: valid_obj'_def valid_ep'_def obj_at'_def) + apply (wp get_ep_sp' | clarsimp)+ + done + +lemma cancelAllSignals_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + cancelAllSignals ntfn + \ \rv s. no_orphans s \" + unfolding cancelAllSignals_def + apply (wp sts_valid_objs' set_ntfn_valid_objs' sts_st_tcb' + hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans + | wpc + | clarsimp simp: valid_tcb_state'_def)+ + apply (rename_tac list) + apply (rule_tac V="\_. valid_queues' and valid_objs'" + and I="no_orphans and (\s. \t\set list. tcb_at' t s)" + in mapM_x_inv_wp2) + apply simp + apply (wp sts_valid_objs' set_ntfn_valid_objs' sts_st_tcb' + hoare_vcg_const_Ball_lift tcbSchedEnqueue_almost_no_orphans| + clarsimp simp: valid_tcb_state'_def)+ + apply (rule_tac Q="\rv. no_orphans and valid_objs' and valid_queues' and ko_at' rv ntfn" + in hoare_post_imp) + apply (fastforce simp: valid_obj'_def valid_ntfn'_def obj_at'_def) + apply (wp get_ntfn_sp' | clarsimp)+ + done + +crunch no_orphans[wp]: setBoundNotification "no_orphans" + +lemma unbindNotification_no_orphans[wp]: + "\\s. no_orphans s\ + unbindNotification t + \ \rv s. no_orphans s\" + unfolding unbindNotification_def + apply (rule hoare_seq_ext[OF _ gbn_sp']) + apply (case_tac ntfnPtr, simp_all, wp, simp) + apply (rule hoare_seq_ext[OF _ get_ntfn_sp']) + apply (wp | simp)+ + done + +lemma unbindMaybeNotification_no_orphans[wp]: + "\\s. no_orphans s\ + unbindMaybeNotification a + \ \rv s. no_orphans s\" + unfolding unbindMaybeNotification_def + by (wp getNotification_wp | simp | wpc)+ + +lemma finaliseCapTrue_standin_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + finaliseCapTrue_standin cap final + \ \rv s. no_orphans s \" + unfolding finaliseCapTrue_standin_def + apply (rule hoare_pre) + apply (wp | clarsimp simp: Let_def | wpc)+ + done + +lemma cteDeleteOne_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + cteDeleteOne slot + \ \rv s. no_orphans s \" + unfolding cteDeleteOne_def + apply (wp assert_inv isFinalCapability_inv weak_if_wp | clarsimp simp: unless_def)+ + done + +crunch valid_objs' [wp]: getThreadReplySlot "valid_objs'" + +lemma cancelSignal_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + cancelSignal t ntfn + \ \rv s. no_orphans s \" + unfolding cancelSignal_def Let_def + apply (rule hoare_pre) + apply (wp hoare_drop_imps setThreadState_not_active_no_orphans | wpc + | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def)+ + done + +lemma cancelIPC_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ + cancelIPC t + \ \rv s. no_orphans s \" + unfolding cancelIPC_def Let_def + apply (rule hoare_pre) + apply (wp setThreadState_not_active_no_orphans hoare_drop_imps weak_if_wp + threadSet_valid_queues' threadSet_valid_objs' threadSet_no_orphans | wpc + | clarsimp simp: is_active_thread_state_def isRestart_def isRunning_def + inQ_def valid_tcb'_def tcb_cte_cases_def cteSizeBits_def)+ + done + + +lemma asUser_almost_no_orphans: + "\almost_no_orphans t\ asUser a f \\_. almost_no_orphans t\" + unfolding almost_no_orphans_disj all_queued_tcb_ptrs_def + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +lemma sendSignal_no_orphans [wp]: + "\ \s. 
no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s\ + sendSignal ntfnptr badge + \ \_ s. no_orphans s \" + unfolding sendSignal_def + apply (rule hoare_pre) + apply (wp sts_st_tcb' gts_wp' getNotification_wp asUser_almost_no_orphans + cancelIPC_weak_sch_act_wf + | wpc | clarsimp simp: sch_act_wf_weak)+ + done + +crunches vgicUpdateLR + for no_orphans[wp]: "no_orphans" + (wp: no_orphans_lift crunch_wps) + +crunch not_pred_tcb_at'[wp]: vgicUpdateLR,doMachineOp "\s. \ (pred_tcb_at' proj P' t) s" + +crunch valid_queues' [wp]: vgicUpdateLR valid_queues' + +crunches vcpuUpdate, vgicUpdateLR, doMachineOp + for no_orphans[wp]: no_orphans + and tcb_in_cur_domain'[wp]: "tcb_in_cur_domain' t" + (wp: no_orphans_lift tcb_in_cur_domain'_lift) + +lemma vgicMaintenance_no_orphans[wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + vgicMaintenance + \\_. no_orphans\" + unfolding vgicMaintenance_def Let_def + by (wpsimp wp: sch_act_wf_lift hoare_drop_imp[where f="vgicUpdateLR v idx virq" for v idx virq] + hoare_drop_imp[where f="return v" for v] + hoare_drop_imp[where f="doMachineOp f" for f]) + +lemma vppiEvent_no_orphans[wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + vppiEvent irq + \\_. no_orphans\" + unfolding vppiEvent_def Let_def + by (wpsimp wp: hoare_vcg_imp_lift' sch_act_wf_lift | wps)+ + +(* FIXME AARCH64: move *) +lemma irqVPPIEventIndex_irqVGICMaintenance_None[simp]: + "irqVPPIEventIndex irqVGICMaintenance = None" + unfolding irqVTimerEvent_def irqVGICMaintenance_def IRQ_def irqVPPIEventIndex_def + by simp + +lemma handleReservedIRQ_no_orphans[wp]: + "\ \s. no_orphans s \ valid_queues' s \ valid_objs' s \ sch_act_wf (ksSchedulerAction s) s \ + handleReservedIRQ irq + \\_. no_orphans \" + unfolding handleReservedIRQ_def + by (case_tac "irq = irqVGICMaintenance"; wpsimp) + +lemma handleInterrupt_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + handleInterrupt irq + \ \rv s. no_orphans s \" + unfolding handleInterrupt_def + supply if_split[split del] + apply (wp hoare_drop_imps hoare_vcg_all_lift getIRQState_inv + | wpc | clarsimp simp: invs'_def valid_state'_def maskIrqSignal_def + if_apply_def2)+ + done + +lemma updateRestartPC_no_orphans[wp]: + "\ \s. no_orphans s \ invs' s \ + updateRestartPC t + \ \rv s. no_orphans s \" + by (wpsimp simp: updateRestartPC_def asUser_no_orphans) + +lemma updateRestartPC_valid_queues'[wp]: + "\ \s. valid_queues' s \ + updateRestartPC t + \ \rv s. valid_queues' s \" + unfolding updateRestartPC_def + apply (rule asUser_valid_queues') + done + +lemma updateRestartPC_no_orphans_invs'_valid_queues'[wp]: + "\\s. no_orphans s \ invs' s \ valid_queues' s \ + updateRestartPC t + \\rv s. no_orphans s \ valid_queues' s \" + by (wpsimp simp: updateRestartPC_def asUser_no_orphans) + +lemma suspend_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' t s \ + suspend t + \ \rv s. 
no_orphans s \" + unfolding suspend_def + apply (wp | clarsimp simp: unless_def | rule conjI)+ + apply (clarsimp simp: is_active_tcb_ptr_def is_active_thread_state_def st_tcb_at_neg2) + apply (wp setThreadState_not_active_no_orphans hoare_disjI1 setThreadState_st_tcb + | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def)+ + apply (wp hoare_drop_imp)+ + apply auto + done + +crunches invalidateASIDEntry, invalidateTLBByASID + for no_orphans[wp]: no_orphans + (wp: no_orphans_lift) + +lemma deleteASIDPool_no_orphans [wp]: + "\ \s. no_orphans s \ + deleteASIDPool asid pool + \ \rv s. no_orphans s \" + unfolding deleteASIDPool_def + apply (wp | clarsimp)+ + apply (rule_tac Q="\rv s. no_orphans s" in hoare_post_imp) + apply (clarsimp simp: no_orphans_def all_queued_tcb_ptrs_def + all_active_tcb_ptrs_def is_active_tcb_ptr_def) + apply (wp mapM_wp_inv getObject_inv loadObject_default_inv | clarsimp)+ + done + +lemma storePTE_no_orphans [wp]: + "\ \s. no_orphans s \ + storePTE ptr val + \ \rv s. no_orphans s \" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +crunch no_orphans [wp]: modifyArchState, vcpuUpdate, archThreadSet, dissociateVCPUTCB, vcpuFinalise "no_orphans" + (wp: no_orphans_lift crunch_wps) + +crunch no_orphans [wp]: unmapPage "no_orphans" + (wp: crunch_wps) + +crunches unmapPageTable, prepareThreadDelete + for no_orphans [wp]: "no_orphans" + (wp: lookupPTSlotFromLevel_inv) + +lemma setASIDPool_no_orphans [wp]: + "\ \s. no_orphans s \ + setObject p (ap :: asidpool) + \ \rv s. no_orphans s \" + unfolding no_orphans_disj all_queued_tcb_ptrs_def + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + done + +lemma deleteASID_no_orphans [wp]: + "\ \s. no_orphans s \ + deleteASID asid pd + \ \rv s. no_orphans s \" + unfolding deleteASID_def + apply (wp getObject_inv loadObject_default_inv | wpc | clarsimp)+ + done + +lemma arch_finaliseCap_no_orphans [wp]: + "\ \s. no_orphans s \ + Arch.finaliseCap cap fin + \ \rv s. no_orphans s \" + unfolding AARCH64_H.finaliseCap_def + apply (wpsimp simp: isCap_simps) + done + +lemma deletingIRQHandler_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + deletingIRQHandler irq + \ \rv s. no_orphans s \" + unfolding deletingIRQHandler_def + apply (wp, auto) + done + +lemma finaliseCap_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ valid_cap' cap s \ + finaliseCap cap final flag + \ \rv s. no_orphans s \" + apply (simp add: finaliseCap_def Let_def + cong: if_cong split del: if_split) + apply (rule hoare_pre) + apply (wp | clarsimp simp: o_def | wpc)+ + apply (auto simp: valid_cap'_def dest!: isCapDs) + done + +crunch no_orphans [wp]: cteSwap "no_orphans" + +crunch no_orphans [wp]: capSwapForDelete "no_orphans" + +declare withoutPreemption_lift [wp del] + +lemma no_orphans_finalise_prop_stuff: + "no_cte_prop no_orphans = no_orphans" + "finalise_prop_stuff no_orphans" + by (simp_all add: no_cte_prop_def finalise_prop_stuff_def + setCTE_no_orphans, + simp_all add: no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def all_queued_tcb_ptrs_def) + +lemma finaliseSlot_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ (\ e \ ex_cte_cap_to' slot s) \ + finaliseSlot slot e + \ \rv s. 
no_orphans s \" + unfolding finaliseSlot_def + apply (rule validE_valid, rule hoare_pre, + rule hoare_post_impErr, rule use_spec) + apply (rule finaliseSlot_invs'[where p=slot and slot=slot and Pr=no_orphans]) + apply (simp_all add: no_orphans_finalise_prop_stuff) + apply (wp | simp)+ + apply (auto dest: cte_wp_at_valid_objs_valid_cap') + done + +lemma cteDelete_no_orphans [wp]: + "\ no_orphans and invs' and sch_act_simple and K ex \ + cteDelete ptr ex + \ \rv s. no_orphans s \" + apply (rule hoare_gen_asm) + apply (clarsimp simp: cteDelete_def whenE_def split_def) + apply (rule hoare_pre, wp) + apply clarsimp + done + +crunch no_orphans [wp]: cteMove "no_orphans" +(wp: crunch_wps) + +lemma cteRevoke_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ + cteRevoke ptr + \ \rv s. no_orphans s \" + apply (rule_tac Q="\rv s. no_orphans s \ invs' s \ sch_act_simple s" + in hoare_strengthen_post) + apply (wp cteRevoke_preservation cteDelete_invs' cteDelete_sch_act_simple)+ + apply auto + done + +lemma cancelBadgedSends_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ + cancelBadgedSends epptr badge + \ \rv s. no_orphans s \" + unfolding cancelBadgedSends_def + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpc | clarsimp)+ + apply (wp filterM_preserved tcbSchedEnqueue_almost_no_orphans gts_wp' + sts_st_tcb' hoare_drop_imps | clarsimp)+ + done + +crunch no_orphans [wp]: handleFaultReply "no_orphans" + +lemma doReplyTransfer_no_orphans[wp]: + "\no_orphans and invs' and tcb_at' sender and tcb_at' receiver\ + doReplyTransfer sender receiver slot grant + \\rv. no_orphans\" + unfolding doReplyTransfer_def + apply (wp sts_st_tcb' setThreadState_not_active_no_orphans threadSet_no_orphans + threadSet_valid_queues' threadSet_weak_sch_act_wf + | wpc | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def + | wp (once) hoare_drop_imps + | strengthen sch_act_wf_weak invs_valid_queues')+ + apply (rule_tac Q="\rv. invs' and no_orphans" in hoare_post_imp) + apply (fastforce simp: inQ_def) + apply (wp hoare_drop_imps | clarsimp)+ + apply (clarsimp simp:invs'_def valid_state'_def valid_pspace'_def) + done + +lemma cancelSignal_valid_queues' [wp]: + "\ \s. valid_queues' s \ valid_objs' s \ + cancelSignal t ntfn + \ \rv s. valid_queues' s \" + unfolding cancelSignal_def Let_def + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpc | clarsimp)+ + done + +crunch no_orphans [wp]: setupReplyMaster "no_orphans" + (wp: crunch_wps simp: crunch_simps) + +lemma restart_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' t s \ + restart t + \ \rv s. no_orphans s \" + unfolding restart_def isStopped_def2 + apply (wp tcbSchedEnqueue_almost_no_orphans sts_st_tcb' cancelIPC_weak_sch_act_wf + | clarsimp simp: o_def if_apply_def2 + | strengthen no_orphans_strg_almost + | strengthen invs_valid_queues' + | wp (once) hoare_drop_imps)+ + apply auto + done + +lemma readreg_no_orphans: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' src s \ + invokeTCB (tcbinvocation.ReadRegisters src susp n arch) + \ \rv s. no_orphans s \" + unfolding invokeTCB_def performTransfer_def + apply (wp | clarsimp)+ + done + +lemma writereg_no_orphans: + "\ \s. no_orphans s \ invs' s \ sch_act_simple s + \ tcb_at' dest s \ ex_nonz_cap_to' dest s\ + invokeTCB (tcbinvocation.WriteRegisters dest resume values arch) + \ \rv s. 
no_orphans s \"
+ unfolding invokeTCB_def performTransfer_def postModifyRegisters_def
+ apply simp
+ apply (rule hoare_pre)
+ by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' hoare_weak_lift_imp
+ | strengthen invs_valid_queues' | clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap )+
+
+lemma copyreg_no_orphans:
+ "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_at' src s
+ \ tcb_at' dest s \ ex_nonz_cap_to' src s \ ex_nonz_cap_to' dest s \
+ invokeTCB (tcbinvocation.CopyRegisters dest src susp resume frames ints arch)
+ \ \rv s. no_orphans s \"
+ unfolding invokeTCB_def performTransfer_def postModifyRegisters_def
+ apply simp
+ apply (wp hoare_vcg_if_lift hoare_weak_lift_imp)
+ apply (wp hoare_weak_lift_imp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs'
+ restart_no_orphans asUser_no_orphans suspend_nonz_cap_to_tcb
+ | strengthen invs_valid_queues' | wpc | simp add: if_apply_def2)+
+ apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap)
+ done
+
+lemma settlsbase_no_orphans:
+ "\ \s. no_orphans s \ invs' s \
+ invokeTCB (tcbinvocation.SetTLSBase src dest)
+ \ \rv s. no_orphans s \"
+ unfolding invokeTCB_def performTransfer_def
+ apply simp
+ apply (wp hoare_vcg_if_lift hoare_weak_lift_imp)
+ apply (wpsimp wp: hoare_vcg_imp_lift' mapM_x_wp' asUser_no_orphans)+
+ done
+
+lemma almost_no_orphans_no_orphans:
+ "\ almost_no_orphans t s; \ is_active_tcb_ptr t s \ \ no_orphans s"
+ by (auto simp: almost_no_orphans_def no_orphans_def all_active_tcb_ptrs_def)
+
+lemma almost_no_orphans_no_orphans':
+ "\ almost_no_orphans t s; ksCurThread s = t\ \ no_orphans s"
+ by (auto simp: almost_no_orphans_def no_orphans_def all_active_tcb_ptrs_def)
+
+lemma setPriority_no_orphans [wp]:
+ "\ \s. no_orphans s \ invs' s \ tcb_at' tptr s \
+ setPriority tptr prio
+ \ \rv s. no_orphans s \"
+ unfolding setPriority_def
+ apply wpsimp
+ apply (rule_tac Q="\rv s. almost_no_orphans tptr s \ valid_queues' s \ weak_sch_act_wf (ksSchedulerAction s) s" in hoare_post_imp)
+ apply clarsimp
+ apply (clarsimp simp: is_active_tcb_ptr_runnable' pred_tcb_at'_def obj_at'_def
+ almost_no_orphans_no_orphans elim!: almost_no_orphans_no_orphans')
+ apply (wp threadSet_almost_no_orphans threadSet_valid_queues' | clarsimp simp: inQ_def)+
+ apply (wpsimp wp: threadSet_weak_sch_act_wf)
+ apply (wp tcbSchedDequeue_almost_no_orphans| clarsimp)+
+ apply (rule_tac Q="\rv. obj_at' (Not \ tcbQueued) tptr and invs' and (\s. weak_sch_act_wf (ksSchedulerAction s) s)" in hoare_post_imp)
+ apply (clarsimp simp: obj_at'_def inQ_def)
+ apply (wp tcbSchedDequeue_not_queued | clarsimp)+
+ done
+
+lemma setMCPriority_no_orphans[wp]:
+ "\no_orphans\ setMCPriority t prio \\rv. no_orphans\"
+ unfolding setMCPriority_def
+ apply (rule hoare_pre)
+ apply (wp threadSet_no_orphans)
+ by clarsimp+
+
+lemma threadSet_ipcbuffer_invs:
+ "is_aligned a msg_align_bits \
+ \invs' and tcb_at' t\ threadSet (tcbIPCBuffer_update (\_. a)) t \\rv. invs'\"
+ apply (wp threadSet_invs_trivial, simp_all add: inQ_def cong: conj_cong)
+ done
+
+lemma tc_no_orphans:
+ "\ no_orphans and invs' and sch_act_simple and tcb_at' a and ex_nonz_cap_to' a and
+ case_option \ (valid_cap' o fst) e' and
+ K (case_option True (isCNodeCap o fst) e') and
+ case_option \ (valid_cap' o fst) f' and
+ K (case_option True (isValidVTableRoot o fst) f') and
+ case_option \ (valid_cap') (case_option None (case_option None (Some o fst) o snd) g) and
+ K (case_option True isArchObjectCap (case_option None (case_option None (Some o fst) o snd) g)) and
+ K (case_option True (swp is_aligned 2 o fst) g) and
+ K (case_option True (swp is_aligned msg_align_bits o fst) g) and
+ K (case g of None \ True | Some x \ (case_option True (isArchObjectCap \ fst) \ snd) x) and
+ K (valid_option_prio d \ valid_option_prio mcp) \
+ invokeTCB (tcbinvocation.ThreadControl a sl b' mcp d e' f' g)
+ \ \rv s. no_orphans s \"
+ apply (rule hoare_gen_asm)
+ apply (rule hoare_gen_asm)
+ apply (rule hoare_gen_asm)
+ apply (simp add: invokeTCB_def getThreadCSpaceRoot getThreadVSpaceRoot
+ getThreadBufferSlot_def split_def)
+ apply (simp only: eq_commute[where a="a"])
+ apply (rule hoare_walk_assmsE)
+ apply (clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s])
+ apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial
+ threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[2]
+ apply (rule hoare_walk_assmsE)
+ apply (cases mcp; clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s])
+ apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial setMCPriority_invs'
+ typ_at_lifts[OF setMCPriority_typ_at']
+ threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[3]
+ apply ((simp only: simp_thms cong: conj_cong
+ | wp cteDelete_deletes cteDelete_invs' cteDelete_sch_act_simple
+ case_option_wp[where m'="return ()", OF setPriority_no_orphans return_inv,simplified]
+ checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple]
+ checkCap_inv[where P=no_orphans] checkCap_inv[where P="tcb_at' a"]
+ threadSet_cte_wp_at' hoare_vcg_all_lift_R hoare_vcg_all_lift threadSet_no_orphans
+ hoare_vcg_const_imp_lift_R hoare_weak_lift_imp hoare_drop_imp threadSet_ipcbuffer_invs
+ | strengthen invs_valid_queues'
+ | (simp add: locateSlotTCB_def locateSlotBasic_def objBits_def
+ objBitsKO_def tcbIPCBufferSlot_def tcb_cte_cases_def,
+ wp hoare_return_sp)
+ | wpc | clarsimp)+)
+ apply (fastforce simp: objBits_defs isCap_simps dest!: isValidVTableRootD)
+ done
+
+lemma bindNotification_no_orphans[wp]:
+ "\no_orphans\ bindNotification t ntfn \\_. no_orphans\"
+ unfolding bindNotification_def
+ by wp
+
+lemma invokeTCB_no_orphans [wp]:
+ "\ \s. no_orphans s \ invs' s \ sch_act_simple s \ tcb_inv_wf' tinv s \
+ invokeTCB tinv
+ \ \rv s. no_orphans s \"
+ apply (case_tac tinv, simp_all)
+ apply (clarsimp simp: invokeTCB_def)
+ apply (wp, clarsimp)
+ apply (clarsimp simp: invokeTCB_def)
+ apply (wp, clarsimp)
+ apply (wp tc_no_orphans)
+ apply (clarsimp split: option.splits simp: msg_align_bits elim!:is_aligned_weaken)
+ apply (rename_tac option)
+ apply (case_tac option)
+ apply ((wp | simp add: invokeTCB_def)+)[2]
+ apply (wp writereg_no_orphans readreg_no_orphans copyreg_no_orphans settlsbase_no_orphans | clarsimp)+
+ done
+
+lemma invokeCNode_no_orphans [wp]:
+ "\ \s. no_orphans s \ invs' s \ valid_cnode_inv' cinv s \ sch_act_simple s \
+ invokeCNode cinv
+ \ \rv.
no_orphans \" + unfolding invokeCNode_def + apply (rule hoare_pre) + apply (wp hoare_drop_imps unless_wp | wpc | clarsimp split del: if_split)+ + apply (simp add: invs_valid_queues') + done + +lemma invokeIRQControl_no_orphans [wp]: + "\ \s. no_orphans s \ + performIRQControl i + \ \rv s. no_orphans s \" + apply (cases i, simp_all add: performIRQControl_def AARCH64_H.performIRQControl_def) + apply (rename_tac archinv) + apply (case_tac archinv) + apply (wp | clarsimp)+ + done + +lemma invokeIRQHandler_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + InterruptDecls_H.invokeIRQHandler i + \ \reply s. no_orphans s \" + apply (cases i, simp_all add: Interrupt_H.invokeIRQHandler_def invokeIRQHandler_def) + apply (wp | clarsimp | fastforce)+ + done + +lemma performPageTableInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ + performPageTableInvocation pti + \ \reply s. no_orphans s \" + apply (cases pti, simp_all add: performPageTableInvocation_def) + apply (rule hoare_pre) + apply (wp mapM_x_wp' | wpc | clarsimp)+ + done + +lemma performVSpaceInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ + performVSpaceInvocation vsi + \ \reply s. no_orphans s \" + apply (cases vsi, simp_all add: performVSpaceInvocation_def) + apply (rule hoare_pre) + apply (wp mapM_x_wp' | wpc | clarsimp)+ + done + +lemma performPageInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ + performPageInvocation pgi + \ \reply s. no_orphans s \" + apply (simp add: performPageInvocation_def + cong: page_invocation.case_cong) + apply (rule hoare_pre) + apply (wp mapM_x_wp' mapM_wp' hoare_weak_lift_imp | wpc | clarsimp)+ + done + +crunch no_orphans [wp]: handleVMFault "no_orphans" + (wp: crunch_wps simp: crunch_simps) + +crunch no_orphans [wp]: handleHypervisorFault "no_orphans" + (wp: crunch_wps simp: crunch_simps) + +lemma associateVCPUTCB_no_orphans[wp]: + "associateVCPUTCB vcpuPtr tcbPtr \no_orphans\" + unfolding associateVCPUTCB_def + apply (rule no_orphans_lift) + apply (wpsimp wp: setObject_typ_at_not)+ + done + +crunch no_orphans [wp]: invokeVCPUInjectIRQ, invokeVCPUWriteReg, invokeVCPUAckVPPI "no_orphans" + (wp: crunch_wps simp: crunch_simps) + +lemma performARMVCPUInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ + performARMVCPUInvocation pgi + \ \reply s. no_orphans s \" + apply (simp add: performARMVCPUInvocation_def + cong: page_invocation.case_cong) + apply (rule hoare_pre) + apply (wp mapM_x_wp' mapM_wp' hoare_weak_lift_imp | wpc | clarsimp)+ + done + +lemma performASIDControlInvocation_no_orphans [wp]: + notes blah[simp del] = + atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex usableUntypedRange.simps + shows "\ \s. no_orphans s \ invs' s \ valid_aci' aci s \ ct_active' s \ + performASIDControlInvocation aci + \ \reply s. no_orphans s \" + apply (rule hoare_name_pre_state) + apply (clarsimp simp:valid_aci'_def cte_wp_at_ctes_of + split:asidcontrol_invocation.splits) + apply (rename_tac s ptr_base p cref ptr null_cte ut_cte idx) + proof - + fix s ptr_base p cref ptr null_cte ut_cte idx + assume no_orphans: "no_orphans s" + and invs' : "invs' s" + and cte : "ctes_of s p = Some null_cte" "cteCap null_cte = capability.NullCap" + "ctes_of s cref = Some ut_cte" "cteCap ut_cte = capability.UntypedCap False ptr_base pageBits idx" + and desc : "descendants_of' cref (ctes_of s) = {}" + and misc : "p \ cref" "ex_cte_cap_wp_to' (\_. 
True) p s" "sch_act_simple s" "is_aligned ptr asid_low_bits" + "asid_wf ptr" "ct_active' s" + + have vc:"s \' UntypedCap False ptr_base pageBits idx" + using cte misc invs' + apply - + apply (case_tac ut_cte) + apply (rule ctes_of_valid_cap') + apply simp + apply fastforce + done + + hence cover: + "range_cover ptr_base pageBits pageBits (Suc 0)" + apply - + apply (rule range_cover_full) + apply (simp add:valid_cap'_def capAligned_def) + apply simp + done + + have exclude: "cref \ mask_range ptr_base pageBits" + apply (rule descendants_range_ex_cte'[where cte = "ut_cte"]) + apply (rule empty_descendants_range_in'[OF desc]) + apply (rule if_unsafe_then_capD'[where P = "\c. c = ut_cte"]) + apply (clarsimp simp: cte_wp_at_ctes_of cte) + apply (simp add:invs' invs_unsafe_then_cap') + apply (simp add:cte invs' add_mask_fold)+ + done + + show "\(=) s\performASIDControlInvocation (asidcontrol_invocation.MakePool ptr_base p cref ptr) + \\reply. no_orphans\" + apply (clarsimp simp: performASIDControlInvocation_def + split: asidcontrol_invocation.splits) + apply (wp hoare_weak_lift_imp | clarsimp)+ + apply (rule_tac Q="\rv s. no_orphans s" in hoare_post_imp) + apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def + is_active_tcb_ptr_def all_queued_tcb_ptrs_def) + apply (wp | clarsimp simp:placeNewObject_def2)+ + apply (wp createObjects'_wp_subst)+ + apply (wp hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ + apply (strengthen invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace') + apply (clarsimp simp:conj_comms) + apply (wp deleteObjects_invs'[where idx = idx and d=False] + hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where idx = idx and d=False] hoare_vcg_const_imp_lift ) + using invs' misc cte exclude no_orphans cover + apply (clarsimp simp: is_active_thread_state_def makeObject_tcb valid_aci'_def + cte_wp_at_ctes_of invs_pspace_aligned' invs_pspace_distinct' + projectKO_opt_tcb isRunning_def isRestart_def conj_comms + invs_valid_pspace' vc objBits_simps range_cover.aligned) + apply (intro conjI) + apply (rule vc) + apply (simp add:descendants_range'_def2) + apply (rule empty_descendants_range_in'[OF desc]) + apply clarsimp + done +qed + +lemma performASIDPoolInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ + performASIDPoolInvocation api + \ \reply s. no_orphans s \" + apply (cases api, simp_all add: performASIDPoolInvocation_def) + apply (wp getObject_inv loadObject_default_inv | clarsimp)+ + done + +lemma arch_performInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ valid_arch_inv' i s \ ct_active' s \ + Arch.performInvocation i + \ \reply s. no_orphans s \" + unfolding AARCH64_H.performInvocation_def performARMMMUInvocation_def + apply (cases i, simp_all add: valid_arch_inv'_def) + apply (wp | clarsimp)+ + done + +lemma setDomain_no_orphans [wp]: + "\no_orphans and valid_queues and valid_queues' and cur_tcb'\ + setDomain tptr newdom + \\_. no_orphans\" + apply (simp add: setDomain_def when_def) + apply (wp tcbSchedEnqueue_almost_no_orphans hoare_vcg_imp_lift threadSet_almost_no_orphans + threadSet_valid_queues'_no_state threadSet_st_tcb_at2 hoare_vcg_disj_lift + threadSet_no_orphans + | clarsimp simp: st_tcb_at_neg2 not_obj_at')+ + apply (auto simp: tcb_at_typ_at' st_tcb_at_neg' is_active_tcb_ptr_runnable' + cur_tcb'_def obj_at'_def + dest: pred_tcb_at') + done + +lemma performInvocation_no_orphans [wp]: + "\ \s. 
no_orphans s \ invs' s \ valid_invocation' i s \ ct_active' s \ sch_act_simple s \ + performInvocation block call i + \ \reply s. no_orphans s \" + apply (simp add: performInvocation_def + cong: invocation.case_cong) + apply (rule hoare_pre) + apply (wp | wpc | clarsimp)+ + apply auto + done + +lemma getThreadState_restart [wp]: + "\ \s. tcb_at' thread s \ + getThreadState thread + \ \rv s. rv = Structures_H.thread_state.Restart \ st_tcb_at' isRestart thread s \" + apply (rule hoare_strengthen_post) + apply (rule gts_st_tcb') + apply (clarsimp simp add: pred_tcb_at'_def obj_at'_def isRestart_def) + done + +lemma K_bind_hoareE [wp]: + "\P\ f \Q\,\E\ \ \P\ K_bind f x \Q\,\E\" + by simp + +crunch valid_queues' [wp]: replyFromKernel "valid_queues'" + +lemma handleInvocation_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + ct_active' s \ ksSchedulerAction s = ResumeCurrentThread \ + handleInvocation isCall isBlocking + \ \rv s. no_orphans s \" + unfolding handleInvocation_def + apply (rule hoare_pre) + apply (wp syscall_valid' setThreadState_isRestart_no_orphans | wpc | clarsimp)+ + apply (rule_tac Q="\state s. no_orphans s \ invs' s \ + (state = Structures_H.thread_state.Restart \ + st_tcb_at' isRestart thread s)" + in hoare_post_imp) + apply (wp | clarsimp)+ + apply (wp setThreadState_current_no_orphans sts_invs_minor' + ct_in_state'_set setThreadState_st_tcb + hoare_vcg_all_lift + | simp add: split_def split del: if_split)+ + apply (wps setThreadState_ct') + apply (wp sts_ksQ + setThreadState_current_no_orphans sts_invs_minor' + ct_in_state'_set setThreadState_st_tcb + | simp add: split_def split del: if_split)+ + apply (clarsimp simp: if_apply_def2) + apply (frule(1) ct_not_ksQ) + by (auto simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def invs'_def + cur_tcb'_def valid_state'_def valid_idle'_def) + +lemma receiveSignal_no_orphans [wp]: + "\ \s. no_orphans s \ valid_queues' s \ + receiveSignal thread cap isBlocking + \ \rv s. no_orphans s \" + unfolding receiveSignal_def + apply (wp hoare_drop_imps setThreadState_not_active_no_orphans | wpc + | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def + doNBRecvFailedTransfer_def)+ + done + + +lemma receiveIPC_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + receiveIPC thread cap is_blocking + \ \rv s. no_orphans s \" + unfolding receiveIPC_def + apply (rule hoare_pre) + apply (wp setThreadState_not_active_no_orphans hoare_drop_imps + hoare_vcg_all_lift sts_st_tcb' + | wpc + | clarsimp simp: is_active_thread_state_def isRunning_def isRestart_def + doNBRecvFailedTransfer_def invs_valid_queues' + | strengthen sch_act_wf_weak)+ + done + +crunch valid_objs' [wp]: getThreadCallerSlot "valid_objs'" + +lemma deleteCallerCap_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + deleteCallerCap receiver + \ \rv s. no_orphans s \" + unfolding deleteCallerCap_def + by wpsimp auto + +lemma remove_neg_strg: + "(A \ B) \ ((x \ A) \ (\ x \ B))" + by blast + +lemma handleRecv_no_orphans [wp]: +notes if_cong[cong] shows + "\ \s. no_orphans s \ invs' s \ + handleRecv isBlocking + \ \rv . no_orphans \" + unfolding handleRecv_def + apply (clarsimp simp: whenE_def split del: if_split | wp hoare_drop_imps getNotification_wp | wpc )+ (*takes a while*) + apply (rule_tac Q'="\rv s. no_orphans s \ invs' s" in hoare_post_imp_R) + apply (wp, fastforce) + apply (rule_tac Q="\rv s. 
no_orphans s \ invs' s" in hoare_post_imp) + apply (wp | clarsimp | fastforce)+ + done + +crunches getThreadCallerSlot, handleHypervisorFault + for invs' [wp]: "invs'" + +lemma handleReply_no_orphans [wp]: + "\no_orphans and invs'\ handleReply \\_. no_orphans\" + unfolding handleReply_def + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpc | clarsimp)+ + apply (wp hoare_vcg_all_lift) + apply (rule_tac Q="\rv s. no_orphans s \ invs' s \ tcb_at' thread s \ + valid_cap' rv s" in hoare_post_imp) + apply (wp hoare_drop_imps | clarsimp simp: valid_cap'_def + | clarsimp simp: invs'_def cur_tcb'_def valid_state'_def)+ + done + +lemma handleYield_no_orphans [wp]: + "\ \s. no_orphans s \ invs' s \ + handleYield + \ \rv . no_orphans \" + unfolding handleYield_def + apply (wp tcbSchedAppend_almost_no_orphans) + apply auto + done + +lemma activatable_from_running': + "ct_running' s \ ct_in_state' activatable' s" + by (clarsimp simp: ct_in_state'_def elim!: pred_tcb'_weakenE) + +(* FIXME move *) +lemma sts_tcb_at'_preserve: + "\ st_tcb_at' P t and K (P st) \ setThreadState st t' \\_. st_tcb_at' P t \" + by (wpsimp wp: sts_st_tcb') + +(* FIXME move *) +(* e.g. if you set a non-runnable thread to Inactive, all runnable threads are still runnable *) +lemma sts_tcb_at'_preserve': + "\ st_tcb_at' P t and st_tcb_at' (\st. \ P st) t' and K (\ P st) \ + setThreadState st t' + \\_. st_tcb_at' P t \" + by (wpsimp wp: sts_st_tcb' simp: st_tcb_at_neg') + +lemma handleEvent_no_orphans [wp]: + "\ \s. invs' s \ + (e \ Interrupt \ ct_running' s) \ + ksSchedulerAction s = ResumeCurrentThread \ no_orphans s \ + handleEvent e + \ \rv s. no_orphans s \" + apply (simp add: handleEvent_def handleSend_def handleCall_def + cong: event.case_cong syscall.case_cong) + apply (rule hoare_pre) + apply (wp hoare_drop_imps | wpc | clarsimp simp: handleHypervisorFault_def + | strengthen invs_valid_queues' invs_valid_objs' invs_sch_act_wf')+ + apply (auto simp: activatable_from_running' active_from_running') + done + +theorem callKernel_no_orphans[wp]: + "\ \s. invs' s \ + (e \ Interrupt \ ct_running' s) \ + ksSchedulerAction s = ResumeCurrentThread \ no_orphans s \ + callKernel e + \ \rv s. no_orphans s \" + unfolding callKernel_def + apply (wpsimp wp: hoare_drop_imp[where f=activateThread] schedule_invs' + (* getActiveIRQ can't return a non-kernel IRQ *) + | wp (once) hoare_post_imp[ + where a="doMachineOp (getActiveIRQ True)" + and Q="\rv s. no_orphans s \ invs' s \ rv \ Some ` non_kernel_IRQs"])+ + done + +end + +end diff --git a/proof/refine/ARM/ArchAcc_R.thy b/proof/refine/ARM/ArchAcc_R.thy index adb9b4eeb9..6308ceaae1 100644 --- a/proof/refine/ARM/ArchAcc_R.thy +++ b/proof/refine/ARM/ArchAcc_R.thy @@ -369,7 +369,7 @@ lemma getObject_ASIDPool_corres': "corres (\p p'. p = inv ASIDPool p' o ucast) (asid_pool_at p) (pspace_aligned' and pspace_distinct') (get_asid_pool p) (getObject p)" - by (corressimp search: getObject_ASIDPool_corres) fastforce + by (corresKsimp search: getObject_ASIDPool_corres) fastforce lemma setObject_asidpool_replies_of'[wp]: "setObject c (asidpool::asidpool) \\s. P' (replies_of' s)\" @@ -394,7 +394,7 @@ lemma setObject_ASIDPool_corres [corres]: corres dc (asid_pool_at p) (asid_pool_at' p') (set_asid_pool p a) (setObject p' a')" apply (simp add: set_asid_pool_def) - apply (corressimp search: setObject_other_corres[where P="\_. True"] + apply (corresKsimp search: setObject_other_corres[where P="\_. 
True"] wp: get_object_ret get_object_wp) apply (simp add: other_obj_relation_def asid_pool_relation_def) apply (clarsimp simp: obj_at_simps ) @@ -1164,7 +1164,7 @@ lemma lookupPTSlot_corres [@lift_corres_args, corres]: (pspace_aligned' and pspace_distinct') (lookup_pt_slot pd vptr) (lookupPTSlot pd vptr)" unfolding lookup_pt_slot_def lookupPTSlot_def lookupPTSlotFromPT_def - apply (corressimp simp: pde_relation_aligned_def lookup_failure_map_def + apply (corresKsimp simp: pde_relation_aligned_def lookup_failure_map_def ptBits_def pdeBits_def pageBits_def pteBits_def mask_def wp: get_pde_wp_valid getPDE_wp) by (auto simp: lookup_failure_map_def obj_at_def) @@ -1246,7 +1246,7 @@ lemma createMappingEntries_corres [corres]: (create_mapping_entries base vptr pgsz vm_rights attrib pd) (createMappingEntries base' vptr' pgsz' vm_rights' attrib' pd')" unfolding createMappingEntries_def mapping_map_def - by (cases pgsz; corressimp simp: vmattributes_map_def less_kernel_base_mapping_slots + by (cases pgsz; corresKsimp simp: vmattributes_map_def less_kernel_base_mapping_slots largePagePTEOffsets_def largePagePTE_offsets_def superSectionPDEOffsets_def @@ -1283,7 +1283,7 @@ lemma ensureSafeMapping_corres [corres]: unfolding mapping_map_def ensureSafeMapping_def apply (cases m; cases m'; simp; match premises in "(_ \ (=)) p p'" for p p' \ \cases "fst p"; cases "fst p'"\; clarsimp) - by (corressimp corresK: mapME_x_corresK_inv + by (corresKsimp corresK: mapME_x_corresK_inv wp: get_master_pte_wp get_master_pde_wp getPTE_wp getPDE_wp; auto simp add: valid_mapping_entries_def)+ @@ -1310,7 +1310,7 @@ lemma find_pd_for_asid_corres [corres]: (pspace_aligned' and pspace_distinct' and no_0_obj') (find_pd_for_asid asid) (findPDForASID asid')" apply (simp add: find_pd_for_asid_def findPDForASID_def liftME_def bindE_assoc) - apply (corressimp simp: liftE_bindE assertE_assert mask_asid_low_bits_ucast_ucast + apply (corresKsimp simp: liftE_bindE assertE_assert mask_asid_low_bits_ucast_ucast lookup_failure_map_def wp: getPDE_wp getASID_wp search: checkPDAt_corres corres_gets_asid) diff --git a/proof/refine/ARM/Arch_R.thy b/proof/refine/ARM/Arch_R.thy index 0368e41886..da78660b4f 100644 --- a/proof/refine/ARM/Arch_R.thy +++ b/proof/refine/ARM/Arch_R.thy @@ -1165,13 +1165,13 @@ lemma performASIDControlInvocation_tcb_at': apply (rule hoare_name_pre_state) apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) - apply (wp static_imp_wp |simp add:placeNewObject_def2)+ - apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: projectKO_opts_defs) apply (strengthen st_tcb_strg' [where P=\]) apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] - deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ apply (case_tac ctea) apply (clarsimp) @@ -1768,7 +1768,7 @@ crunch st_tcb_at' [wp]: "Arch.finaliseCap" "st_tcb_at' P t" lemma invs_asid_table_strengthen': "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ invs' (s\ksArchState := - 
armKSASIDTable_update (\_. (armKSASIDTable \ ksArchState) s(asid \ ap)) (ksArchState s)\)" + armKSASIDTable_update (\_. ((armKSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" apply (clarsimp simp: invs'_def valid_dom_schedule'_def) apply (rule conjI) apply (clarsimp simp: valid_global_refs'_def global_refs'_def) @@ -1843,7 +1843,7 @@ lemma performASIDControlInvocation_invs' [wp]: updateFreeIndex_caps_no_overlap'' updateFreeIndex_descendants_of2 updateFreeIndex_caps_overlap_reserved - updateCap_cte_wp_at_cases static_imp_wp + updateCap_cte_wp_at_cases hoare_weak_lift_imp getSlotCap_wp)+ apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_bounded' diff --git a/proof/refine/ARM/CNodeInv_R.thy b/proof/refine/ARM/CNodeInv_R.thy index bf16178987..2b46ef3cca 100644 --- a/proof/refine/ARM/CNodeInv_R.thy +++ b/proof/refine/ARM/CNodeInv_R.thy @@ -4825,7 +4825,7 @@ lemma cteSwap_iflive'[wp]: simp only: if_live_then_nonz_cap'_def imp_conv_disj ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - hoare_vcg_ex_lift updateCap_cte_wp_at_cases static_imp_wp)+ + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -5685,7 +5685,7 @@ lemma cteSwap_cte_wp_cteCap: apply simp apply (wp hoare_drop_imps)[1] apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - getCTE_wp' hoare_vcg_all_lift static_imp_wp)+ + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ apply simp apply (clarsimp simp: o_def) done @@ -5699,7 +5699,7 @@ lemma capSwap_cte_wp_cteCap: apply(simp add: capSwapForDelete_def) apply(wp) apply(rule cteSwap_cte_wp_cteCap) - apply(wp getCTE_wp getCTE_cte_wp_at static_imp_wp)+ + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ apply(clarsimp) apply(rule conjI) apply(simp add: cte_at_cte_wp_atD) @@ -6251,7 +6251,7 @@ proof (induct arbitrary: P p rule: finalise_spec_induct2) ; clarsimp simp: pred_tcb_at'_def obj_at'_def) apply (case_tac "cteCap rv", simp_all add: isCap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp | simp | wp (once) isFinal[where x=sl])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ apply (wp getCTE_wp') apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule conjI, clarsimp simp: removeable'_def) @@ -7055,17 +7055,17 @@ next apply simp apply ((wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at set_cap_cte_cap_wp_to - hoare_vcg_const_Ball_lift static_imp_wp + hoare_vcg_const_Ball_lift hoare_weak_lift_imp | simp add: conj_comms)+)[1] apply (simp(no_asm_use)) apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - static_imp_wp)+ + hoare_weak_lift_imp)+ apply (elim conjE, strengthen subst[where P="cap_relation cap" for cap, mk_strg I _ E]) apply simp apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - static_imp_wp)+ + hoare_weak_lift_imp)+ apply clarsimp apply (drule_tac cap=a in cap_relation_removables, clarsimp, assumption+) @@ -7109,7 +7109,7 @@ next obj_at'_def)[1] apply (wpsimp wp: isFinal[where x="cte_map slot"] simp: is_final_cap_def) - apply (wpsimp wp: isFinalCapability_inv static_imp_wp isFinal + apply (wpsimp wp: isFinalCapability_inv hoare_weak_lift_imp isFinal simp: is_final_cap_def) apply (wpsimp wp: get_cap_wp) apply (wpsimp wp: getCTE_wp') @@ -7252,7 +7252,7 @@ next apply 
(rule updateCap_corres) apply simp apply (simp add: is_cap_simps) - apply (rule_tac Q="\rv. cte_at' (cte_map ?target)" in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (cte_map ?target)" in hoare_post_add) apply (wp, (wp getCTE_wp)+) apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule no_fail_pre, wp, simp) @@ -8388,7 +8388,7 @@ lemma cteMove_iflive'[wp]: ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_ex_lift updateCap_cte_wp_at_cases - getCTE_wp static_imp_wp)+ + getCTE_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -8563,7 +8563,7 @@ lemma cteMove_cte_wp_at: \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" unfolding cteMove_def apply (fold o_def) - apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp static_imp_wp|simp add: o_def)+ + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) done diff --git a/proof/refine/ARM/CSpace1_R.thy b/proof/refine/ARM/CSpace1_R.thy index 0f2ae17085..f877a45a4f 100644 --- a/proof/refine/ARM/CSpace1_R.thy +++ b/proof/refine/ARM/CSpace1_R.thy @@ -310,7 +310,7 @@ lemma getSlotCap_corres: (getSlotCap cte_ptr')" apply (simp add: getSlotCap_def) apply (subst bind_return [symmetric]) - apply (corressimp) + apply (corresKsimp) done lemma maskCapRights [simp]: @@ -585,7 +585,7 @@ proof (induct a arbitrary: c' cref' bits rule: resolve_address_bits'.induct) apply (simp add: Let_def unlessE_whenE) apply (simp add: caps isCap_defs Let_def whenE_bindE_throwError_to_if) apply (subst cnode_cap_case_if) - apply (corressimp search: getSlotCap_corres IH + apply (corresKsimp search: getSlotCap_corres IH wp: get_cap_wp getSlotCap_valid hoare_drop_imps simp: locateSlot_conv stateAssert_def) apply (simp add: drop_postfix_eq) @@ -786,7 +786,7 @@ lemma setCTE_tcb_in_cur_domain': done lemma setCTE_ctes_of_wp [wp]: - "\\s. P (ctes_of s (p \ cte))\ + "\\s. P ((ctes_of s) (p \ cte))\ setCTE p cte \\rv s. P (ctes_of s)\" by (simp add: setCTE_def ctes_of_setObject_cte) @@ -881,7 +881,7 @@ lemma cteInsert_weak_cte_wp_at: \\uu. cte_wp_at'(\c. P (cteCap c)) p\" unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def apply (simp add: bind_assoc split del: if_split) - apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at static_imp_wp | simp)+ + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ apply (wp getCTE_ctes_wp)+ apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ done diff --git a/proof/refine/ARM/CSpace_R.thy b/proof/refine/ARM/CSpace_R.thy index 41653a7393..b39d5fec6e 100644 --- a/proof/refine/ARM/CSpace_R.thy +++ b/proof/refine/ARM/CSpace_R.thy @@ -2237,7 +2237,7 @@ proof - let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" let ?C = "(modify_map (modify_map - (modify_map (ctes_of s(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest (cteMDBNode_update (\a. MDB word1 src (revokable' src_cap cap) (revokable' src_cap cap)))) src (cteMDBNode_update (mdbNext_update (\_. dest)))) word1 (cteMDBNode_update (mdbPrev_update (\_. 
dest))))" diff --git a/proof/refine/ARM/Detype_R.thy b/proof/refine/ARM/Detype_R.thy index 64b5ee4049..10e11e2cba 100644 --- a/proof/refine/ARM/Detype_R.thy +++ b/proof/refine/ARM/Detype_R.thy @@ -128,7 +128,7 @@ lemma deleteObjects_def2: then None else gsCNodes s x \); stateAssert ksASIDMapSafe [] od" - apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def) + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def) apply (rule bind_eqI, rule ext)+ apply (simp add: bind_assoc[symmetric]) apply (rule bind_cong[rotated], rule refl) @@ -2846,7 +2846,7 @@ lemma storePDE_det: "ko_wp_at' ((=) (KOArch (KOPDE pde))) ptr s \ storePDE ptr (new_pde::ARM_H.pde) s = modify - (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPDE new_pde)))) s" + (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPDE new_pde)))) s" apply (clarsimp simp: ko_wp_at'_def storePDE_def split_def bind_def gets_def return_def get_def setObject_def @@ -3107,7 +3107,7 @@ lemma cte_wp_at_modify_pde: notes blah[simp del] = atLeastAtMost_simps shows "\ksPSpace s ptr' = Some (KOArch (KOPDE pde)); pspace_aligned' s;cte_wp_at' \ ptr s\ - \ cte_wp_at' \ ptr (s\ksPSpace := ksPSpace s(ptr' \ (KOArch (KOPDE pde')))\)" + \ cte_wp_at' \ ptr (s\ksPSpace := (ksPSpace s)(ptr' \ (KOArch (KOPDE pde')))\)" apply (simp add:cte_wp_at_obj_cases_mask obj_at'_real_def) apply (frule(1) pspace_alignedD') apply (elim disjE) @@ -3244,8 +3244,7 @@ lemma copyGlobalMappings_setCTE_commute: lemma dmo_bounded'[wp]: "doMachineOp f \pspace_bounded'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) - apply clarsimp + apply wpsimp done lemma setCTE_doMachineOp_commute: diff --git a/proof/refine/ARM/Finalise_R.thy b/proof/refine/ARM/Finalise_R.thy index 0ebd7fa4a9..eaf9071fd9 100644 --- a/proof/refine/ARM/Finalise_R.thy +++ b/proof/refine/ARM/Finalise_R.thy @@ -1260,7 +1260,7 @@ crunch gsMaxObjectSize[wp]: emptySlot "\s. P (gsMaxObjectSize s)" end lemma emptySlot_cteCaps_of: - "\\s. P (cteCaps_of s(p \ NullCap))\ + "\\s. P ((cteCaps_of s)(p \ NullCap))\ emptySlot p opt \\rv s. P (cteCaps_of s)\" apply (simp add: emptySlot_def case_Null_If) @@ -1443,13 +1443,13 @@ lemma deletedIRQHandler_corres: lemma arch_postCapDeletion_corres: "acap_relation cap cap' \ corres dc \ \ (arch_post_cap_deletion cap) (ARM_H.postCapDeletion cap')" - by (corressimp simp: arch_post_cap_deletion_def ARM_H.postCapDeletion_def) + by (corresKsimp simp: arch_post_cap_deletion_def ARM_H.postCapDeletion_def) lemma postCapDeletion_corres: "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) - apply (corressimp corres: deletedIRQHandler_corres) - by (corressimp corres: arch_postCapDeletion_corres) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) lemma set_cap_trans_state: "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" @@ -1508,7 +1508,7 @@ lemma emptySlot_corres: defer apply wpsimp+ apply (rule corres_no_failI) - apply (rule no_fail_pre, wp static_imp_wp) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (rule conjI, clarsimp) @@ -2880,7 +2880,7 @@ crunches finaliseCapTrue_standin, unbindNotification lemma cteDeleteOne_cteCaps_of: "\\s. (cte_wp_at' (\cte. \final. 
finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap)))\ + P ((cteCaps_of s)(p \ NullCap)))\ cteDeleteOne p \\rv s. P (cteCaps_of s)\" apply (simp add: cteDeleteOne_def unless_def split_def) @@ -3669,7 +3669,7 @@ crunches schedContextCancelYieldTo, tcbReleaseRemove lemma suspend_cte_wp_at': "suspend t \cte_wp_at' (\cte. P (cteCap cte)) p\" unfolding updateRestartPC_def suspend_def - apply (wpsimp wp: hoare_vcg_imp_lift hoare_disjI2[where Q="\_. cte_wp_at' a b" for a b]) + apply (wpsimp wp: hoare_vcg_imp_lift hoare_disjI2[where R="\_. cte_wp_at' a b" for a b]) done context begin interpretation Arch . (*FIXME: arch_split*) @@ -4125,7 +4125,7 @@ lemma cteDeleteOne_invs[wp]: apply (rule conjI) apply fastforce apply (fastforce dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def projectKOs ko_wp_at'_def) - apply (wp isFinalCapability_inv getCTE_wp' static_imp_wp + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp | wp (once) isFinal[where x=ptr])+ apply (fastforce simp: cte_wp_at_ctes_of) done diff --git a/proof/refine/ARM/InterruptAcc_R.thy b/proof/refine/ARM/InterruptAcc_R.thy index d39f3dbbc9..017ec2dfd2 100644 --- a/proof/refine/ARM/InterruptAcc_R.thy +++ b/proof/refine/ARM/InterruptAcc_R.thy @@ -117,11 +117,11 @@ lemma updateTimeStamp_corres[corres]: apply (prop_tac "minBudget = MIN_BUDGET") apply (clarsimp simp: minBudget_def MIN_BUDGET_def kernelWCETTicks_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurTime_sp]) - apply corressimp + apply corresKsimp apply (rule corres_underlying_split[where r'="(=)"]) apply (rule corres_guard_imp) apply (rule corres_machine_op) - apply corressimp + apply corresKsimp apply (wpsimp simp: getCurrentTime_def) apply simp apply simp @@ -207,7 +207,7 @@ lemma scActive_corres: (scActive scPtr)" apply (rule corres_cross[where Q' = "sc_at' scPtr", OF sc_at'_cross_rel]) apply (fastforce simp: obj_at_def is_sc_obj_def valid_obj_def valid_pspace_def sc_at_pred_n_def) - apply (corressimp corres: get_sc_corres + apply (corresKsimp corres: get_sc_corres simp: sc_relation_def get_sc_active_def scActive_def active_sc_def) done @@ -256,7 +256,7 @@ lemma preemptionPoint_corres: supply if_split[split del] apply (simp add: preemption_point_def preemptionPoint_def) apply (rule corres_splitEE_skip - ; corressimp corres: update_work_units_corres + ; corresKsimp corres: update_work_units_corres simp: update_work_units_def) apply (clarsimp simp: bindE_def liftE_def) apply (rule_tac Q'="\rv s. 
rv = ksWorkUnitsCompleted s \ ?conc s" in corres_symb_exec_r[rotated]) @@ -292,22 +292,22 @@ lemma preemptionPoint_corres: apply (rule corres_split_skip) apply (wpsimp simp: reset_work_units_def) apply (wpsimp simp: setWorkUnits_def) - apply (corressimp corres: setWorkUnits_corres) + apply (corresKsimp corres: setWorkUnits_corres) apply (rule corres_split_skip) apply wpsimp apply wpsimp - apply (corressimp corres: updateTimeStamp_corres) + apply (corresKsimp corres: updateTimeStamp_corres) apply (rule corres_split_skip) apply (wpsimp simp: cur_sc_tcb_def) apply wpsimp - apply (corressimp corres: corres_machine_op) + apply (corresKsimp corres: corres_machine_op) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule corres_underlying_split[rotated 2, OF gets_sp getConsumedTime_sp]) - apply (corressimp corres: getConsumedTime_corres) + apply (corresKsimp corres: getConsumedTime_corres) apply (clarsimp simp: andM_def ifM_def bind_assoc) apply (rule corres_underlying_split[rotated 2, OF get_sc_active_sp scActive_sp]) - apply (corressimp corres: scActive_corres) + apply (corresKsimp corres: scActive_corres) apply (fastforce dest: valid_objs_valid_sched_context_size simp: cur_sc_tcb_def obj_at_def is_sc_obj_def sc_at_pred_n_def) apply (clarsimp split: if_split) @@ -326,7 +326,7 @@ lemma preemptionPoint_corres: active_sc_def sc_valid_refills_def rr_valid_refills_def split: if_splits) apply simp - apply corressimp + apply corresKsimp apply (fastforce intro: corres_returnOkTT) apply (clarsimp split: if_split) apply (clarsimp split: if_split) diff --git a/proof/refine/ARM/Interrupt_R.thy b/proof/refine/ARM/Interrupt_R.thy index 5399b1e76c..7215c24cb1 100644 --- a/proof/refine/ARM/Interrupt_R.thy +++ b/proof/refine/ARM/Interrupt_R.thy @@ -676,7 +676,7 @@ lemma handleInterrupt_corres: apply ((wp | simp)+) apply clarsimp apply fastforce - apply (corressimp corres: corres_machine_op reprogram_timer_corres + apply (corresKsimp corres: corres_machine_op reprogram_timer_corres simp: ackDeadlineIRQ_def) done diff --git a/proof/refine/ARM/InvariantUpdates_H.thy b/proof/refine/ARM/InvariantUpdates_H.thy index 4a381fb7a1..358ba7831b 100644 --- a/proof/refine/ARM/InvariantUpdates_H.thy +++ b/proof/refine/ARM/InvariantUpdates_H.thy @@ -16,7 +16,7 @@ lemma ps_clear_domE[elim?]: lemma ps_clear_upd: "ksPSpace s y = Some v \ - ps_clear x n (ksPSpace_update (\a. ksPSpace s(y \ v')) s') = ps_clear x n s" + ps_clear x n (ksPSpace_update (\a. 
(ksPSpace s)(y \ v')) s') = ps_clear x n s" by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] diff --git a/proof/refine/ARM/IpcCancel_R.thy b/proof/refine/ARM/IpcCancel_R.thy index e41df2d79d..5b91d9fbd4 100644 --- a/proof/refine/ARM/IpcCancel_R.thy +++ b/proof/refine/ARM/IpcCancel_R.thy @@ -1276,7 +1276,7 @@ lemma replyPop_corres: loadObject_default_def ARM_H.fromPPtr_def split: if_split_asm option.split_asm dest!: readObject_misc_ko_at') - apply (prop_tac "ksPSpace s'(rp \ + apply (prop_tac "(ksPSpace s')(rp \ KOReply (replyNext_update Map.empty reply)) = ksPSpace s'") apply (rule ext) @@ -2507,7 +2507,7 @@ crunches cancelSignal, cleanReply lemma tcbFault_update_valid_queues: "\ko_at' tcb t s; valid_queues s\ - \ valid_queues (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbFault_update Map.empty tcb))\)" + \ valid_queues (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbFault_update Map.empty tcb))\)" by (fastforce simp: valid_queues_def valid_queues_no_bitmap_def valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def obj_at'_def projectKOs inQ_def objBitsKO_def) @@ -2580,7 +2580,7 @@ lemma (in delete_one_conc_pre) suspend_nonq: apply (simp add: suspend_def unless_def) unfolding updateRestartPC_def apply (wpsimp wp: hoare_allI tcbSchedDequeue_t_notksQ sts_ksQ_oaQ hoare_vcg_imp_lift - hoare_disjI2[where Q="\_. valid_queues"]) + hoare_disjI2[where R="\_. valid_queues"]) done lemma suspend_makes_inactive: @@ -2618,7 +2618,7 @@ lemma updateSchedContext_valid_tcbs'[wp]: lemma valid_refills'_tcbQueued_update[simp]: "scp \ t \ valid_refills' scp - (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbQueued_update (\_. True) tcb))\) + (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbQueued_update (\_. True) tcb))\) = valid_refills' scp s" by (clarsimp simp: valid_refills'_def opt_pred_def) @@ -2779,7 +2779,7 @@ lemma cancelAllIPC_corres_helper: st = Structures_A.thread_state.BlockedOnReceive ep r_opt pl") apply (clarsimp simp: when_def split: option.splits) apply (intro conjI impI allI; clarsimp simp: isReceive_def) - apply (corressimp corres: restart_thread_if_no_fault_corres) + apply (corresKsimp corres: restart_thread_if_no_fault_corres) apply (clarsimp simp: pred_tcb_at_def obj_at_def is_tcb valid_sched_def) apply (rule corres_guard_imp) apply (rule corres_split[OF replyUnlinkTcb_corres]) @@ -2796,7 +2796,7 @@ lemma cancelAllIPC_corres_helper: apply (case_tac st; clarsimp simp: isReceive_def) apply (case_tac st ; clarsimp simp: isReceive_def - ; (corressimp corres: restart_thread_if_no_fault_corres + ; (corresKsimp corres: restart_thread_if_no_fault_corres , fastforce simp: obj_at_def)) apply (wpsimp wp: gts_wp) apply (wpsimp wp: gts_wp') diff --git a/proof/refine/ARM/Ipc_R.thy b/proof/refine/ARM/Ipc_R.thy index 1828e88c1e..8eb4e08d2f 100644 --- a/proof/refine/ARM/Ipc_R.thy +++ b/proof/refine/ARM/Ipc_R.thy @@ -312,7 +312,7 @@ lemma cteInsert_cte_wp_at: cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" apply (simp add: cteInsert_def) - apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp | clarsimp simp: comp_def | unfold setUntypedCapAsFull_def)+ apply (drule cte_at_cte_wp_atD) @@ -356,7 +356,7 @@ lemma cteInsert_weak_cte_wp_at3: else cte_wp_at' (\c. P (cteCap c)) p s\ cteInsert cap src dest \\uu. cte_wp_at' (\c. 
P (cteCap c)) p\" - by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp | clarsimp simp: comp_def cteInsert_def | unfold setUntypedCapAsFull_def | auto simp: cte_wp_at'_def dest!: imp)+ @@ -577,7 +577,7 @@ lemma cteInsert_cte_cap_to': apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of) apply (rule_tac x = "cref" in exI) apply (rule conjI) @@ -620,7 +620,7 @@ lemma cteInsert_weak_cte_wp_at2: apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of weak) apply auto done @@ -653,11 +653,11 @@ lemma transferCapsToSlots_presM: apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift | assumption | wpc)+ apply (rule cteInsert_assume_Null) - apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+ + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp deriveCap_derived_foo)+ apply (thin_tac "\slots. PROP P slots" for P) apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def @@ -1045,7 +1045,7 @@ lemma transferCaps_corres: apply (rule corres_rel_imp, rule transferCapsToSlots_corres, simp_all add: split_def)[1] apply (case_tac info, simp) - apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp | simp only: ball_conj_distrib)+ apply (simp add: cte_map_def tcb_cnode_index_def split_def) apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 @@ -1416,7 +1416,7 @@ lemma doNormalTransfer_corres: hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' hoare_vcg_const_Ball_lift lookupExtraCaps_length | simp add: if_apply_def2)+) - apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+ + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ apply clarsimp apply auto done @@ -1550,7 +1550,7 @@ lemma makeFaultMessage_corres: apply (wpsimp simp: sched_context_update_consumed_def setTimeArg_def)+ apply (fastforce dest!: valid_tcb_objs simp: valid_tcb_def valid_bound_obj_def obj_at_def) apply clarsimp - apply (corressimp corres: makeArchFaultMessage_corres) + apply (corresKsimp corres: makeArchFaultMessage_corres) done crunches makeFaultMessage @@ -2739,7 +2739,7 @@ context begin interpretation Arch . 
(*FIXME: arch_split*) crunches cancel_ipc for cur[wp]: "cur_tcb" and ntfn_at[wp]: "ntfn_at t" - (wp: select_wp crunch_wps simp: crunch_simps ignore: set_object) + (wp: crunch_wps simp: crunch_simps ignore: set_object) lemma valid_sched_weak_strg: "valid_sched s \ weak_valid_sched_action s" @@ -3592,7 +3592,7 @@ lemma possibleSwitchTo_ksQ': possibleSwitchTo t \\_ s. t' \ set (ksReadyQueues s p)\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs inReleaseQueue_def) - apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp + apply (wp hoare_weak_lift_imp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp | wpc | simp split del: if_split)+ apply (auto simp: obj_at'_def) @@ -4929,7 +4929,7 @@ lemma receiveSignal_corres: and tcb_at' thread and ntfn_at' cap_ntfn_ptr and valid_ntfn' rv' and ko_at' rv' cap_ntfn_ptr" in corres_underlying_split) - apply (corressimp corres: getNotification_corres + apply (corresKsimp corres: getNotification_corres simp: ntfn_at_def2 valid_cap_def st_tcb_at_tcb_at valid_cap'_def) defer apply (wpsimp wp: get_simple_ko_wp) @@ -4959,7 +4959,7 @@ lemma receiveSignal_corres: pred_tcb_at_def obj_at_def is_obj_defs split: if_split_asm option.splits)+ apply (fastforce simp: valid_tcb_state'_def) - apply (corressimp corres: doNBRecvFailedTransfer_corres) + apply (corresKsimp corres: doNBRecvFailedTransfer_corres) apply fastforce \ \WaitingNtfn\ apply (case_tac isBlocking; simp) @@ -4983,7 +4983,7 @@ lemma receiveSignal_corres: pred_tcb_at_def obj_at_def is_obj_defs split: if_split_asm option.splits)+ apply (fastforce simp: valid_tcb_state'_def valid_ntfn'_def) - apply (corressimp corres: doNBRecvFailedTransfer_corres) + apply (corresKsimp corres: doNBRecvFailedTransfer_corres) apply fastforce \ \ActiveNtfn\ apply (rule corres_guard_imp) @@ -5174,7 +5174,7 @@ lemma completeSignal_invs': apply (wpsimp wp: maybeDonateSc_invs') apply (clarsimp simp: obj_at'_def) apply (wpsimp wp: set_ntfn_minor_invs') - apply (wpsimp wp: hoare_vcg_ex_lift static_imp_wp simp: valid_ntfn'_def) + apply (wpsimp wp: hoare_vcg_ex_lift hoare_weak_lift_imp simp: valid_ntfn'_def) apply wpsimp apply clarsimp apply (intro conjI impI) diff --git a/proof/refine/ARM/KHeap_R.thy b/proof/refine/ARM/KHeap_R.thy index 254a02bc65..16d233acfd 100644 --- a/proof/refine/ARM/KHeap_R.thy +++ b/proof/refine/ARM/KHeap_R.thy @@ -968,7 +968,7 @@ lemma max_word_minus_1[simp]: "0xFFFFFFFF + 2^x = (2^x - 1::32 word)" by simp lemma ctes_of'_after_update: - "ko_wp_at' (same_caps' val) p s \ ctes_of (s\ksPSpace := ksPSpace s(p \ val)\) x = ctes_of s x" + "ko_wp_at' (same_caps' val) p s \ ctes_of (s\ksPSpace := (ksPSpace s)(p \ val)\) x = ctes_of s x" apply (clarsimp simp only: ko_wp_at'_def map_to_ctes_def Let_def) apply (rule if_cong) apply (cases val; fastforce split: if_splits) @@ -981,7 +981,7 @@ lemma ctes_of'_after_update: lemma ex_cap_to'_after_update: "\ ex_nonz_cap_to' p s; ko_wp_at' (same_caps' val) p' s \ - \ ex_nonz_cap_to' p (s\ksPSpace := ksPSpace s(p' \ val)\)" + \ ex_nonz_cap_to' p (s\ksPSpace := (ksPSpace s)(p' \ val)\)" unfolding ex_nonz_cap_to'_def cte_wp_at_ctes_of using ctes_of'_after_update by fastforce @@ -1247,7 +1247,7 @@ lemma obj_relation_cut_same_type: lemma replyNexts_of_non_reply_update: "\s'. 
\typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ ReplyT \ - \ replyNexts_of (s'\ksPSpace := ksPSpace s'(ptr \ ko)\) = replyNexts_of s'" + \ replyNexts_of (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = replyNexts_of s'" by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs split: kernel_object.splits) @@ -1261,14 +1261,14 @@ lemma replyNexts_of_replyNext_same_update: "\s'. \typ_at' ReplyT ptr s'; ksPSpace s' ptr = Some ko; koTypeOf (injectKO (ob':: 'a :: pspace_storable)) = ReplyT; projectKO_opt ko = Some ab; replyNext_same (ob':: 'a) ab\ - \ replyNexts_of (s'\ksPSpace := ksPSpace s'(ptr \ injectKO ob')\) = replyNexts_of s'" + \ replyNexts_of (s'\ksPSpace := (ksPSpace s')(ptr \ injectKO ob')\) = replyNexts_of s'" apply (cases "injectKO ob'"; clarsimp simp: typ_at'_def ko_wp_at'_def) by (cases ko; fastforce simp add: replyNext_same_def project_inject projectKO_opts_defs opt_map_def) lemma replyPrevs_of_non_reply_update: "\s'. \typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ ReplyT \ - \ replyPrevs_of (s'\ksPSpace := ksPSpace s'(ptr \ ko)\) = replyPrevs_of s'" + \ replyPrevs_of (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = replyPrevs_of s'" by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs split: kernel_object.splits) @@ -1282,7 +1282,7 @@ lemma replyPrevs_of_replyPrev_same_update: "\s'. \typ_at' ReplyT ptr s'; ksPSpace s' ptr = Some ko; koTypeOf (injectKO (ob':: 'a :: pspace_storable)) = ReplyT; projectKO_opt ko = Some ab; replyPrev_same (ob':: 'a) ab\ - \ replyPrevs_of (s'\ksPSpace := ksPSpace s'(ptr \ injectKO ob')\) = replyPrevs_of s'" + \ replyPrevs_of (s'\ksPSpace := (ksPSpace s')(ptr \ injectKO ob')\) = replyPrevs_of s'" apply (cases "injectKO ob'"; clarsimp simp: typ_at'_def ko_wp_at'_def) by (cases ko; fastforce simp add: replyPrev_same_def project_inject projectKO_opts_defs opt_map_def) @@ -1363,8 +1363,8 @@ lemma setEndpoint_corres [corres]: corres dc (ep_at ptr) (ep_at' ptr) (set_endpoint ptr e) (setEndpoint ptr e')" apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) lemma setNotification_corres [corres]: @@ -1372,8 +1372,8 @@ lemma setNotification_corres [corres]: corres dc (ntfn_at ptr) (ntfn_at' ptr) (set_notification ptr ae) (setNotification ptr ae')" apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) lemma reply_at'_cross: @@ -3078,21 +3078,21 @@ lemmas valid_globals_cte_wpD'_idleSC = valid_globals_cte_wpD'[OF _ _ idle_sc_is_ lemma dmo_aligned'[wp]: "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_distinct'[wp]: "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_valid_objs'[wp]: "\valid_objs'\ doMachineOp f \\_. 
valid_objs'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done @@ -3100,7 +3100,7 @@ lemma dmo_inv': assumes R: "\P. \P\ f \\_. P\" shows "\P\ doMachineOp f \\_. P\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp apply (drule in_inv_by_hoareD [OF R]) apply simp @@ -3208,14 +3208,14 @@ lemma obj_relation_cuts_obj_bits: lemma typ_at'_same_type: assumes "typ_at' T p s" "koTypeOf k = koTypeOf ko" "objBitsKO k = objBitsKO ko" "ksPSpace s p' = Some ko" - shows "typ_at' T p (s\ksPSpace :=ksPSpace s(p' \ k)\)" + shows "typ_at' T p (s\ksPSpace :=(ksPSpace s)(p' \ k)\)" using assms by (clarsimp simp: typ_at'_def ko_wp_at'_def ps_clear_upd) lemma cte_at'_same_type: "\cte_wp_at' \ t s; koTypeOf k = koTypeOf ko;objBitsKO k = objBitsKO ko; ksPSpace s p = Some ko\ - \ cte_wp_at' \ t (s\ksPSpace := ksPSpace s(p \ k)\)" + \ cte_wp_at' \ t (s\ksPSpace := (ksPSpace s)(p \ k)\)" apply (simp add: cte_at_typ' typ_at'_same_type) apply (elim exE disjE) apply (rule disjI1, clarsimp simp: typ_at'_same_type) @@ -3224,7 +3224,7 @@ lemma cte_at'_same_type: lemma valid_ep'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some (KOEndpoint obj) \ - \ valid_ep' obj (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_ep' obj (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (erule (1) valid_objsE') apply (fastforce simp: valid_objs'_def valid_obj'_def obj_at'_def projectKOs valid_ep'_def split: endpoint.splits) @@ -3232,7 +3232,7 @@ lemma valid_ep'_ep_update: lemma valid_cap'_ep_update: "\ valid_cap' cap s; valid_objs' s; valid_ep' ep s; ep_at' epPtr s \ - \ valid_cap' cap (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_cap' cap (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" supply ps_clear_upd[simp] apply (clarsimp simp: typ_at'_same_type ko_wp_at'_def cte_at'_same_type valid_cap'_def obj_at'_def projectKOs objBits_simps @@ -3257,7 +3257,7 @@ lemma valid_cap'_ep_update: lemma valid_cap'_reply_update: "\ valid_cap' cap s; valid_objs' s; valid_reply' reply s; reply_at' rptr s \ - \ valid_cap' cap (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_cap' cap (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" supply ps_clear_upd[simp] apply (clarsimp simp: typ_at'_same_type ko_wp_at'_def cte_at'_same_type valid_cap'_def obj_at'_def projectKOs objBits_simps @@ -3279,7 +3279,7 @@ lemma valid_cap'_reply_update: lemma valid_tcb_state'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some (KOTCB obj) \ - \ valid_tcb_state' (tcbState obj) (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_tcb_state' (tcbState obj) (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (rule valid_objsE', simp, simp) by (fastforce simp: typ_at'_same_type ps_clear_upd objBits_simps valid_obj'_def projectKOs valid_tcb_state'_def valid_bound_obj'_def valid_tcb'_def obj_at'_def @@ -3287,7 +3287,7 @@ lemma valid_tcb_state'_ep_update: lemma valid_tcb_state'_reply_update: "\ valid_objs' s; valid_reply' reply s; reply_at' rptr s; ksPSpace s x = Some (KOTCB obj) \ - \ valid_tcb_state' (tcbState obj) (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_tcb_state' (tcbState obj) (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" apply (rule valid_objsE', simp, simp) by (fastforce simp: typ_at'_same_type ps_clear_upd objBits_simps valid_obj'_def projectKOs valid_bound_obj'_def valid_tcb'_def valid_tcb_state'_def obj_at'_def @@ -3295,7 
+3295,7 @@ lemma valid_tcb_state'_reply_update: lemma valid_tcb'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some (KOTCB obj) \ - \ valid_tcb' obj (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_tcb' obj (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (rule valid_objsE', simp, simp) by (fastforce simp: typ_at'_same_type ps_clear_upd objBits_simps valid_obj'_def projectKOs valid_bound_obj'_def valid_tcb'_def obj_at'_def valid_tcb_state'_ep_update @@ -3304,7 +3304,7 @@ lemma valid_tcb'_ep_update: lemma valid_arch_obj'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some (KOArch obj) \ - \ valid_arch_obj' obj (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_arch_obj' obj (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (rule valid_objsE', simp, simp) apply (cases obj; clarsimp simp: valid_arch_obj'_def valid_obj'_def obj_at'_def projectKOs split: arch_kernel_object.splits) @@ -3315,7 +3315,7 @@ lemma valid_arch_obj'_ep_update: lemma valid_arch_obj'_reply_update: "\ valid_objs' s; valid_reply' reply s; reply_at' rptr s; ksPSpace s x = Some (KOArch obj) \ - \ valid_arch_obj' obj (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_arch_obj' obj (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" supply ps_clear_upd[simp] apply (rule valid_objsE', simp, simp) apply (cases obj; clarsimp simp: valid_arch_obj'_def valid_obj'_def obj_at'_def projectKOs @@ -3329,7 +3329,7 @@ end lemma valid_obj'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some obj\ - \ valid_obj' obj (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_obj' obj (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (rule valid_objsE', simp, simp) by (cases obj; clarsimp simp: typ_at'_same_type valid_obj'_def obj_at'_def ps_clear_upd @@ -3341,7 +3341,7 @@ lemma valid_obj'_ep_update: lemma valid_obj'_reply_update: "\ valid_objs' s; valid_reply' reply s; reply_at' rptr s; ksPSpace s x = Some obj \ - \ valid_obj' obj (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_obj' obj (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" apply (rule valid_objsE', simp, simp) apply (cases obj; clarsimp simp: valid_obj'_def) apply (fastforce simp: valid_ep'_def obj_at'_def projectKOs split: endpoint.split) @@ -3361,7 +3361,7 @@ lemma valid_obj'_reply_update: lemma valid_objs'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s \ - \ valid_objs' (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_objs' (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (clarsimp simp: valid_objs'_def obj_at'_def projectKOs) apply (erule ranE) apply (clarsimp simp: ps_clear_upd split: if_split_asm) @@ -3373,7 +3373,7 @@ lemma valid_objs'_ep_update: lemma valid_objs'_reply_update: "\ valid_objs' s; valid_reply' reply s; reply_at' rptr s \ - \ valid_objs' (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_objs' (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" apply (clarsimp simp: valid_objs'_def obj_at'_def projectKOs) apply (erule ranE) apply (clarsimp split: if_split_asm) @@ -3387,14 +3387,14 @@ lemma valid_release_queue_ksPSpace_update: "\valid_release_queue s; ko_wp_at' (\ko'. 
koTypeOf ko' = koTypeOf ko \ objBitsKO ko' = objBitsKO ko) ptr s; koTypeOf ko \ TCBT\ \ - valid_release_queue (s\ksPSpace := ksPSpace s(ptr \ ko)\)" + valid_release_queue (s\ksPSpace := (ksPSpace s)(ptr \ ko)\)" by (fastforce simp: valid_release_queue_def ko_wp_at'_def obj_at'_def projectKOs ps_clear_upd) lemma valid_release_queue'_ksPSpace_update: "\valid_release_queue' s; ko_wp_at' (\ko'. koTypeOf ko' = koTypeOf ko \ objBitsKO ko' = objBitsKO ko) ptr s; koTypeOf ko \ TCBT\ \ - valid_release_queue' (s\ksPSpace := ksPSpace s(ptr \ ko)\)" + valid_release_queue' (s\ksPSpace := (ksPSpace s)(ptr \ ko)\)" by (fastforce simp: valid_release_queue'_def ko_wp_at'_def obj_at'_def projectKOs ps_clear_upd) lemma sym_ref_Receive_or_Reply_replyTCB': @@ -4352,7 +4352,7 @@ lemma state_relation_sc_update: | _ \ hp' ptr else hp' p) (ksPSpace s)) = map_to_ctes (ksPSpace s)" by (clarsimp simp: obj_at_simps fun_upd_def[symmetric]) have z: "\s sc'::sched_context. ko_at' sc' ptr s - \ map_to_ctes (ksPSpace s(ptr \ KOSchedContext (f' sc'))) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(ptr \ KOSchedContext (f' sc'))) = map_to_ctes (ksPSpace s)" by (clarsimp simp: obj_at_simps) assume H: "(s, s') \ state_relation" "P s" "P' s'" "sc_at ptr s" "sc_at' ptr s'" show ?thesis @@ -4420,7 +4420,7 @@ qed (* update wp rules without ko_at' *) lemma updateSchedContext_wp: "\ \s. sc_at' sc_ptr s \ - Q (s\ksPSpace := ksPSpace s(sc_ptr \ KOSchedContext (f' (the (scs_of' s sc_ptr))))\) \ + Q (s\ksPSpace := (ksPSpace s)(sc_ptr \ KOSchedContext (f' (the (scs_of' s sc_ptr))))\) \ updateSchedContext sc_ptr f' \ \rv. Q \" by (wpsimp simp: updateSchedContext_def wp: set_sc'.set_wp) @@ -4548,26 +4548,26 @@ lemma getSchedContext_setSchedContext_decompose: apply simp+ apply (rename_tac s'; erule disjE; clarsimp?) - apply (drule_tac Q2="\s'. s' = (s\ksPSpace := ksPSpace s(scPtr \ injectKO (f sc))\)" + apply (drule_tac Q2="\s'. s' = (s\ksPSpace := (ksPSpace s)(scPtr \ injectKO (f sc))\)" in use_valid[OF _ setObject_sc_wp]) apply simp+ - apply (prop_tac "sc_at' scPtr (s\ksPSpace := ksPSpace s(scPtr \ KOSchedContext (f sc))\)") + apply (prop_tac "sc_at' scPtr (s\ksPSpace := (ksPSpace s)(scPtr \ KOSchedContext (f sc))\)") apply (clarsimp simp: obj_at'_def projectKOs objBits_simps' ps_clear_upd) - apply (frule_tac s="s\ksPSpace := ksPSpace s(scPtr \ KOSchedContext (f sc))\" + apply (frule_tac s="s\ksPSpace := (ksPSpace s)(scPtr \ KOSchedContext (f sc))\" in no_failD[OF no_fail_getMiscObject(4)]) apply clarsimp - apply (rename_tac s') - apply (drule_tac Q2="\s'. s' = (s\ksPSpace := ksPSpace s(scPtr \ injectKO (f sc))\)" - in use_valid[OF _ setObject_sc_wp]) - apply simp+ + apply (rename_tac s') + apply (drule_tac Q2="\s'. s' = (s\ksPSpace := (ksPSpace s)(scPtr \ injectKO (f sc))\)" + in use_valid[OF _ setObject_sc_wp]) + apply simp+ apply (frule sc_inv_state_eq, simp) apply (drule use_valid[OF _ get_sc_ko'], simp) apply (clarsimp simp: obj_at'_def projectKOs) apply (prop_tac "obj_at' (\k. 
objBits k = objBits (g (f sc))) scPtr - (s\ksPSpace := ksPSpace s(scPtr \ KOSchedContext (f sc))\)") + (s\ksPSpace := (ksPSpace s)(scPtr \ KOSchedContext (f sc))\)") apply (clarsimp simp: obj_at'_def projectKOs projectKO_opt_sc) apply (rule_tac x="f sc" in exI, clarsimp simp: projectKO_opt_sc) apply (drule_tac ob1="g (f sc)" in no_failD[OF no_fail_setObject_other, rotated]) @@ -4649,7 +4649,7 @@ lemma updateSchedContext_corres_gen: apply (rule conjI, clarsimp) apply (erule use_valid[OF _ updateSchedContext_wp]) apply clarsimp - apply (rule_tac x="((), s\kheap := kheap s(ptr \ + apply (rule_tac x="((), s\kheap := (kheap s)(ptr \ kernel_object.SchedContext (f sc) n)\)" in bexI) apply clarsimp apply (drule state_relation_sc_update[OF R1 R2 sz, simplified]) diff --git a/proof/refine/ARM/LevityCatch.thy b/proof/refine/ARM/LevityCatch.thy index fa448ba053..72c64a908d 100644 --- a/proof/refine/ARM/LevityCatch.thy +++ b/proof/refine/ARM/LevityCatch.thy @@ -8,6 +8,7 @@ theory LevityCatch imports "BaseRefine.Include" "Lib.LemmaBucket" + "Lib.Corres_Method" begin (* Try again, clagged from Include *) diff --git a/proof/refine/ARM/PageTableDuplicates.thy b/proof/refine/ARM/PageTableDuplicates.thy index fac997e00c..b2767ba49d 100644 --- a/proof/refine/ARM/PageTableDuplicates.thy +++ b/proof/refine/ARM/PageTableDuplicates.thy @@ -1644,7 +1644,7 @@ lemma unmapPage_valid_duplicates'[wp]: in mapM_x_storePDE_update_helper[where sz = 6]) apply wp+ apply (clarsimp simp:conj_comms) - apply (wp checkMappingPPtr_inv static_imp_wp)+ + apply (wp checkMappingPPtr_inv hoare_weak_lift_imp)+ apply (clarsimp simp:conj_comms) apply (rule hoare_post_imp_R[where Q'= "\r. pspace_aligned' and (\s. vs_valid_duplicates' (ksPSpace s)) and @@ -2003,7 +2003,7 @@ lemma performArchInvocation_valid_duplicates': apply (clarsimp simp:cte_wp_at_ctes_of) apply (case_tac ctea,clarsimp) apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) - apply (wp static_imp_wp|simp)+ + apply (wp hoare_weak_lift_imp|simp)+ apply (simp add:placeNewObject_def) apply (wp |simp add:alignError_def unless_def|wpc)+ apply (wp updateFreeIndex_pspace_no_overlap' hoare_drop_imp diff --git a/proof/refine/ARM/Refine.thy b/proof/refine/ARM/Refine.thy index 2bbe604319..1ab774ec92 100644 --- a/proof/refine/ARM/Refine.thy +++ b/proof/refine/ARM/Refine.thy @@ -234,8 +234,8 @@ lemma kernel_entry_invs_det_ext: kernel_entry e us \\_ s :: det_state. invs s \ (ct_running s \ ct_idle s)\" apply (simp add: kernel_entry_def) - apply (wp akernel_invs_det_ext thread_set_invs_trivial thread_set_ct_in_state select_wp - static_imp_wp hoare_vcg_disj_lift hoare_vcg_imp_lift' + apply (wp akernel_invs_det_ext thread_set_invs_trivial thread_set_ct_in_state + hoare_weak_lift_imp hoare_vcg_disj_lift hoare_vcg_imp_lift' | clarsimp simp add: tcb_cap_cases_def)+ done @@ -250,7 +250,7 @@ lemma kernel_entry_valid_sched: \\_. 
valid_sched :: det_state \ _\" apply (simp add: kernel_entry_def) apply (wp call_kernel_valid_sched thread_set_invs_trivial thread_set_ct_in_state - static_imp_wp hoare_vcg_disj_lift thread_set_not_state_valid_sched + hoare_weak_lift_imp hoare_vcg_disj_lift thread_set_not_state_valid_sched | clarsimp simp add: tcb_cap_cases_def)+ done @@ -284,7 +284,7 @@ lemma kernel_entry_invs: apply (clarsimp simp: kernel_entry_def) apply (wp thread_set_invs_trivial thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state - static_imp_wp call_kernel_cur_sc_offset_ready_and_sufficient + hoare_weak_lift_imp call_kernel_cur_sc_offset_ready_and_sufficient | clarsimp simp: tcb_cap_cases_def)+ apply (rule hoare_vcg_conj_lift_pre_fix) apply (wpsimp wp: kernel_entry_valid_sched) @@ -297,20 +297,20 @@ lemma kernel_entry_invs: apply (rule hoare_vcg_conj_lift_pre_fix) apply (clarsimp simp: kernel_entry_def) apply (wp thread_set_invs_trivial thread_set_not_state_valid_sched hoare_vcg_disj_lift - ct_in_state_thread_state_lift thread_set_no_change_tcb_state static_imp_wp + ct_in_state_thread_state_lift thread_set_no_change_tcb_state hoare_weak_lift_imp call_kernel_schact_is_rct[unfolded schact_is_rct_def] | clarsimp simp: tcb_cap_cases_def)+ apply (rule hoare_vcg_conj_lift_pre_fix) apply (clarsimp simp: kernel_entry_def) apply (wp thread_set_invs_trivial thread_set_not_state_valid_sched hoare_vcg_disj_lift - ct_in_state_thread_state_lift thread_set_no_change_tcb_state static_imp_wp + ct_in_state_thread_state_lift thread_set_no_change_tcb_state hoare_weak_lift_imp call_kernel_cur_sc_active | clarsimp simp: tcb_cap_cases_def)+ apply (rule hoare_vcg_conj_lift_pre_fix) apply (clarsimp simp: kernel_entry_def) apply (wp thread_set_invs_trivial thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state - static_imp_wp call_kernel_ct_not_in_release_q + hoare_weak_lift_imp call_kernel_ct_not_in_release_q | clarsimp simp: tcb_cap_cases_def)+ apply (rule hoare_vcg_conj_lift_pre_fix) apply (clarsimp simp: kernel_entry_def) @@ -342,7 +342,6 @@ crunches do_user_op, check_active_irq and cur_sc_offset_ready[wp]: "\s. cur_sc_offset_ready (consumed_time s) s" and cur_sc_offset_sufficient[wp]: "\s. cur_sc_offset_sufficient (consumed_time s) s" and consumed_time_bounded[wp]: consumed_time_bounded - (wp: select_wp) lemma device_update_valid_machine_time[wp]: "do_machine_op (device_memory_update ds) \valid_machine_time\" @@ -359,7 +358,7 @@ lemma user_memory_update_valid_machine_time[wp]: lemma do_user_op_valid_machine_time[wp]: "do_user_op f tc \valid_machine_time\" apply (simp add: do_user_op_def) - apply (wpsimp wp: select_wp) + apply wpsimp done lemma check_active_irq_valid_machine_time[wp]: @@ -567,13 +566,13 @@ lemma kernelEntry_invs': apply (fastforce simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def) apply (simp add: kernelEntry_def) apply (wpsimp wp: ckernel_invs callKernel_valid_duplicates' threadSet_invs_trivial - threadSet_ct_in_state' static_imp_wp hoare_vcg_disj_lift threadSet_sym_heap_tcbSCs + threadSet_ct_in_state' hoare_weak_lift_imp hoare_vcg_disj_lift threadSet_sym_heap_tcbSCs | wps)+ apply (rule hoare_vcg_conj_lift) apply (wpsimp wp: threadSet_wp) apply (wpsimp wp: threadSet_invs_trivial; simp?) 
- apply (wpsimp wp: threadSet_ct_running' static_imp_wp)+ - apply (fastforce simp: obj_at'_def projectKOs pred_map_def opt_map_red) + apply (wpsimp wp: threadSet_ct_running' hoare_weak_lift_imp)+ + apply (fastforce simp: obj_at'_def projectKOs pred_map_def opt_map_red) done lemma absKState_correct': @@ -631,7 +630,7 @@ lemma doUserOp_invs': (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and valid_domain_list'\" apply (simp add: doUserOp_def split_def ex_abs_def) - apply (wp device_update_invs' select_wp + apply (wp device_update_invs' | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def user_memory_update_def))+ apply (clarsimp simp: user_memory_update_def simpler_modify_def @@ -645,7 +644,7 @@ lemma doUserOp_valid_duplicates': doUserOp f tc \\_ s. vs_valid_duplicates' (ksPSpace s)\" apply (simp add: doUserOp_def split_def) - apply (wp dmo_invs' select_wp) + apply (wp dmo_invs') apply clarsimp done @@ -1026,7 +1025,7 @@ lemma entry_corres: apply wpsimp apply (wp thread_set_invs_trivial threadSet_invs_trivial threadSet_ct_running' - select_wp thread_set_not_state_valid_sched static_imp_wp + thread_set_not_state_valid_sched hoare_weak_lift_imp hoare_vcg_disj_lift ct_in_state_thread_state_lift | simp add: tcb_cap_cases_def ct_in_state'_def thread_set_no_change_tcb_state | (wps, wp threadSet_st_tcb_at2) )+ diff --git a/proof/refine/ARM/Reply_R.thy b/proof/refine/ARM/Reply_R.thy index a9418f69cd..b9809f0600 100644 --- a/proof/refine/ARM/Reply_R.thy +++ b/proof/refine/ARM/Reply_R.thy @@ -881,7 +881,7 @@ lemma updateReply_obj_at'_inv: "\x. P (f x) = P x \ updateReply rPtr f \\s. Q (obj_at' (P :: reply \ bool) rp s)\" apply (wpsimp wp: updateReply_wp_all) - apply (subgoal_tac "obj_at' P rp s = (obj_at' P rp (s\ksPSpace := ksPSpace s(rPtr \ KOReply (f ko))\))") + apply (subgoal_tac "obj_at' P rp s = (obj_at' P rp (s\ksPSpace := (ksPSpace s)(rPtr \ KOReply (f ko))\))") apply simp by (force simp: obj_at'_real_def ko_wp_at'_def objBitsKO_def ps_clear_def projectKO_reply) @@ -1063,7 +1063,7 @@ end lemma replyPrevs_of_replyNext_update: "ko_at' reply' rp s' \ - replyPrevs_of (s'\ksPSpace := ksPSpace s'(rp \ + replyPrevs_of (s'\ksPSpace := (ksPSpace s')(rp \ KOReply (reply' \ replyNext := v \))\) = replyPrevs_of s'" apply (clarsimp simp: obj_at'_def projectKOs isNext_def split: option.split_asm reply_next.split_asm) @@ -1071,7 +1071,7 @@ lemma replyPrevs_of_replyNext_update: lemma scs_of'_reply_update: "reply_at' rp s' \ - scs_of' (s'\ksPSpace := ksPSpace s'(rp \ KOReply reply)\) = scs_of' s'" + scs_of' (s'\ksPSpace := (ksPSpace s')(rp \ KOReply reply)\) = scs_of' s'" apply (clarsimp simp: obj_at'_def projectKOs isNext_def split: option.split_asm reply_next.split_asm) by (fastforce simp: projectKO_opt_sc opt_map_def) diff --git a/proof/refine/ARM/Retype_R.thy b/proof/refine/ARM/Retype_R.thy index d685e6fdbe..994360c7d2 100644 --- a/proof/refine/ARM/Retype_R.thy +++ b/proof/refine/ARM/Retype_R.thy @@ -2627,7 +2627,6 @@ lemmas object_splits = declare hoare_in_monad_post[wp del] declare univ_get_wp[wp del] -declare result_in_set_wp[wp del] crunch valid_arch_state'[wp]: copyGlobalMappings "valid_arch_state'" (wp: crunch_wps) @@ -4760,7 +4759,7 @@ proof - apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) apply (rule hoare_pre) apply (wps a b c d) - apply (wp static_imp_wp e' hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) apply (auto simp: obj_at'_def ct_in_state'_def projectKOs st_tcb_at'_def) done qed diff --git 
a/proof/refine/ARM/SchedContextInv_R.thy b/proof/refine/ARM/SchedContextInv_R.thy index 9925e28092..dfe2ac2211 100644 --- a/proof/refine/ARM/SchedContextInv_R.thy +++ b/proof/refine/ARM/SchedContextInv_R.thy @@ -174,25 +174,25 @@ lemma decodeSchedcontext_Bind_corres: apply (rename_tac cap list) apply (cases excaps'; clarsimp) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) apply (case_tac cap; clarsimp) apply (clarsimp simp: bindE_assoc) apply (rule corres_splitEE_skip; (solves wpsimp)?) - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (rule corres_splitEE_forwards'[where r'="(=)"]; (solves wpsimp)?) - apply (corressimp corres: getNotification_corres + apply (corresKsimp corres: getNotification_corres simp: get_sk_obj_ref_def ntfn_relation_def valid_cap_def valid_cap'_def wp: hoare_vcg_all_lift) apply (rule corres_splitEE_skip; (solves wpsimp)?) - apply (corressimp corres: getNotification_corres + apply (corresKsimp corres: getNotification_corres simp: get_sk_obj_ref_def sc_relation_def) apply (clarsimp simp: returnOk_def) apply (clarsimp simp: bindE_assoc get_tcb_obj_ref_def) apply (rule corres_splitEE_skip; (solves wpsimp)?) - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (rule corres_splitEE_forwards'[where r'="(=)"]) apply (subst corres_liftE_rel_sum) apply (rule corres_guard_imp) @@ -203,7 +203,7 @@ lemma decodeSchedcontext_Bind_corres: apply (rule liftE_validE[THEN iffD2, OF thread_get_sp]) apply (rule liftE_validE[THEN iffD2, OF threadGet_sp]) apply (rule corres_splitEE_skip; (solves \wpsimp simp: valid_cap'_def obj_at'_def\)?) 
- apply (corressimp corres: getNotification_corres + apply (corresKsimp corres: getNotification_corres simp: get_sk_obj_ref_def sc_relation_def) apply (rule corres_guard_imp) apply (rule corres_split_eqrE) @@ -237,25 +237,25 @@ lemma decodeSchedContext_UnbindObject_corres: apply (case_tac cap; clarsimp) apply (clarsimp simp: bindE_assoc get_sc_obj_ref_def liftE_bind_return_bindE_returnOk) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (clarsimp simp: bindE_assoc get_sc_obj_ref_def liftE_bind_return_bindE_returnOk) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) apply (rule corres_splitEE_forwards') - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (rule whenE_throwError_sp[simplified validE_R_def])+ apply (rule corres_splitEE_forwards') - apply (corressimp corres: getCurThread_corres) + apply (corresKsimp corres: getCurThread_corres) apply (rule liftE_validE[THEN iffD2, OF gets_sp]) apply (rule liftE_validE[THEN iffD2, OF getCurThread_sp]) - apply corressimp + apply corresKsimp done lemma decodeSchedContext_YieldTo_corres: @@ -268,19 +268,19 @@ lemma decodeSchedContext_YieldTo_corres: apply (clarsimp simp: decode_sched_context_yield_to_def decodeSchedContext_YieldTo_def) apply (clarsimp simp: bindE_assoc get_sc_obj_ref_def liftE_bind_return_bindE_returnOk) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) apply (rule corres_splitEE_forwards') - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (rule whenE_throwError_sp[simplified validE_R_def])+ apply (rule corres_splitEE_forwards') - apply (corressimp corres: getCurThread_corres) + apply (corresKsimp corres: getCurThread_corres) apply (rule liftE_validE[THEN iffD2, OF gets_sp]) apply (rule liftE_validE[THEN iffD2, OF getCurThread_sp]) apply (rule corres_splitEE_skip; (solves wpsimp)?) - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (clarsimp simp: sc_relation_def) apply (rule corres_splitEE_forwards'[where r'="(=)"]) apply (subst corres_liftE_rel_sum) @@ -302,7 +302,7 @@ lemma decodeSchedContext_YieldTo_corres: apply (fastforce simp: cur_tcb'_def) apply (rule liftE_validE[THEN iffD2, OF thread_get_sp]) apply (rule liftE_validE[THEN iffD2, OF threadGet_sp]) - apply (rule corres_splitEE_skip; corressimp; fastforce?) + apply (rule corres_splitEE_skip; corresKsimp; fastforce?) 
apply (rule corres_splitEE_forwards'[where r'="(=)"]) apply (subst corres_liftE_rel_sum) apply (rule corres_guard_imp) @@ -312,7 +312,7 @@ lemma decodeSchedContext_YieldTo_corres: apply fastforce apply (rule liftE_validE[THEN iffD2, OF thread_get_sp]) apply (rule liftE_validE[THEN iffD2, OF threadGet_sp]) - apply (rule corres_splitEE_skip; corressimp; fastforce?) + apply (rule corres_splitEE_skip; corresKsimp; fastforce?) done lemma decode_sc_inv_corres: @@ -330,20 +330,20 @@ lemma decode_sc_inv_corres: ; clarsimp split: gen_invocation_labels.split list.splits split del: if_split) apply (clarsimp simp: returnOk_def) - apply (corressimp corres: decodeSchedcontext_Bind_corres) + apply (corresKsimp corres: decodeSchedcontext_Bind_corres) defer - apply (corressimp corres: decodeSchedContext_UnbindObject_corres) - apply (corressimp corres: decodeSchedContext_YieldTo_corres) + apply (corresKsimp corres: decodeSchedContext_UnbindObject_corres) + apply (corresKsimp corres: decodeSchedContext_YieldTo_corres) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) apply (rule corres_splitEE_forwards') - apply (corressimp corres: getCurThread_corres) + apply (corresKsimp corres: getCurThread_corres) apply (rule liftE_validE[THEN iffD2, OF gets_sp]) apply (rule liftE_validE[THEN iffD2, OF getCurThread_sp]) - apply (rule corres_splitEE_skip; corressimp; fastforce?) + apply (rule corres_splitEE_skip; corresKsimp; fastforce?) apply (clarsimp simp: sc_relation_def) done @@ -359,12 +359,12 @@ lemma decode_sc_ctrl_inv_corres: apply (rename_tac cap list) apply (cases excaps'; clarsimp) apply (rule corres_splitEE_skip; (solves wpsimp)?) - apply corressimp + apply corresKsimp apply (rule corres_splitEE_forwards') - apply corressimp + apply corresKsimp apply (case_tac cap; clarsimp simp: isSchedContextCap_def) apply (rule whenE_throwError_sp[simplified validE_R_def])+ - apply corressimp + apply corresKsimp apply (auto simp: minBudgetUs_def MIN_BUDGET_US_def maxPeriodUs_def parse_time_arg_def parseTimeArg_def usToTicks_def minRefills_def MIN_REFILLS_def max_num_refills_eq_refillAbsoluteMax' refillAbsoluteMax_def max_refills_cap_def @@ -1296,7 +1296,7 @@ lemma invokeSchedControlConfigureFlags_corres: apply (wpsimp wp: setSchedContext_invs') apply (fastforce dest!: sc_ko_at_valid_objs_valid_sc') - apply (corressimp corres: updateSchedContext_corres) + apply (corresKsimp corres: updateSchedContext_corres) apply (intro conjI impI allI) apply (rename_tac abs_state conc_state n') apply (frule_tac ptr=sc_ptr and s=abs_state in state_relation_sc_relation; simp?) @@ -1328,7 +1328,7 @@ lemma invokeSchedControlConfigureFlags_corres: apply (wpsimp wp: setSchedContext_invs') apply (fastforce dest!: sc_ko_at_valid_objs_valid_sc') - apply (corressimp corres: updateSchedContext_corres) + apply (corresKsimp corres: updateSchedContext_corres) apply (intro conjI impI allI) apply (rename_tac abs_state conc_state n') apply (frule_tac ptr=sc_ptr and s=abs_state in state_relation_sc_relation; simp?) @@ -1381,7 +1381,7 @@ lemma invokeSchedControlConfigureFlags_corres: in corres_underlying_split) apply (clarsimp simp: when_def split del: if_split) - apply (rule corres_if_split; (solves \corressimp simp: sc_relation_def\)?) 
+ apply (rule corres_if_split; (solves \corresKsimp simp: sc_relation_def\)?) apply (rule corres_symb_exec_l[rotated]) apply (wpsimp wp: exs_valid_assert_opt) apply (rule assert_opt_sp) @@ -1500,7 +1500,7 @@ lemma invokeSchedControlConfigureFlags_corres: apply (rule corres_symb_exec_l[rotated 2, OF assert_opt_sp]; (solves wpsimp)?) apply (rule corres_underlying_split[rotated 2, OF gts_sp isRunnable_sp]) - apply (corressimp corres: isRunnable_corres') + apply (corresKsimp corres: isRunnable_corres') apply (fastforce simp: sc_relation_def sc_at_pred_n_def obj_at_def intro!: tcb_at_cross Some_to_the) @@ -1601,7 +1601,7 @@ lemma invokeSchedControlConfigureFlags_corres: apply (clarsimp simp: sc_relation_def) apply (rule corres_underlying_split[rotated 2, OF gts_sp isRunnable_sp]) - apply (corressimp corres: isRunnable_corres') + apply (corresKsimp corres: isRunnable_corres') apply (fastforce simp: sc_relation_def sc_at_pred_n_def obj_at_def intro!: tcb_at_cross Some_to_the) diff --git a/proof/refine/ARM/SchedContext_R.thy b/proof/refine/ARM/SchedContext_R.thy index ed64750511..7de61adb07 100644 --- a/proof/refine/ARM/SchedContext_R.thy +++ b/proof/refine/ARM/SchedContext_R.thy @@ -172,7 +172,7 @@ lemma schedContextUpdateConsumed_corres: apply (clarsimp simp: sched_context_update_consumed_def schedContextUpdateConsumed_def) apply (simp add: maxTicksToUs_def ticksToUs_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rename_tac abs_sc conc_sc) apply (rule corres_if_split) apply (clarsimp simp: sc_relation_def) @@ -842,7 +842,7 @@ lemma get_sc_released_corres: (get_sc_released sc_ptr) (scReleased sc_ptr)" apply (simp add: get_sc_released_def scReleased_def scActive_def refillReady_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rename_tac sc') apply (rule corres_symb_exec_l[rotated 2, OF gets_sp]; (solves wpsimp)?) apply (rule corres_symb_exec_r[rotated, OF gets_the_sp]; (solves wpsimp)?) diff --git a/proof/refine/ARM/Schedule_R.thy b/proof/refine/ARM/Schedule_R.thy index 3815bdf64a..13332dbb1c 100644 --- a/proof/refine/ARM/Schedule_R.thy +++ b/proof/refine/ARM/Schedule_R.thy @@ -14,7 +14,7 @@ crunches scReleased, getReprogramTimer, getCurTime, getRefills, getReleaseQueue, context begin interpretation Arch . (*FIXME: arch_split*) -declare static_imp_wp[wp_split del] +declare hoare_weak_lift_imp[wp_split del] (* Levity: added (20090713 10:04:12) *) declare sts_rel_idle [simp] @@ -137,7 +137,7 @@ proof - apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) done have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" - by (wp alternative_wp | simp)+ + by (wp | simp)+ show ?thesis using p apply (induct xs) apply (simp add: y del: dc_simp) @@ -584,7 +584,7 @@ lemma ct_idle_or_in_cur_domain'_lift2: apply (unfold ct_idle_or_in_cur_domain'_def) apply (rule hoare_lift_Pf2[where f=ksCurThread]) apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) - apply (wp static_imp_wp hoare_vcg_disj_lift | assumption)+ + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift | assumption)+ done lemma tcbSchedEnqueue_invs'[wp]: @@ -894,7 +894,7 @@ lemma arch_switchToIdleThread_corres: Arch.switchToIdleThread" apply (simp add: arch_switch_to_idle_thread_def ARM_H.switchToIdleThread_def) - apply (corressimp corres: getIdleThread_corres setVMRoot_corres[@lift_corres_args]) + apply (corresKsimp corres: getIdleThread_corres setVMRoot_corres[@lift_corres_args]) apply (clarsimp simp: valid_idle_def valid_idle'_def pred_tcb_at_def obj_at_def is_tcb obj_at'_def) done @@ -1416,7 +1416,7 @@ lemma switchToIdleThread_invs': crunch obj_at'[wp]: "Arch.switchToIdleThread" "\s. obj_at' P t s" -declare static_imp_conj_wp[wp_split del] +declare hoare_weak_lift_imp_conj[wp_split del] lemma setCurThread_const: "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" @@ -3336,7 +3336,7 @@ lemma tcbReleaseDequeue_corres: apply (rename_tac rq) apply (simp add: bind_assoc) apply (rule corres_underlying_split[rotated 2, OF gets_sp getReleaseQueue_sp]) - apply (corressimp corres: getReleaseQueue_corres) + apply (corresKsimp corres: getReleaseQueue_corres) apply clarsimp apply (rename_tac rq') @@ -3424,13 +3424,13 @@ lemma awakenBody_corres: apply (wpsimp simp: tcb_release_dequeue_def) apply (force simp: valid_release_q_def vs_all_heap_simps obj_at_def is_tcb_def) apply wpsimp - apply (corressimp corres: tcbReleaseDequeue_corres) + apply (corresKsimp corres: tcbReleaseDequeue_corres) apply (rule corres_symb_exec_r[OF _ isRunnable_sp, rotated]; (solves wpsimp)?) apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) apply wpsimp apply (clarsimp simp: st_tcb_at'_def obj_at'_def projectKOs objBitsKO_def) apply (case_tac "tcbState tcb'"; clarsimp) - apply (corressimp corres: possibleSwitchTo_corres) + apply (corresKsimp corres: possibleSwitchTo_corres) done lemma tcbReleaseDequeue_no_fail: @@ -3655,7 +3655,7 @@ lemma isRoundRobin_corres: "corres (=) (sc_at sc_ptr) (sc_at' sc_ptr) (is_round_robin sc_ptr) (isRoundRobin sc_ptr)" apply (clarsimp simp: is_round_robin_def isRoundRobin_def) - apply (corressimp corres: get_sc_corres + apply (corresKsimp corres: get_sc_corres simp: sc_relation_def) done @@ -3756,7 +3756,7 @@ lemma refillHeadOverlapping_corres_eq: by linarith+ lemma refillPopHead_scs_of'[wp]: - "\\s'. P (scs_of' s'(scp \ (\sc'. scRefillCount_update (\_. scRefillCount sc' - Suc 0) + "\\s'. P ((scs_of' s')(scp \ (\sc'. scRefillCount_update (\_. scRefillCount sc' - Suc 0) (scRefillHead_update (\_. refillNextIndex (scRefillHead sc') sc') sc')) (the (scs_of' s' scp))))\ @@ -3771,7 +3771,7 @@ crunches update_refill_hd, refill_pop_head, merge_refills, schedule_used, handle simp: crunch_simps update_refill_hd_rewrite update_sched_context_set_refills_rewrite) lemma merge_refills_scs_of2[wp]: - "\\s. P (scs_of2 s(scp \ (\sc. sc_refills_update + "\\s. P ((scs_of2 s)(scp \ (\sc. sc_refills_update (\_. 
merge_refill (refill_hd sc) (hd (tl (sc_refills sc))) # tl (tl (sc_refills sc))) sc) (the (scs_of2 s scp)))) \ merge_refills scp @@ -4048,15 +4048,15 @@ lemma maybeAddEmptyTail_corres: apply (fastforce dest!: sc_at'_cross[OF state_relation_pspace_relation]) apply (clarsimp simp: maybe_add_empty_tail_def maybeAddEmptyTail_def get_refills_def) apply (rule corres_underlying_split[rotated 2, OF is_round_robin_sp isRoundRobin_sp]) - apply (corressimp corres: isRoundRobin_corres) + apply (corresKsimp corres: isRoundRobin_corres) apply (clarsimp simp: obj_at_def is_sc_obj) apply (clarsimp simp: when_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: valid_objs_valid_sched_context_size simp: obj_at_def is_sc_obj_def) apply (rename_tac sc') - apply (corressimp corres: refillAddTail_corres) + apply (corresKsimp corres: refillAddTail_corres) apply (frule refill_hd_relation; clarsimp simp: obj_at'_def projectKOs opt_map_red opt_pred_def) apply (fastforce dest: valid_objs_valid_sched_context_size simp: obj_at_def is_sc_obj_def refill_map_def) @@ -4090,7 +4090,7 @@ lemma refillBudgetCheckRoundRobin_corres: apply (subst is_active_sc_rewrite) apply (clarsimp simp: refill_budget_check_round_robin_def refillBudgetCheckRoundRobin_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule_tac Q="\s. is_active_sc' (ksCurSc s) s" in corres_cross_add_guard) apply (rule_tac ptr="ksCurSc s'" in is_active_sc'_cross[OF state_relation_pspace_relation]; simp) apply clarsimp @@ -4210,7 +4210,7 @@ lemma nonOverlappingMergeRefills_corres: and Q'="\_. valid_refills' scPtr and sc_at' scPtr" in corres_underlying_split ; (solves wpsimp)?) - apply (corressimp corres: refillPopHead_corres + apply (corresKsimp corres: refillPopHead_corres simp: obj_at_def vs_all_heap_simps pred_map_simps sc_at_ppred_def) apply (subst update_refill_hd_comp) apply (rule corres_guard_imp) @@ -4349,15 +4349,13 @@ lemma headInsufficientLoop_corres: apply (rule_tac Q="active_sc_at' scPtr" in corres_cross_add_guard) apply (fastforce dest: active_sc_at'_cross) apply (rule corres_whileLoop_abs; simp?) - apply (frule head_insufficient_equiv[where scPtr=scPtr]; simp?) - apply (fastforce intro: refills_unat_sum_MIN_BUDGET_implies_non_empty_refills) - apply (corressimp corres: nonOverlappingMergeRefills_corres) - apply (fastforce dest: head_insufficient_length_at_least_two) - apply (wpsimp wp: non_overlapping_merge_refills_no_fail) - apply (fastforce intro!: refills_unat_sum_MIN_BUDGET_implies_non_empty_refills sc_atD1 - simp: obj_at_def) - apply (wpsimp wp: non_overlapping_merge_refills_refills_unat_sum_lower_bound - non_overlapping_merge_refills_refills_unat_sum) + apply (frule head_insufficient_equiv[where scPtr=scPtr]; simp?) 
+ apply (fastforce intro: refills_unat_sum_MIN_BUDGET_implies_non_empty_refills) + apply (corresKsimp corres: nonOverlappingMergeRefills_corres) + apply (fastforce dest: head_insufficient_length_at_least_two) + apply (wpsimp wp: non_overlapping_merge_refills_no_fail) + apply (wpsimp wp: non_overlapping_merge_refills_refills_unat_sum_lower_bound + non_overlapping_merge_refills_refills_unat_sum) apply (fastforce dest: head_insufficient_length_greater_than_one) apply (wpsimp wp: nonOverlappingMergeRefills_valid_objs') apply (fastforce intro!: non_overlapping_merge_refills_terminates) @@ -4385,8 +4383,8 @@ lemma refillFull_corres: apply (fastforce intro: sc_at_cross) apply (clarsimp simp: refill_full_def refillFull_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) - apply (corressimp corres: corres_return_eq_same) + apply (corresKsimp corres: get_sc_corres) + apply (corresKsimp corres: corres_return_eq_same) apply (fastforce simp: sc_relation_def obj_at_simps valid_refills'_def opt_map_red opt_pred_def) done @@ -4402,7 +4400,7 @@ lemma scheduleUsed_corres: apply (rule_tac Q="is_active_sc' scPtr" in corres_cross_add_guard) apply (fastforce intro: is_active_sc'_cross) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rename_tac sc sc') apply (rule corres_symb_exec_r[rotated, OF assert_sp]; (solves wpsimp)?) apply wpsimp @@ -4413,21 +4411,21 @@ lemma scheduleUsed_corres: apply (fastforce dest: length_sc_refills_cross[where P="\l. 0 = l"] simp: valid_refills'_def obj_at_simps opt_map_red opt_pred_def) apply (rule corres_if_split; (solves simp)?) - apply (corressimp corres: refillAddTail_corres simp: refill_map_def) + apply (corresKsimp corres: refillAddTail_corres simp: refill_map_def) apply (clarsimp simp: valid_refills'_def obj_at_simps opt_map_red opt_pred_def) apply (rule_tac F="sc_valid_refills' sc'" in corres_req) apply (clarsimp simp: valid_refills'_def obj_at_simps opt_map_red opt_pred_def) apply (rule corres_if_split; (solves simp)?) apply (fastforce dest: refills_tl_equal simp: refill_map_def can_merge_refill_def) - apply (corressimp corres: updateRefillTl_corres + apply (corresKsimp corres: updateRefillTl_corres simp: refill_map_def) apply (rule corres_underlying_split[rotated 2, OF refill_full_sp refillFull_sp]) - apply (corressimp corres: refillFull_corres) + apply (corresKsimp corres: refillFull_corres) apply (rule corres_if_split; (solves simp)?) 
- apply (corressimp corres: refillAddTail_corres) + apply (corresKsimp corres: refillAddTail_corres) apply (clarsimp simp: refill_map_def obj_at_simps opt_map_red opt_pred_def) - apply (corressimp corres: updateRefillTl_corres simp: refill_map_def) + apply (corresKsimp corres: updateRefillTl_corres simp: refill_map_def) done lemma head_time_buffer_simp: @@ -4502,18 +4500,18 @@ lemma handleOverrunLoopBody_corres: apply (fastforce intro: is_active_sc'_cross simp: state_relation_def) apply (clarsimp simp: handle_overrun_loop_body_def handleOverrunLoopBody_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule corres_underlying_split[rotated 2, OF refill_single_sp refillSingle_sp]) - apply (corressimp corres: refillSingle_corres) + apply (corresKsimp corres: refillSingle_corres) apply (fastforce simp: obj_at_simps valid_refills'_def opt_map_red opt_pred_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rename_tac sc sc') apply (rule_tac Q="\_ s. sc_refills sc \ []" and Q'="\_ _. sc_valid_refills' sc'" and r'=dc in corres_underlying_split[rotated]) - apply corressimp + apply corresKsimp apply (fastforce dest: refill_hd_relation simp: refill_map_def) apply (wpsimp simp: update_refill_hd_def wp: update_sched_context_wp) @@ -4521,7 +4519,7 @@ lemma handleOverrunLoopBody_corres: apply wpsimp apply (clarsimp simp: valid_refills'_def obj_at_simps opt_map_red opt_pred_def) apply (rule corres_if_split; simp?) - apply (corressimp corres: updateRefillHd_corres) + apply (corresKsimp corres: updateRefillHd_corres) apply (fastforce simp: refill_map_def sc_relation_def) apply (rule_tac F="1 < scRefillCount sc'" in corres_req) apply (frule_tac scp="scPtr" and P="\l. 1 < l" in length_sc_refills_cross) @@ -4691,13 +4689,11 @@ lemma handleOverrunLoop_corres: apply (fastforce intro: is_active_sc'_cross simp: state_relation_def) apply (clarsimp simp: handle_overrun_loop_def handleOverrunLoop_def runReaderT_def) apply (rule corres_whileLoop_abs; simp?) - apply (frule_tac usage=r' in head_time_buffer_equiv; simp?) - apply fastforce - apply (corressimp corres: handleOverrunLoopBody_corres) - apply (wpsimp wp: handle_overrun_loop_body_no_fail) - apply (clarsimp simp: vs_all_heap_simps) + apply (frule_tac usage=r' in head_time_buffer_equiv; simp?) + apply fastforce + apply (corresKsimp corres: handleOverrunLoopBody_corres) apply (wps_conj_solves wp: handle_overrun_loop_body_non_zero_refills - handle_overrun_loop_body_refills_unat_sum_equals_budget) + handle_overrun_loop_body_refills_unat_sum_equals_budget) apply wps_conj_solves apply (fastforce intro: handle_overrun_loop_body_terminates) done @@ -4736,13 +4732,13 @@ lemma refillBudgetCheck_corres: apply (clarsimp simp: refill_budget_check_def refillBudgetCheck_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule corres_symb_exec_r[rotated, OF scActive_sp]; (solves \wpsimp simp: scActive_def\)?) apply (rule corres_symb_exec_r[rotated, OF assert_sp]; (solves wpsimp)?) 
apply (wpsimp wp: no_fail_assert simp: is_active_sc'_def opt_map_red opt_pred_def obj_at_simps) apply (rule corres_underlying_split[rotated 2, OF is_round_robin_sp isRoundRobin_sp]) - apply (corressimp corres: isRoundRobin_corres) + apply (corresKsimp corres: isRoundRobin_corres) apply (rule corres_symb_exec_l[rotated, OF _ assert_sp]; (solves wpsimp)?) apply (rule_tac F="\roundRobin" in corres_req) apply clarsimp @@ -4763,7 +4759,7 @@ lemma refillBudgetCheck_corres: (sc_refill_cfgs_of s) sc_ptr" and Q'="\_ s'. valid_refills' scPtr s' \ active_sc_at' scPtr s' \ scPtr = ksCurSc s'" in corres_underlying_split) - apply (corressimp corres: handleOverrunLoop_corres) + apply (corresKsimp corres: handleOverrunLoop_corres) apply (fastforce intro: valid_refills_refills_unat_sum_equals_budget simp: vs_all_heap_simps cfg_valid_refills_def round_robin_def sp_valid_refills_def is_active_sc_rewrite[symmetric]) @@ -4786,7 +4782,7 @@ lemma refillBudgetCheck_corres: apply (clarsimp simp: get_refills_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres + apply (corresKsimp corres: get_sc_corres simp: state_relation_def active_sc_at'_def obj_at_simps) apply (rename_tac sc sc') apply (rule_tac Q="\_ s. ?P s @@ -4799,7 +4795,7 @@ lemma refillBudgetCheck_corres: and Q'="\_ s'. valid_refills' scPtr s' \ active_sc_at' scPtr s' \ scPtr = ksCurSc s'" and r'=dc in corres_underlying_split[rotated]) - apply (corressimp corres: headInsufficientLoop_corres) + apply (corresKsimp corres: headInsufficientLoop_corres) apply (fastforce simp: vs_all_heap_simps word_le_nat_alt) apply (intro hoare_vcg_conj_lift_pre_fix; (solves wpsimp)?) apply schedule_used_simple @@ -4901,10 +4897,10 @@ lemma commitTime_corres: apply (fastforce intro: sc_at_cross simp: state_relation_def) apply (clarsimp simp: commit_time_def commitTime_def liftM_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply clarsimp apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rule corres_symb_exec_r[rotated, OF getIdleSC_sp]) apply wpsimp apply (wpsimp simp: getIdleSC_def) @@ -4920,7 +4916,7 @@ lemma commitTime_corres: apply (rule corres_if_split; fastforce?) apply (fastforce simp: sc_relation_def active_sc_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getConsumedTime_sp]) - apply corressimp + apply corresKsimp apply clarsimp apply (rename_tac consumed) apply (rule_tac Q="\_ s. 
sc_at (cur_sc s) s \ csc = cur_sc s" @@ -4951,8 +4947,8 @@ lemma commitTime_corres: apply (wpsimp simp: isRoundRobin_def | wps)+ apply (clarsimp simp: ifM_def split: if_split) apply (rule corres_underlying_split[rotated 2, OF is_round_robin_sp isRoundRobin_sp]) - apply (corressimp corres: isRoundRobin_corres) - apply (corressimp corres: refillBudgetCheckRoundRobin_corres refillBudgetCheck_corres) + apply (corresKsimp corres: isRoundRobin_corres) + apply (corresKsimp corres: refillBudgetCheckRoundRobin_corres refillBudgetCheck_corres) apply (fastforce simp: obj_at_def vs_all_heap_simps is_sc_obj_def obj_at_simps sc_relation_def is_active_sc'_def opt_map_red opt_pred_def active_sc_def) done @@ -4971,10 +4967,10 @@ lemma switchSchedContext_corres: apply add_cur_tcb' apply (clarsimp simp: switch_sched_context_def switchSchedContext_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (clarsimp, rename_tac curScPtr) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurThread_sp]) - apply corressimp + apply corresKsimp apply (clarsimp, rename_tac ct) apply (rule corres_underlying_split[rotated 2, OF gsc_sp threadGet_sp, where r'="(=)"]) apply (rule corres_guard_imp) @@ -5005,19 +5001,19 @@ lemma switchSchedContext_corres: (solves wpsimp)?) apply (clarsimp simp: when_def) apply (rule corres_split_skip; (solves \wpsimp wp: hoare_vcg_ex_lift\)?) - apply (corressimp corres: setReprogramTimer_corres) - apply (corressimp corres: ifCondRefillUnblockCheck_corres) + apply (corresKsimp corres: setReprogramTimer_corres) + apply (corresKsimp corres: ifCondRefillUnblockCheck_corres) apply (fastforce intro: valid_objs'_valid_refills' sc_at_cross is_active_sc'2_cross valid_sched_context_size_objsI simp: obj_at_def pred_tcb_at_def vs_all_heap_simps is_sc_obj_def opt_map_red opt_pred_def) apply (rule corres_split_skip; (solves wpsimp)?) - apply (corressimp corres: getReprogramTimer_corres) + apply (corresKsimp corres: getReprogramTimer_corres) apply (rule_tac Q="\\" and Q'="\\" and r'=dc in corres_underlying_split; (solves wpsimp)?) 
- apply (corressimp corres: commitTime_corres) + apply (corresKsimp corres: commitTime_corres) apply (fastforce intro!: valid_objs'_valid_refills' sc_at_cross simp: state_relation_def) - apply (corressimp corres: setCurSc_corres) + apply (corresKsimp corres: setCurSc_corres) apply (wpsimp wp: hoare_vcg_imp_lift' | wps)+ apply (fastforce intro: valid_sched_context_size_objsI active_sc_valid_refillsE simp: obj_at_def is_sc_obj_def) @@ -5114,26 +5110,26 @@ lemma schedule_corres: apply (wpsimp wp: awaken_valid_sched hoare_vcg_imp_lift') apply fastforce apply (wpsimp wp: awaken_invs') - apply (corressimp corres: awaken_corres) + apply (corresKsimp corres: awaken_corres) apply (fastforce intro: weak_sch_act_wf_at_cross simp: invs_def valid_state_def) apply (rule corres_split_skip) apply (wpsimp wp: hoare_vcg_imp_lift' cur_sc_active_lift) apply wpsimp - apply (corressimp corres: checkDomainTime_corres) + apply (corresKsimp corres: checkDomainTime_corres) apply (fastforce intro: weak_sch_act_wf_at_cross simp: invs_def valid_state_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurThread_sp]) - apply corressimp + apply corresKsimp apply (rule corres_underlying_split[rotated 2, OF is_schedulable_sp' isSchedulable_sp]) - apply (corressimp corres: isSchedulable_corres) + apply (corresKsimp corres: isSchedulable_corres) apply (fastforce intro: weak_sch_act_wf_at_cross simp: invs_def valid_state_def state_relation_def cur_tcb_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getSchedulerAction_sp]) - apply (corressimp corres: getSchedulerAction_corres) + apply (corresKsimp corres: getSchedulerAction_corres) apply (case_tac "action = resume_cur_thread"; clarsimp) - apply (corressimp corres: scAndTimer_corres) + apply (corresKsimp corres: scAndTimer_corres) subgoal by (fastforce intro: valid_sched_context_size_objsI dest: schact_is_rct_ct_active_sc simp: invs_def cur_sc_tcb_def sc_at_pred_n_def obj_at_def is_sc_obj_def @@ -5201,12 +5197,12 @@ lemma schedule_corres: apply (clarsimp simp: invs'_def isSchedulable_bool_def st_tcb_at'_def pred_map_simps obj_at_simps vs_all_heap_simps cur_tcb'_def elim!: opt_mapE) - apply (corressimp corres: tcbSchedEnqueue_corres) + apply (corresKsimp corres: tcbSchedEnqueue_corres) apply (fastforce dest: invs_cur simp: cur_tcb_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getIdleThread_sp]) - apply corressimp + apply corresKsimp apply (rule corres_underlying_split[rotated 2, OF thread_get_sp threadGet_sp, where r'="(=)"]) apply (rule corres_guard_imp) apply (rule threadGet_corres) @@ -5241,15 +5237,15 @@ lemma schedule_corres: apply (rule corres_underlying_split[rotated 2, OF schedule_switch_thread_fastfail_inv scheduleSwitchThreadFastfail_inv]) - apply (corressimp corres: scheduleSwitchThreadFastfail_corres) + apply (corresKsimp corres: scheduleSwitchThreadFastfail_corres) apply (fastforce dest: invs_cur simp: cur_tcb_def obj_at_def is_tcb_def state_relation_def cur_tcb'_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp curDomain_sp]) - apply (corressimp corres: curDomain_corres) + apply (corresKsimp corres: curDomain_corres) apply (clarsimp simp: isHighestPrio_def' split del: if_split) apply (rule corres_underlying_split[rotated 2, OF gets_sp gets_sp, where r'="(=)"]) - apply (corressimp corres: isHighestPrio_corres) + apply (corresKsimp corres: isHighestPrio_corres) apply (clarsimp simp: is_highest_prio_def) apply (subst bitmapL1_zero_ksReadyQueues) apply (fastforce dest: invs_queues simp: valid_queues_def) diff 
--git a/proof/refine/ARM/Syscall_R.thy b/proof/refine/ARM/Syscall_R.thy index 3e1e3f0a75..5884ef1301 100644 --- a/proof/refine/ARM/Syscall_R.thy +++ b/proof/refine/ARM/Syscall_R.thy @@ -339,7 +339,7 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: apply (simp add: threadSet_def) apply wp apply (wps set_tcb'.ksSchedulerAction) - apply (wp static_imp_wp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ apply (rename_tac word) apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" @@ -674,7 +674,7 @@ proof - apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) apply (rule PUC,simp) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (wp hoare_vcg_all_lift static_imp_wp | simp add:ball_conj_distrib)+ + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ done qed @@ -1429,7 +1429,7 @@ lemma hinv_invs'[wp]: apply (rule validE_valid) apply (intro hoare_vcg_seqE[OF _ stateAssertE_sp]) apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' - hoare_vcg_all_lift static_imp_wp) + hoare_vcg_all_lift hoare_weak_lift_imp) apply simp apply (intro conjI impI) apply (wp gts_imp' | simp)+ @@ -1728,7 +1728,6 @@ lemma hw_invs'[wp]: apply (rename_tac readright; case_tac readright; (wp getNotification_wp |simp)+) apply (clarsimp simp: obj_at_simps isNotificationCap_def) by (wpsimp simp: lookupReply_def getCapReg_def - wp: hoare_vcg_conj_liftE | wp (once) hoare_drop_imps)+ (clarsimp simp: obj_at_simps ct_in_state'_def pred_tcb_at'_def) @@ -2001,7 +2000,7 @@ lemma chargeBudget_corres: apply (fastforce intro: cur_sc_tcb_sc_at_cur_sc) apply add_cur_tcb' apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule corres_symb_exec_r[rotated, OF getIdleSC_sp]; wpsimp simp: getIdleSC_def) apply (rule_tac F="idle_sc_ptr = idleSCPtr" in corres_req) apply (clarsimp simp: state_relation_def) @@ -2009,7 +2008,7 @@ lemma chargeBudget_corres: and Q'="\_. invs' and cur_tcb'" in corres_underlying_split) apply (clarsimp simp: when_def split del: if_split) - apply (rule corres_if_split; (solves corressimp)?) + apply (rule corres_if_split; (solves corresKsimp)?) apply (rule corres_guard_imp) apply (rule corres_split_eqr[OF isRoundRobin_corres]) apply (rule corres_split[OF corres_if2], simp) diff --git a/proof/refine/ARM/TcbAcc_R.thy b/proof/refine/ARM/TcbAcc_R.thy index 21e8f8333f..3732700e63 100644 --- a/proof/refine/ARM/TcbAcc_R.thy +++ b/proof/refine/ARM/TcbAcc_R.thy @@ -11,7 +11,6 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) declare if_weak_cong [cong] -declare result_in_set_wp[wp] declare hoare_in_monad_post[wp] declare trans_state_update'[symmetric,simp] declare storeWordUser_typ_at' [wp] @@ -54,7 +53,7 @@ lemma isHighestPrio_def': "isHighestPrio d p = gets (\s. 
ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' apply (rule ext) - apply (clarsimp simp: gets_def bind_assoc return_def NonDetMonad.bind_def get_def + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def split: if_splits) done @@ -391,7 +390,7 @@ proof - assert_opt_def simpler_gets_def set_object_def get_object_def put_def get_def bind_def assert_def a_type_def[split_simps kernel_object.split arch_kernel_obj.split] dest!: get_tcb_SomeD) - apply (subgoal_tac "kheap s(t \ TCB tcb) = kheap s", simp) + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s", simp) apply (simp add: map_upd_triv get_tcb_SomeD) done show ?thesis @@ -2240,13 +2239,13 @@ lemma rescheduleRequired_corres_weak: apply (rule corres_underlying_split[OF _ _ gets_sp, rotated 2]) apply (clarsimp simp: getSchedulerAction_def) apply (rule gets_sp) - apply (corressimp corres: getSchedulerAction_corres) + apply (corresKsimp corres: getSchedulerAction_corres) apply (rule corres_underlying_split[where r'=dc, rotated]; (solves \wpsimp\)?) - apply (corressimp corres: setSchedulerAction_corres) + apply (corresKsimp corres: setSchedulerAction_corres) apply (case_tac action; clarsimp?) apply (rename_tac tp) apply (rule corres_underlying_split[OF _ _ is_schedulable_sp isSchedulable_inv, rotated 2]) - apply (corressimp corres: isSchedulable_corres) + apply (corresKsimp corres: isSchedulable_corres) apply (clarsimp simp: weaker_valid_sched_action_def obj_at_def vs_all_heap_simps is_tcb_def) apply (clarsimp simp: when_def) @@ -2289,7 +2288,7 @@ lemma rescheduleRequired_corres_weak: obj_at_def pred_tcb_at_def weaker_valid_sched_action_def) apply (clarsimp simp: no_fail_def return_def vs_all_heap_simps obj_at_def pred_tcb_at_def weaker_valid_sched_action_def) - apply (corressimp corres: tcbSchedEnqueue_corres + apply (corresKsimp corres: tcbSchedEnqueue_corres simp: obj_at_def is_tcb_def weak_sch_act_wf_def) done @@ -2995,9 +2994,9 @@ lemma threadSet_queued_sch_act_wf[wp]: split: scheduler_action.split) apply (wp hoare_vcg_conj_lift) apply (simp add: threadSet_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply wps - apply (wp static_imp_wp getObject_tcb_wp)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ apply (clarsimp simp: obj_at'_def) apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ apply (simp add: threadSet_def) @@ -4970,7 +4969,7 @@ lemma threadSet_tcbState_update_ct_not_inQ[wp]: apply (clarsimp) apply (rule hoare_conjI) apply (rule hoare_weaken_pre) - apply (wps, wp static_imp_wp) + apply (wps, wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb)+ apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4990,7 +4989,7 @@ lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: apply (rule hoare_conjI) apply (rule hoare_weaken_pre) apply wps - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb) apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) diff --git a/proof/refine/ARM/Tcb_R.thy b/proof/refine/ARM/Tcb_R.thy index ffeac48eb6..5ce32e125b 100644 --- a/proof/refine/ARM/Tcb_R.thy +++ b/proof/refine/ARM/Tcb_R.thy @@ -350,20 +350,20 @@ lemma invokeTCB_WriteRegisters_corres: frameRegisters_def gpRegisters_def getSanitiseRegisterInfo_def sanitiseRegister_def sanitise_register_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurThread_sp]) - apply (corressimp corres: getCurThread_corres) + apply (corresKsimp corres: 
getCurThread_corres) apply (rule corres_split_skip; (solves wpsimp)?) - apply (corressimp corres: asUser_corres + apply (corresKsimp corres: asUser_corres simp: zipWithM_mapM getRestartPC_def setNextPC_def wp: no_fail_mapM no_fail_setRegister) apply (rule corres_split_skip; (solves wpsimp)?) - apply (corressimp corres: asUser_postModifyRegisters_corres[simplified]) + apply (corresKsimp corres: asUser_postModifyRegisters_corres[simplified]) apply (rule_tac Q="\_. einvs" and Q'="\_. invs'" in corres_underlying_split[rotated 2]) apply (wpsimp wp: restart_valid_sched) using idle_no_ex_cap apply fastforce apply (wpsimp wp: restart_invs') using global'_no_ex_cap apply fastforce - apply (corressimp corres: restart_corres) - apply (corressimp corres: rescheduleRequired_corres) + apply (corresKsimp corres: restart_corres) + apply (corresKsimp corres: rescheduleRequired_corres) apply fastforce done @@ -463,7 +463,7 @@ proof - apply (simp add: frame_registers_def frameRegisters_def) apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) apply (rule Q[OF refl refl]) - apply ((wp mapM_x_wp' static_imp_wp | simp)+)[2] + apply ((wp mapM_x_wp' hoare_weak_lift_imp | simp)+)[2] apply (rule corres_split_nor) apply (rule corres_when[OF refl]) apply (rule R[OF refl refl]) @@ -473,22 +473,22 @@ proof - apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp - apply (solves \wp static_imp_wp\)+ + apply (solves \wp hoare_weak_lift_imp\)+ apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_strengthen_post[rotated]) apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def valid_pspace_def) prefer 2 apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_strengthen_post[rotated]) apply (clarsimp simp: invs'_def valid_pspace'_def) - apply ((wp mapM_x_wp' static_imp_wp | simp)+)[4] - apply ((wp static_imp_wp restart_invs' restart_valid_sched | wpc | clarsimp simp: if_apply_def2)+)[2] + apply ((wp mapM_x_wp' hoare_weak_lift_imp | simp)+)[4] + apply ((wp hoare_weak_lift_imp restart_invs' restart_valid_sched | wpc | clarsimp simp: if_apply_def2)+)[2] apply (rule_tac Q="\_. einvs and tcb_at dest and tcb_at src and ex_nonz_cap_to dest and simple_sched_action and current_time_bounded" in hoare_strengthen_post[rotated]) apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def valid_pspace_def valid_idle_def dest!: idle_no_ex_cap ) - apply (wp suspend_nonz_cap_to_tcb static_imp_wp suspend_invs suspend_cap_to' + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp suspend_invs suspend_cap_to' suspend_valid_sched | simp add: if_apply_def2)+ apply (fastforce simp: invs_def valid_state_def valid_pspace_def valid_idle_def @@ -1483,7 +1483,6 @@ lemma valid_tcb_ipc_buffer_update: \ (\tcb. valid_tcb' tcb s \ valid_tcb' (tcbIPCBuffer_update (\_. buf) tcb) s)" by (simp add: valid_tcb'_def tcb_cte_cases_def) - end consts @@ -2789,7 +2788,7 @@ lemma checkPrio_wp: checkPrio prio auth \ \rv. P \,-" apply (simp add: checkPrio_def) - apply (wp NonDetMonadVCG.whenE_throwError_wp getMCP_wp) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) by (auto simp add: pred_tcb_at'_def obj_at'_def) lemma checkPrio_lt_ct: diff --git a/proof/refine/ARM/Untyped_R.thy b/proof/refine/ARM/Untyped_R.thy index 20a46874de..4d2cf337d2 100644 --- a/proof/refine/ARM/Untyped_R.thy +++ b/proof/refine/ARM/Untyped_R.thy @@ -3203,7 +3203,7 @@ lemma createNewCaps_valid_cap': lemma dmo_ctes_of[wp]: "\\s. 
P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" - by (simp add: doMachineOp_def split_def | wp select_wp)+ + by (simp add: doMachineOp_def split_def | wp)+ lemma createNewCaps_ranges: "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 cteCap_update (\_. capability.UntypedCap d ptr sz idx) cte))" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" (is "mdb_inv_preserve (ctes_of s) ?ctes") show "untyped_inc' ?ctes" @@ -4035,12 +4035,13 @@ lemma idx_le_new_offs: end -crunch ksIdleThread[wp]: deleteObjects "\s. P (ksIdleThread s)" - (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) -crunch ksCurDomain[wp]: deleteObjects "\s. P (ksCurDomain s)" - (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) -crunch irq_node[wp]: deleteObjects "\s. P (irq_node' s)" - (simp: crunch_simps wp: hoare_drop_imps unless_wp ignore: freeMemory) +context begin interpretation Arch . (*FIXME: arch_split*) + +crunches deleteObjects + for ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and irq_node[wp]: "\s. P (irq_node' s)" + (simp: crunch_simps wp: hoare_drop_imps unless_wp) lemma deleteObjects_ksCurThread[wp]: "\\s. P (ksCurThread s)\ deleteObjects ptr sz \\_ s. P (ksCurThread s)\" @@ -4625,6 +4626,8 @@ lemma whenE_reset_resetUntypedCap_invs_etc: crunch ksCurDomain[wp]: updateFreeIndex "\s. P (ksCurDomain s)" +end + lemma (in range_cover) funky_aligned: "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" apply (rule aligned_add_aligned) diff --git a/proof/refine/ARM/VSpace_R.thy b/proof/refine/ARM/VSpace_R.thy index 7cbf046489..2df629db18 100644 --- a/proof/refine/ARM/VSpace_R.thy +++ b/proof/refine/ARM/VSpace_R.thy @@ -457,13 +457,11 @@ lemma getHWASID_corres: apply (simp add: get_hw_asid_def getHWASID_def) apply (rule corres_guard_imp) apply (rule corres_split_eqr[OF loadHWASID_corres[where pd=pd]]) - apply (case_tac maybe_hw_asid; simp) + apply (corres_cases; simp) apply (rule corres_split_eqr[OF findFreeHWASID_corres]) apply (rule corres_split[OF storeHWASID_corres[where pd=pd]]) - apply (rule corres_trivial, simp ) - apply (wpsimp wp: load_hw_asid_wp)+ - apply (simp add: pd_at_asid_uniq) - apply simp + apply (rule corres_trivial, simp) + apply (wpsimp wp: load_hw_asid_wp hoare_drop_imp simp: pd_at_asid_uniq)+ done lemma setCurrentPD_to_abs: @@ -497,9 +495,8 @@ lemma handleVMFault_corres: "corres (fr \ dc) (tcb_at thread) (tcb_at' thread) (handle_vm_fault thread fault) (handleVMFault thread fault)" apply (simp add: ARM_H.handleVMFault_def) - apply (cases fault) - apply simp - apply (rule corres_guard_imp) + apply corres_cases + apply simp apply (rule corres_splitEE) apply simp apply (rule corres_machine_op [where r="(=)"]) @@ -511,9 +508,7 @@ lemma handleVMFault_corres: apply (rule corres_Id, rule refl, simp) apply (rule no_fail_getDFSR) apply (rule corres_trivial, simp add: arch_fault_map_def) - apply wp+ - apply simp+ - apply (rule corres_guard_imp) + apply wpsimp+ apply (rule corres_splitEE) apply simp apply (rule asUser_corres') @@ -526,8 +521,7 @@ lemma handleVMFault_corres: apply (rule corres_Id, rule refl, simp) apply (rule no_fail_getIFSR) apply (rule corres_trivial, simp add: arch_fault_map_def) - apply wp+ - apply simp+ + apply wpsimp+ done lemma flushSpace_corres: @@ -544,18 +538,15 @@ lemma flushSpace_corres: apply (rule corres_guard_imp) apply (rule corres_split) apply (rule loadHWASID_corres[where pd=pd]) - apply (rule corres_split[where R="\_. 
\" and R'="\_. \"]) + apply (rule corres_split) apply (rule corres_machine_op [where r=dc]) apply (rule corres_Id, rule refl, simp) apply (rule no_fail_cleanCaches_PoU) - apply (case_tac maybe_hw_asid) - apply simp - apply clarsimp + apply (corres_cases; clarsimp) apply (rule corres_machine_op) apply (rule corres_Id, rule refl, simp) apply (rule no_fail_invalidateLocalTLB_ASID) - apply wp+ - apply clarsimp + apply wpsimp+ apply (simp add: pd_at_asid_uniq) apply simp done @@ -572,16 +563,13 @@ lemma invalidateTLBByASID_corres: (invalidate_tlb_by_asid asid) (invalidateTLBByASID asid)" apply (simp add: invalidate_tlb_by_asid_def invalidateTLBByASID_def) apply (rule corres_guard_imp) - apply (rule corres_split[where R="\_. \" and R'="\_. \"]) + apply (rule corres_split) apply (rule loadHWASID_corres[where pd=pd]) - apply (case_tac maybe_hw_asid) - apply simp - apply clarsimp + apply (corres_cases; clarsimp) apply (rule corres_machine_op) apply (rule corres_Id, rule refl, simp) apply (rule no_fail_invalidateLocalTLB_ASID) - apply wp+ - apply clarsimp + apply wpsimp+ apply (simp add: pd_at_asid_uniq) apply simp done @@ -834,8 +822,7 @@ lemma deleteASID_corres: apply (simp add: delete_asid_def deleteASID_def) apply (rule corres_guard_imp) apply (rule corres_split[OF corres_gets_asid]) - apply (case_tac "asid_table (asid_high_bits_of asid)", simp) - apply clarsimp + apply (corres_cases; clarsimp) apply (rule_tac P="\s. asid_high_bits_of asid \ dom (asidTable o ucast) \ asid_pool_at (the ((asidTable o ucast) (asid_high_bits_of asid))) s" and P'="pspace_aligned' and pspace_distinct'" and @@ -1067,10 +1054,10 @@ proof - and valid_arch_state and pspace_aligned and pspace_distinct) (pspace_aligned' and pspace_distinct' and no_0_obj') - (do arm_context_switch pd asid; + (do _ \ arm_context_switch pd asid; return True od) - (do armv_contextSwitch pd asid; + (do _ \ armv_contextSwitch pd asid; return True od)" apply (rule corres_guard_imp) @@ -1079,34 +1066,20 @@ proof - apply (wp | simp)+ done show ?thesis - apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) - apply (rule corres_guard_imp) - apply (rule corres_split[OF getCurThread_corres]) - apply (rule corres_split[where R="\_. vspace_at_asid asid pd and K (asid \ 0 \ asid \ mask asid_bits) - and valid_asid_map and valid_vs_lookup - and valid_vspace_objs and valid_global_objs - and unique_table_refs o caps_of_state - and valid_arch_state - and pspace_aligned and pspace_distinct" - and R'="\_. 
pspace_aligned' and pspace_distinct' and no_0_obj'"]) - apply (rule getSlotCap_corres) - apply (simp add: cte_map_def tcb_cnode_index_def tcbVTableSlot_def to_bl_1) - apply (case_tac "isArchObjectCap rv' \ - isPageDirectoryCap (capCap rv') \ - capPDMappedASID (capCap rv') \ None \ - capPDBasePtr (capCap rv') = pd") - apply (case_tac rv, simp_all add: isCap_simps)[1] - apply (rename_tac arch_cap) - apply (case_tac arch_cap, auto)[1] - apply (case_tac rv, simp_all add: isCap_simps[simplified] X[simplified])[1] - apply (rename_tac arch_cap) - apply (case_tac arch_cap, auto simp: X[simplified] split: option.splits)[1] - apply wp+ - apply (clarsimp simp: cur_tcb_def) - apply (erule tcb_at_cte_at) - apply (simp add: tcb_cap_cases_def) - apply clarsimp - done + apply (simp add: set_vm_root_for_flush_def setVMRootForFlush_def getThreadVSpaceRoot_def locateSlot_conv) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getCurThread_corres]) + apply (rule corres_split) + apply (rule getSlotCap_corres) + apply (simp add: cte_map_def tcb_cnode_index_def tcbVTableSlot_def to_bl_1) + apply (corres_cases; (simp add: isCap_simps, rule X)?)+ + apply (clarsimp simp: isCap_simps, rule X) + apply (wpsimp wp: hoare_drop_imp hoare_vcg_all_lift)+ + apply (clarsimp simp: cur_tcb_def) + apply (erule tcb_at_cte_at) + apply (simp add: tcb_cap_cases_def) + apply clarsimp + done qed crunches armv_contextSwitch, setVMRoot, setVMRootForFlush @@ -1372,13 +1345,11 @@ lemma pageTableMapped_corres: apply simp apply (simp add: liftE_bindE) apply (rule corres_split[OF getObject_PDE_corres']) - apply (rule corres_trivial) - apply (case_tac rv, - simp_all add: returnOk_def pde_relation_aligned_def - split:if_splits ARM_H.pde.splits)[1] - apply (wp | simp add: lookup_pd_slot_def Let_def)+ - apply (simp add: word_neq_0_conv) - apply simp + apply (corres_cases_both simp: pde_relation_aligned_def; + fastforce intro!: corres_returnOkTT + simp: pde_relation_aligned_def + split: if_split_asm) + apply (wpsimp simp: lookup_pd_slot_def word_neq_0_conv)+ done crunch inv[wp]: pageTableMapped "P" @@ -1643,17 +1614,11 @@ lemma doFlush_corres: "corres_underlying Id nf nf' dc \ \ (do_flush typ start end pstart) (doFlush (flush_type_map typ) start end pstart)" apply (simp add: do_flush_def doFlush_def) - apply (cases "typ", simp_all add: flush_type_map_def) - apply (rule corres_Id [where r=dc], rule refl, simp) - apply (wp no_fail_cleanCacheRange_RAM) - apply (rule corres_Id [where r=dc], rule refl, simp) - apply (wp no_fail_invalidateCacheRange_RAM) - apply (rule corres_Id [where r=dc], rule refl, simp) - apply (wp no_fail_cleanInvalidateCacheRange_RAM) - apply (rule corres_Id [where r=dc], rule refl, simp) - apply (rule no_fail_pre, wp add: no_fail_cleanCacheRange_PoU no_fail_invalidateCacheRange_I - no_fail_dsb no_fail_isb del: no_irq) - apply clarsimp + apply corres_pre + apply (corres_cases_both simp: flush_type_map_def; + (rule corres_Id[OF refl], simp, wpsimp wp: no_fail_dsb no_fail_isb)) + apply simp + apply simp done definition @@ -1669,25 +1634,22 @@ lemma performPageDirectoryInvocation_corres: and cur_tcb' and valid_arch_state') (perform_page_directory_invocation pdi) (performPageDirectoryInvocation pdi')" apply (simp add: perform_page_directory_invocation_def performPageDirectoryInvocation_def) - apply (cases pdi) - apply (clarsimp simp: page_directory_invocation_map_def) - apply (rule corres_guard_imp) - apply (rule corres_when, simp) - apply (rule corres_split[OF setVMRootForFlush_corres]) - apply (rule corres_split[OF 
corres_machine_op]) - apply (rule doFlush_corres) - apply (rule corres_when, simp) - apply (rule corres_split[OF getCurThread_corres]) - apply clarsimp - apply (rule setVMRoot_corres) - apply wp+ - apply (simp add: cur_tcb_def[symmetric]) - apply (wp hoare_drop_imps) - apply (simp add: cur_tcb'_def[symmetric]) - apply (wp hoare_drop_imps)+ - apply clarsimp - apply (auto simp: valid_pdi_def invs_vspace_objs[simplified])[2] - apply (clarsimp simp: page_directory_invocation_map_def) + apply (corres_cases_both simp: page_directory_invocation_map_def) + apply (clarsimp simp: page_directory_invocation_map_def) + apply (rule corres_when, simp) + apply (rule corres_split[OF setVMRootForFlush_corres]) + apply (rule corres_split[OF corres_machine_op]) + apply (rule doFlush_corres) + apply (rule corres_when, simp) + apply (rule corres_split[OF getCurThread_corres]) + apply clarsimp + apply (rule setVMRoot_corres) + apply wp+ + apply (simp flip: cur_tcb_def) + apply (wp hoare_drop_imps) + apply (simp flip: cur_tcb'_def) + apply (wp hoare_drop_imps)+ + apply (auto simp: valid_pdi_def invs_vspace_objs[simplified] page_directory_invocation_map_def) done definition @@ -2001,7 +1963,7 @@ lemma duplicate_address_set_simp: lemma valid_duplicates'_non_pd_pt_I: "\koTypeOf ko \ ArchT PDET; koTypeOf ko \ ArchT PTET; vs_valid_duplicates' (ksPSpace s) ; ksPSpace s p = Some ko; koTypeOf ko = koTypeOf m\ - \ vs_valid_duplicates' (ksPSpace s(p \ m))" + \ vs_valid_duplicates' ((ksPSpace s)(p \ m))" apply (subst vs_valid_duplicates'_def) apply (intro allI impI) apply (clarsimp split:if_splits simp:duplicate_address_set_simp option.splits) diff --git a/proof/refine/ARM/orphanage/Orphanage.thy b/proof/refine/ARM/orphanage/Orphanage.thy index cc44e43ff2..5bcc600448 100644 --- a/proof/refine/ARM/orphanage/Orphanage.thy +++ b/proof/refine/ARM/orphanage/Orphanage.thy @@ -458,7 +458,7 @@ lemma rescheduleRequired_no_orphans [wp]: \ \rv s. no_orphans s \" unfolding rescheduleRequired_def apply (wp tcbSchedEnqueue_no_orphans hoare_vcg_all_lift ssa_no_orphans | wpc | clarsimp)+ - apply (wps tcbSchedEnqueue_nosch, wp static_imp_wp) + apply (wps tcbSchedEnqueue_nosch, wp hoare_weak_lift_imp) apply (rename_tac word t p) apply (rule_tac P="word = t" in hoare_gen_asm) apply (wp hoare_disjI1 | clarsimp)+ @@ -470,7 +470,7 @@ lemma rescheduleRequired_almost_no_orphans [wp]: \ \rv s. almost_no_orphans tcb_ptr s \" unfolding rescheduleRequired_def apply (wp tcbSchedEnqueue_almost_no_orphans_lift hoare_vcg_all_lift | wpc | clarsimp)+ - apply (wps tcbSchedEnqueue_nosch, wp static_imp_wp) + apply (wps tcbSchedEnqueue_nosch, wp hoare_weak_lift_imp) apply (rename_tac word t p) apply (rule_tac P="word = t" in hoare_gen_asm) apply (wp hoare_disjI1 | clarsimp)+ @@ -1048,7 +1048,7 @@ proof - apply (rule_tac Q="\_ s. (t = candidate \ ksCurThread s = candidate) \ (t \ candidate \ sch_act_not t s)" in hoare_post_imp) - apply (wpsimp wp: stt_nosch static_imp_wp)+ + apply (wpsimp wp: stt_nosch hoare_weak_lift_imp)+ apply (fastforce dest!: in_all_active_tcb_ptrsD simp: all_queued_tcb_ptrs_def comp_def) done @@ -1178,7 +1178,7 @@ lemma possibleSwitchTo_almost_no_orphans [wp]: \ \rv s. 
no_orphans s \" unfolding possibleSwitchTo_def by (wp rescheduleRequired_valid_queues'_weak tcbSchedEnqueue_almost_no_orphans - ssa_almost_no_orphans static_imp_wp + ssa_almost_no_orphans hoare_weak_lift_imp | wpc | clarsimp | wp (once) hoare_drop_imp)+ @@ -1953,7 +1953,7 @@ lemma writereg_no_orphans: unfolding invokeTCB_def performTransfer_def postModifyRegisters_def apply simp apply (rule hoare_pre) - by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' static_imp_wp + by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' hoare_weak_lift_imp | strengthen invs_valid_queues' | clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap )+ @@ -1965,10 +1965,10 @@ lemma copyreg_no_orphans: unfolding invokeTCB_def performTransfer_def postModifyRegisters_def supply if_weak_cong[cong] apply simp - apply (wp hoare_vcg_if_lift static_imp_wp) + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) apply (wp hoare_vcg_imp_lift' mapM_x_wp' asUser_no_orphans | wpc | clarsimp split del: if_splits)+ - apply (wp static_imp_wp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' + apply (wp hoare_weak_lift_imp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' restart_no_orphans asUser_no_orphans suspend_nonz_cap_to_tcb | strengthen invs_valid_queues' | wpc | simp add: if_apply_def2)+ apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) @@ -1980,7 +1980,7 @@ lemma settlsbase_no_orphans: \ \rv s. no_orphans s \" unfolding invokeTCB_def performTransfer_def apply simp - apply (wp hoare_vcg_if_lift static_imp_wp) + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) apply (wpsimp wp: hoare_vcg_imp_lift' mapM_x_wp' asUser_no_orphans)+ done @@ -2046,19 +2046,19 @@ lemma tc_no_orphans: apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial - threadSet_cap_to' hoare_vcg_all_lift static_imp_wp | clarsimp simp: inQ_def)+)[2] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (cases mcp; clarsimp simp: pred_conj_def option.splits[where P="\x. 
x s" for s]) apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' hoare_vcg_all_lift static_imp_wp | clarsimp simp: inQ_def)+)[3] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[3] apply ((simp only: simp_thms cong: conj_cong | wp cteDelete_deletes cteDelete_invs' cteDelete_sch_act_simple case_option_wp[where m'="return ()", OF setPriority_no_orphans return_inv,simplified] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] checkCap_inv[where P=no_orphans] checkCap_inv[where P="tcb_at' a"] threadSet_cte_wp_at' hoare_vcg_all_lift_R hoare_vcg_all_lift threadSet_no_orphans - hoare_vcg_const_imp_lift_R static_imp_wp hoare_drop_imp threadSet_ipcbuffer_invs + hoare_vcg_const_imp_lift_R hoare_weak_lift_imp hoare_drop_imp threadSet_ipcbuffer_invs | strengthen invs_valid_queues' | (simp add: locateSlotTCB_def locateSlotBasic_def objBits_def objBitsKO_def tcbIPCBufferSlot_def tcb_cte_cases_def, @@ -2136,7 +2136,7 @@ lemma performPageInvocation_no_orphans [wp]: apply (simp add: performPageInvocation_def cong: page_invocation.case_cong) apply (rule hoare_pre) - apply (wp mapM_x_wp' mapM_wp' static_imp_wp | wpc | clarsimp simp: pdeCheckIfMapped_def pteCheckIfMapped_def)+ + apply (wp mapM_x_wp' mapM_wp' hoare_weak_lift_imp | wpc | clarsimp simp: pdeCheckIfMapped_def pteCheckIfMapped_def)+ done lemma performASIDControlInvocation_no_orphans [wp]: @@ -2189,13 +2189,13 @@ lemma performASIDControlInvocation_no_orphans [wp]: \\reply. no_orphans\" apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) - apply (wp static_imp_wp | clarsimp)+ + apply (wp hoare_weak_lift_imp | clarsimp)+ apply (rule_tac Q="\rv s. no_orphans s" in hoare_post_imp) apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def is_active_tcb_ptr_def all_queued_tcb_ptrs_def) apply (wp | clarsimp simp:placeNewObject_def2)+ apply (wp createObjects'_wp_subst)+ - apply (wp static_imp_wp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ + apply (wp hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ apply (strengthen invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace') apply (clarsimp simp:conj_comms) apply (wp deleteObjects_invs'[where idx = idx and d=False] diff --git a/proof/refine/ARM_HYP/ArchAcc_R.thy b/proof/refine/ARM_HYP/ArchAcc_R.thy index 196e03386f..81edd38ee9 100644 --- a/proof/refine/ARM_HYP/ArchAcc_R.thy +++ b/proof/refine/ARM_HYP/ArchAcc_R.thy @@ -230,7 +230,7 @@ lemma setObject_ASIDPool_corres [corres]: corres dc (asid_pool_at p and valid_etcbs) (asid_pool_at' p) (set_asid_pool p a) (setObject p a')" apply (simp add: set_asid_pool_def) - apply (corressimp search: setObject_other_corres[where P="\_. True"] + apply (corresKsimp search: setObject_other_corres[where P="\_. 
True"] wp: get_object_ret get_object_wp) apply (simp add: other_obj_relation_def asid_pool_relation_def) apply (clarsimp simp: obj_at_simps ) @@ -1216,7 +1216,7 @@ lemma lookupPTSlot_corres [corres]: (pspace_aligned' and pspace_distinct') (lookup_pt_slot pd vptr) (lookupPTSlot pd vptr)" unfolding lookup_pt_slot_def lookupPTSlot_def lookupPTSlotFromPT_def - apply (corressimp simp: pde_relation_aligned_def lookup_failure_map_def + apply (corresKsimp simp: pde_relation_aligned_def lookup_failure_map_def wp: get_pde_wp_valid getPDE_wp) by (auto simp: lookup_failure_map_def obj_at_def) @@ -1321,7 +1321,7 @@ lemma createMappingEntries_corres [corres]: (create_mapping_entries base vptr pgsz vm_rights attrib pd) (createMappingEntries base vptr pgsz vm_rights' attrib' pd)" unfolding createMappingEntries_def mapping_map_def - by (cases pgsz; corressimp simp: vmattributes_map_def) + by (cases pgsz; corresKsimp simp: vmattributes_map_def) lemma pte_relation'_Invalid_inv [simp]: "pte_relation' x ARM_HYP_H.pte.InvalidPTE = (x = ARM_A.pte.InvalidPTE)" @@ -1354,7 +1354,7 @@ lemma createMappingEntries_valid_slots' [wp]: apply (auto elim: is_aligned_weaken) done -lemmas [corresc_simp] = master_pte_relation_def master_pde_relation_def +lemmas [corresKc_simp] = master_pte_relation_def master_pde_relation_def lemma ensureSafeMapping_corres [corres]: "mapping_map m m' \ @@ -1365,7 +1365,7 @@ lemma ensureSafeMapping_corres [corres]: unfolding mapping_map_def ensureSafeMapping_def apply (cases m; cases m'; simp; match premises in "(_ \ (=)) p p'" for p p' \ \cases "fst p"; cases "fst p'"\; clarsimp) - by (corressimp corresK: mapME_x_corresK_inv + by (corresKsimp corresK: mapME_x_corresK_inv wp: get_master_pte_wp get_master_pde_wp getPTE_wp getPDE_wp; auto simp add: valid_mapping_entries_def)+ @@ -1397,7 +1397,7 @@ lemma find_pd_for_asid_corres [@lift_corres_args, corres]: (pspace_aligned' and pspace_distinct' and no_0_obj') (find_pd_for_asid asid) (findPDForASID asid)" apply (simp add: find_pd_for_asid_def findPDForASID_def liftME_def bindE_assoc) - apply (corressimp simp: liftE_bindE assertE_assert mask_asid_low_bits_ucast_ucast lookup_failure_map_def + apply (corresKsimp simp: liftE_bindE assertE_assert mask_asid_low_bits_ucast_ucast lookup_failure_map_def wp: getPDE_wp getASID_wp search: checkPDAt_corres corres_gets_asid) subgoal premises prems for s s' diff --git a/proof/refine/ARM_HYP/Arch_R.thy b/proof/refine/ARM_HYP/Arch_R.thy index 13240fb357..342ea2cffb 100644 --- a/proof/refine/ARM_HYP/Arch_R.thy +++ b/proof/refine/ARM_HYP/Arch_R.thy @@ -843,14 +843,14 @@ lemma decodeARMVCPUInvocation_corres: apply (frule list_all2_Cons) apply clarsimp apply (case_tac a; clarsimp simp add: cap_relation_def) - apply (corres corres: corres_returnOkTT) + apply (corresK corres: corres_returnOkTT) apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def) (* inject_irq *) apply (simp add: decode_vcpu_inject_irq_def decodeVCPUInjectIRQ_def isVCPUCap_def) apply (cases args; clarsimp) apply (case_tac list; clarsimp simp add: rangeCheck_def range_check_def unlessE_whenE) apply (clarsimp simp: shiftL_nat whenE_bindE_throwError_to_if) - apply (corressimp wp: get_vcpu_wp) + apply (corresKsimp wp: get_vcpu_wp) apply (clarsimp simp: archinv_relation_def vcpu_invocation_map_def ucast_id valid_cap'_def valid_cap_def make_virq_def makeVIRQ_def split:if_split) @@ -1251,7 +1251,7 @@ lemma invokeVCPUInjectIRQ_corres: (invokeVCPUInjectIRQ v index virq)" unfolding invokeVCPUInjectIRQ_def invoke_vcpu_inject_irq_def apply (clarsimp 
simp: bind_assoc) - apply (corressimp corres: getObject_vcpu_corres setObject_VCPU_corres wp: get_vcpu_wp) + apply (corresKsimp corres: getObject_vcpu_corres setObject_VCPU_corres wp: get_vcpu_wp) apply clarsimp done @@ -1264,7 +1264,7 @@ lemma invokeVCPUReadReg_corres: (invokeVCPUReadReg v r)" unfolding invoke_vcpu_read_register_def invokeVCPUReadReg_def read_vcpu_register_def readVCPUReg_def apply (rule corres_discard_r) - apply (corressimp corres: getObject_vcpu_corres wp: get_vcpu_wp) + apply (corresKsimp corres: getObject_vcpu_corres wp: get_vcpu_wp) apply (clarsimp simp: vcpu_relation_def split: option.splits) apply (wpsimp simp: getCurThread_def)+ done @@ -1279,7 +1279,7 @@ lemma invokeVCPUWriteReg_corres: unfolding invokeVCPUWriteReg_def invoke_vcpu_write_register_def write_vcpu_register_def writeVCPUReg_def apply (rule corres_discard_r) - apply (corressimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) + apply (corresKsimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) subgoal by (auto simp: vcpu_relation_def split: option.splits) apply (wpsimp simp: getCurThread_def)+ done @@ -1314,7 +1314,7 @@ lemma associateVCPUTCB_corres: (associateVCPUTCB v t)" unfolding associate_vcpu_tcb_def associateVCPUTCB_def apply (clarsimp simp: bind_assoc) - apply (corressimp search: getObject_vcpu_corres setObject_VCPU_corres vcpuSwitch_corres'' + apply (corresKsimp search: getObject_vcpu_corres setObject_VCPU_corres vcpuSwitch_corres'' wp: get_vcpu_wp getVCPU_wp hoare_vcg_imp_lift' simp: vcpu_relation_def) apply (rule_tac Q="\_. invs and tcb_at t" in hoare_strengthen_post) @@ -1335,7 +1335,7 @@ lemma associateVCPUTCB_corres: apply (simp add: valid_vcpu'_def typ_at_tcb') apply (clarsimp simp: typ_at_to_obj_at_arches obj_at'_def) apply (fastforce simp: typ_at_to_obj_at_arches obj_at'_def) - apply (corressimp wp: arch_thread_get_wp getObject_tcb_wp + apply (corresKsimp wp: arch_thread_get_wp getObject_tcb_wp simp: archThreadGet_def)+ apply (simp add: vcpu_relation_def) apply (intro allI conjI impI; @@ -1361,7 +1361,7 @@ lemma invokeVCPUAckVPPI_corres: (invokeVCPUAckVPPI vcpu vppi)" unfolding invokeVCPUAckVPPI_def invoke_vcpu_ack_vppi_def write_vcpu_register_def writeVCPUReg_def - by (corressimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) + by (corresKsimp corres: setObject_VCPU_corres getObject_vcpu_corres wp: get_vcpu_wp) (auto simp: vcpu_relation_def split: option.splits) lemma performARMVCPUInvocation_corres: @@ -1413,13 +1413,13 @@ lemma performASIDControlInvocation_tcb_at': apply (rule hoare_name_pre_state) apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) - apply (wp static_imp_wp |simp add:placeNewObject_def2)+ - apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: projectKO_opts_defs) apply (strengthen st_tcb_strg' [where P=\]) apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] - deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where 
d=False])+ apply (case_tac ctea) apply (clarsimp) @@ -2038,7 +2038,7 @@ lemma performASIDControlInvocation_st_tcb_at': hoare_vcg_ex_lift deleteObjects_cte_wp_at' deleteObjects_invs_derivatives deleteObjects_st_tcb_at' - static_imp_wp + hoare_weak_lift_imp | simp add: placeNewObject_def2)+ apply (case_tac ctea) apply (clarsimp) @@ -2093,7 +2093,7 @@ crunch cte_wp_at': "Arch.finaliseCap" "cte_wp_at' P p" lemma invs_asid_table_strengthen': "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ invs' (s\ksArchState := - armKSASIDTable_update (\_. (armKSASIDTable \ ksArchState) s(asid \ ap)) (ksArchState s)\)" + armKSASIDTable_update (\_. ((armKSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" apply (clarsimp simp: invs'_def valid_state'_def) apply (rule conjI) apply (clarsimp simp: valid_global_refs'_def global_refs'_def) @@ -2168,7 +2168,7 @@ lemma performASIDControlInvocation_invs' [wp]: updateFreeIndex_caps_no_overlap'' updateFreeIndex_descendants_of2 updateFreeIndex_caps_overlap_reserved - updateCap_cte_wp_at_cases static_imp_wp + updateCap_cte_wp_at_cases hoare_weak_lift_imp getSlotCap_wp)+ apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask | strengthen invs_valid_pspace' invs_pspace_aligned' diff --git a/proof/refine/ARM_HYP/Bits_R.thy b/proof/refine/ARM_HYP/Bits_R.thy index 6ffa1f1908..942850eb3a 100644 --- a/proof/refine/ARM_HYP/Bits_R.thy +++ b/proof/refine/ARM_HYP/Bits_R.thy @@ -482,7 +482,7 @@ lemma constOnFailure_wp : apply (wp|simp)+ done -lemma corres_throwError_str [corres_concrete_rER]: +lemma corres_throwError_str [corresK_concrete_rER]: "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throw b)" "corres_underlyingK sr nf nf' (r (Inl a) (Inl b)) r \ \ (throwError a) (throwError b)" by (simp add: corres_underlyingK_def)+ diff --git a/proof/refine/ARM_HYP/CNodeInv_R.thy b/proof/refine/ARM_HYP/CNodeInv_R.thy index d2a6e3ffca..efa6b06484 100644 --- a/proof/refine/ARM_HYP/CNodeInv_R.thy +++ b/proof/refine/ARM_HYP/CNodeInv_R.thy @@ -4880,7 +4880,7 @@ lemma cteSwap_iflive'[wp]: simp only: if_live_then_nonz_cap'_def imp_conv_disj ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - hoare_vcg_ex_lift updateCap_cte_wp_at_cases static_imp_wp)+ + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -5780,7 +5780,7 @@ lemma cteSwap_cte_wp_cteCap: apply simp apply (wp hoare_drop_imps)[1] apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - getCTE_wp' hoare_vcg_all_lift static_imp_wp)+ + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ apply simp apply (clarsimp simp: o_def) done @@ -5794,7 +5794,7 @@ lemma capSwap_cte_wp_cteCap: apply(simp add: capSwapForDelete_def) apply(wp) apply(rule cteSwap_cte_wp_cteCap) - apply(wp getCTE_wp getCTE_cte_wp_at static_imp_wp)+ + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ apply(clarsimp) apply(rule conjI) apply(simp add: cte_at_cte_wp_atD) @@ -6297,7 +6297,7 @@ proof (induct arbitrary: P p rule: finalise_spec_induct2) apply clarsimp apply (case_tac "cteCap rv", simp_all add: isCap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp | simp | wp (once) isFinal[where x=sl])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ apply (wp getCTE_wp') apply (clarsimp simp: cte_wp_at_ctes_of disj_ac) apply (rule conjI, clarsimp simp: 
removeable'_def) @@ -7094,14 +7094,14 @@ next apply simp apply ((wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at set_cap_cte_cap_wp_to - hoare_vcg_const_Ball_lift static_imp_wp + hoare_vcg_const_Ball_lift hoare_weak_lift_imp | simp add: conj_comms | erule finalise_cap_not_reply_master [simplified])+) apply (elim conjE, strengthen exI[mk_strg I], strengthen asm_rl[where psi="(cap_relation cap cap')" for cap cap', mk_strg I E]) apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - hoare_vcg_ex_lift static_imp_wp) + hoare_vcg_ex_lift hoare_weak_lift_imp) apply clarsimp apply (drule_tac cap=a in cap_relation_removables, clarsimp, assumption+) @@ -7143,7 +7143,7 @@ next apply (clarsimp dest!: isCapDs simp: cte_wp_at_ctes_of) apply (case_tac "cteCap rv'", auto simp add: isCap_simps is_cap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp add: is_final_cap_def conj_comms cte_wp_at_eq_simp)+ apply (rule isFinal[where x="cte_map slot"]) apply (wp get_cap_wp| simp add: conj_comms)+ @@ -7284,7 +7284,7 @@ next apply (rule updateCap_corres) apply simp apply (simp add: is_cap_simps) - apply (rule_tac Q="\rv. cte_at' (cte_map ?target)" in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (cte_map ?target)" in hoare_post_add) apply (wp, (wp getCTE_wp)+) apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule no_fail_pre, wp, simp) @@ -8437,7 +8437,7 @@ lemma cteMove_iflive'[wp]: ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_ex_lift updateCap_cte_wp_at_cases - getCTE_wp static_imp_wp)+ + getCTE_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -8615,7 +8615,7 @@ lemma cteMove_cte_wp_at: \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" unfolding cteMove_def apply (fold o_def) - apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp static_imp_wp|simp add: o_def)+ + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) done diff --git a/proof/refine/ARM_HYP/CSpace1_R.thy b/proof/refine/ARM_HYP/CSpace1_R.thy index ed35a66091..d6e54b253a 100644 --- a/proof/refine/ARM_HYP/CSpace1_R.thy +++ b/proof/refine/ARM_HYP/CSpace1_R.thy @@ -325,7 +325,7 @@ lemma getSlotCap_corres: (getSlotCap cte_ptr')" apply (simp add: getSlotCap_def) apply (subst bind_return [symmetric]) - apply (corressimp) + apply (corresKsimp) done lemma maskCapRights [simp]: @@ -607,7 +607,7 @@ proof (induct a arbitrary: c' cref' bits rule: resolve_address_bits'.induct) apply (simp add: Let_def unlessE_whenE) apply (simp add: caps isCap_defs Let_def whenE_bindE_throwError_to_if) apply (subst cnode_cap_case_if) - apply (corressimp search: getSlotCap_corres IH + apply (corresKsimp search: getSlotCap_corres IH wp: get_cap_wp getSlotCap_valid no_fail_stateAssert simp: locateSlot_conv) apply (simp add: drop_postfix_eq) @@ -823,7 +823,7 @@ lemma tcbVTable_upd_simp [simp]: by (cases tcb) simp lemma setCTE_ctes_of_wp [wp]: - "\\s. P (ctes_of s (p \ cte))\ + "\\s. P ((ctes_of s) (p \ cte))\ setCTE p cte \\rv s. P (ctes_of s)\" by (simp add: setCTE_def ctes_of_setObject_cte) @@ -928,7 +928,7 @@ lemma cteInsert_weak_cte_wp_at: \\uu. cte_wp_at'(\c. 
P (cteCap c)) p\" unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def apply (simp add: bind_assoc split del: if_split) - apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at static_imp_wp | simp)+ + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ apply (wp getCTE_ctes_wp)+ apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ done diff --git a/proof/refine/ARM_HYP/CSpace_R.thy b/proof/refine/ARM_HYP/CSpace_R.thy index 356b58ea29..e56496a8d4 100644 --- a/proof/refine/ARM_HYP/CSpace_R.thy +++ b/proof/refine/ARM_HYP/CSpace_R.thy @@ -2227,7 +2227,7 @@ proof - let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" let ?C = "(modify_map (modify_map - (modify_map (ctes_of s(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest (cteMDBNode_update (\a. MDB word1 src (revokable' src_cap cap) (revokable' src_cap cap)))) src (cteMDBNode_update (mdbNext_update (\_. dest)))) word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" diff --git a/proof/refine/ARM_HYP/Detype_R.thy b/proof/refine/ARM_HYP/Detype_R.thy index 38fda2ce70..8d59c99823 100644 --- a/proof/refine/ARM_HYP/Detype_R.thy +++ b/proof/refine/ARM_HYP/Detype_R.thy @@ -126,7 +126,7 @@ lemma deleteObjects_def2: then None else gsCNodes s x \); stateAssert ksASIDMapSafe [] od" - apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def) + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def) apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (simp add: bind_assoc[symmetric]) @@ -2044,13 +2044,13 @@ lemma cte_wp_at_top: apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply (clarsimp simp:cte_check_def) apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply (simp add:typeError_def fail_def cte_check_def split:Structures_H.kernel_object.splits)+ @@ -2740,7 +2740,7 @@ lemma storePDE_det: "ko_wp_at' ((=) (KOArch (KOPDE pde))) ptr s \ storePDE ptr (new_pde::ARM_HYP_H.pde) s = modify - (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPDE new_pde)))) s" + (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPDE new_pde)))) s" apply (clarsimp simp:ko_wp_at'_def storePDE_def split_def bind_def gets_def return_def wordsFromPDE_def get_def setObject_def headM_def tailM_def @@ -2988,7 +2988,7 @@ lemma cte_wp_at_modify_pde: atLeastAtMost_iff shows "\ksPSpace s ptr' = Some (KOArch (KOPDE pde)); pspace_aligned' s;cte_wp_at' \ ptr s\ - \ cte_wp_at' \ ptr (s\ksPSpace := ksPSpace s(ptr' \ (KOArch (KOPDE pde')))\)" + \ cte_wp_at' \ ptr (s\ksPSpace := (ksPSpace s)(ptr' \ (KOArch (KOPDE pde')))\)" apply (simp add:cte_wp_at_obj_cases_mask obj_at'_real_def) apply (frule(1) pspace_alignedD') apply (elim disjE) diff --git a/proof/refine/ARM_HYP/Finalise_R.thy b/proof/refine/ARM_HYP/Finalise_R.thy index 15b0add5ca..51064db0a5 100644 --- a/proof/refine/ARM_HYP/Finalise_R.thy +++ b/proof/refine/ARM_HYP/Finalise_R.thy @@ -1283,7 +1283,7 @@ crunch gsMaxObjectSize[wp]: emptySlot "\s. P (gsMaxObjectSize s)" end lemma emptySlot_cteCaps_of: - "\\s. 
P (cteCaps_of s(p \ NullCap))\ + "\\s. P ((cteCaps_of s)(p \ NullCap))\ emptySlot p opt \\rv s. P (cteCaps_of s)\" apply (simp add: emptySlot_def case_Null_If) @@ -1473,13 +1473,13 @@ lemma deletedIRQHandler_corres: lemma arch_postCapDeletion_corres: "acap_relation cap cap' \ corres dc \ \ (arch_post_cap_deletion cap) (ARM_HYP_H.postCapDeletion cap')" - by (corressimp simp: arch_post_cap_deletion_def ARM_HYP_H.postCapDeletion_def) + by (corresKsimp simp: arch_post_cap_deletion_def ARM_HYP_H.postCapDeletion_def) lemma postCapDeletion_corres: "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) - apply (corressimp corres: deletedIRQHandler_corres) - by (corressimp corres: arch_postCapDeletion_corres) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) lemma set_cap_trans_state: "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" @@ -1539,7 +1539,7 @@ lemma emptySlot_corres: defer apply wpsimp+ apply (rule corres_no_failI) - apply (rule no_fail_pre, wp static_imp_wp) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (rule conjI, clarsimp) @@ -2769,10 +2769,7 @@ lemma prepares_delete_helper'': apply (clarsimp simp: removeable'_def) done -lemma ctes_of_cteCaps_of_lift: - "\ \P. \\s. P (ctes_of s)\ f \\rv s. P (ctes_of s)\ \ - \ \\s. P (cteCaps_of s)\ f \\rv s. P (cteCaps_of s)\" - by (wp | simp add: cteCaps_of_def)+ +lemmas ctes_of_cteCaps_of_lift = cteCaps_of_ctes_of_lift crunches finaliseCapTrue_standin, unbindNotification for ctes_of[wp]: "\s. P (ctes_of s)" @@ -2780,7 +2777,7 @@ crunches finaliseCapTrue_standin, unbindNotification lemma cteDeleteOne_cteCaps_of: "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap)))\ + P ((cteCaps_of s)(p \ NullCap)))\ cteDeleteOne p \\rv s. P (cteCaps_of s)\" apply (simp add: cteDeleteOne_def unless_def split_def) @@ -3225,7 +3222,7 @@ crunch ctes_of[wp]: cancelSignal "\s. P (ctes_of s)" lemma cancelIPC_cteCaps_of: "\\s. (\p. cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap))) \ + P ((cteCaps_of s)(p \ NullCap))) \ P (cteCaps_of s)\ cancelIPC t \\rv s. 
P (cteCaps_of s)\" @@ -3667,7 +3664,7 @@ lemma cteDeleteOne_invs[wp]: subgoal by auto subgoal by (auto dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def projectKOs live'_def hyp_live'_def ko_wp_at'_def) - apply (wp isFinalCapability_inv getCTE_wp' static_imp_wp + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp | wp (once) isFinal[where x=ptr])+ apply (fastforce simp: cte_wp_at_ctes_of) done @@ -3844,7 +3841,7 @@ lemma sym_refs_vcpu_tcb: lemma vcpuFinalise_corres [corres]: "corres dc (invs and vcpu_at vcpu) (invs' and vcpu_at' vcpu) (vcpu_finalise vcpu) (vcpuFinalise vcpu)" unfolding vcpuFinalise_def vcpu_finalise_def - apply (corressimp corres: getObject_vcpu_corres simp: vcpu_relation_def) + apply (corresKsimp corres: getObject_vcpu_corres simp: vcpu_relation_def) apply (wpsimp wp: get_vcpu_wp getVCPU_wp)+ apply (rule conjI) apply clarsimp @@ -3888,7 +3885,7 @@ lemma arch_finaliseCap_corres: elim!: is_aligned_weaken invs_valid_asid_map)[2] apply (rule corres_guard_imp, rule deleteASID_corres) apply (auto elim!: invs_valid_asid_map simp: mask_def valid_cap_def)[2] - apply corres + apply corresK apply (clarsimp simp: valid_cap_def valid_cap'_def) done @@ -4217,7 +4214,7 @@ definition set_thread_all :: "obj_ref \ Structures_A.tcb \ unit det_ext_monad" where "set_thread_all ptr tcb etcb \ do s \ get; - kh \ return $ kheap s(ptr \ (TCB tcb)); + kh \ return $ (kheap s)(ptr \ (TCB tcb)); ekh \ return $ (ekheap s)(ptr \ etcb); put (s\kheap := kh, ekheap := ekh\) od" diff --git a/proof/refine/ARM_HYP/InterruptAcc_R.thy b/proof/refine/ARM_HYP/InterruptAcc_R.thy index a0708471c1..174dad753f 100644 --- a/proof/refine/ARM_HYP/InterruptAcc_R.thy +++ b/proof/refine/ARM_HYP/InterruptAcc_R.thy @@ -119,7 +119,7 @@ lemma preemptionPoint_inv: shows "\P\ preemptionPoint \\_. P\" using assms apply (simp add: preemptionPoint_def setWorkUnits_def getWorkUnits_def modifyWorkUnits_def) apply (wpc - | wp whenE_wp hoare_seq_ext [OF _ select_inv] alternative_wp hoare_drop_imps + | wp whenE_wp hoare_seq_ext [OF _ select_inv] hoare_drop_imps | simp)+ done diff --git a/proof/refine/ARM_HYP/Interrupt_R.thy b/proof/refine/ARM_HYP/Interrupt_R.thy index 7a93ec1cca..b2007e80ef 100644 --- a/proof/refine/ARM_HYP/Interrupt_R.thy +++ b/proof/refine/ARM_HYP/Interrupt_R.thy @@ -683,7 +683,7 @@ lemma timerTick_corres: apply (simp add:decDomainTime_def) apply wp apply (wp|wpc|unfold Let_def|simp)+ - apply (wp static_imp_wp threadSet_timeslice_invs threadSet_valid_queues threadSet_valid_queues' + apply (wp hoare_weak_lift_imp threadSet_timeslice_invs threadSet_valid_queues threadSet_valid_queues' threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf tcbSchedAppend_valid_objs' rescheduleRequired_weak_sch_act_wf tcbSchedAppend_valid_queues| simp)+ apply (strengthen sch_act_wf_weak) @@ -1040,7 +1040,7 @@ lemma handleInterrupt_corres: apply wp+ apply clarsimp apply clarsimp - apply corressimp + apply corresKsimp done lemma threadSet_ksDomainTime[wp]: diff --git a/proof/refine/ARM_HYP/InvariantUpdates_H.thy b/proof/refine/ARM_HYP/InvariantUpdates_H.thy index 29ef82a6b5..c15fac0053 100644 --- a/proof/refine/ARM_HYP/InvariantUpdates_H.thy +++ b/proof/refine/ARM_HYP/InvariantUpdates_H.thy @@ -16,7 +16,7 @@ lemma ps_clear_domE[elim?]: lemma ps_clear_upd: "ksPSpace s y = Some v \ - ps_clear x n (ksPSpace_update (\a. ksPSpace s(y \ v')) s') = ps_clear x n s" + ps_clear x n (ksPSpace_update (\a. 
(ksPSpace s)(y \ v')) s') = ps_clear x n s" by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] diff --git a/proof/refine/ARM_HYP/Invariants_H.thy b/proof/refine/ARM_HYP/Invariants_H.thy index 24fad7bcb9..7d3acef6cc 100644 --- a/proof/refine/ARM_HYP/Invariants_H.thy +++ b/proof/refine/ARM_HYP/Invariants_H.thy @@ -1529,7 +1529,7 @@ lemmas valid_duplicates'_D = valid_duplicates'_pdeD valid_duplicates'_pteD lemma valid_duplicates'_non_pd_pt_I: "\koTypeOf ko \ ArchT PDET; koTypeOf ko \ ArchT PTET; vs_valid_duplicates' (ksPSpace s) ; ksPSpace s p = Some ko; koTypeOf ko = koTypeOf m\ - \ vs_valid_duplicates' (ksPSpace s(p \ m))" + \ vs_valid_duplicates' ((ksPSpace s)(p \ m))" apply (subst vs_valid_duplicates'_def) apply (rule allI) apply (clarsimp simp: option.splits kernel_object.splits arch_kernel_object.splits) diff --git a/proof/refine/ARM_HYP/IpcCancel_R.thy b/proof/refine/ARM_HYP/IpcCancel_R.thy index 49a70b700c..b1defefcea 100644 --- a/proof/refine/ARM_HYP/IpcCancel_R.thy +++ b/proof/refine/ARM_HYP/IpcCancel_R.thy @@ -1457,7 +1457,7 @@ lemma archThreadGet_corres: "(\a a'. arch_tcb_relation a a' \ f a = f' a') \ corres (=) (tcb_at t) (tcb_at' t) (arch_thread_get f t) (archThreadGet f' t)" unfolding arch_thread_get_def archThreadGet_def - apply (corressimp corres: get_tcb_corres) + apply (corresKsimp corres: get_tcb_corres) apply (clarsimp simp: tcb_relation_def) done @@ -1486,7 +1486,7 @@ lemma corres_gets_current_vcpu[corres]: lemma vcpuInvalidateActive_corres[corres]: "corres dc \ no_0_obj' vcpu_invalidate_active vcpuInvalidateActive" unfolding vcpuInvalidateActive_def vcpu_invalidate_active_def - apply (corressimp corres: vcpuDisable_corres + apply (corresKsimp corres: vcpuDisable_corres corresK: corresK_modifyT simp: modifyArchState_def) apply (clarsimp simp: state_relation_def arch_state_relation_def) @@ -1500,7 +1500,7 @@ lemma archThreadSet_corres: "(\a a'. arch_tcb_relation a a' \ arch_tcb_relation (f a) (f' a')) \ corres dc (tcb_at t) (tcb_at' t) (arch_thread_set f t) (archThreadSet f' t)" apply (simp add: arch_thread_set_def archThreadSet_def) - apply (corres corres: get_tcb_corres setObject_update_TCB_corres') + apply (corresK corres: get_tcb_corres setObject_update_TCB_corres') apply wpsimp+ apply (auto simp add: tcb_relation_def tcb_cap_cases_def tcb_cte_cases_def exst_same_def)+ done @@ -1531,7 +1531,7 @@ lemma asUser_sanitiseRegister_corres[corres]: setRegister CPSR (sanitiseRegister b' CPSR cpsr) od))" unfolding sanitiseRegister_def sanitise_register_def - apply (corressimp corresK: corresK_as_user') + apply (corresKsimp corresK: corresK_as_user') done crunch typ_at'[wp]: vcpuInvalidateActive "\s. 
P (typ_at' T p s)" @@ -1553,13 +1553,13 @@ lemma dissociateVCPUTCB_corres [@lift_corres_args, corres]: (dissociate_vcpu_tcb v t) (dissociateVCPUTCB v t)" unfolding dissociate_vcpu_tcb_def dissociateVCPUTCB_def apply (clarsimp simp: bind_assoc when_fail_assert opt_case_when) - apply (corressimp corres: getObject_vcpu_corres setObject_VCPU_corres get_tcb_corres) + apply (corresKsimp corres: getObject_vcpu_corres setObject_VCPU_corres get_tcb_corres) apply (wpsimp wp: arch_thread_get_wp simp: archThreadSet_def tcb_ko_at' tcb_at_typ_at' | strengthen imp_drop_strg[where Q="tcb_at t s" for s] imp_drop_strg[where Q="vcpu_at' v s \ typ_at' TCBT t s" for s] - | corres_rv)+ - apply (corressimp wp: get_vcpu_wp getVCPU_wp getObject_tcb_wp arch_thread_get_wp corres_rv_wp_left + | corresK_rv)+ + apply (corresKsimp wp: get_vcpu_wp getVCPU_wp getObject_tcb_wp arch_thread_get_wp corres_rv_wp_left simp: archThreadGet_def tcb_ko_at')+ apply (clarsimp simp: typ_at_tcb' typ_at_to_obj_at_arches) apply normalise_obj_at' @@ -1582,7 +1582,7 @@ lemma prepareThreadDelete_corres: "corres dc (invs and tcb_at t) (valid_objs' and tcb_at' t and no_0_obj') (prepare_thread_delete t) (prepareThreadDelete t)" apply (simp add: prepare_thread_delete_def prepareThreadDelete_def) - apply (corressimp simp: tcb_vcpu_relation) + apply (corresKsimp simp: tcb_vcpu_relation) apply (wp arch_thread_get_wp) apply (wpsimp wp: getObject_tcb_wp simp: archThreadGet_def) apply clarsimp diff --git a/proof/refine/ARM_HYP/Ipc_R.thy b/proof/refine/ARM_HYP/Ipc_R.thy index 585b19c53d..48c77e4072 100644 --- a/proof/refine/ARM_HYP/Ipc_R.thy +++ b/proof/refine/ARM_HYP/Ipc_R.thy @@ -317,7 +317,7 @@ lemma cteInsert_cte_wp_at: cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" apply (simp add: cteInsert_def) - apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp | clarsimp simp: comp_def | unfold setUntypedCapAsFull_def)+ apply (drule cte_at_cte_wp_atD) @@ -361,7 +361,7 @@ lemma cteInsert_weak_cte_wp_at3: else cte_wp_at' (\c. P (cteCap c)) p s\ cteInsert cap src dest \\uu. cte_wp_at' (\c. 
P (cteCap c)) p\" - by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp | clarsimp simp: comp_def cteInsert_def | unfold setUntypedCapAsFull_def | auto simp: cte_wp_at'_def dest!: imp)+ @@ -581,7 +581,7 @@ lemma cteInsert_cte_cap_to': apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of) apply (rule_tac x = "cref" in exI) apply (rule conjI) @@ -624,7 +624,7 @@ lemma cteInsert_weak_cte_wp_at2: apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of weak) apply auto done @@ -657,11 +657,11 @@ lemma transferCapsToSlots_presM: apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift | assumption | wpc)+ apply (rule cteInsert_assume_Null) - apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+ + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp deriveCap_derived_foo)+ apply (thin_tac "\slots. 
PROP P slots" for P) apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def @@ -1053,7 +1053,7 @@ lemma transferCaps_corres: apply (rule corres_rel_imp, rule transferCapsToSlots_corres, simp_all add: split_def)[1] apply (case_tac info, simp) - apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp | simp only: ball_conj_distrib)+ apply (simp add: cte_map_def tcb_cnode_index_def split_def) apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 @@ -1471,7 +1471,7 @@ lemma doNormalTransfer_corres: hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' hoare_vcg_const_Ball_lift lookupExtraCaps_length | simp add: if_apply_def2)+) - apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+ + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ apply clarsimp apply auto done @@ -1866,7 +1866,7 @@ lemma arch_getSanitiseRegisterInfo_corres: (getSanitiseRegisterInfo t)" unfolding arch_get_sanitise_register_info_def getSanitiseRegisterInfo_def apply (fold archThreadGet_def) - by (corressimp corres: archThreadGet_VCPU_corres) + by (corresKsimp corres: archThreadGet_VCPU_corres) crunch tcb_at'[wp]: getSanitiseRegisterInfo "tcb_at' t" @@ -2256,7 +2256,7 @@ lemma doReplyTransfer_corres: apply simp apply (fold dc_def, rule possibleSwitchTo_corres) apply simp - apply (wp static_imp_wp static_imp_conj_wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' sts_valid_queues | simp | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ apply (rule corres_guard_imp) apply (rule setThreadState_corres) @@ -2357,15 +2357,15 @@ lemma setupCallerCap_corres: tcb_cnode_index_def cte_level_bits_def) apply (simp add: cte_map_def tcbCallerSlot_def tcb_cnode_index_def cte_level_bits_def) - apply (rule_tac Q="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" + in hoare_post_add) apply (wp, (wp getSlotCap_wp)+) apply blast apply (rule no_fail_pre, wp) apply (clarsimp simp: cte_wp_at'_def cte_at'_def) - apply (rule_tac Q="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" + in hoare_post_add) apply (wp, (wp getCTE_wp')+) apply blast apply (rule no_fail_pre, wp) @@ -2422,7 +2422,7 @@ lemma possibleSwitchTo_weak_sch_act_wf[wp]: bitmap_fun_defs) apply (wp rescheduleRequired_weak_sch_act_wf weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"] - getObject_tcb_wp static_imp_wp + getObject_tcb_wp hoare_weak_lift_imp | wpc)+ apply (clarsimp simp: obj_at'_def projectKOs weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def) done @@ -2641,7 +2641,7 @@ lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at'] declare tl_drop_1[simp] crunch cur[wp]: cancel_ipc "cur_tcb" - (wp: select_wp crunch_wps simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch valid_objs'[wp]: asUser "valid_objs'" @@ -2790,7 +2790,7 @@ lemma possibleSwitchTo_sch_act[wp]: possibleSwitchTo t \\rv s. 
sch_act_wf (ksSchedulerAction s) s\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadSet_sch_act setQueue_sch_act threadGet_wp + apply (wp hoare_weak_lift_imp threadSet_sch_act setQueue_sch_act threadGet_wp | simp add: unless_def | wpc)+ apply (auto simp: obj_at'_def projectKOs tcb_in_cur_domain'_def) done @@ -2811,7 +2811,7 @@ lemma possibleSwitchTo_ksQ': possibleSwitchTo t \\_ s. t' \ set (ksReadyQueues s p)\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp + apply (wp hoare_weak_lift_imp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp | wpc | simp split del: if_split)+ apply (auto simp: obj_at'_def) @@ -2823,7 +2823,7 @@ lemma possibleSwitchTo_valid_queues'[wp]: possibleSwitchTo t \\rv. valid_queues'\" apply (simp add: possibleSwitchTo_def curDomain_def) - apply (wp static_imp_wp threadGet_wp | wpc | simp)+ + apply (wp hoare_weak_lift_imp threadGet_wp | wpc | simp)+ apply (auto simp: obj_at'_def) done @@ -3806,7 +3806,7 @@ lemma completeSignal_invs: \ ((\y. ntfnBoundTCB ntfn = Some y) \ ex_nonz_cap_to' ntfnptr s) \ ntfnptr \ ksIdleThread s" in hoare_strengthen_post) - apply ((wp hoare_vcg_ex_lift static_imp_wp | wpc | simp add: valid_ntfn'_def)+)[1] + apply ((wp hoare_vcg_ex_lift hoare_weak_lift_imp | wpc | simp add: valid_ntfn'_def)+)[1] apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def live'_def projectKOs split: option.splits) apply (blast dest: ntfn_q_refs_no_bound_refs') apply wp @@ -4027,7 +4027,7 @@ lemma rai_invs'[wp]: \ \ep = ActiveNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts static_imp_wp + apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts hoare_weak_lift_imp asUser_urz | simp add: valid_ntfn'_def)+ apply (clarsimp simp: pred_tcb_at' valid_pspace'_def) @@ -4507,7 +4507,7 @@ lemma sendSignal_st_tcb'_Running: sendSignal ntfnptr bdg \\_. st_tcb_at' (\st. st = Running \ P st) t\" apply (simp add: sendSignal_def) - apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp static_imp_wp + apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp hoare_weak_lift_imp | wpc | clarsimp simp: pred_tcb_at')+ done diff --git a/proof/refine/ARM_HYP/KHeap_R.thy b/proof/refine/ARM_HYP/KHeap_R.thy index 2a05f30b07..6eec9c3cba 100644 --- a/proof/refine/ARM_HYP/KHeap_R.thy +++ b/proof/refine/ARM_HYP/KHeap_R.thy @@ -1061,8 +1061,8 @@ lemma setEndpoint_corres [corres]: corres dc (ep_at ptr) (ep_at' ptr) (set_endpoint ptr e) (setEndpoint ptr e')" apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) lemma setNotification_corres [corres]: @@ -1070,8 +1070,8 @@ lemma setNotification_corres [corres]: corres dc (ntfn_at ptr) (ntfn_at' ptr) (set_notification ptr ae) (setNotification ptr ae')" apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. 
True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) lemma no_fail_getNotification [wp]: @@ -2262,21 +2262,21 @@ lemma valid_globals_cte_wpD': lemma dmo_aligned'[wp]: "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_distinct'[wp]: "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_valid_objs'[wp]: "\valid_objs'\ doMachineOp f \\_. valid_objs'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done @@ -2284,7 +2284,7 @@ lemma dmo_inv': assumes R: "\P. \P\ f \\_. P\" shows "\P\ doMachineOp f \\_. P\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp apply (drule in_inv_by_hoareD [OF R]) apply simp diff --git a/proof/refine/ARM_HYP/LevityCatch.thy b/proof/refine/ARM_HYP/LevityCatch.thy index c3f6a79497..064702ca9d 100644 --- a/proof/refine/ARM_HYP/LevityCatch.thy +++ b/proof/refine/ARM_HYP/LevityCatch.thy @@ -8,6 +8,7 @@ theory LevityCatch imports "BaseRefine.Include" "Lib.LemmaBucket" + "Lib.Corres_Method" begin (* Try again, clagged from Include *) diff --git a/proof/refine/ARM_HYP/PageTableDuplicates.thy b/proof/refine/ARM_HYP/PageTableDuplicates.thy index bb09822212..e766fa8576 100644 --- a/proof/refine/ARM_HYP/PageTableDuplicates.thy +++ b/proof/refine/ARM_HYP/PageTableDuplicates.thy @@ -1898,7 +1898,7 @@ lemma performArchInvocation_valid_duplicates': apply (clarsimp simp:cte_wp_at_ctes_of) apply (case_tac ctea,clarsimp) apply (frule(1) ctes_of_valid_cap'[OF _ invs_valid_objs']) - apply (wp static_imp_wp|simp)+ + apply (wp hoare_weak_lift_imp|simp)+ apply (simp add:placeNewObject_def) apply (wp |simp add:alignError_def unless_def|wpc)+ apply (wp updateFreeIndex_pspace_no_overlap' hoare_drop_imp @@ -1948,10 +1948,10 @@ lemma tc_valid_duplicates': apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_invs_trivial - hoare_vcg_all_lift threadSet_cap_to' static_imp_wp | simp add: inQ_def | fastforce)+)[2] + hoare_vcg_all_lift threadSet_cap_to' hoare_weak_lift_imp | simp add: inQ_def | fastforce)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. 
x s" for s]) - apply ((wp case_option_wp setMCPriority_invs' static_imp_wp + apply ((wp case_option_wp setMCPriority_invs' hoare_weak_lift_imp typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | simp add: inQ_def | fastforce)+)[2] apply ((simp only: simp_thms cases_simp cong: conj_cong @@ -1970,7 +1970,7 @@ lemma tc_valid_duplicates': threadSet_cte_wp_at' hoare_vcg_all_lift_R hoare_vcg_all_lift - static_imp_wp + hoare_weak_lift_imp )[1] | wpc | simp add: inQ_def diff --git a/proof/refine/ARM_HYP/Refine.thy b/proof/refine/ARM_HYP/Refine.thy index 2ec62aecd3..5fc38d5121 100644 --- a/proof/refine/ARM_HYP/Refine.thy +++ b/proof/refine/ARM_HYP/Refine.thy @@ -280,7 +280,7 @@ lemma kernel_entry_invs: thread_set_ct_running thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext - static_imp_wp + hoare_weak_lift_imp | clarsimp simp add: tcb_cap_cases_def active_from_running)+ done @@ -296,18 +296,18 @@ definition lemma do_user_op_valid_list:"\valid_list\ do_user_op f tc \\_. valid_list\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_valid_sched:"\valid_sched\ do_user_op f tc \\_. valid_sched\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_sched_act: "\\s. P (scheduler_action s)\ do_user_op f tc \\_ s. P (scheduler_action s)\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_invs2: @@ -422,9 +422,9 @@ lemma kernelEntry_invs': (\s. 0 < ksDomainTime s) and valid_domain_list' \" apply (simp add: kernelEntry_def) apply (wp ckernel_invs callKernel_valid_duplicates' callKernel_domain_time_left - threadSet_invs_trivial threadSet_ct_running' select_wp + threadSet_invs_trivial threadSet_ct_running' TcbAcc_R.dmo_invs' callKernel_domain_time_left - static_imp_wp + hoare_weak_lift_imp | clarsimp simp: user_memory_update_def no_irq_def tcb_at_invs' atcbContextSet_def valid_domain_list'_def)+ done @@ -504,7 +504,7 @@ lemma doUserOp_invs': (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and (\s. 0 < ksDomainTime s) and valid_domain_list'\" apply (simp add: doUserOp_def split_def ex_abs_def) - apply (wp device_update_invs' select_wp + apply (wp device_update_invs' | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def user_memory_update_def))+ apply (clarsimp simp: user_memory_update_def simpler_modify_def @@ -518,7 +518,7 @@ lemma doUserOp_valid_duplicates': doUserOp f tc \\_ s. 
vs_valid_duplicates' (ksPSpace s)\" apply (simp add: doUserOp_def split_def) - apply (wp dmo_invs' select_wp) + apply (wp dmo_invs') apply clarsimp done @@ -656,12 +656,12 @@ lemma entry_corres: apply (rule hoare_strengthen_post, rule akernel_invs_det_ext, simp add: invs_def cur_tcb_def) apply (rule hoare_strengthen_post, rule ckernel_invs, simp add: invs'_def cur_tcb'_def) apply ((wp thread_set_invs_trivial thread_set_ct_running - thread_set_not_state_valid_sched static_imp_wp + thread_set_not_state_valid_sched hoare_weak_lift_imp hoare_vcg_disj_lift ct_in_state_thread_state_lift | simp add: tcb_cap_cases_def thread_set_no_change_tcb_state)+)[1] apply (simp add: pred_conj_def cong: conj_cong) apply (wp threadSet_invs_trivial threadSet_ct_running' - static_imp_wp hoare_vcg_disj_lift + hoare_weak_lift_imp hoare_vcg_disj_lift | simp add: ct_in_state'_def atcbContextSet_def | (wps, wp threadSet_st_tcb_at2))+ apply (clarsimp simp: invs_def cur_tcb_def) diff --git a/proof/refine/ARM_HYP/Retype_R.thy b/proof/refine/ARM_HYP/Retype_R.thy index a9def1a0c3..f41e20fcbe 100644 --- a/proof/refine/ARM_HYP/Retype_R.thy +++ b/proof/refine/ARM_HYP/Retype_R.thy @@ -1527,7 +1527,7 @@ lemma retype_region_ext_modify_kheap_futz: done lemmas retype_region_ext_modify_kheap_futz' = - fun_cong[OF arg_cong[where f=NonDetMonad.bind, + fun_cong[OF arg_cong[where f=Nondet_Monad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] lemma foldr_upd_app_if_eta_futz: @@ -2584,7 +2584,6 @@ lemma update_gs_ksMachineState_update_swap: declare hoare_in_monad_post[wp del] declare univ_get_wp[wp del] -declare result_in_set_wp[wp del] crunch valid_arch_state'[wp]: copyGlobalMappings "valid_arch_state'" (wp: crunch_wps) @@ -4553,7 +4552,7 @@ proof - apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) apply (rule hoare_pre) apply (wps a b c d) - apply (wp static_imp_wp e' hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) apply (auto simp: obj_at'_def ct_in_state'_def projectKOs st_tcb_at'_def) done qed diff --git a/proof/refine/ARM_HYP/Schedule_R.thy b/proof/refine/ARM_HYP/Schedule_R.thy index d9467203ff..0b6d59bdc9 100644 --- a/proof/refine/ARM_HYP/Schedule_R.thy +++ b/proof/refine/ARM_HYP/Schedule_R.thy @@ -10,7 +10,7 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) -declare static_imp_wp[wp_split del] +declare hoare_weak_lift_imp[wp_split del] (* Levity: added (20090713 10:04:12) *) declare sts_rel_idle [simp] @@ -41,7 +41,7 @@ proof - apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) done have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" - by (wp alternative_wp | simp)+ + by (wp | simp)+ show ?thesis using p apply (induct xs) apply (simp add: y del: dc_simp) @@ -76,17 +76,17 @@ lemma vs_refs_pages_vcpu: by (simp add: vs_refs_pages_def) lemma vs_lookup_pages1_vcpu_update: - "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages1 (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) + "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages1 (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = vs_lookup_pages1 s" by (clarsimp intro!: set_eqI simp: vs_lookup_pages1_def vs_refs_pages_vcpu obj_at_def) lemma vs_lookup_pages_vcpu_update: - "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) + "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = vs_lookup_pages s" by (clarsimp simp: vs_lookup_pages_def vs_lookup_pages1_vcpu_update) lemma valid_vs_lookup_vcpu_update: - "typ_at (AArch AVCPU) vcpuPtr s \ valid_vs_lookup (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) + "typ_at (AArch AVCPU) vcpuPtr s \ valid_vs_lookup (s\kheap := (kheap s)(vcpuPtr \ ArchObj (VCPU vcpu))\) = valid_vs_lookup s" apply (clarsimp simp: valid_vs_lookup_def caps_of_state_VCPU_update) apply (rule all_cong1) @@ -516,7 +516,7 @@ lemma ct_idle_or_in_cur_domain'_lift2: apply (rule hoare_lift_Pf2[where f=ksCurThread]) apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) including no_pre - apply (wp static_imp_wp hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift) apply simp+ done @@ -769,12 +769,12 @@ lemma valid_vs_lookup_arm_current_vcpu_inv[simp]: "valid_vs_lookup (s\arc lemma vs_lookup_pages1_vcpu_update': "kheap s p = Some (ArchObj (VCPU x)) \ - vs_lookup_pages1 (s\kheap := kheap s(p \ ArchObj (VCPU x'))\) = vs_lookup_pages1 s" + vs_lookup_pages1 (s\kheap := (kheap s)(p \ ArchObj (VCPU x'))\) = vs_lookup_pages1 s" by (clarsimp simp: vs_lookup_pages1_def obj_at_def vs_refs_pages_def intro!: set_eqI) lemma vs_lookup_pages_vcpu_update': "kheap s y = Some (ArchObj (VCPU x)) \ - (ref \ p) s = (ref \ p) (s\kheap := kheap s(y \ ArchObj (VCPU x'))\)" + (ref \ p) s = (ref \ p) (s\kheap := (kheap s)(y \ ArchObj (VCPU x'))\)" by (clarsimp simp: vs_lookup_pages_def vs_lookup_pages1_vcpu_update') lemma tcb_at'_ksIdleThread_lift: @@ -792,7 +792,7 @@ lemma arch_switchToIdleThread_corres: arch_switch_to_idle_thread Arch.switchToIdleThread" unfolding arch_switch_to_idle_thread_def ARM_HYP_H.switchToIdleThread_def - apply (corressimp corres: getIdleThread_corres setVMRoot_corres[@lift_corres_args] vcpuSwitch_corres[where vcpu=None, simplified] + apply (corresKsimp corres: getIdleThread_corres setVMRoot_corres[@lift_corres_args] vcpuSwitch_corres[where vcpu=None, simplified] wp: tcb_at_idle_thread_lift tcb_at'_ksIdleThread_lift vcpuSwitch_it') apply (clarsimp simp: invs_valid_objs invs_arch_state invs_valid_asid_map invs_valid_vs_lookup invs_psp_aligned invs_distinct invs_unique_refs invs_vspace_objs) @@ -1490,7 +1490,7 @@ lemma switchToIdleThread_invs_no_cicd': crunch obj_at'[wp]: "Arch.switchToIdleThread" "obj_at' (P :: ('a :: no_vcpu) \ bool) t" -declare static_imp_conj_wp[wp_split del] +declare hoare_weak_lift_imp_conj[wp_split del] lemma setCurThread_const: "\\_. P t \ setCurThread t \\_ s. 
P (ksCurThread s) \" @@ -2279,7 +2279,7 @@ lemma schedule_invs': apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs' chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2 - | wp hoare_disjI2[where Q="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] + | wp hoare_disjI2[where R="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] | strengthen invs'_invs_no_cicd diff --git a/proof/refine/ARM_HYP/Syscall_R.thy b/proof/refine/ARM_HYP/Syscall_R.thy index c1bc9f1d20..231e0ea580 100644 --- a/proof/refine/ARM_HYP/Syscall_R.thy +++ b/proof/refine/ARM_HYP/Syscall_R.thy @@ -338,7 +338,7 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: apply (simp add: threadSet_def) apply wp apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ apply (rename_tac word) apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" @@ -689,7 +689,7 @@ proof - apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) apply (rule PUC,simp) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (wp hoare_vcg_all_lift static_imp_wp | simp add:ball_conj_distrib)+ + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ done qed @@ -811,7 +811,7 @@ lemma doReply_invs[wp]: apply assumption apply (erule cte_wp_at_weakenE') apply (fastforce) - apply (wp sts_invs_minor'' sts_st_tcb' static_imp_wp) + apply (wp sts_invs_minor'' sts_st_tcb' hoare_weak_lift_imp) apply (rule_tac Q="\rv s. invs' s \ sch_act_simple s \ st_tcb_at' awaiting_reply' t s \ t \ ksIdleThread s" @@ -829,7 +829,7 @@ lemma doReply_invs[wp]: apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) - apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 static_imp_wp + apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 hoare_weak_lift_imp | clarsimp simp add: inQ_def)+ apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple and st_tcb_at' awaiting_reply' t" @@ -987,7 +987,7 @@ lemma setDomain_invs': (\y. domain \ maxDomain))\ setDomain ptr domain \\y. invs'\" apply (simp add:setDomain_def ) - apply (wp add: when_wp static_imp_wp static_imp_conj_wp rescheduleRequired_all_invs_but_extra + apply (wp add: when_wp hoare_weak_lift_imp hoare_weak_lift_imp_conj rescheduleRequired_all_invs_but_extra tcbSchedEnqueue_valid_action hoare_vcg_if_lift2) apply (rule_tac Q = "\r s. all_invs_but_sch_extra s \ curThread = ksCurThread s \ (ptr \ curThread \ ct_not_inQ s \ sch_act_wf (ksSchedulerAction s) s \ ct_idle_or_in_cur_domain' s)" @@ -1001,7 +1001,7 @@ lemma setDomain_invs': prefer 2 apply clarsimp apply assumption - apply (wp static_imp_wp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain + apply (wp hoare_weak_lift_imp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain threadSet_tcbDomain_update_ct_not_inQ | simp)+ apply (rule_tac Q = "\r s. invs' s \ curThread = ksCurThread s \ sch_act_simple s \ domain \ maxDomain @@ -1218,7 +1218,7 @@ crunch valid_duplicates'[wp]: rescheduleRequired "\s. vs_valid_duplicate crunch valid_duplicates'[wp]: setThreadState "\s. 
vs_valid_duplicates' (ksPSpace s)" -(*FIXME: move to NonDetMonadVCG.valid_validE_R *) +(*FIXME: move to Nondet_VCG.valid_validE_R *) lemma handleInvocation_corres: "c \ b \ corres (dc \ dc) @@ -1349,7 +1349,7 @@ lemma hinv_invs'[wp]: apply (simp add: handleInvocation_def split_def ts_Restart_case_helper') apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' - hoare_vcg_all_lift static_imp_wp) + hoare_vcg_all_lift hoare_weak_lift_imp) apply (simp add: if_apply_def2) apply (wp gts_imp' | simp)+ apply (rule_tac Q'="\rv. invs'" in hoare_post_imp_R[rotated]) @@ -1979,7 +1979,7 @@ lemma handleHypervisorFault_corres: (handle_hypervisor_fault thread fault) (handleHypervisorFault thread fault)" apply (cases fault; clarsimp simp add: handleHypervisorFault_def returnOk_def2) - apply (corres corres: handleFault_corres) + apply (corresK corres: handleFault_corres) apply (simp add: ucast_id) apply (clarsimp simp: valid_fault_def) done diff --git a/proof/refine/ARM_HYP/TcbAcc_R.thy b/proof/refine/ARM_HYP/TcbAcc_R.thy index 3c7a3a2761..3c87297197 100644 --- a/proof/refine/ARM_HYP/TcbAcc_R.thy +++ b/proof/refine/ARM_HYP/TcbAcc_R.thy @@ -11,7 +11,6 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) declare if_weak_cong [cong] -declare result_in_set_wp[wp] declare hoare_in_monad_post[wp] declare trans_state_update'[symmetric,simp] declare storeWordUser_typ_at' [wp] @@ -50,7 +49,7 @@ lemma isHighestPrio_def': "isHighestPrio d p = gets (\s. ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' apply (rule ext) - apply (clarsimp simp: gets_def bind_assoc return_def NonDetMonad.bind_def get_def + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def split: if_splits) done @@ -384,7 +383,7 @@ proof - apply (simp add: return_def thread_set_def gets_the_def assert_def assert_opt_def simpler_gets_def set_object_def get_object_def put_def get_def bind_def) - apply (subgoal_tac "kheap s(t \ TCB tcb) = kheap s") + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s") apply (simp add: map_upd_triv get_tcb_SomeD)+ done show ?thesis @@ -2397,9 +2396,9 @@ lemma threadSet_queued_sch_act_wf[wp]: split: scheduler_action.split) apply (wp hoare_vcg_conj_lift) apply (simp add: threadSet_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ apply (clarsimp simp: obj_at'_def) apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ apply (simp add: threadSet_def) @@ -4141,7 +4140,7 @@ lemma possibleSwitchTo_ct_not_inQ: possibleSwitchTo t \\_. 
ct_not_inQ\" (is "\?PRE\ _ \_\") apply (simp add: possibleSwitchTo_def curDomain_def) - apply (wpsimp wp: static_imp_wp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ + apply (wpsimp wp: hoare_weak_lift_imp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ threadGet_wp | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ apply (fastforce simp: obj_at'_def) @@ -4160,7 +4159,7 @@ lemma threadSet_tcbState_update_ct_not_inQ[wp]: apply (clarsimp) apply (rule hoare_conjI) apply (rule hoare_weaken_pre) - apply (wps, wp static_imp_wp) + apply (wps, wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb)+ apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4180,7 +4179,7 @@ lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: apply (rule hoare_conjI) apply (rule hoare_weaken_pre) apply wps - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb) apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) diff --git a/proof/refine/ARM_HYP/Tcb_R.thy b/proof/refine/ARM_HYP/Tcb_R.thy index 64e7a6c9df..8f1b01fcd6 100644 --- a/proof/refine/ARM_HYP/Tcb_R.thy +++ b/proof/refine/ARM_HYP/Tcb_R.thy @@ -347,7 +347,7 @@ lemma invokeTCB_WriteRegisters_corres: apply (rule_tac P=\ and P'=\ in corres_inst) apply simp apply (wp+)[2] - apply ((wp static_imp_wp restart_invs' + apply ((wp hoare_weak_lift_imp restart_invs' | strengthen valid_sched_weak_strg einvs_valid_etcbs invs_valid_queues' invs_queues invs_weak_sch_act_wf | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def @@ -446,7 +446,7 @@ proof - apply (simp add: frame_registers_def frameRegisters_def) apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) apply (rule Q[OF refl refl]) - apply (wp mapM_x_wp' static_imp_wp | simp)+ + apply (wp mapM_x_wp' hoare_weak_lift_imp | simp)+ apply (rule corres_split_nor) apply (rule corres_when[OF refl]) apply (rule R[OF refl refl]) @@ -456,15 +456,15 @@ proof - apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp - apply (solves \wp static_imp_wp\)+ + apply (solves \wp hoare_weak_lift_imp\)+ apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_strengthen_post[rotated]) apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def) prefer 2 apply (rule_tac Q="\_. 
invs' and tcb_at' dest" in hoare_strengthen_post[rotated]) apply (clarsimp simp: invs'_def valid_state'_def invs_weak_sch_act_wf) - apply ((wp mapM_x_wp' static_imp_wp | simp)+)[4] - apply ((wp static_imp_wp restart_invs' | wpc | clarsimp simp add: if_apply_def2)+)[2] - apply (wp suspend_nonz_cap_to_tcb static_imp_wp | simp add: if_apply_def2)+ + apply ((wp mapM_x_wp' hoare_weak_lift_imp | simp)+)[4] + apply ((wp hoare_weak_lift_imp restart_invs' | wpc | clarsimp simp add: if_apply_def2)+)[2] + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp | simp add: if_apply_def2)+ apply (fastforce simp: invs_def valid_state_def valid_pspace_def dest!: idle_no_ex_cap) apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) @@ -636,9 +636,9 @@ lemma sp_corres2: apply wp apply wp apply clarsimp - apply (wp static_imp_wp hoare_vcg_if_lift hoare_wp_combs gts_wp) + apply (wp hoare_weak_lift_imp hoare_vcg_if_lift hoare_wp_combs gts_wp) apply clarsimp - apply (wp hoare_vcg_if_lift static_imp_wp hoare_wp_combs isRunnable_wp) + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp hoare_wp_combs isRunnable_wp) apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift) apply clarsimp apply (wp hoare_drop_imps) @@ -1673,30 +1673,30 @@ lemma tc_invs': apply (simp only: eq_commute[where a="a"]) apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp setMCPriority_invs' + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] - apply (wp add: setP_invs' static_imp_wp hoare_vcg_all_lift)+ + apply (wp add: setP_invs' hoare_weak_lift_imp hoare_vcg_all_lift)+ apply (rule case_option_wp_None_return[OF setP_invs'[simplified pred_conj_assoc]]) apply clarsimp apply wpfix apply assumption apply (rule case_option_wp_None_returnOk) - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak threadSet_invs_trivial2 threadSet_tcb' hoare_vcg_all_lift threadSet_cte_wp_at')+ - apply (wpsimp wp: static_imp_wpE cteDelete_deletes + apply (wpsimp wp: hoare_weak_lift_imp_R cteDelete_deletes hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_invs' cteDelete_typ_at'_lifts)+ apply (assumption | clarsimp cong: conj_cong imp_cong | (rule case_option_wp_None_returnOk) - | wpsimp wp: static_imp_wp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + | wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak hoare_vcg_imp_lift' hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] - hoare_vcg_const_imp_lift_R assertDerived_wp_weak static_imp_wpE cteDelete_deletes + hoare_vcg_const_imp_lift_R assertDerived_wp_weak hoare_weak_lift_imp_R cteDelete_deletes hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 
hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_typ_at'_lifts cteDelete_sch_act_simple)+ apply (clarsimp simp: tcb_cte_cases_def cte_level_bits_def objBits_defs @@ -2075,7 +2075,7 @@ lemma decodeSetPriority_corres: clarsimp simp: decode_set_priority_def decodeSetPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) @@ -2094,7 +2094,7 @@ lemma decodeSetMCPriority_corres: clarsimp simp: decode_set_mcpriority_def decodeSetMCPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) @@ -2131,7 +2131,7 @@ lemma checkPrio_wp: checkPrio prio auth \ \rv. P \,-" apply (simp add: checkPrio_def) - apply (wp NonDetMonadVCG.whenE_throwError_wp getMCP_wp) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) by (auto simp add: pred_tcb_at'_def obj_at'_def) lemma checkPrio_lt_ct: @@ -2211,7 +2211,7 @@ lemma decodeSetSchedParams_corres: apply (clarsimp split: list.split simp: list_all2_Cons2) apply (clarsimp simp: list_all2_Cons1 neq_Nil_conv val_le_length_Cons linorder_not_less) apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_split_norE[OF checkPrio_corres]) apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) @@ -2749,7 +2749,7 @@ lemma restart_makes_simple': \\rv. st_tcb_at' simple' t\" apply (simp add: restart_def) apply (wp sts_st_tcb_at'_cases cancelIPC_simple - cancelIPC_st_tcb_at static_imp_wp | simp)+ + cancelIPC_st_tcb_at hoare_weak_lift_imp | simp)+ apply (rule hoare_strengthen_post [OF isStopped_inv]) prefer 2 apply assumption diff --git a/proof/refine/ARM_HYP/Untyped_R.thy b/proof/refine/ARM_HYP/Untyped_R.thy index ea0e241e79..1a2f984f24 100644 --- a/proof/refine/ARM_HYP/Untyped_R.thy +++ b/proof/refine/ARM_HYP/Untyped_R.thy @@ -3232,7 +3232,7 @@ lemma createNewCaps_valid_cap': lemma dmo_ctes_of[wp]: "\\s. P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" - by (simp add: doMachineOp_def split_def | wp select_wp)+ + by (simp add: doMachineOp_def split_def | wp)+ lemma createNewCaps_ranges: "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 @@ -3556,7 +3556,7 @@ lemma updateFreeIndex_mdb_simple': and cte_wp_at' :"ctes_of s src = Some cte" "cteCap cte = capability.UntypedCap d ptr sz idx'" and unt_inc' :"untyped_inc' (ctes_of s)" and valid_objs' :"valid_objs' s" - and invp: "mdb_inv_preserve (ctes_of s) (ctes_of s(src \ cteCap_update (\_. capability.UntypedCap d ptr sz idx) cte))" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" (is "mdb_inv_preserve (ctes_of s) ?ctes") show "untyped_inc' ?ctes" @@ -4062,6 +4062,8 @@ lemma idx_le_new_offs: end +context begin interpretation Arch . (*FIXME: arch_split*) + lemma valid_sched_etcbs[elim!]: "valid_sched_2 queues ekh sa cdom kh ct it \ valid_etcbs_2 ekh kh" by (simp add: valid_sched_def) @@ -4637,6 +4639,8 @@ lemma whenE_reset_resetUntypedCap_invs_etc: crunch ksCurDomain[wp]: updateFreeIndex "\s. 
P (ksCurDomain s)" +end + lemma (in range_cover) funky_aligned: "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" apply (rule aligned_add_aligned) diff --git a/proof/refine/ARM_HYP/VSpace_R.thy b/proof/refine/ARM_HYP/VSpace_R.thy index 29af495169..9c10addd83 100644 --- a/proof/refine/ARM_HYP/VSpace_R.thy +++ b/proof/refine/ARM_HYP/VSpace_R.thy @@ -804,18 +804,18 @@ lemma vcpuUpdate_corres[corres]: "\v1 v2. vcpu_relation v1 v2 \ vcpu_relation (f v1) (f' v2) \ corres dc (vcpu_at v) (vcpu_at' v) (vcpu_update v f) (vcpuUpdate v f')" - by (corressimp corres: getObject_vcpu_corres setObject_VCPU_corres + by (corresKsimp corres: getObject_vcpu_corres setObject_VCPU_corres simp: vcpu_update_def vcpuUpdate_def vcpu_relation_def) lemma vgicUpdate_corres[corres]: "\vgic vgic'. vgic_map vgic = vgic' \ vgic_map (f vgic) = (f' vgic') \ corres dc (vcpu_at v) (vcpu_at' v) (vgic_update v f) (vgicUpdate v f')" - by (corressimp simp: vgic_update_def vgicUpdate_def vcpu_relation_def) + by (corresKsimp simp: vgic_update_def vgicUpdate_def vcpu_relation_def) lemma vgicUpdateLR_corres[corres]: "corres dc (vcpu_at v) (vcpu_at' v) (vgic_update_lr v idx val) (vgicUpdateLR v idx val)" - by (corressimp simp: vgic_update_lr_def vgicUpdateLR_def vgic_map_def) + by (corresKsimp simp: vgic_update_lr_def vgicUpdateLR_def vgic_map_def) lemma vcpuReadReg_corres[corres]: "corres (=) (vcpu_at v) (vcpu_at' v and no_0_obj') diff --git a/proof/refine/Move_R.thy b/proof/refine/Move_R.thy index 3eeb0d9f9d..50d6062252 100644 --- a/proof/refine/Move_R.thy +++ b/proof/refine/Move_R.thy @@ -207,7 +207,6 @@ lemma get_mapM_x_lower: (* Move to DetSchedDomainTime_AI *) crunch domain_list_inv[wp]: do_user_op "\s. P (domain_list s)" - (wp: select_wp) lemma next_child_child_set: "\next_child slot (cdt_list s) = Some child; valid_list s\ diff --git a/proof/refine/RISCV64/ArchAcc_R.thy b/proof/refine/RISCV64/ArchAcc_R.thy index 5f8a832f24..ee339a4765 100644 --- a/proof/refine/RISCV64/ArchAcc_R.thy +++ b/proof/refine/RISCV64/ArchAcc_R.thy @@ -249,10 +249,10 @@ lemma storePTE_state_refs_of[wp]: crunch cte_wp_at'[wp]: setIRQState "\s. P (cte_wp_at' P' p s)" crunch inv[wp]: getIRQSlot "P" -lemma setObject_ASIDPool_corres: - "a = inv ASIDPool a' o ucast \ +lemma setObject_ASIDPool_corres[corres]: + "\ a = inv ASIDPool a' o ucast; p' = p \ \ corres dc (asid_pool_at p and pspace_aligned and pspace_distinct) \ - (set_asid_pool p a) (setObject p a')" + (set_asid_pool p a) (setObject p' a')" apply (simp add: set_asid_pool_def) apply (rule corres_underlying_symb_exec_l[where P=P and Q="\_. 
P" for P]) apply (rule corres_no_failI; clarsimp) @@ -300,7 +300,8 @@ lemma corres_cross_over_pte_at: done lemma getObject_PTE_corres[corres]: - "p = p' \ corres pte_relation' (pte_at p and pspace_aligned and pspace_distinct) \ + "p = p' \ + corres pte_relation' (pte_at p and pspace_aligned and pspace_distinct) \ (get_pte p) (getObject p')" apply (rule corres_cross_over_pte_at, fastforce) apply (simp add: getObject_def gets_map_def split_def bind_assoc) @@ -407,9 +408,9 @@ lemma setObject_PT_corres: apply (simp add: caps_of_state_after_update obj_at_def swp_cte_at_caps_of) done -lemma storePTE_corres: - "pte_relation' pte pte' \ - corres dc (pte_at p and pspace_aligned and pspace_distinct) \ (store_pte p pte) (storePTE p pte')" +lemma storePTE_corres[corres]: + "\ p = p'; pte_relation' pte pte' \ \ + corres dc (pte_at p and pspace_aligned and pspace_distinct) \ (store_pte p pte) (storePTE p' pte')" apply (simp add: store_pte_def storePTE_def) apply (rule corres_assume_pre, simp add: pte_at_def) apply (rule corres_symb_exec_l) @@ -450,14 +451,32 @@ lemma page_table_at_cross: apply (clarsimp simp: obj_at_def pte_at_def table_base_plus_ucast is_aligned_pte_offset) done -lemma pt_at_lift: - "corres_inst_eq ptr ptr' \ \s s'. (s, s') \ state_relation \ True \ - (pspace_aligned s \ pspace_distinct s \ pt_at ptr s \ ptr = ptr') \ - \ s' \ page_table_at' ptr' s'" - by ( fastforce intro!: page_table_at_cross) +(* FIXME: use more recent guard crossing framework that don't need a specific goal form. + This was mostly left here to test compatibility bewtween corres and corresK methods *) +(* only applies when ptr is available in abstract and concrete guard at the same time *) +lemma pt_at_lift_eq: + "\s s'. (s, s') \ state_relation \ + (pspace_aligned s \ pspace_distinct s \ pt_at ptr s) \ + \ s' \ + page_table_at' ptr s'" + by (fastforce intro!: page_table_at_cross) + +(* only applies for the "getPPtrFromHWPTE pte" pattern *) +lemma pt_at_lift_relation: + "\ pte_relation' pte pte'; RISCV64_A.is_PageTablePTE pte \ \ + \s s'. (s, s') \ state_relation \ + (pspace_aligned s \ pspace_distinct s \ pt_at (pptr_from_pte pte) s) \ + \ s' \ + page_table_at' (getPPtrFromHWPTE pte') s'" + apply (cases pte; simp) + apply (simp add: getPPtrFromHWPTE_def pptr_from_pte_def addr_from_ppn_def pt_at_lift_eq) + done + +lemmas checkPTAt_corres_pte[corres] = + corres_stateAssert_r_cross[OF pt_at_lift_relation, folded checkPTAt_def] -lemmas checkPTAt_corres[corresK] = - corres_stateAssert_implied_frame[OF pt_at_lift, folded checkPTAt_def] +lemmas checkPTAt_corres_eq[corres] = + corres_stateAssert_r_cross[OF pt_at_lift_eq, folded checkPTAt_def] lemma lookupPTSlotFromLevel_inv: "lookupPTSlotFromLevel level pt_ptr vptr \P\" @@ -511,14 +530,14 @@ lemma pteAtIndex_corres: \ (get_pte (pt_slot_offset level pt vptr)) (pteAtIndex level' pt vptr)" - by (simp add: pteAtIndex_def) (fastforce intro: getObject_PTE_corres) + by (simp add: pteAtIndex_def getObject_PTE_corres) lemma user_region_or: "\ vref \ user_region; vref' \ user_region \ \ vref || vref' \ user_region" by (simp add: user_region_def canonical_user_def le_mask_high_bits word_size) -lemma lookupPTSlotFromLevel_corres: +lemma lookupPTSlotFromLevel_corres[corres]: "\ level' = size level; pt' = pt \ \ corres (\(level, p) (bits, p'). 
bits = pt_bits_left level \ p' = p) (pspace_aligned and pspace_distinct and valid_vspace_objs and valid_asid_table and @@ -633,7 +652,9 @@ lemma lookupPTSlot_corres: \ (gets_the (pt_lookup_slot pt vptr \ ptes_of)) (lookupPTSlot pt vptr)" unfolding lookupPTSlot_def pt_lookup_slot_def - by (corressimp corres: lookupPTSlotFromLevel_corres) + by corres + +declare RISCV64_A.pte.sel[datatype_schematic] lemma lookupPTFromLevel_corres: "\ level' = size level; pt' = pt \ \ @@ -655,6 +676,8 @@ proof (induct level arbitrary: level' pt pt') next case (minus level) + note minus.hyps(1)[corres] + (* FIXME: unfortunate duplication from lookupPTSlotFromLevel_corres *) from `0 < level` obtain nlevel where nlevel: "level = nlevel + 1" by (auto intro: that[of "level-1"]) @@ -707,29 +730,10 @@ next apply (subst lookupPTFromLevel.simps, subst pt_lookup_from_level_simps) apply (simp add: unlessE_whenE not_less) apply (rule corres_gen_asm, simp) - apply (rule corres_initial_splitE[where r'=dc]) - apply (corressimp simp: lookup_failure_map_def) - apply (rule corres_splitEE[where r'=pte_relation']) - apply (simp, rule getObject_PTE_corres, simp) - apply (rule whenE_throwError_corres) - apply (simp add: lookup_failure_map_def) + apply (corres simp: lookup_failure_map_def) apply (rename_tac pte pte', case_tac pte; simp add: isPageTablePTE_def) - apply (rule corres_if) - apply (clarsimp simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromHWPTE_def - addr_from_ppn_def) - apply (rule corres_returnOk[where P=\ and P'=\], rule refl) - apply (clarsimp simp: checkPTAt_def) - apply (subst liftE_bindE, rule corres_stateAssert_implied) - apply (rule minus.hyps) - apply (simp add: minus.hyps(2)) - apply (clarsimp simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromHWPTE_def - addr_from_ppn_def) - apply clarsimp - apply (rule page_table_at_cross; assumption?) - apply (drule vs_lookup_table_pt_at; simp?) - apply (clarsimp simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def getPPtrFromHWPTE_def - addr_from_ppn_def) - apply (simp add: state_relation_def) + apply (corres term_simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def + getPPtrFromHWPTE_def addr_from_ppn_def minus.hyps(2)) apply wpsimp+ apply (simp add: bit0.neq_0_conv) apply (frule (5) vs_lookup_table_is_aligned) @@ -742,17 +746,21 @@ next apply (simp add: bit_simps) apply (rule is_aligned_shiftl, simp) apply clarsimp - apply (rule_tac x=asid in exI) - apply (rule_tac x="vref_step vref" in exI) - apply (clarsimp simp: vs_lookup_table_def in_omonad split: if_split_asm) apply (rule conjI) - apply (clarsimp simp: level_defs) - apply (subst pt_walk_split_Some[where level'=level]; simp?) - apply (drule bit0.pred) - apply simp - apply (subst pt_walk.simps) - apply (simp add: in_omonad) - apply wpsimp + apply (rule_tac x=asid in exI) + apply (rule_tac x="vref_step vref" in exI) + apply (clarsimp simp: vs_lookup_table_def in_omonad split: if_split_asm) + apply (rule conjI) + apply (clarsimp simp: level_defs) + apply (subst pt_walk_split_Some[where level'=level]; simp?) 
+ apply (drule bit0.pred) + apply simp + apply (subst pt_walk.simps) + apply (simp add: in_omonad) + apply (drule (1) valid_vspace_objs_pte, fastforce) + apply (clarsimp simp: RISCV64_A.is_PageTablePTE_def pptr_from_pte_def + table_index_max_level_slots) + apply simp done qed @@ -775,22 +783,19 @@ lemma corres_gets_global_pt [corres]: apply (case_tac "riscvKSGlobalPTs (ksArchState s') maxPTLevel"; simp) done -lemmas storePTE_corres'[corres] = storePTE_corres[@lift_corres_args] - lemma copy_global_mappings_corres [@lift_corres_args, corres]: "corres dc (valid_global_arch_objs and pspace_aligned and pspace_distinct and pt_at pt) \ (copy_global_mappings pt) (copyGlobalMappings pt)" (is "corres _ ?apre _ _ _") unfolding copy_global_mappings_def copyGlobalMappings_def objBits_simps archObjSize_def pptr_base_def - apply corressimp - apply (rule_tac P="pt_at global_pt and ?apre" and P'="\" - in corresK_mapM_x[OF order_refl]) - apply (corressimp simp: objBits_def mask_def wp: get_pte_wp getPTE_wp)+ - apply (drule valid_global_arch_objs_pt_at) - apply (clarsimp simp: ptIndex_def ptBitsLeft_def maxPTLevel_def ptTranslationBits_def pageBits_def - pt_index_def pt_bits_left_def level_defs) - apply (fastforce intro!: page_table_pte_atI simp add: bit_simps word_le_nat_alt word_less_nat_alt) + apply corres + apply (rule_tac P="pt_at global_pt and ?apre" and P'="\" in corres_mapM_x[OF _ _ _ _ order_refl]) + apply (corres simp: ptIndex_def ptBitsLeft_def maxPTLevel_def ptTranslationBits_def pageBits_def + pt_index_def pt_bits_left_def level_defs + | fastforce dest!: valid_global_arch_objs_pt_at + intro!: page_table_pte_atI + simp: bit_simps word_le_nat_alt word_less_nat_alt)+ done lemma arch_cap_rights_update: @@ -899,7 +904,7 @@ lemma find_vspace_for_asid_rewite: apply (simp add: liftE_bindE bind_assoc exec_gets opt_map_def asid_low_bits_of_def) done -lemma findVSpaceForASID_corres: +lemma findVSpaceForASID_corres[corres]: assumes "asid' = ucast asid" shows "corres (lfr \ (=)) (valid_vspace_objs and valid_asid_table @@ -940,13 +945,7 @@ lemma findVSpaceForASID_corres: apply (simp add: mask_asid_low_bits_ucast_ucast asid_low_bits_of_def returnOk_def lookup_failure_map_def ucast_ucast_a is_down split: option.split) - apply clarsimp - apply (simp add: returnOk_liftE checkPTAt_def liftE_bindE) - apply (rule corres_stateAssert_implied[where P=\, simplified]) - apply simp - apply clarsimp - apply (rule page_table_at_cross; assumption?) 
- apply fastforce + apply (corres corres: corres_returnTT) apply (wpsimp wp: getObject_inv)+ apply (clarsimp simp: o_def) apply (rule conjI) diff --git a/proof/refine/RISCV64/Arch_R.thy b/proof/refine/RISCV64/Arch_R.thy index 60a85e8f16..c257a7a5b6 100644 --- a/proof/refine/RISCV64/Arch_R.thy +++ b/proof/refine/RISCV64/Arch_R.thy @@ -430,12 +430,7 @@ lemma checkSlot_corres: (check_slot p test) (checkSlot p test')" apply (simp add: check_slot_def checkSlot_def unlessE_whenE liftE_bindE) - apply (rule corres_guard_imp) - apply (rule corres_split[OF getObject_PTE_corres], simp) - apply (rule corres_whenE, simp) - apply (rule corres_trivial, simp) - apply simp - apply wpsimp+ + apply (corres corres: corres_throwErrorTT[of ser]) done lemma vmrights_map_vm_kernel_only[simp]: @@ -951,13 +946,13 @@ lemma performASIDControlInvocation_tcb_at': apply (rule hoare_name_pre_state) apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) - apply (wp static_imp_wp |simp add:placeNewObject_def2)+ - apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: projectKO_opts_defs) apply (strengthen st_tcb_strg' [where P=\]) apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] - deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ apply (case_tac ctea) apply (clarsimp) @@ -1201,7 +1196,7 @@ lemma performASIDControlInvocation_st_tcb_at': hoare_vcg_ex_lift deleteObjects_cte_wp_at' deleteObjects_invs_derivatives deleteObjects_st_tcb_at' - static_imp_wp + hoare_weak_lift_imp | simp add: placeNewObject_def2)+ apply (case_tac ctea) apply (clarsimp) @@ -1241,7 +1236,7 @@ crunch st_tcb_at' [wp]: "Arch.finaliseCap" "st_tcb_at' P t" lemma invs_asid_table_strengthen': "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ invs' (s\ksArchState := - riscvKSASIDTable_update (\_. (riscvKSASIDTable \ ksArchState) s(asid \ ap)) (ksArchState s)\)" + riscvKSASIDTable_update (\_. 
((riscvKSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" apply (clarsimp simp: invs'_def valid_dom_schedule'_def) apply (rule conjI) apply (clarsimp simp: valid_global_refs'_def global_refs'_def) @@ -1327,7 +1322,7 @@ lemma performASIDControlInvocation_invs' [wp]: updateFreeIndex_caps_no_overlap'' updateFreeIndex_descendants_of2 updateFreeIndex_caps_overlap_reserved - updateCap_cte_wp_at_cases static_imp_wp + updateCap_cte_wp_at_cases hoare_weak_lift_imp getSlotCap_wp)+ apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask | strengthen invs_valid_pspace' invs_pspace_aligned' invs_pspace_bounded' diff --git a/proof/refine/RISCV64/Bits_R.thy b/proof/refine/RISCV64/Bits_R.thy index f325e2eb99..5da9523bf4 100644 --- a/proof/refine/RISCV64/Bits_R.thy +++ b/proof/refine/RISCV64/Bits_R.thy @@ -275,6 +275,8 @@ where lfr_def[simp]: "lfr x y \ (y = lookup_failure_map x)" +lemmas corres_throwError_lfr[corres_term] = corres_throwErrorTT[of lfr] + text \Correspondence and weakest precondition rules for the "on failure" transformers\ diff --git a/proof/refine/RISCV64/CNodeInv_R.thy b/proof/refine/RISCV64/CNodeInv_R.thy index bf4d45c02f..c7bd31856b 100644 --- a/proof/refine/RISCV64/CNodeInv_R.thy +++ b/proof/refine/RISCV64/CNodeInv_R.thy @@ -4841,7 +4841,7 @@ lemma cteSwap_iflive'[wp]: simp only: if_live_then_nonz_cap'_def imp_conv_disj ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - hoare_vcg_ex_lift updateCap_cte_wp_at_cases static_imp_wp)+ + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -5711,7 +5711,7 @@ lemma cteSwap_cte_wp_cteCap: apply simp apply (wp hoare_drop_imps)[1] apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - getCTE_wp' hoare_vcg_all_lift static_imp_wp)+ + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ apply simp apply (clarsimp simp: o_def) done @@ -5725,7 +5725,7 @@ lemma capSwap_cte_wp_cteCap: apply(simp add: capSwapForDelete_def) apply(wp) apply(rule cteSwap_cte_wp_cteCap) - apply(wp getCTE_wp getCTE_cte_wp_at static_imp_wp)+ + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ apply(clarsimp) apply(rule conjI) apply(simp add: cte_at_cte_wp_atD) @@ -6293,7 +6293,7 @@ proof (induct arbitrary: P p rule: finalise_spec_induct2) clarsimp simp: pred_tcb_at'_def obj_at'_def) apply (case_tac "cteCap rv", simp_all add: isCap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp | simp | wp (once) isFinal[where x=sl])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ apply (wp getCTE_wp') apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule conjI, clarsimp simp: removeable'_def) @@ -7095,17 +7095,17 @@ next apply simp apply ((wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at set_cap_cte_cap_wp_to - hoare_vcg_const_Ball_lift static_imp_wp + hoare_vcg_const_Ball_lift hoare_weak_lift_imp | simp add: conj_comms)+)[1] apply (simp(no_asm_use)) apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - static_imp_wp)+ + hoare_weak_lift_imp)+ apply (elim conjE, strengthen subst[where P="cap_relation cap" for cap, mk_strg I _ E]) apply simp apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - static_imp_wp)+ + hoare_weak_lift_imp)+ apply clarsimp apply (drule_tac cap=a in cap_relation_removables, clarsimp, assumption+) @@ -7149,7 +7149,7 @@ next 
obj_at'_def)[1] apply (wpsimp wp: isFinal[where x="cte_map slot"] simp: is_final_cap_def) - apply (wpsimp wp: isFinalCapability_inv static_imp_wp isFinal + apply (wpsimp wp: isFinalCapability_inv hoare_weak_lift_imp isFinal simp: is_final_cap_def) apply (wpsimp wp: get_cap_wp) apply (wpsimp wp: getCTE_wp') @@ -7292,7 +7292,7 @@ next apply (rule updateCap_corres) apply simp apply (simp add: is_cap_simps) - apply (rule_tac Q="\rv. cte_at' (cte_map ?target)" in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (cte_map ?target)" in hoare_post_add) apply (wp, (wp getCTE_wp)+) apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule no_fail_pre, wp, simp) @@ -8449,7 +8449,7 @@ lemma cteMove_iflive'[wp]: ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_ex_lift updateCap_cte_wp_at_cases - getCTE_wp static_imp_wp)+ + getCTE_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -8627,7 +8627,7 @@ lemma cteMove_cte_wp_at: \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" unfolding cteMove_def apply (fold o_def) - apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp static_imp_wp|simp add: o_def)+ + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) done diff --git a/proof/refine/RISCV64/CSpace1_R.thy b/proof/refine/RISCV64/CSpace1_R.thy index aeee49c49e..763f0e66f7 100644 --- a/proof/refine/RISCV64/CSpace1_R.thy +++ b/proof/refine/RISCV64/CSpace1_R.thy @@ -807,7 +807,7 @@ lemma setCTE_tcb_in_cur_domain': done lemma setCTE_ctes_of_wp [wp]: - "\\s. P (ctes_of s (p \ cte))\ + "\\s. P ((ctes_of s) (p \ cte))\ setCTE p cte \\rv s. P (ctes_of s)\" by (simp add: setCTE_def ctes_of_setObject_cte) @@ -906,7 +906,7 @@ lemma cteInsert_weak_cte_wp_at: \\uu. cte_wp_at'(\c. P (cteCap c)) p\" unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def apply (simp add: bind_assoc split del: if_split) - apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at static_imp_wp | simp)+ + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ apply (wp getCTE_ctes_wp)+ apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ done diff --git a/proof/refine/RISCV64/CSpace_R.thy b/proof/refine/RISCV64/CSpace_R.thy index 764e212c74..95fa9880ab 100644 --- a/proof/refine/RISCV64/CSpace_R.thy +++ b/proof/refine/RISCV64/CSpace_R.thy @@ -2237,7 +2237,7 @@ proof - let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" let ?C = "(modify_map (modify_map - (modify_map (ctes_of s(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest (cteMDBNode_update (\a. MDB word1 src (isCapRevocable cap src_cap) (isCapRevocable cap src_cap)))) src (cteMDBNode_update (mdbNext_update (\_. dest)))) word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" @@ -2945,7 +2945,7 @@ lemma cteInsert_valid_irq_handlers'[wp]: done lemma setCTE_arch_ctes_of_wp [wp]: - "\\s. P (ksArchState s) (ctes_of s (p \ cte))\ + "\\s. P (ksArchState s) ((ctes_of s)(p \ cte))\ setCTE p cte \\rv s. 
P (ksArchState s) (ctes_of s)\" apply (simp add: setCTE_def ctes_of_setObject_cte) diff --git a/proof/refine/RISCV64/Detype_R.thy b/proof/refine/RISCV64/Detype_R.thy index ffdf27550f..545ab25de0 100644 --- a/proof/refine/RISCV64/Detype_R.thy +++ b/proof/refine/RISCV64/Detype_R.thy @@ -131,7 +131,7 @@ lemma deleteObjects_def2: then None else gsCNodes s x \); stateAssert ksASIDMapSafe [] od" - apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def) + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def) apply (rule bind_eqI, rule ext)+ apply (simp add: bind_assoc[symmetric]) apply (rule bind_cong[rotated], rule refl) @@ -2680,7 +2680,7 @@ lemma setCTE_pte_at': lemma storePTE_det: "ko_wp_at' ((=) (KOArch (KOPTE pte))) ptr s \ storePTE ptr (new_pte::pte) s = - modify (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPTE new_pte)))) s" + modify (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPTE new_pte)))) s" apply (clarsimp simp: ko_wp_at'_def storePTE_def split_def bind_def gets_def return_def get_def setObject_def @@ -2845,8 +2845,7 @@ lemma modify_obj_commute': lemma dmo_bounded'[wp]: "doMachineOp f \pspace_bounded'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) - apply clarsimp + apply wpsimp done lemma setCTE_doMachineOp_commute: diff --git a/proof/refine/RISCV64/Finalise_R.thy b/proof/refine/RISCV64/Finalise_R.thy index d9f47b9fc0..1e8c734c89 100644 --- a/proof/refine/RISCV64/Finalise_R.thy +++ b/proof/refine/RISCV64/Finalise_R.thy @@ -1283,7 +1283,7 @@ crunch gsMaxObjectSize[wp]: emptySlot "\s. P (gsMaxObjectSize s)" end lemma emptySlot_cteCaps_of: - "\\s. P (cteCaps_of s(p \ NullCap))\ + "\\s. P ((cteCaps_of s)(p \ NullCap))\ emptySlot p opt \\rv s. P (cteCaps_of s)\" apply (simp add: emptySlot_def case_Null_If) @@ -1527,8 +1527,8 @@ lemma arch_postCapDeletion_corres: lemma postCapDeletion_corres: "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) - apply (corressimp corres: deletedIRQHandler_corres) - by (corressimp corres: arch_postCapDeletion_corres) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) lemma set_cap_trans_state: "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" @@ -1586,7 +1586,7 @@ lemma emptySlot_corres: defer apply wpsimp+ apply (rule corres_no_failI) - apply (rule no_fail_pre, wp static_imp_wp) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (rule conjI, clarsimp) @@ -2834,7 +2834,7 @@ crunches finaliseCapTrue_standin, unbindNotification lemma cteDeleteOne_cteCaps_of: "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap)))\ + P ((cteCaps_of s)(p \ NullCap)))\ cteDeleteOne p \\rv s. P (cteCaps_of s)\" apply (simp add: cteDeleteOne_def unless_def split_def) @@ -3623,7 +3623,7 @@ crunches schedContextCancelYieldTo, tcbReleaseRemove lemma suspend_cte_wp_at': "suspend t \cte_wp_at' (\cte. P (cteCap cte)) p\" unfolding updateRestartPC_def suspend_def - apply (wpsimp wp: hoare_vcg_imp_lift hoare_disjI2[where Q="\_. cte_wp_at' a b" for a b]) + apply (wpsimp wp: hoare_vcg_imp_lift hoare_disjI2[where R="\_. cte_wp_at' a b" for a b]) done context begin interpretation Arch . 
(*FIXME: arch_split*) @@ -4036,7 +4036,7 @@ lemma cteDeleteOne_invs[wp]: apply (rule conjI) apply fastforce apply (fastforce dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def ko_wp_at'_def) - apply (wp isFinalCapability_inv getCTE_wp' static_imp_wp + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp | wp (once) isFinal[where x=ptr])+ apply (fastforce simp: cte_wp_at_ctes_of) done diff --git a/proof/refine/RISCV64/InterruptAcc_R.thy b/proof/refine/RISCV64/InterruptAcc_R.thy index d8f7060105..b383da585d 100644 --- a/proof/refine/RISCV64/InterruptAcc_R.thy +++ b/proof/refine/RISCV64/InterruptAcc_R.thy @@ -114,11 +114,11 @@ lemma updateTimeStamp_corres[corres]: apply (prop_tac "minBudget = MIN_BUDGET") apply (clarsimp simp: minBudget_def MIN_BUDGET_def kernelWCETTicks_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurTime_sp]) - apply corressimp + apply corresKsimp apply (rule corres_underlying_split[where r'="(=)"]) apply (rule corres_guard_imp) apply (rule corres_machine_op) - apply corressimp + apply corresKsimp apply (wpsimp simp: getCurrentTime_def) apply simp apply simp @@ -202,7 +202,7 @@ lemma scActive_corres: (scActive scPtr)" apply (rule corres_cross[where Q' = "sc_at' scPtr", OF sc_at'_cross_rel]) apply (fastforce simp: obj_at_def is_sc_obj_def valid_obj_def valid_pspace_def sc_at_pred_n_def) - apply (corressimp corres: get_sc_corres + apply (corresKsimp corres: get_sc_corres simp: sc_relation_def get_sc_active_def scActive_def active_sc_def) done @@ -251,7 +251,7 @@ lemma preemptionPoint_corres: supply if_split[split del] apply (simp add: preemption_point_def preemptionPoint_def) apply (rule corres_splitEE_skip; - corressimp corres: update_work_units_corres + corresKsimp corres: update_work_units_corres simp: update_work_units_def) apply (clarsimp simp: bindE_def liftE_def) apply (rule_tac Q'="\rv s. 
rv = ksWorkUnitsCompleted s \ ?conc s" in corres_symb_exec_r[rotated]) @@ -287,22 +287,22 @@ lemma preemptionPoint_corres: apply (rule corres_split_skip) apply (wpsimp simp: reset_work_units_def) apply (wpsimp simp: setWorkUnits_def) - apply (corressimp corres: setWorkUnits_corres) + apply (corresKsimp corres: setWorkUnits_corres) apply (rule corres_split_skip) apply wpsimp apply wpsimp - apply (corressimp corres: updateTimeStamp_corres) + apply (corresKsimp corres: updateTimeStamp_corres) apply (rule corres_split_skip) apply (wpsimp simp: cur_sc_tcb_def) apply wpsimp - apply (corressimp corres: corres_machine_op) + apply (corresKsimp corres: corres_machine_op) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule corres_underlying_split[rotated 2, OF gets_sp getConsumedTime_sp]) - apply (corressimp corres: getConsumedTime_corres) + apply (corresKsimp corres: getConsumedTime_corres) apply (clarsimp simp: andM_def ifM_def bind_assoc) apply (rule corres_underlying_split[rotated 2, OF get_sc_active_sp scActive_sp]) - apply (corressimp corres: scActive_corres) + apply (corresKsimp corres: scActive_corres) apply (fastforce dest: valid_objs_valid_sched_context_size simp: cur_sc_tcb_def obj_at_def is_sc_obj_def sc_at_pred_n_def) apply (clarsimp split: if_split) @@ -321,7 +321,7 @@ lemma preemptionPoint_corres: active_sc_def sc_valid_refills_def rr_valid_refills_def split: if_splits) apply simp - apply corressimp + apply corresKsimp apply (fastforce intro: corres_returnOkTT) apply (clarsimp split: if_split) apply (clarsimp split: if_split) diff --git a/proof/refine/RISCV64/Interrupt_R.thy b/proof/refine/RISCV64/Interrupt_R.thy index ad1703bc7f..541e4281fb 100644 --- a/proof/refine/RISCV64/Interrupt_R.thy +++ b/proof/refine/RISCV64/Interrupt_R.thy @@ -684,7 +684,7 @@ lemma handleInterrupt_corres: apply ((wp | simp)+) apply clarsimp apply fastforce - apply (corressimp corres: corres_machine_op setReprogramTimer_corres + apply (corresKsimp corres: corres_machine_op setReprogramTimer_corres simp: ackDeadlineIRQ_def ackInterrupt_def) by fastforce diff --git a/proof/refine/RISCV64/InvariantUpdates_H.thy b/proof/refine/RISCV64/InvariantUpdates_H.thy index 4a381fb7a1..358ba7831b 100644 --- a/proof/refine/RISCV64/InvariantUpdates_H.thy +++ b/proof/refine/RISCV64/InvariantUpdates_H.thy @@ -16,7 +16,7 @@ lemma ps_clear_domE[elim?]: lemma ps_clear_upd: "ksPSpace s y = Some v \ - ps_clear x n (ksPSpace_update (\a. ksPSpace s(y \ v')) s') = ps_clear x n s" + ps_clear x n (ksPSpace_update (\a. 
(ksPSpace s)(y \ v')) s') = ps_clear x n s" by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] diff --git a/proof/refine/RISCV64/IpcCancel_R.thy b/proof/refine/RISCV64/IpcCancel_R.thy index 1cb102e9cd..7800de9e03 100644 --- a/proof/refine/RISCV64/IpcCancel_R.thy +++ b/proof/refine/RISCV64/IpcCancel_R.thy @@ -1260,7 +1260,7 @@ lemma replyPop_corres: loadObject_default_def RISCV64_H.fromPPtr_def split: if_split_asm option.split_asm dest!: readObject_misc_ko_at') - apply (prop_tac "ksPSpace s'(rp \ + apply (prop_tac "(ksPSpace s')(rp \ KOReply (replyNext_update Map.empty reply)) = ksPSpace s'") apply (rule ext) @@ -2474,7 +2474,7 @@ crunches cancelSignal, cleanReply lemma tcbFault_update_valid_queues: "\ko_at' tcb t s; valid_queues s\ - \ valid_queues (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbFault_update Map.empty tcb))\)" + \ valid_queues (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbFault_update Map.empty tcb))\)" by (fastforce simp: valid_queues_def valid_queues_no_bitmap_def valid_bitmapQ_def bitmapQ_def bitmapQ_no_L2_orphans_def bitmapQ_no_L1_orphans_def obj_at'_def inQ_def objBitsKO_def) @@ -2547,7 +2547,7 @@ lemma (in delete_one_conc_pre) suspend_nonq: apply (simp add: suspend_def unless_def) unfolding updateRestartPC_def apply (wpsimp wp: hoare_allI tcbSchedDequeue_t_notksQ sts_ksQ_oaQ hoare_vcg_imp_lift - hoare_disjI2[where Q="\_. valid_queues"]) + hoare_disjI2[where R="\_. valid_queues"]) done lemma suspend_makes_inactive: @@ -2585,7 +2585,7 @@ lemma updateSchedContext_valid_tcbs'[wp]: lemma valid_refills'_tcbQueued_update[simp]: "scp \ t \ valid_refills' scp - (s\ksPSpace := ksPSpace s(t \ KOTCB (tcbQueued_update (\_. True) tcb))\) + (s\ksPSpace := (ksPSpace s)(t \ KOTCB (tcbQueued_update (\_. True) tcb))\) = valid_refills' scp s" by (clarsimp simp: valid_refills'_def opt_pred_def) @@ -2740,7 +2740,7 @@ lemma cancelAllIPC_corres_helper: st = Structures_A.thread_state.BlockedOnReceive ep r_opt pl") apply (clarsimp simp: when_def split: option.splits) apply (intro conjI impI allI; clarsimp simp: isReceive_def) - apply (corressimp corres: restart_thread_if_no_fault_corres) + apply (corresKsimp corres: restart_thread_if_no_fault_corres) apply (clarsimp simp: pred_tcb_at_def obj_at_def is_tcb valid_sched_def) apply (rule corres_guard_imp) apply (rule corres_split[OF replyUnlinkTcb_corres]) @@ -2757,7 +2757,7 @@ lemma cancelAllIPC_corres_helper: apply (case_tac st; clarsimp simp: isReceive_def) apply (case_tac st; clarsimp simp: isReceive_def; - (corressimp corres: restart_thread_if_no_fault_corres, + (corresKsimp corres: restart_thread_if_no_fault_corres, fastforce simp: obj_at_def)) apply (wpsimp wp: gts_wp) apply (wpsimp wp: gts_wp') diff --git a/proof/refine/RISCV64/Ipc_R.thy b/proof/refine/RISCV64/Ipc_R.thy index 018b43e740..24fa411ae8 100644 --- a/proof/refine/RISCV64/Ipc_R.thy +++ b/proof/refine/RISCV64/Ipc_R.thy @@ -326,7 +326,7 @@ lemma cteInsert_cte_wp_at: cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" apply (simp add: cteInsert_def) - apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp | clarsimp simp: comp_def | unfold setUntypedCapAsFull_def)+ apply (drule cte_at_cte_wp_atD) @@ -370,7 +370,7 @@ lemma cteInsert_weak_cte_wp_at3: else cte_wp_at' (\c. P (cteCap c)) p s\ cteInsert cap src dest \\uu. cte_wp_at' (\c. 
P (cteCap c)) p\" - by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp | clarsimp simp: comp_def cteInsert_def | unfold setUntypedCapAsFull_def | auto simp: cte_wp_at'_def dest!: imp)+ @@ -591,7 +591,7 @@ lemma cteInsert_cte_cap_to': apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of) apply (rule_tac x = "cref" in exI) apply (rule conjI) @@ -634,7 +634,7 @@ lemma cteInsert_weak_cte_wp_at2: apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of weak) apply auto done @@ -667,11 +667,11 @@ lemma transferCapsToSlots_presM: apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift | assumption | wpc)+ apply (rule cteInsert_assume_Null) - apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+ + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp deriveCap_derived_foo)+ apply (thin_tac "\slots. PROP P slots" for P) apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def @@ -1047,7 +1047,7 @@ lemma transferCaps_corres: apply (rule corres_rel_imp, rule transferCapsToSlots_corres, simp_all add: split_def)[1] apply (case_tac info, simp) - apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp | simp only: ball_conj_distrib)+ apply (simp add: cte_map_def tcb_cnode_index_def split_def) apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 @@ -1447,7 +1447,7 @@ lemma doNormalTransfer_corres: hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' hoare_vcg_const_Ball_lift lookupExtraCaps_length | simp add: if_apply_def2)+) - apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+ + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ apply clarsimp apply auto done @@ -1580,7 +1580,7 @@ lemma makeFaultMessage_corres: apply (wpsimp simp: sched_context_update_consumed_def setTimeArg_def)+ apply (fastforce dest!: valid_tcb_objs simp: valid_tcb_def valid_bound_obj_def obj_at_def) apply clarsimp - apply (corressimp corres: makeArchFaultMessage_corres) + apply (corresKsimp corres: makeArchFaultMessage_corres) done crunches makeFaultMessage @@ -2776,7 +2776,7 @@ context begin interpretation Arch . 
(*FIXME: arch_split*) crunches cancel_ipc for cur[wp]: "cur_tcb" and ntfn_at[wp]: "ntfn_at t" - (wp: select_wp crunch_wps simp: crunch_simps ignore: set_object) + (wp: crunch_wps simp: crunch_simps ignore: set_object) lemma valid_sched_weak_strg: "valid_sched s \ weak_valid_sched_action s" @@ -3625,7 +3625,7 @@ lemma possibleSwitchTo_ksQ': possibleSwitchTo t \\_ s. t' \ set (ksReadyQueues s p)\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs inReleaseQueue_def) - apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp + apply (wp hoare_weak_lift_imp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp | wpc | simp split del: if_split)+ apply (auto simp: obj_at'_def) @@ -4947,7 +4947,7 @@ lemma receiveSignal_corres: and tcb_at' thread and ntfn_at' cap_ntfn_ptr and valid_ntfn' rv' and ko_at' rv' cap_ntfn_ptr" in corres_underlying_split) - apply (corressimp corres: getNotification_corres + apply (corresKsimp corres: getNotification_corres simp: ntfn_at_def2 valid_cap_def st_tcb_at_tcb_at valid_cap'_def) defer apply (wpsimp wp: get_simple_ko_wp) @@ -4977,7 +4977,7 @@ lemma receiveSignal_corres: pred_tcb_at_def obj_at_def is_obj_defs split: if_split_asm option.splits)+ apply (fastforce simp: valid_tcb_state'_def) - apply (corressimp corres: doNBRecvFailedTransfer_corres) + apply (corresKsimp corres: doNBRecvFailedTransfer_corres) apply fastforce \ \WaitingNtfn\ apply (case_tac isBlocking; simp) @@ -5002,7 +5002,7 @@ lemma receiveSignal_corres: pred_tcb_at_def obj_at_def is_obj_defs split: if_split_asm option.splits)+ apply (fastforce simp: valid_tcb_state'_def valid_ntfn'_def) - apply (corressimp corres: doNBRecvFailedTransfer_corres) + apply (corresKsimp corres: doNBRecvFailedTransfer_corres) apply fastforce \ \ActiveNtfn\ apply (rule corres_guard_imp) @@ -5194,7 +5194,7 @@ lemma completeSignal_invs': apply (wpsimp wp: maybeDonateSc_invs') apply (clarsimp simp: obj_at'_def) apply (wpsimp wp: set_ntfn_minor_invs') - apply (wpsimp wp: hoare_vcg_ex_lift static_imp_wp simp: valid_ntfn'_def) + apply (wpsimp wp: hoare_vcg_ex_lift hoare_weak_lift_imp simp: valid_ntfn'_def) apply wpsimp apply clarsimp apply (intro conjI impI) diff --git a/proof/refine/RISCV64/KHeap_R.thy b/proof/refine/RISCV64/KHeap_R.thy index c6ab318aa6..4373944f5a 100644 --- a/proof/refine/RISCV64/KHeap_R.thy +++ b/proof/refine/RISCV64/KHeap_R.thy @@ -956,7 +956,7 @@ lemma max_word_minus_1[simp]: "0xFFFFFFFFFFFFFFFF + 2^x = (2^x - 1::64 word)" by simp lemma ctes_of'_after_update: - "ko_wp_at' (same_caps' val) p s \ ctes_of (s\ksPSpace := ksPSpace s(p \ val)\) x = ctes_of s x" + "ko_wp_at' (same_caps' val) p s \ ctes_of (s\ksPSpace := (ksPSpace s)(p \ val)\) x = ctes_of s x" apply (clarsimp simp only: ko_wp_at'_def map_to_ctes_def Let_def) apply (rule if_cong) apply (cases val; fastforce split: if_splits) @@ -969,7 +969,7 @@ lemma ctes_of'_after_update: lemma ex_cap_to'_after_update: "\ ex_nonz_cap_to' p s; ko_wp_at' (same_caps' val) p' s \ - \ ex_nonz_cap_to' p (s\ksPSpace := ksPSpace s(p' \ val)\)" + \ ex_nonz_cap_to' p (s\ksPSpace := (ksPSpace s)(p' \ val)\)" unfolding ex_nonz_cap_to'_def cte_wp_at_ctes_of using ctes_of'_after_update by fastforce @@ -1232,7 +1232,7 @@ lemma obj_relation_cut_same_type: lemma replyNexts_of_non_reply_update: "\s'. 
\typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ ReplyT \ - \ replyNexts_of (s'\ksPSpace := ksPSpace s'(ptr \ ko)\) = replyNexts_of s'" + \ replyNexts_of (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = replyNexts_of s'" by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs split: kernel_object.splits) @@ -1246,14 +1246,14 @@ lemma replyNexts_of_replyNext_same_update: "\s'. \typ_at' ReplyT ptr s'; ksPSpace s' ptr = Some ko; koTypeOf (injectKO (ob':: 'a :: pspace_storable)) = ReplyT; projectKO_opt ko = Some ab; replyNext_same (ob':: 'a) ab\ - \ replyNexts_of (s'\ksPSpace := ksPSpace s'(ptr \ injectKO ob')\) = replyNexts_of s'" + \ replyNexts_of (s'\ksPSpace := (ksPSpace s')(ptr \ injectKO ob')\) = replyNexts_of s'" apply (cases "injectKO ob'"; clarsimp simp: typ_at'_def ko_wp_at'_def) by (cases ko; fastforce simp add: replyNext_same_def project_inject projectKO_opts_defs opt_map_def) lemma replyPrevs_of_non_reply_update: "\s'. \typ_at' (koTypeOf ko) ptr s'; koTypeOf ko \ ReplyT \ - \ replyPrevs_of (s'\ksPSpace := ksPSpace s'(ptr \ ko)\) = replyPrevs_of s'" + \ replyPrevs_of (s'\ksPSpace := (ksPSpace s')(ptr \ ko)\) = replyPrevs_of s'" by (fastforce simp: typ_at'_def ko_wp_at'_def opt_map_def projectKO_opts_defs split: kernel_object.splits) @@ -1267,7 +1267,7 @@ lemma replyPrevs_of_replyPrev_same_update: "\s'. \typ_at' ReplyT ptr s'; ksPSpace s' ptr = Some ko; koTypeOf (injectKO (ob':: 'a :: pspace_storable)) = ReplyT; projectKO_opt ko = Some ab; replyPrev_same (ob':: 'a) ab\ - \ replyPrevs_of (s'\ksPSpace := ksPSpace s'(ptr \ injectKO ob')\) = replyPrevs_of s'" + \ replyPrevs_of (s'\ksPSpace := (ksPSpace s')(ptr \ injectKO ob')\) = replyPrevs_of s'" apply (cases "injectKO ob'"; clarsimp simp: typ_at'_def ko_wp_at'_def) by (cases ko; fastforce simp add: replyPrev_same_def project_inject projectKO_opts_defs opt_map_def) @@ -1347,8 +1347,8 @@ lemma setEndpoint_corres: corres dc (ep_at ptr) (ep_at' ptr) (set_endpoint ptr e) (setEndpoint ptr e')" apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) lemma setNotification_corres: @@ -1356,8 +1356,8 @@ lemma setNotification_corres: corres dc (ntfn_at ptr) (ntfn_at' ptr) (set_notification ptr ae) (setNotification ptr ae')" apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) lemma reply_at'_cross: @@ -3028,21 +3028,21 @@ lemmas valid_globals_cte_wpD'_idleSC = valid_globals_cte_wpD'[OF _ _ idle_sc_is_ lemma dmo_aligned'[wp]: "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_distinct'[wp]: "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_valid_objs'[wp]: "\valid_objs'\ doMachineOp f \\_. 
valid_objs'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done @@ -3050,7 +3050,7 @@ lemma dmo_inv': assumes R: "\P. \P\ f \\_. P\" shows "\P\ doMachineOp f \\_. P\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp apply (drule in_inv_by_hoareD [OF R]) apply simp @@ -3150,14 +3150,14 @@ lemma obj_relation_cuts_obj_bits: lemma typ_at'_same_type: assumes "typ_at' T p s" "koTypeOf k = koTypeOf ko" "objBitsKO k = objBitsKO ko" "ksPSpace s p' = Some ko" - shows "typ_at' T p (s\ksPSpace :=ksPSpace s(p' \ k)\)" + shows "typ_at' T p (s\ksPSpace :=(ksPSpace s)(p' \ k)\)" using assms by (clarsimp simp: typ_at'_def ko_wp_at'_def ps_clear_upd) lemma cte_at'_same_type: "\cte_wp_at' \ t s; koTypeOf k = koTypeOf ko;objBitsKO k = objBitsKO ko; ksPSpace s p = Some ko\ - \ cte_wp_at' \ t (s\ksPSpace := ksPSpace s(p \ k)\)" + \ cte_wp_at' \ t (s\ksPSpace := (ksPSpace s)(p \ k)\)" apply (simp add: cte_at_typ' typ_at'_same_type) apply (elim exE disjE) apply (rule disjI1, clarsimp simp: typ_at'_same_type) @@ -3166,7 +3166,7 @@ lemma cte_at'_same_type: lemma valid_ep'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some (KOEndpoint obj) \ - \ valid_ep' obj (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_ep' obj (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (erule (1) valid_objsE') apply (fastforce simp: valid_objs'_def valid_obj'_def obj_at'_def projectKOs valid_ep'_def split: endpoint.splits) @@ -3174,7 +3174,7 @@ lemma valid_ep'_ep_update: lemma valid_cap'_ep_update: "\ valid_cap' cap s; valid_objs' s; valid_ep' ep s; ep_at' epPtr s \ - \ valid_cap' cap (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_cap' cap (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" supply ps_clear_upd[simp] apply (clarsimp simp: typ_at'_same_type ko_wp_at'_def cte_at'_same_type valid_cap'_def obj_at'_def projectKOs objBits_simps @@ -3198,7 +3198,7 @@ lemma valid_cap'_ep_update: lemma valid_cap'_reply_update: "\ valid_cap' cap s; valid_objs' s; valid_reply' reply s; reply_at' rptr s \ - \ valid_cap' cap (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_cap' cap (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" supply ps_clear_upd[simp] apply (clarsimp simp: typ_at'_same_type ko_wp_at'_def cte_at'_same_type valid_cap'_def obj_at'_def projectKOs objBits_simps @@ -3220,7 +3220,7 @@ lemma valid_cap'_reply_update: lemma valid_tcb_state'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some (KOTCB obj) \ - \ valid_tcb_state' (tcbState obj) (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_tcb_state' (tcbState obj) (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (rule valid_objsE', simp, simp) by (fastforce simp: typ_at'_same_type ps_clear_upd objBits_simps valid_obj'_def projectKOs valid_tcb_state'_def valid_bound_obj'_def valid_tcb'_def obj_at'_def @@ -3228,7 +3228,7 @@ lemma valid_tcb_state'_ep_update: lemma valid_tcb_state'_reply_update: "\ valid_objs' s; valid_reply' reply s; reply_at' rptr s; ksPSpace s x = Some (KOTCB obj) \ - \ valid_tcb_state' (tcbState obj) (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_tcb_state' (tcbState obj) (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" apply (rule valid_objsE', simp, simp) by (fastforce simp: typ_at'_same_type ps_clear_upd objBits_simps valid_obj'_def projectKOs valid_bound_obj'_def valid_tcb'_def valid_tcb_state'_def obj_at'_def @@ -3236,7 
+3236,7 @@ lemma valid_tcb_state'_reply_update: lemma valid_tcb'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some (KOTCB obj) \ - \ valid_tcb' obj (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_tcb' obj (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (rule valid_objsE', simp, simp) by (fastforce simp: typ_at'_same_type ps_clear_upd objBits_simps valid_obj'_def projectKOs valid_bound_obj'_def valid_tcb'_def obj_at'_def valid_tcb_state'_ep_update @@ -3247,7 +3247,7 @@ end lemma valid_obj'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s; ksPSpace s x = Some obj\ - \ valid_obj' obj (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_obj' obj (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (rule valid_objsE', simp, simp) by (cases obj; clarsimp simp: typ_at'_same_type valid_obj'_def obj_at'_def ps_clear_upd @@ -3259,7 +3259,7 @@ lemma valid_obj'_ep_update: lemma valid_obj'_reply_update: "\ valid_objs' s; valid_reply' reply s; reply_at' rptr s; ksPSpace s x = Some obj \ - \ valid_obj' obj (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_obj' obj (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" apply (rule valid_objsE', simp, simp) apply (cases obj; clarsimp simp: valid_obj'_def) apply (fastforce simp: valid_ep'_def obj_at'_def projectKOs split: endpoint.split) @@ -3278,7 +3278,7 @@ lemma valid_obj'_reply_update: lemma valid_objs'_ep_update: "\ valid_objs' s; valid_ep' ep s; ep_at' epPtr s \ - \ valid_objs' (s\ksPSpace := ksPSpace s(epPtr \ KOEndpoint ep)\)" + \ valid_objs' (s\ksPSpace := (ksPSpace s)(epPtr \ KOEndpoint ep)\)" apply (clarsimp simp: valid_objs'_def obj_at'_def projectKOs) apply (erule ranE) apply (clarsimp simp: ps_clear_upd split: if_split_asm) @@ -3290,7 +3290,7 @@ lemma valid_objs'_ep_update: lemma valid_objs'_reply_update: "\ valid_objs' s; valid_reply' reply s; reply_at' rptr s \ - \ valid_objs' (s\ksPSpace := ksPSpace s(rptr \ KOReply reply)\)" + \ valid_objs' (s\ksPSpace := (ksPSpace s)(rptr \ KOReply reply)\)" apply (clarsimp simp: valid_objs'_def obj_at'_def projectKOs) apply (erule ranE) apply (clarsimp split: if_split_asm) @@ -3304,14 +3304,14 @@ lemma valid_release_queue_ksPSpace_update: "\valid_release_queue s; ko_wp_at' (\ko'. koTypeOf ko' = koTypeOf ko \ objBitsKO ko' = objBitsKO ko) ptr s; koTypeOf ko \ TCBT\ \ - valid_release_queue (s\ksPSpace := ksPSpace s(ptr \ ko)\)" + valid_release_queue (s\ksPSpace := (ksPSpace s)(ptr \ ko)\)" by (fastforce simp: valid_release_queue_def ko_wp_at'_def obj_at'_def projectKOs ps_clear_upd) lemma valid_release_queue'_ksPSpace_update: "\valid_release_queue' s; ko_wp_at' (\ko'. koTypeOf ko' = koTypeOf ko \ objBitsKO ko' = objBitsKO ko) ptr s; koTypeOf ko \ TCBT\ \ - valid_release_queue' (s\ksPSpace := ksPSpace s(ptr \ ko)\)" + valid_release_queue' (s\ksPSpace := (ksPSpace s)(ptr \ ko)\)" by (fastforce simp: valid_release_queue'_def ko_wp_at'_def obj_at'_def projectKOs ps_clear_upd) lemma sym_ref_Receive_or_Reply_replyTCB': @@ -4259,7 +4259,7 @@ lemma state_relation_sc_update: | _ \ hp' ptr else hp' p) (ksPSpace s)) = map_to_ctes (ksPSpace s)" by (clarsimp simp: obj_at_simps fun_upd_def[symmetric]) have z: "\s sc'::sched_context. 
ko_at' sc' ptr s - \ map_to_ctes (ksPSpace s(ptr \ KOSchedContext (f' sc'))) = map_to_ctes (ksPSpace s)" + \ map_to_ctes ((ksPSpace s)(ptr \ KOSchedContext (f' sc'))) = map_to_ctes (ksPSpace s)" by (clarsimp simp: obj_at_simps) assume H: "(s, s') \ state_relation" "P s" "P' s'" "sc_at ptr s" "sc_at' ptr s'" show ?thesis @@ -4327,7 +4327,7 @@ qed (* update wp rules without ko_at' *) lemma updateSchedContext_wp: "\ \s. sc_at' sc_ptr s \ - Q (s\ksPSpace := ksPSpace s(sc_ptr \ KOSchedContext (f' (the (scs_of' s sc_ptr))))\) \ + Q (s\ksPSpace := (ksPSpace s)(sc_ptr \ KOSchedContext (f' (the (scs_of' s sc_ptr))))\) \ updateSchedContext sc_ptr f' \ \rv. Q \" by (wpsimp simp: updateSchedContext_def wp: set_sc'.set_wp) @@ -4453,18 +4453,18 @@ lemma getSchedContext_setSchedContext_decompose: apply simp+ apply (rename_tac s'; erule disjE; clarsimp?) - apply (drule_tac Q2="\s'. s' = (s\ksPSpace := ksPSpace s(scPtr \ injectKO (f sc))\)" + apply (drule_tac Q2="\s'. s' = (s\ksPSpace := (ksPSpace s)(scPtr \ injectKO (f sc))\)" in use_valid[OF _ setObject_sc_wp]) apply simp+ - apply (prop_tac "sc_at' scPtr (s\ksPSpace := ksPSpace s(scPtr \ KOSchedContext (f sc))\)") + apply (prop_tac "sc_at' scPtr (s\ksPSpace := (ksPSpace s)(scPtr \ KOSchedContext (f sc))\)") apply (clarsimp simp: obj_at'_def projectKOs objBits_simps' ps_clear_upd) - apply (frule_tac s="s\ksPSpace := ksPSpace s(scPtr \ KOSchedContext (f sc))\" + apply (frule_tac s="s\ksPSpace := (ksPSpace s)(scPtr \ KOSchedContext (f sc))\" in no_failD[OF no_fail_getMiscObject(4)]) apply clarsimp apply (rename_tac s') - apply (drule_tac Q2="\s'. s' = (s\ksPSpace := ksPSpace s(scPtr \ injectKO (f sc))\)" + apply (drule_tac Q2="\s'. s' = (s\ksPSpace := (ksPSpace s)(scPtr \ injectKO (f sc))\)" in use_valid[OF _ setObject_sc_wp]) apply simp+ @@ -4472,7 +4472,7 @@ lemma getSchedContext_setSchedContext_decompose: apply (drule use_valid[OF _ get_sc_ko'], simp) apply (clarsimp simp: obj_at'_def projectKOs) apply (prop_tac "obj_at' (\k. objBits k = objBits (g (f sc))) scPtr - (s\ksPSpace := ksPSpace s(scPtr \ KOSchedContext (f sc))\)") + (s\ksPSpace := (ksPSpace s)(scPtr \ KOSchedContext (f sc))\)") apply (clarsimp simp: obj_at'_def projectKOs projectKO_opt_sc) apply (rule_tac x="f sc" in exI, clarsimp simp: projectKO_opt_sc) apply (drule_tac ob1="g (f sc)" in no_failD[OF no_fail_setObject_other, rotated]) @@ -4554,7 +4554,7 @@ lemma updateSchedContext_corres_gen: apply (rule conjI, clarsimp) apply (erule use_valid[OF _ updateSchedContext_wp]) apply clarsimp - apply (rule_tac x="((), s\kheap := kheap s(ptr \ + apply (rule_tac x="((), s\kheap := (kheap s)(ptr \ kernel_object.SchedContext (f sc) n)\)" in bexI) apply clarsimp apply (drule state_relation_sc_update[OF R1 R2 sz, simplified]) diff --git a/proof/refine/RISCV64/LevityCatch.thy b/proof/refine/RISCV64/LevityCatch.thy index 4a95bc2a6d..9ed1c7eda7 100644 --- a/proof/refine/RISCV64/LevityCatch.thy +++ b/proof/refine/RISCV64/LevityCatch.thy @@ -8,6 +8,7 @@ theory LevityCatch imports "BaseRefine.Include" "Lib.LemmaBucket" + "Lib.Corres_Method" begin (* Try again, clagged from Include *) diff --git a/proof/refine/RISCV64/Refine.thy b/proof/refine/RISCV64/Refine.thy index 7535f14a80..5ffd4b6237 100644 --- a/proof/refine/RISCV64/Refine.thy +++ b/proof/refine/RISCV64/Refine.thy @@ -230,8 +230,8 @@ lemma kernel_entry_invs_det_ext: kernel_entry e us \\_ s :: det_state. 
invs s \ (ct_running s \ ct_idle s)\" apply (simp add: kernel_entry_def) - apply (wp akernel_invs_det_ext thread_set_invs_trivial thread_set_ct_in_state select_wp - static_imp_wp hoare_vcg_disj_lift hoare_vcg_imp_lift' + apply (wp akernel_invs_det_ext thread_set_invs_trivial thread_set_ct_in_state + hoare_weak_lift_imp hoare_vcg_disj_lift hoare_vcg_imp_lift' | clarsimp simp add: tcb_cap_cases_def)+ done @@ -246,7 +246,7 @@ lemma kernel_entry_valid_sched: \\_. valid_sched :: det_state \ _\" apply (simp add: kernel_entry_def) apply (wp call_kernel_valid_sched thread_set_invs_trivial thread_set_ct_in_state - static_imp_wp hoare_vcg_disj_lift thread_set_not_state_valid_sched + hoare_weak_lift_imp hoare_vcg_disj_lift thread_set_not_state_valid_sched | clarsimp simp add: tcb_cap_cases_def)+ done @@ -280,7 +280,7 @@ lemma kernel_entry_invs: apply (clarsimp simp: kernel_entry_def) apply (wp thread_set_invs_trivial thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state - static_imp_wp call_kernel_cur_sc_offset_ready_and_sufficient + hoare_weak_lift_imp call_kernel_cur_sc_offset_ready_and_sufficient | clarsimp simp: tcb_cap_cases_def)+ apply (rule hoare_vcg_conj_lift_pre_fix) apply (wpsimp wp: kernel_entry_valid_sched) @@ -293,20 +293,20 @@ lemma kernel_entry_invs: apply (rule hoare_vcg_conj_lift_pre_fix) apply (clarsimp simp: kernel_entry_def) apply (wp thread_set_invs_trivial thread_set_not_state_valid_sched hoare_vcg_disj_lift - ct_in_state_thread_state_lift thread_set_no_change_tcb_state static_imp_wp + ct_in_state_thread_state_lift thread_set_no_change_tcb_state hoare_weak_lift_imp call_kernel_schact_is_rct[unfolded schact_is_rct_def] | clarsimp simp: tcb_cap_cases_def)+ apply (rule hoare_vcg_conj_lift_pre_fix) apply (clarsimp simp: kernel_entry_def) apply (wp thread_set_invs_trivial thread_set_not_state_valid_sched hoare_vcg_disj_lift - ct_in_state_thread_state_lift thread_set_no_change_tcb_state static_imp_wp + ct_in_state_thread_state_lift thread_set_no_change_tcb_state hoare_weak_lift_imp call_kernel_cur_sc_active | clarsimp simp: tcb_cap_cases_def)+ apply (rule hoare_vcg_conj_lift_pre_fix) apply (clarsimp simp: kernel_entry_def) apply (wp thread_set_invs_trivial thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state - static_imp_wp call_kernel_ct_not_in_release_q + hoare_weak_lift_imp call_kernel_ct_not_in_release_q | clarsimp simp: tcb_cap_cases_def)+ apply (rule hoare_vcg_conj_lift_pre_fix) apply (clarsimp simp: kernel_entry_def) @@ -338,7 +338,6 @@ crunches do_user_op, check_active_irq and cur_sc_offset_ready[wp]: "\s. cur_sc_offset_ready (consumed_time s) s" and cur_sc_offset_sufficient[wp]: "\s. 
cur_sc_offset_sufficient (consumed_time s) s" and consumed_time_bounded[wp]: consumed_time_bounded - (wp: select_wp) lemma device_update_valid_machine_time[wp]: "do_machine_op (device_memory_update ds) \valid_machine_time\" @@ -355,7 +354,7 @@ lemma user_memory_update_valid_machine_time[wp]: lemma do_user_op_valid_machine_time[wp]: "do_user_op f tc \valid_machine_time\" apply (simp add: do_user_op_def) - apply (wpsimp wp: select_wp) + apply wpsimp done lemma check_active_irq_valid_machine_time[wp]: @@ -560,13 +559,13 @@ lemma kernelEntry_invs': apply (clarsimp simp: obj_at'_tcb_scs_of_equiv obj_at'_sc_tcbs_of_equiv sym_heap_def) apply (fastforce simp: ct_in_state'_def pred_tcb_at'_def obj_at'_def) apply (simp add: kernelEntry_def) - apply (wpsimp wp: ckernel_invs threadSet_invs_trivial - threadSet_ct_in_state' static_imp_wp hoare_vcg_disj_lift threadSet_sym_heap_tcbSCs + apply (wpsimp wp: ckernel_invs threadSet_invs_trivial threadSet_ct_in_state' + hoare_weak_lift_imp hoare_vcg_disj_lift threadSet_sym_heap_tcbSCs | wps)+ apply (rule hoare_vcg_conj_lift) apply (wpsimp wp: threadSet_wp) apply (wpsimp wp: threadSet_invs_trivial; simp?) - apply (wpsimp wp: threadSet_ct_running' static_imp_wp)+ + apply (wpsimp wp: threadSet_ct_running' hoare_weak_lift_imp)+ apply (fastforce simp: obj_at'_def pred_map_def opt_map_red) done @@ -624,7 +623,7 @@ lemma doUserOp_invs': (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and valid_domain_list'\" apply (simp add: doUserOp_def split_def ex_abs_def) - apply (wp device_update_invs' doMachineOp_ct_in_state' select_wp + apply (wp device_update_invs' doMachineOp_ct_in_state' | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def user_memory_update_def))+ apply (clarsimp simp: user_memory_update_def simpler_modify_def @@ -1007,7 +1006,7 @@ lemma entry_corres: apply wpsimp apply (wp thread_set_invs_trivial threadSet_invs_trivial threadSet_ct_running' - select_wp thread_set_not_state_valid_sched static_imp_wp + thread_set_not_state_valid_sched hoare_weak_lift_imp hoare_vcg_disj_lift ct_in_state_thread_state_lift | simp add: tcb_cap_cases_def ct_in_state'_def thread_set_no_change_tcb_state | (wps, wp threadSet_st_tcb_at2) )+ @@ -1172,7 +1171,7 @@ lemma domain_list_rel_eq: by (clarsimp simp: state_relation_def) crunch valid_objs': doUserOp, checkActiveIRQ valid_objs' - (wp: crunch_wps select_wp) + (wp: crunch_wps) lemma ct_running_cross: "\(a,c) \ state_relation; ct_running a; pspace_aligned a; pspace_distinct a\ \ ct_running' c" diff --git a/proof/refine/RISCV64/Reply_R.thy b/proof/refine/RISCV64/Reply_R.thy index 8be0447d5c..81442c7557 100644 --- a/proof/refine/RISCV64/Reply_R.thy +++ b/proof/refine/RISCV64/Reply_R.thy @@ -860,7 +860,7 @@ lemma updateReply_obj_at'_inv: "\x. P (f x) = P x \ updateReply rPtr f \\s. 
Q (obj_at' (P :: reply \ bool) rp s)\" apply (wpsimp wp: updateReply_wp_all) - apply (subgoal_tac "obj_at' P rp s = (obj_at' P rp (s\ksPSpace := ksPSpace s(rPtr \ KOReply (f ko))\))") + apply (subgoal_tac "obj_at' P rp s = (obj_at' P rp (s\ksPSpace := (ksPSpace s)(rPtr \ KOReply (f ko))\))") apply simp by (force simp: obj_at'_real_def ko_wp_at'_def objBitsKO_def ps_clear_def) @@ -1037,7 +1037,7 @@ end lemma replyPrevs_of_replyNext_update: "ko_at' reply' rp s' \ - replyPrevs_of (s'\ksPSpace := ksPSpace s'(rp \ + replyPrevs_of (s'\ksPSpace := (ksPSpace s')(rp \ KOReply (reply' \ replyNext := v \))\) = replyPrevs_of s'" apply (clarsimp simp: obj_at'_def isNext_def split: option.split_asm reply_next.split_asm) @@ -1045,7 +1045,7 @@ lemma replyPrevs_of_replyNext_update: lemma scs_of'_reply_update: "reply_at' rp s' \ - scs_of' (s'\ksPSpace := ksPSpace s'(rp \ KOReply reply)\) = scs_of' s'" + scs_of' (s'\ksPSpace := (ksPSpace s')(rp \ KOReply reply)\) = scs_of' s'" apply (clarsimp simp: obj_at'_def isNext_def split: option.split_asm reply_next.split_asm) by (fastforce simp: projectKO_opt_sc opt_map_def) diff --git a/proof/refine/RISCV64/Retype_R.thy b/proof/refine/RISCV64/Retype_R.thy index 1a5ec73513..16a3f41d3b 100644 --- a/proof/refine/RISCV64/Retype_R.thy +++ b/proof/refine/RISCV64/Retype_R.thy @@ -2544,7 +2544,6 @@ lemmas object_splits = declare hoare_in_monad_post[wp del] declare univ_get_wp[wp del] -declare result_in_set_wp[wp del] crunch valid_arch_state'[wp]: copyGlobalMappings "valid_arch_state'" (wp: crunch_wps) @@ -4627,7 +4626,7 @@ proof - apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) apply (rule hoare_pre) apply (wps a b c d) - apply (wp static_imp_wp e' hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) apply (auto simp: obj_at'_def ct_in_state'_def st_tcb_at'_def) done qed diff --git a/proof/refine/RISCV64/SchedContextInv_R.thy b/proof/refine/RISCV64/SchedContextInv_R.thy index de1a30c8b2..9aa1fc6ec6 100644 --- a/proof/refine/RISCV64/SchedContextInv_R.thy +++ b/proof/refine/RISCV64/SchedContextInv_R.thy @@ -174,25 +174,25 @@ lemma decodeSchedcontext_Bind_corres: apply (rename_tac cap list) apply (cases excaps'; clarsimp) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) apply (case_tac cap; clarsimp) apply (clarsimp simp: bindE_assoc) apply (rule corres_splitEE_skip; (solves wpsimp)?) - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (rule corres_splitEE_forwards'[where r'="(=)"]; (solves wpsimp)?) - apply (corressimp corres: getNotification_corres + apply (corresKsimp corres: getNotification_corres simp: get_sk_obj_ref_def ntfn_relation_def valid_cap_def valid_cap'_def wp: hoare_vcg_all_lift) apply (rule corres_splitEE_skip; (solves wpsimp)?) - apply (corressimp corres: getNotification_corres + apply (corresKsimp corres: getNotification_corres simp: get_sk_obj_ref_def sc_relation_def) apply (clarsimp simp: returnOk_def) apply (clarsimp simp: bindE_assoc get_tcb_obj_ref_def) apply (rule corres_splitEE_skip; (solves wpsimp)?) 
- apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (rule corres_splitEE_forwards'[where r'="(=)"]) apply (subst corres_liftE_rel_sum) apply (rule corres_guard_imp) @@ -203,7 +203,7 @@ lemma decodeSchedcontext_Bind_corres: apply (rule liftE_validE[THEN iffD2, OF thread_get_sp]) apply (rule liftE_validE[THEN iffD2, OF threadGet_sp]) apply (rule corres_splitEE_skip; (solves \wpsimp simp: valid_cap'_def obj_at'_def\)?) - apply (corressimp corres: getNotification_corres + apply (corresKsimp corres: getNotification_corres simp: get_sk_obj_ref_def sc_relation_def) apply (rule corres_guard_imp) apply (rule corres_split_eqrE) @@ -237,25 +237,25 @@ lemma decodeSchedContext_UnbindObject_corres: apply (case_tac cap; clarsimp) apply (clarsimp simp: bindE_assoc get_sc_obj_ref_def liftE_bind_return_bindE_returnOk) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (clarsimp simp: bindE_assoc get_sc_obj_ref_def liftE_bind_return_bindE_returnOk) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) apply (rule corres_splitEE_forwards') - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (rule whenE_throwError_sp[simplified validE_R_def])+ apply (rule corres_splitEE_forwards') - apply (corressimp corres: getCurThread_corres) + apply (corresKsimp corres: getCurThread_corres) apply (rule liftE_validE[THEN iffD2, OF gets_sp]) apply (rule liftE_validE[THEN iffD2, OF getCurThread_sp]) - apply corressimp + apply corresKsimp done lemma decodeSchedContext_YieldTo_corres: @@ -268,19 +268,19 @@ lemma decodeSchedContext_YieldTo_corres: apply (clarsimp simp: decode_sched_context_yield_to_def decodeSchedContext_YieldTo_def) apply (clarsimp simp: bindE_assoc get_sc_obj_ref_def liftE_bind_return_bindE_returnOk) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) apply (rule corres_splitEE_forwards') - apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (rule whenE_throwError_sp[simplified validE_R_def])+ apply (rule corres_splitEE_forwards') - apply (corressimp corres: getCurThread_corres) + apply (corresKsimp corres: getCurThread_corres) apply (rule liftE_validE[THEN iffD2, OF gets_sp]) apply (rule liftE_validE[THEN iffD2, OF getCurThread_sp]) apply (rule corres_splitEE_skip; (solves wpsimp)?) 
- apply (corressimp simp: sc_relation_def) + apply (corresKsimp simp: sc_relation_def) apply (clarsimp simp: sc_relation_def) apply (rule corres_splitEE_forwards'[where r'="(=)"]) apply (subst corres_liftE_rel_sum) @@ -302,7 +302,7 @@ lemma decodeSchedContext_YieldTo_corres: apply (fastforce simp: cur_tcb'_def) apply (rule liftE_validE[THEN iffD2, OF thread_get_sp]) apply (rule liftE_validE[THEN iffD2, OF threadGet_sp]) - apply (rule corres_splitEE_skip; corressimp; fastforce?) + apply (rule corres_splitEE_skip; corresKsimp; fastforce?) apply (rule corres_splitEE_forwards'[where r'="(=)"]) apply (subst corres_liftE_rel_sum) apply (rule corres_guard_imp) @@ -312,7 +312,7 @@ lemma decodeSchedContext_YieldTo_corres: apply fastforce apply (rule liftE_validE[THEN iffD2, OF thread_get_sp]) apply (rule liftE_validE[THEN iffD2, OF threadGet_sp]) - apply (rule corres_splitEE_skip; corressimp; fastforce?) + apply (rule corres_splitEE_skip; corresKsimp; fastforce?) done lemma decode_sc_inv_corres: @@ -330,20 +330,20 @@ lemma decode_sc_inv_corres: ; clarsimp split: gen_invocation_labels.split list.splits split del: if_split) apply (clarsimp simp: returnOk_def) - apply (corressimp corres: decodeSchedcontext_Bind_corres) + apply (corresKsimp corres: decodeSchedcontext_Bind_corres) defer - apply (corressimp corres: decodeSchedContext_UnbindObject_corres) - apply (corressimp corres: decodeSchedContext_YieldTo_corres) + apply (corresKsimp corres: decodeSchedContext_UnbindObject_corres) + apply (corresKsimp corres: decodeSchedContext_YieldTo_corres) apply (rule corres_splitEE_forwards') - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: sc_at'_cross_rel[unfolded cross_rel_def, rule_format]) apply (rule liftE_validE[THEN iffD2, OF get_sched_context_sp]) apply (rule liftE_validE[THEN iffD2, OF get_sc_sp']) apply (rule corres_splitEE_forwards') - apply (corressimp corres: getCurThread_corres) + apply (corresKsimp corres: getCurThread_corres) apply (rule liftE_validE[THEN iffD2, OF gets_sp]) apply (rule liftE_validE[THEN iffD2, OF getCurThread_sp]) - apply (rule corres_splitEE_skip; corressimp; fastforce?) + apply (rule corres_splitEE_skip; corresKsimp; fastforce?) apply (clarsimp simp: sc_relation_def) done @@ -359,12 +359,12 @@ lemma decode_sc_ctrl_inv_corres: apply (rename_tac cap list) apply (cases excaps'; clarsimp) apply (rule corres_splitEE_skip; (solves wpsimp)?) - apply corressimp + apply corresKsimp apply (rule corres_splitEE_forwards') - apply corressimp + apply corresKsimp apply (case_tac cap; clarsimp simp: isSchedContextCap_def) apply (rule whenE_throwError_sp[simplified validE_R_def])+ - apply corressimp + apply corresKsimp apply (auto simp: minBudgetUs_def MIN_BUDGET_US_def maxPeriodUs_def parse_time_arg_def parseTimeArg_def usToTicks_def minRefills_def MIN_REFILLS_def max_num_refills_eq_refillAbsoluteMax' refillAbsoluteMax_def max_refills_cap_def @@ -1296,7 +1296,7 @@ lemma invokeSchedControlConfigureFlags_corres: apply (wpsimp wp: setSchedContext_invs') apply (fastforce dest!: sc_ko_at_valid_objs_valid_sc') - apply (corressimp corres: updateSchedContext_corres) + apply (corresKsimp corres: updateSchedContext_corres) apply (intro conjI impI allI) apply (rename_tac abs_state conc_state n') apply (frule_tac ptr=sc_ptr and s=abs_state in state_relation_sc_relation; simp?) 
@@ -1328,7 +1328,7 @@ lemma invokeSchedControlConfigureFlags_corres: apply (wpsimp wp: setSchedContext_invs') apply (fastforce dest!: sc_ko_at_valid_objs_valid_sc') - apply (corressimp corres: updateSchedContext_corres) + apply (corresKsimp corres: updateSchedContext_corres) apply (intro conjI impI allI) apply (rename_tac abs_state conc_state n') apply (frule_tac ptr=sc_ptr and s=abs_state in state_relation_sc_relation; simp?) @@ -1381,7 +1381,7 @@ lemma invokeSchedControlConfigureFlags_corres: in corres_underlying_split) apply (clarsimp simp: when_def split del: if_split) - apply (rule corres_if_split; (solves \corressimp simp: sc_relation_def\)?) + apply (rule corres_if_split; (solves \corresKsimp simp: sc_relation_def\)?) apply (rule corres_symb_exec_l[rotated]) apply (wpsimp wp: exs_valid_assert_opt) apply (rule assert_opt_sp) @@ -1500,7 +1500,7 @@ lemma invokeSchedControlConfigureFlags_corres: apply (rule corres_symb_exec_l[rotated 2, OF assert_opt_sp]; (solves wpsimp)?) apply (rule corres_underlying_split[rotated 2, OF gts_sp isRunnable_sp]) - apply (corressimp corres: isRunnable_corres') + apply (corresKsimp corres: isRunnable_corres') apply (fastforce simp: sc_relation_def sc_at_pred_n_def obj_at_def intro!: tcb_at_cross Some_to_the) @@ -1601,7 +1601,7 @@ lemma invokeSchedControlConfigureFlags_corres: apply (clarsimp simp: sc_relation_def) apply (rule corres_underlying_split[rotated 2, OF gts_sp isRunnable_sp]) - apply (corressimp corres: isRunnable_corres') + apply (corresKsimp corres: isRunnable_corres') apply (fastforce simp: sc_relation_def sc_at_pred_n_def obj_at_def intro!: tcb_at_cross Some_to_the) diff --git a/proof/refine/RISCV64/SchedContext_R.thy b/proof/refine/RISCV64/SchedContext_R.thy index 1180a98253..f292c13077 100644 --- a/proof/refine/RISCV64/SchedContext_R.thy +++ b/proof/refine/RISCV64/SchedContext_R.thy @@ -171,7 +171,7 @@ lemma schedContextUpdateConsumed_corres: apply (clarsimp simp: sched_context_update_consumed_def schedContextUpdateConsumed_def) apply (simp add: maxTicksToUs_def ticksToUs_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rename_tac abs_sc conc_sc) apply (rule corres_if_split) apply (clarsimp simp: sc_relation_def) @@ -837,7 +837,7 @@ lemma get_sc_released_corres: (get_sc_released sc_ptr) (scReleased sc_ptr)" apply (simp add: get_sc_released_def scReleased_def scActive_def refillReady_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rename_tac sc') apply (rule corres_symb_exec_l[rotated 2, OF gets_sp]; (solves wpsimp)?) apply (rule corres_symb_exec_r[rotated, OF gets_the_sp]; (solves wpsimp)?) diff --git a/proof/refine/RISCV64/Schedule_R.thy b/proof/refine/RISCV64/Schedule_R.thy index b706c1475f..0f1d0ef6e9 100644 --- a/proof/refine/RISCV64/Schedule_R.thy +++ b/proof/refine/RISCV64/Schedule_R.thy @@ -14,7 +14,7 @@ crunches scReleased, getReprogramTimer, getCurTime, getRefills, getReleaseQueue, context begin interpretation Arch . (*FIXME: arch_split*) -declare static_imp_wp[wp_split del] +declare hoare_weak_lift_imp[wp_split del] (* Levity: added (20090713 10:04:12) *) declare sts_rel_idle [simp] @@ -138,7 +138,7 @@ proof - apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) done have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" - by (wp alternative_wp | simp)+ + by (wp | simp)+ show ?thesis using p apply (induct xs) apply (simp add: y del: dc_simp) @@ -541,7 +541,7 @@ lemma ct_idle_or_in_cur_domain'_lift2: apply (rule hoare_lift_Pf2[where f=ksCurThread]) apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) including no_pre - apply (wp static_imp_wp hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift) apply simp+ done @@ -800,7 +800,7 @@ lemma arch_switchToIdleThread_corres: arch_switch_to_idle_thread Arch.switchToIdleThread" apply (simp add: arch_switch_to_idle_thread_def RISCV64_H.switchToIdleThread_def) - apply (corressimp corres: getIdleThread_corres setVMRoot_corres) + apply (corresKsimp corres: getIdleThread_corres setVMRoot_corres) apply (clarsimp simp: valid_idle_def valid_idle'_def pred_tcb_at_def obj_at_def is_tcb valid_arch_state_asid_table valid_arch_state_global_arch_objs) done @@ -1355,7 +1355,7 @@ lemma switchToIdleThread_invs': crunch obj_at'[wp]: "Arch.switchToIdleThread" "\s. obj_at' P t s" -declare static_imp_conj_wp[wp_split del] +declare hoare_weak_lift_imp_conj[wp_split del] lemma setCurThread_const: "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" @@ -3250,7 +3250,7 @@ lemma tcbReleaseDequeue_corres: apply (rename_tac rq) apply (simp add: bind_assoc) apply (rule corres_underlying_split[rotated 2, OF gets_sp getReleaseQueue_sp]) - apply (corressimp corres: getReleaseQueue_corres) + apply (corresKsimp corres: getReleaseQueue_corres) apply clarsimp apply (rename_tac rq') @@ -3336,13 +3336,13 @@ lemma awakenBody_corres: apply (wpsimp simp: tcb_release_dequeue_def) apply (force simp: valid_release_q_def vs_all_heap_simps obj_at_def is_tcb_def) apply wpsimp - apply (corressimp corres: tcbReleaseDequeue_corres) + apply (corresKsimp corres: tcbReleaseDequeue_corres) apply (rule corres_symb_exec_r[OF _ isRunnable_sp, rotated]; (solves wpsimp)?) apply (rule corres_symb_exec_r[OF _ assert_sp, rotated]; (solves wpsimp)?) apply wpsimp apply (clarsimp simp: st_tcb_at'_def obj_at'_def objBitsKO_def) apply (case_tac "tcbState tcb'"; clarsimp) - apply (corressimp corres: possibleSwitchTo_corres) + apply (corresKsimp corres: possibleSwitchTo_corres) done lemma tcbReleaseDequeue_no_fail: @@ -3559,7 +3559,7 @@ lemma isRoundRobin_corres: "corres (=) (sc_at sc_ptr) (sc_at' sc_ptr) (is_round_robin sc_ptr) (isRoundRobin sc_ptr)" apply (clarsimp simp: is_round_robin_def isRoundRobin_def) - apply (corressimp corres: get_sc_corres + apply (corresKsimp corres: get_sc_corres simp: sc_relation_def) done @@ -3657,7 +3657,7 @@ lemma refillHeadOverlapping_corres_eq: by linarith+ lemma refillPopHead_scs_of'[wp]: - "\\s'. P (scs_of' s'(scp \ (\sc'. scRefillCount_update (\_. scRefillCount sc' - Suc 0) + "\\s'. P ((scs_of' s')(scp \ (\sc'. scRefillCount_update (\_. scRefillCount sc' - Suc 0) (scRefillHead_update (\_. refillNextIndex (scRefillHead sc') sc') sc')) (the (scs_of' s' scp))))\ @@ -3672,7 +3672,7 @@ crunches update_refill_hd, refill_pop_head, merge_refills, schedule_used, handle simp: crunch_simps update_refill_hd_rewrite update_sched_context_set_refills_rewrite) lemma merge_refills_scs_of2[wp]: - "\\s. P (scs_of2 s(scp \ (\sc. sc_refills_update + "\\s. P ((scs_of2 s)(scp \ (\sc. sc_refills_update (\_. 
merge_refill (refill_hd sc) (hd (tl (sc_refills sc))) # tl (tl (sc_refills sc))) sc) (the (scs_of2 s scp)))) \ merge_refills scp @@ -3949,15 +3949,15 @@ lemma maybeAddEmptyTail_corres: apply (fastforce dest!: sc_at'_cross[OF state_relation_pspace_relation]) apply (clarsimp simp: maybe_add_empty_tail_def maybeAddEmptyTail_def get_refills_def) apply (rule corres_underlying_split[rotated 2, OF is_round_robin_sp isRoundRobin_sp]) - apply (corressimp corres: isRoundRobin_corres) + apply (corresKsimp corres: isRoundRobin_corres) apply (clarsimp simp: obj_at_def is_sc_obj) apply (clarsimp simp: when_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (fastforce intro: valid_objs_valid_sched_context_size simp: obj_at_def is_sc_obj_def) apply (rename_tac sc') - apply (corressimp corres: refillAddTail_corres) + apply (corresKsimp corres: refillAddTail_corres) apply (frule refill_hd_relation; clarsimp simp: obj_at'_def opt_map_red opt_pred_def) apply (fastforce dest: valid_objs_valid_sched_context_size simp: obj_at_def is_sc_obj_def refill_map_def) @@ -3991,7 +3991,7 @@ lemma refillBudgetCheckRoundRobin_corres: apply (subst is_active_sc_rewrite) apply (clarsimp simp: refill_budget_check_round_robin_def refillBudgetCheckRoundRobin_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule_tac Q="\s. is_active_sc' (ksCurSc s) s" in corres_cross_add_guard) apply (rule_tac ptr="ksCurSc s'" in is_active_sc'_cross[OF state_relation_pspace_relation]; simp) apply clarsimp @@ -4111,7 +4111,7 @@ lemma nonOverlappingMergeRefills_corres: and Q'="\_. valid_refills' scPtr and sc_at' scPtr" in corres_underlying_split ; (solves wpsimp)?) - apply (corressimp corres: refillPopHead_corres + apply (corresKsimp corres: refillPopHead_corres simp: obj_at_def vs_all_heap_simps pred_map_simps sc_at_ppred_def) apply (subst update_refill_hd_comp) apply (rule corres_guard_imp) @@ -4250,15 +4250,13 @@ lemma headInsufficientLoop_corres: apply (rule_tac Q="active_sc_at' scPtr" in corres_cross_add_guard) apply (fastforce dest: active_sc_at'_cross) apply (rule corres_whileLoop_abs; simp?) - apply (frule head_insufficient_equiv[where scPtr=scPtr]; simp?) - apply (fastforce intro: refills_unat_sum_MIN_BUDGET_implies_non_empty_refills) - apply (corressimp corres: nonOverlappingMergeRefills_corres) - apply (fastforce dest: head_insufficient_length_at_least_two) - apply (wpsimp wp: non_overlapping_merge_refills_no_fail) - apply (fastforce intro!: refills_unat_sum_MIN_BUDGET_implies_non_empty_refills sc_atD1 - simp: obj_at_def) - apply (wpsimp wp: non_overlapping_merge_refills_refills_unat_sum_lower_bound - non_overlapping_merge_refills_refills_unat_sum) + apply (frule head_insufficient_equiv[where scPtr=scPtr]; simp?) 
+ apply (fastforce intro: refills_unat_sum_MIN_BUDGET_implies_non_empty_refills) + apply (corresKsimp corres: nonOverlappingMergeRefills_corres) + apply (fastforce dest: head_insufficient_length_at_least_two) + apply (wpsimp wp: non_overlapping_merge_refills_no_fail) + apply (wpsimp wp: non_overlapping_merge_refills_refills_unat_sum_lower_bound + non_overlapping_merge_refills_refills_unat_sum) apply (fastforce dest: head_insufficient_length_greater_than_one) apply (wpsimp wp: nonOverlappingMergeRefills_valid_objs') apply (fastforce intro!: non_overlapping_merge_refills_terminates) @@ -4286,8 +4284,8 @@ lemma refillFull_corres: apply (fastforce intro: sc_at_cross) apply (clarsimp simp: refill_full_def refillFull_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) - apply (corressimp corres: corres_return_eq_same) + apply (corresKsimp corres: get_sc_corres) + apply (corresKsimp corres: corres_return_eq_same) apply (fastforce simp: sc_relation_def obj_at_simps valid_refills'_def opt_map_red opt_pred_def) done @@ -4303,7 +4301,7 @@ lemma scheduleUsed_corres: apply (rule_tac Q="is_active_sc' scPtr" in corres_cross_add_guard) apply (fastforce intro: is_active_sc'_cross) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rename_tac sc sc') apply (rule corres_symb_exec_r[rotated, OF assert_sp]; (solves wpsimp)?) apply wpsimp @@ -4314,21 +4312,21 @@ lemma scheduleUsed_corres: apply (fastforce dest: length_sc_refills_cross[where P="\l. 0 = l"] simp: valid_refills'_def obj_at_simps opt_map_red opt_pred_def) apply (rule corres_if_split; (solves simp)?) - apply (corressimp corres: refillAddTail_corres simp: refill_map_def) + apply (corresKsimp corres: refillAddTail_corres simp: refill_map_def) apply (clarsimp simp: valid_refills'_def obj_at_simps opt_map_red opt_pred_def) apply (rule_tac F="sc_valid_refills' sc'" in corres_req) apply (clarsimp simp: valid_refills'_def obj_at_simps opt_map_red opt_pred_def) apply (rule corres_if_split; (solves simp)?) apply (fastforce dest: refills_tl_equal simp: refill_map_def can_merge_refill_def) - apply (corressimp corres: updateRefillTl_corres + apply (corresKsimp corres: updateRefillTl_corres simp: refill_map_def) apply (rule corres_underlying_split[rotated 2, OF refill_full_sp refillFull_sp]) - apply (corressimp corres: refillFull_corres) + apply (corresKsimp corres: refillFull_corres) apply (rule corres_if_split; (solves simp)?) 
- apply (corressimp corres: refillAddTail_corres) + apply (corresKsimp corres: refillAddTail_corres) apply (clarsimp simp: refill_map_def obj_at_simps opt_map_red opt_pred_def) - apply (corressimp corres: updateRefillTl_corres simp: refill_map_def) + apply (corresKsimp corres: updateRefillTl_corres simp: refill_map_def) done lemma head_time_buffer_simp: @@ -4403,18 +4401,18 @@ lemma handleOverrunLoopBody_corres: apply (fastforce intro: is_active_sc'_cross simp: state_relation_def) apply (clarsimp simp: handle_overrun_loop_body_def handleOverrunLoopBody_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule corres_underlying_split[rotated 2, OF refill_single_sp refillSingle_sp]) - apply (corressimp corres: refillSingle_corres) + apply (corresKsimp corres: refillSingle_corres) apply (fastforce simp: obj_at_simps valid_refills'_def opt_map_red opt_pred_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rename_tac sc sc') apply (rule_tac Q="\_ s. sc_refills sc \ []" and Q'="\_ _. sc_valid_refills' sc'" and r'=dc in corres_underlying_split[rotated]) - apply corressimp + apply corresKsimp apply (fastforce dest: refill_hd_relation simp: refill_map_def) apply (wpsimp simp: update_refill_hd_def wp: update_sched_context_wp) @@ -4422,7 +4420,7 @@ lemma handleOverrunLoopBody_corres: apply wpsimp apply (clarsimp simp: valid_refills'_def obj_at_simps opt_map_red opt_pred_def) apply (rule corres_if_split; simp?) - apply (corressimp corres: updateRefillHd_corres) + apply (corresKsimp corres: updateRefillHd_corres) apply (fastforce simp: refill_map_def sc_relation_def) apply (rule_tac F="1 < scRefillCount sc'" in corres_req) apply (frule_tac scp="scPtr" and P="\l. 1 < l" in length_sc_refills_cross) @@ -4592,11 +4590,9 @@ lemma handleOverrunLoop_corres: apply (fastforce intro: is_active_sc'_cross simp: state_relation_def) apply (clarsimp simp: handle_overrun_loop_def handleOverrunLoop_def runReaderT_def) apply (rule corres_whileLoop_abs; simp?) - apply (frule_tac usage=r' in head_time_buffer_equiv; simp?) - apply fastforce - apply (corressimp corres: handleOverrunLoopBody_corres) - apply (wpsimp wp: handle_overrun_loop_body_no_fail) - apply (clarsimp simp: vs_all_heap_simps) + apply (frule_tac usage=r' in head_time_buffer_equiv; simp?) + apply fastforce + apply (corresKsimp corres: handleOverrunLoopBody_corres) apply (wps_conj_solves wp: handle_overrun_loop_body_non_zero_refills handle_overrun_loop_body_refills_unat_sum_equals_budget) apply wps_conj_solves @@ -4637,13 +4633,13 @@ lemma refillBudgetCheck_corres: apply (clarsimp simp: refill_budget_check_def refillBudgetCheck_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule corres_symb_exec_r[rotated, OF scActive_sp]; (solves \wpsimp simp: scActive_def\)?) apply (rule corres_symb_exec_r[rotated, OF assert_sp]; (solves wpsimp)?) apply (wpsimp wp: no_fail_assert simp: is_active_sc'_def opt_map_red opt_pred_def obj_at_simps) apply (rule corres_underlying_split[rotated 2, OF is_round_robin_sp isRoundRobin_sp]) - apply (corressimp corres: isRoundRobin_corres) + apply (corresKsimp corres: isRoundRobin_corres) apply (rule corres_symb_exec_l[rotated, OF _ assert_sp]; (solves wpsimp)?) 
apply (rule_tac F="\roundRobin" in corres_req) apply clarsimp @@ -4664,7 +4660,7 @@ lemma refillBudgetCheck_corres: (sc_refill_cfgs_of s) sc_ptr" and Q'="\_ s'. valid_refills' scPtr s' \ active_sc_at' scPtr s' \ scPtr = ksCurSc s'" in corres_underlying_split) - apply (corressimp corres: handleOverrunLoop_corres) + apply (corresKsimp corres: handleOverrunLoop_corres) apply (fastforce intro: valid_refills_refills_unat_sum_equals_budget simp: vs_all_heap_simps cfg_valid_refills_def round_robin_def sp_valid_refills_def is_active_sc_rewrite[symmetric]) @@ -4687,7 +4683,7 @@ lemma refillBudgetCheck_corres: apply (clarsimp simp: get_refills_def) apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres + apply (corresKsimp corres: get_sc_corres simp: state_relation_def active_sc_at'_def obj_at_simps) apply (rename_tac sc sc') apply (rule_tac Q="\_ s. ?P s @@ -4700,7 +4696,7 @@ lemma refillBudgetCheck_corres: and Q'="\_ s'. valid_refills' scPtr s' \ active_sc_at' scPtr s' \ scPtr = ksCurSc s'" and r'=dc in corres_underlying_split[rotated]) - apply (corressimp corres: headInsufficientLoop_corres) + apply (corresKsimp corres: headInsufficientLoop_corres) apply (fastforce simp: vs_all_heap_simps word_le_nat_alt) apply (intro hoare_vcg_conj_lift_pre_fix; (solves wpsimp)?) apply schedule_used_simple @@ -4801,10 +4797,10 @@ lemma commitTime_corres: apply (fastforce intro: sc_at_cross simp: state_relation_def) apply (clarsimp simp: commit_time_def commitTime_def liftM_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply clarsimp apply (rule corres_underlying_split[rotated 2, OF get_sched_context_sp get_sc_sp']) - apply (corressimp corres: get_sc_corres) + apply (corresKsimp corres: get_sc_corres) apply (rule corres_symb_exec_r[rotated, OF getIdleSC_sp]) apply wpsimp apply (wpsimp simp: getIdleSC_def) @@ -4820,7 +4816,7 @@ lemma commitTime_corres: apply (rule corres_if_split; fastforce?) apply (fastforce simp: sc_relation_def active_sc_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getConsumedTime_sp]) - apply corressimp + apply corresKsimp apply clarsimp apply (rename_tac consumed) apply (rule_tac Q="\_ s. 
sc_at (cur_sc s) s \ csc = cur_sc s" @@ -4851,8 +4847,8 @@ lemma commitTime_corres: apply (wpsimp simp: isRoundRobin_def | wps)+ apply (clarsimp simp: ifM_def split: if_split) apply (rule corres_underlying_split[rotated 2, OF is_round_robin_sp isRoundRobin_sp]) - apply (corressimp corres: isRoundRobin_corres) - apply (corressimp corres: refillBudgetCheckRoundRobin_corres refillBudgetCheck_corres) + apply (corresKsimp corres: isRoundRobin_corres) + apply (corresKsimp corres: refillBudgetCheckRoundRobin_corres refillBudgetCheck_corres) apply (fastforce simp: vs_all_heap_simps is_sc_obj_def obj_at_simps sc_relation_def is_active_sc'_def opt_map_red opt_pred_def active_sc_def) done @@ -4871,10 +4867,10 @@ lemma switchSchedContext_corres: apply add_cur_tcb' apply (clarsimp simp: switch_sched_context_def switchSchedContext_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (clarsimp, rename_tac curScPtr) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurThread_sp]) - apply corressimp + apply corresKsimp apply (clarsimp, rename_tac ct) apply (rule corres_underlying_split[rotated 2, OF gsc_sp threadGet_sp, where r'="(=)"]) apply (rule corres_guard_imp) @@ -4905,19 +4901,19 @@ lemma switchSchedContext_corres: (solves wpsimp)?) apply (clarsimp simp: when_def) apply (rule corres_split_skip; (solves \wpsimp wp: hoare_vcg_ex_lift\)?) - apply (corressimp corres: setReprogramTimer_corres) - apply (corressimp corres: ifCondRefillUnblockCheck_corres) + apply (corresKsimp corres: setReprogramTimer_corres) + apply (corresKsimp corres: ifCondRefillUnblockCheck_corres) apply (fastforce intro: valid_objs'_valid_refills' sc_at_cross is_active_sc'2_cross valid_sched_context_size_objsI simp: obj_at_def pred_tcb_at_def vs_all_heap_simps is_sc_obj_def opt_map_red opt_pred_def) apply (rule corres_split_skip; (solves wpsimp)?) - apply (corressimp corres: getReprogramTimer_corres) + apply (corresKsimp corres: getReprogramTimer_corres) apply (rule_tac Q="\\" and Q'="\\" and r'=dc in corres_underlying_split; (solves wpsimp)?) 
- apply (corressimp corres: commitTime_corres) + apply (corresKsimp corres: commitTime_corres) apply (fastforce intro!: valid_objs'_valid_refills' sc_at_cross simp: state_relation_def) - apply (corressimp corres: setCurSc_corres) + apply (corresKsimp corres: setCurSc_corres) apply (wpsimp wp: hoare_vcg_imp_lift' | wps)+ apply (fastforce intro: valid_sched_context_size_objsI active_sc_valid_refillsE simp: obj_at_def is_sc_obj_def) @@ -5014,26 +5010,26 @@ lemma schedule_corres: apply (wpsimp wp: awaken_valid_sched hoare_vcg_imp_lift') apply fastforce apply (wpsimp wp: awaken_invs') - apply (corressimp corres: awaken_corres) + apply (corresKsimp corres: awaken_corres) apply (fastforce intro: weak_sch_act_wf_at_cross simp: invs_def valid_state_def) apply (rule corres_split_skip) apply (wpsimp wp: hoare_vcg_imp_lift' cur_sc_active_lift) apply wpsimp - apply (corressimp corres: checkDomainTime_corres) + apply (corresKsimp corres: checkDomainTime_corres) apply (fastforce intro: weak_sch_act_wf_at_cross simp: invs_def valid_state_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurThread_sp]) - apply corressimp + apply corresKsimp apply (rule corres_underlying_split[rotated 2, OF is_schedulable_sp' isSchedulable_sp]) - apply (corressimp corres: isSchedulable_corres) + apply (corresKsimp corres: isSchedulable_corres) apply (fastforce intro: weak_sch_act_wf_at_cross simp: invs_def valid_state_def state_relation_def cur_tcb_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getSchedulerAction_sp]) - apply (corressimp corres: getSchedulerAction_corres) + apply (corresKsimp corres: getSchedulerAction_corres) apply (case_tac "action = resume_cur_thread"; clarsimp) - apply (corressimp corres: scAndTimer_corres) + apply (corresKsimp corres: scAndTimer_corres) subgoal by (fastforce intro: valid_sched_context_size_objsI dest: schact_is_rct_ct_active_sc simp: invs_def cur_sc_tcb_def sc_at_pred_n_def obj_at_def is_sc_obj_def @@ -5101,12 +5097,12 @@ lemma schedule_corres: apply (clarsimp simp: invs'_def isSchedulable_bool_def st_tcb_at'_def pred_map_simps obj_at_simps vs_all_heap_simps cur_tcb'_def elim!: opt_mapE) - apply (corressimp corres: tcbSchedEnqueue_corres) + apply (corresKsimp corres: tcbSchedEnqueue_corres) apply (fastforce dest: invs_cur simp: cur_tcb_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getIdleThread_sp]) - apply corressimp + apply corresKsimp apply (rule corres_underlying_split[rotated 2, OF thread_get_sp threadGet_sp, where r'="(=)"]) apply (rule corres_guard_imp) apply (rule threadGet_corres) @@ -5142,15 +5138,15 @@ lemma schedule_corres: apply (rule corres_underlying_split[rotated 2, OF schedule_switch_thread_fastfail_inv scheduleSwitchThreadFastfail_inv]) - apply (corressimp corres: scheduleSwitchThreadFastfail_corres) + apply (corresKsimp corres: scheduleSwitchThreadFastfail_corres) apply (fastforce dest: invs_cur simp: cur_tcb_def obj_at_def is_tcb_def state_relation_def cur_tcb'_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp curDomain_sp]) - apply (corressimp corres: curDomain_corres) + apply (corresKsimp corres: curDomain_corres) apply (clarsimp simp: isHighestPrio_def' split del: if_split) apply (rule corres_underlying_split[rotated 2, OF gets_sp gets_sp, where r'="(=)"]) - apply (corressimp corres: isHighestPrio_corres) + apply (corresKsimp corres: isHighestPrio_corres) apply (clarsimp simp: is_highest_prio_def) apply (subst bitmapL1_zero_ksReadyQueues) apply (fastforce dest: invs_queues simp: valid_queues_def) diff 
--git a/proof/refine/RISCV64/SubMonad_R.thy b/proof/refine/RISCV64/SubMonad_R.thy index 0eb6a1978c..1ff2611a3b 100644 --- a/proof/refine/RISCV64/SubMonad_R.thy +++ b/proof/refine/RISCV64/SubMonad_R.thy @@ -27,6 +27,10 @@ lemma corres_machine_op: apply (simp_all add: state_relation_def swp_def) done +lemmas corres_machine_op_Id = corres_machine_op[OF corres_Id] +lemmas corres_machine_op_Id_eq[corres_term] = corres_machine_op_Id[where r="(=)"] +lemmas corres_machine_op_Id_dc[corres_term] = corres_machine_op_Id[where r="dc::unit \ unit \ bool"] + lemma doMachineOp_mapM: assumes "\x. empty_fail (m x)" shows "doMachineOp (mapM m l) = mapM (doMachineOp \ m) l" diff --git a/proof/refine/RISCV64/Syscall_R.thy b/proof/refine/RISCV64/Syscall_R.thy index cd8f329426..22a8b3af8b 100644 --- a/proof/refine/RISCV64/Syscall_R.thy +++ b/proof/refine/RISCV64/Syscall_R.thy @@ -338,7 +338,7 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: apply (simp add: threadSet_def) apply wp apply (wps set_tcb'.ksSchedulerAction) - apply (wp static_imp_wp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ apply (rename_tac word) apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" @@ -678,7 +678,7 @@ proof - apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) apply (rule PUC,simp) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (wp hoare_vcg_all_lift static_imp_wp | simp add:ball_conj_distrib)+ + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ done qed @@ -1387,7 +1387,7 @@ lemma hinv_invs'[wp]: apply (rule validE_valid) apply (intro hoare_vcg_seqE[OF _ stateAssertE_sp]) apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' - hoare_vcg_all_lift static_imp_wp) + hoare_vcg_all_lift hoare_weak_lift_imp) apply simp apply (intro conjI impI) apply (wp gts_imp' | simp)+ @@ -1681,7 +1681,6 @@ lemma hw_invs'[wp]: apply (rename_tac readright; case_tac readright; (wp getNotification_wp |simp)+) apply (clarsimp simp: obj_at_simps isNotificationCap_def) by (wpsimp simp: lookupReply_def getCapReg_def - wp: hoare_vcg_conj_liftE | wp (once) hoare_drop_imps)+ (clarsimp simp: obj_at_simps ct_in_state'_def pred_tcb_at'_def) @@ -1955,7 +1954,7 @@ lemma chargeBudget_corres: apply (fastforce intro: cur_sc_tcb_sc_at_cur_sc) apply add_cur_tcb' apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurSc_sp]) - apply (corressimp corres: getCurSc_corres) + apply (corresKsimp corres: getCurSc_corres) apply (rule corres_symb_exec_r[rotated, OF getIdleSC_sp]; wpsimp simp: getIdleSC_def) apply (rule_tac F="idle_sc_ptr = idleSCPtr" in corres_req) apply (clarsimp simp: state_relation_def) @@ -1963,7 +1962,7 @@ lemma chargeBudget_corres: and Q'="\_. invs' and cur_tcb'" in corres_underlying_split) apply (clarsimp simp: when_def split del: if_split) - apply (rule corres_if_split; (solves corressimp)?) + apply (rule corres_if_split; (solves corresKsimp)?) apply (rule corres_guard_imp) apply (rule corres_split_eqr[OF isRoundRobin_corres]) apply (rule corres_split[OF corres_if2], simp) diff --git a/proof/refine/RISCV64/TcbAcc_R.thy b/proof/refine/RISCV64/TcbAcc_R.thy index 0ba7d47fef..6f792ae1ff 100644 --- a/proof/refine/RISCV64/TcbAcc_R.thy +++ b/proof/refine/RISCV64/TcbAcc_R.thy @@ -11,7 +11,6 @@ begin context begin interpretation Arch . 
(*FIXME: arch_split*) declare if_weak_cong [cong] -declare result_in_set_wp[wp] declare hoare_in_monad_post[wp] declare trans_state_update'[symmetric,simp] declare storeWordUser_typ_at' [wp] @@ -54,7 +53,7 @@ lemma isHighestPrio_def': "isHighestPrio d p = gets (\s. ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' apply (rule ext) - apply (clarsimp simp: gets_def bind_assoc return_def NonDetMonad.bind_def get_def + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def split: if_splits) done @@ -398,7 +397,7 @@ proof - assert_opt_def simpler_gets_def set_object_def get_object_def put_def get_def bind_def assert_def a_type_def[split_simps kernel_object.split arch_kernel_obj.split] dest!: get_tcb_SomeD) - apply (subgoal_tac "kheap s(t \ TCB tcb) = kheap s", simp) + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s", simp) apply (simp add: map_upd_triv get_tcb_SomeD)+ done show ?thesis @@ -2228,13 +2227,13 @@ lemma rescheduleRequired_corres_weak: apply (rule corres_underlying_split[OF _ _ gets_sp, rotated 2]) apply (clarsimp simp: getSchedulerAction_def) apply (rule gets_sp) - apply (corressimp corres: getSchedulerAction_corres) + apply (corresKsimp corres: getSchedulerAction_corres) apply (rule corres_underlying_split[where r'=dc, rotated]; (solves \wpsimp\)?) - apply (corressimp corres: setSchedulerAction_corres) + apply (corresKsimp corres: setSchedulerAction_corres) apply (case_tac action; clarsimp?) apply (rename_tac tp) apply (rule corres_underlying_split[OF _ _ is_schedulable_sp isSchedulable_inv, rotated 2]) - apply (corressimp corres: isSchedulable_corres) + apply (corresKsimp corres: isSchedulable_corres) apply (clarsimp simp: weaker_valid_sched_action_def obj_at_def vs_all_heap_simps is_tcb_def) apply (clarsimp simp: when_def) @@ -2277,7 +2276,7 @@ lemma rescheduleRequired_corres_weak: obj_at_def pred_tcb_at_def weaker_valid_sched_action_def) apply (clarsimp simp: no_fail_def return_def vs_all_heap_simps obj_at_def pred_tcb_at_def weaker_valid_sched_action_def) - apply (corressimp corres: tcbSchedEnqueue_corres + apply (corresKsimp corres: tcbSchedEnqueue_corres simp: obj_at_def is_tcb_def weak_sch_act_wf_def) done @@ -2993,9 +2992,9 @@ lemma threadSet_queued_sch_act_wf[wp]: split: scheduler_action.split) apply (wp hoare_vcg_conj_lift) apply (simp add: threadSet_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply wps - apply (wp static_imp_wp getObject_tcb_wp)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ apply (clarsimp simp: obj_at'_def) apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ apply (simp add: threadSet_def) @@ -4953,7 +4952,7 @@ lemma threadSet_tcbState_update_ct_not_inQ[wp]: apply (clarsimp) apply (rule hoare_conjI) apply (rule hoare_weaken_pre) - apply (wps, wp static_imp_wp) + apply (wps, wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb)+ apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4973,7 +4972,7 @@ lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: apply (rule hoare_conjI) apply (rule hoare_weaken_pre) apply wps - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb) apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) diff --git a/proof/refine/RISCV64/Tcb_R.thy b/proof/refine/RISCV64/Tcb_R.thy index 7366c61882..63ceedd8be 100644 --- a/proof/refine/RISCV64/Tcb_R.thy +++ b/proof/refine/RISCV64/Tcb_R.thy @@ -348,21 +348,21 @@ lemma 
invokeTCB_WriteRegisters_corres: frameRegisters_def gpRegisters_def getSanitiseRegisterInfo_def sanitiseRegister_def sanitise_register_def) apply (rule corres_underlying_split[rotated 2, OF gets_sp getCurThread_sp]) - apply (corressimp corres: getCurThread_corres) + apply (corresKsimp corres: getCurThread_corres) apply (rule corres_split_skip; (solves wpsimp)?) - apply (corressimp corres: asUser_corres + apply (corresKsimp corres: asUser_corres simp: zipWithM_mapM getRestartPC_def setNextPC_def wp: no_fail_mapM no_fail_setRegister) apply (rule corres_split_skip; (solves wpsimp)?) - apply (corressimp corres: asUser_postModifyRegisters_corres[simplified]) + apply (corresKsimp corres: asUser_postModifyRegisters_corres[simplified]) apply fastforce apply (rule_tac Q="\_. einvs" and Q'="\_. invs'" in corres_underlying_split[rotated 2]) apply (wpsimp wp: restart_valid_sched) using idle_no_ex_cap apply fastforce apply (wpsimp wp: restart_invs') using global'_no_ex_cap apply fastforce - apply (corressimp corres: restart_corres) - apply (corressimp corres: rescheduleRequired_corres) + apply (corresKsimp corres: restart_corres) + apply (corresKsimp corres: rescheduleRequired_corres) apply fastforce done @@ -472,22 +472,23 @@ proof - apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp - apply (solves \wp static_imp_wp\)+ + apply (solves \wp hoare_weak_lift_imp\)+ apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_strengthen_post[rotated]) apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def valid_pspace_def) prefer 2 apply (rule_tac Q="\_. invs' and tcb_at' dest" in hoare_strengthen_post[rotated]) apply (clarsimp simp: invs'_def valid_pspace'_def) - apply ((wp mapM_x_wp' static_imp_wp | simp)+)[4] - apply ((wp static_imp_wp restart_invs' restart_valid_sched | wpc | clarsimp simp: if_apply_def2)+)[2] + apply ((wp mapM_x_wp' hoare_weak_lift_imp | simp)+)[4] + apply ((wp hoare_weak_lift_imp restart_invs' restart_valid_sched | wpc + | clarsimp simp: if_apply_def2)+)[2] apply (rule_tac Q="\_. einvs and tcb_at dest and tcb_at src and ex_nonz_cap_to dest and simple_sched_action and current_time_bounded" in hoare_strengthen_post[rotated]) apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def valid_state_def valid_pspace_def valid_idle_def dest!: idle_no_ex_cap ) - apply (wp suspend_nonz_cap_to_tcb static_imp_wp suspend_invs suspend_cap_to' + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp suspend_invs suspend_cap_to' suspend_valid_sched | simp add: if_apply_def2)+ apply (fastforce simp: invs_def valid_state_def valid_pspace_def valid_idle_def @@ -2795,7 +2796,7 @@ lemma checkPrio_wp: checkPrio prio auth \ \rv. P \,-" apply (simp add: checkPrio_def) - apply (wp NonDetMonadVCG.whenE_throwError_wp getMCP_wp) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) by (auto simp add: pred_tcb_at'_def obj_at'_def) lemma checkPrio_lt_ct: diff --git a/proof/refine/RISCV64/Untyped_R.thy b/proof/refine/RISCV64/Untyped_R.thy index 9cd23ab20b..8c8058e568 100644 --- a/proof/refine/RISCV64/Untyped_R.thy +++ b/proof/refine/RISCV64/Untyped_R.thy @@ -3226,7 +3226,7 @@ lemma createNewCaps_valid_cap': lemma dmo_ctes_of[wp]: "\\s. P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" - by (simp add: doMachineOp_def split_def | wp select_wp)+ + by (simp add: doMachineOp_def split_def | wp)+ lemma createNewCaps_ranges: "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 cteCap_update (\_. 
capability.UntypedCap d ptr sz idx) cte))" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" (is "mdb_inv_preserve (ctes_of s) ?ctes") show "untyped_inc' ?ctes" @@ -4080,11 +4080,12 @@ lemma idx_le_new_offs: end -crunch ksIdleThread[wp]: deleteObjects "\s. P (ksIdleThread s)" - (simp: crunch_simps wp: hoare_drop_imps unless_wp) -crunch ksCurDomain[wp]: deleteObjects "\s. P (ksCurDomain s)" - (simp: crunch_simps wp: hoare_drop_imps unless_wp) -crunch irq_node[wp]: deleteObjects "\s. P (irq_node' s)" +context begin interpretation Arch . (*FIXME: arch_split*) + +crunches deleteObjects + for ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and ksCurDomain[wp]: "\s. P (ksCurDomain s)" + and irq_node[wp]: "\s. P (irq_node' s)" (simp: crunch_simps wp: hoare_drop_imps unless_wp) lemma deleteObjects_ksCurThread[wp]: @@ -4404,8 +4405,6 @@ lemma resetUntypedCap_corres: apply (auto simp: descendants_range_in'_def valid_untyped'_def) done -end - lemma deleteObjects_ex_cte_cap_wp_to': "\invs' and ex_cte_cap_wp_to' P slot and (\s. descendants_of' p (ctes_of s) = {}) and cte_wp_at' (\cte. \idx d. cteCap cte = UntypedCap d ptr sz idx) p\ @@ -4429,6 +4428,8 @@ lemma deleteObjects_ex_cte_cap_wp_to': apply (auto simp: add_mask_fold) done +end + lemma updateCap_cte_cap_wp_to': "\\s. cte_wp_at' (\cte. p' \ cte_refs' (cteCap cte) (irq_node' s) \ P (cteCap cte) \ p' \ cte_refs' cap (irq_node' s) \ P cap) p s @@ -4660,6 +4661,8 @@ lemma whenE_reset_resetUntypedCap_invs_etc: crunch ksCurDomain[wp]: updateFreeIndex "\s. P (ksCurDomain s)" +end + lemma (in range_cover) funky_aligned: "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" apply (rule aligned_add_aligned) diff --git a/proof/refine/RISCV64/VSpace_R.thy b/proof/refine/RISCV64/VSpace_R.thy index 57851a7b60..b8b2ba5b27 100644 --- a/proof/refine/RISCV64/VSpace_R.thy +++ b/proof/refine/RISCV64/VSpace_R.thy @@ -51,25 +51,17 @@ lemma asidBits_asid_bits[simp]: "asidBits = asid_bits" by (simp add: bit_simps' asid_bits_def asidBits_def) -lemma no_fail_read_stval[intro!,simp]: +lemma no_fail_read_stval[wp, intro!, simp]: "no_fail \ read_stval" by (simp add: read_stval_def) lemma handleVMFault_corres: "corres (fr \ dc) (tcb_at thread) (tcb_at' thread) (handle_vm_fault thread fault) (handleVMFault thread fault)" - apply (simp add: RISCV64_H.handleVMFault_def handle_vm_fault_def) - apply (rule corres_guard_imp) - apply (rule corres_split_eqrE) - apply simp - apply (rule corres_machine_op[where r="(=)"]) - apply (rule corres_Id, rule refl, simp) - apply (rule no_fail_read_stval) - apply (cases fault; simp) - apply wpsimp+ - done + unfolding handleVMFault_def handle_vm_fault_def + by (corres | corres_cases_both)+ -lemma no_fail_setVSpaceRoot[intro!, simp]: +lemma no_fail_setVSpaceRoot[wp, intro!, simp]: "no_fail \ (setVSpaceRoot v a)" by (simp add: setVSpaceRoot_def) @@ -92,10 +84,7 @@ proof - (do globalPT <- gets (riscvKSGlobalPT \ ksArchState); doMachineOp (setVSpaceRoot (addrFromKPPtr globalPT) 0) od)" for P Q - apply (corressimp corres: corres_gets_global_pt corres_machine_op) - apply fastforce - apply (simp add: addrFromKPPtr_def) - done + by corres show ?thesis unfolding set_vm_root_def setVMRoot_def catchFailure_def withoutFailure_def throw_def @@ -139,17 +128,7 @@ proof - apply (case_tac acap; clarsimp simp: isCap_simps catch_throwError intro!: global) apply (rename_tac m) apply (case_tac m; clarsimp simp: isCap_simps catch_throwError intro!: global) - apply (rule corres_guard_imp) - apply (rule 
corres_split_catch [where f=lfr and E'="\_. \"]) - apply (rule corres_split_eqrE[OF findVSpaceForASID_corres[OF refl]]) - apply (rule whenE_throwError_corres; simp add: lookup_failure_map_def) - apply (rule corres_machine_op) - apply corressimp - apply fastforce - apply simp - apply wpsimp+ - apply (rule global, assumption) - apply wpsimp+ + apply (corres simp: lookup_failure_map_def) apply (frule (1) cte_wp_at_valid_objs_valid_cap) apply (clarsimp simp: valid_cap_def mask_def wellformed_mapdata_def) apply (wpsimp wp: get_cap_wp simp: getThreadVSpaceRoot_def)+ @@ -158,7 +137,7 @@ proof - qed -lemma get_asid_pool_corres_inv': +lemma get_asid_pool_corres_inv'[corres]: assumes "p' = p" shows "corres (\p. (\p'. p = p' o ucast) \ inv ASIDPool) (asid_pool_at p and pspace_aligned and pspace_distinct) \ @@ -199,60 +178,29 @@ lemma no_fail_hwAIDFlush[intro!, wp, simp]: lemma hwASIDFlush_corres[corres]: "corres dc \ \ (do_machine_op (hwASIDFlush x)) (doMachineOp (hwASIDFlush x))" - by (corressimp corres: corres_machine_op) + by (corresKsimp corres: corres_machine_op) -lemma deleteASID_corres [corres]: +lemma deleteASID_corres[corres]: assumes "asid' = ucast asid" "pm' = pm" shows "corres dc invs no_0_obj' (delete_asid asid pm) (deleteASID asid' pm')" unfolding delete_asid_def deleteASID_def using assms apply simp - apply (rule corres_guard_imp) - apply (rule corres_split[OF corres_gets_asid]) - apply (case_tac "asid_table (asid_high_bits_of asid)", simp) - apply clarsimp - apply (rule_tac P="\s. asid_high_bits_of asid \ dom (asidTable o ucast) \ - asid_pool_at (the ((asidTable o ucast) (asid_high_bits_of asid))) s \ - pspace_aligned s \ pspace_distinct s" and - P'="\" and - Q="invs and - (\s. asid_table s = asidTable \ ucast)" in - corres_split) - apply (simp add: dom_def) - apply (rule get_asid_pool_corres_inv'[OF refl, unfolded pred_conj_def, simplified]) - apply (rule corres_when) - apply (simp add: mask_asid_low_bits_ucast_ucast asid_low_bits_of_def ucast_ucast_a is_down) - apply (rule corres_split[OF hwASIDFlush_corres]) - apply (rule_tac P="asid_pool_at (the (asidTable (ucast (asid_high_bits_of asid)))) - and pspace_aligned and pspace_distinct" - and P'="\" - in corres_split) - apply (simp del: fun_upd_apply) - apply (rule setObject_ASIDPool_corres) - apply (simp add: inv_def mask_asid_low_bits_ucast_ucast) + apply (corres simp: liftM_def mask_asid_low_bits_ucast_ucast asid_low_bits_of_def + ucast_ucast_a is_down + | corres_cases_both)+ + (* side condition of setObject_ASIDPool_corres needs manual work *) apply (rule ext) - apply (clarsimp simp: o_def ucast_ucast_a is_down asid_low_bits_of_def) + apply (clarsimp simp: ucast_ucast_a is_down asid_low_bits_of_def + mask_asid_low_bits_ucast_ucast inv_def) apply (word_bitwise, clarsimp) - apply (rule corres_split[OF getCurThread_corres]) - apply simp - apply (rule setVMRoot_corres[OF refl]) - apply wp+ - apply (thin_tac "x = f o g" for x f g) - apply (simp del: fun_upd_apply) - apply (fold cur_tcb_def) + (* continue rest of corres proof: *) + apply (corres corres: getCurThread_corres) apply (wp set_asid_pool_vs_lookup_unmap' - set_asid_pool_vspace_objs_unmap_single - | strengthen valid_arch_state_asid_table valid_arch_state_global_arch_objs)+ - apply (auto simp: obj_at_def a_type_def graph_of_def - split: if_split_asm dest: invs_valid_asid_table)[1] - apply (wp getASID_wp) - apply clarsimp - apply assumption - apply wp+ - apply clarsimp - apply (frule invs_valid_asid_table) - apply (drule (1) valid_asid_tableD) - apply (clarsimp simp: 
invs_distinct) + set_asid_pool_vspace_objs_unmap_single getASID_wp + | strengthen valid_arch_state_asid_table valid_arch_state_global_arch_objs + | simp flip: cur_tcb_def)+ + apply (fastforce dest: valid_asid_tableD invs_valid_asid_table) apply simp done @@ -382,18 +330,7 @@ lemma unmapPageTable_corres: (unmap_page_table asid vptr pt) (unmapPageTable asid' vptr' pt')" apply (clarsimp simp: assms unmap_page_table_def unmapPageTable_def ignoreFailure_def const_def) - apply (rule corres_guard_imp) - apply (rule corres_split_catch[where E="\\" and E'="\\", OF _ corres_return_trivial]) - apply (rule corres_split_eqrE[OF findVSpaceForASID_corres[OF refl]]) - apply (rule corres_split_eqrE[OF lookupPTFromLevel_corres[OF _ refl]]) - apply simp - apply (simp add: liftE_bindE) - apply (rule corres_split[OF storePTE_corres]) - apply simp - apply simp - apply (rule corres_machine_op) - apply (rule corres_Id; simp) - apply (wpsimp wp: pt_lookup_from_level_wp)+ + apply (corres corres: lookupPTFromLevel_corres wp: pt_lookup_from_level_wp) apply (clarsimp simp: invs_distinct invs_psp_aligned invs_vspace_objs invs_valid_asid_table pte_at_eq) apply (rule_tac x=asid in exI) @@ -452,7 +389,8 @@ lemma unmapPage_corres: apply fastforce apply (rule corres_splitEE[OF checkMappingPPtr_corres]; assumption?) apply (simp add: liftE_bindE) - apply (rule corres_split[OF storePTE_corres], simp) + apply (rule corres_split[OF storePTE_corres], rule refl) + apply simp apply simp apply (rule corres_machine_op, rule corres_Id, rule refl; simp) apply wpsimp+ @@ -549,12 +487,7 @@ lemma performPageInvocation_corres: apply (simp add: bind_assoc) apply (rule corres_guard_imp) apply (simp add: perform_pg_inv_map_def) - apply (rule corres_split[OF updateCap_same_master]) - apply simp - apply (rule corres_split[OF storePTE_corres]) - apply assumption - apply (rule corres_machine_op, rule corres_Id; simp) - apply wpsimp+ + apply (corres corres: updateCap_same_master) apply (clarsimp simp: invs_valid_objs invs_distinct invs_psp_aligned) apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def is_cap_simps) apply (clarsimp simp: same_ref_def) @@ -663,12 +596,7 @@ lemma performPageTableInvocation_corres: apply (clarsimp simp: valid_pti_def valid_pti'_def split: arch_cap.splits capability.split_asm arch_capability.split_asm) apply (rule corres_guard_imp) - apply (rule corres_split[OF updateCap_same_master]) - apply simp - apply (rule corres_split[OF storePTE_corres]) - apply assumption - apply (rule corres_machine_op, rule corres_Id; simp) - apply wpsimp+ + apply (corres corres: updateCap_same_master) apply (clarsimp simp: cte_wp_at_caps_of_state is_arch_update_def invs_valid_objs invs_psp_aligned invs_distinct) apply (case_tac cap; simp add: is_cap_simps cap_master_cap_simps) @@ -730,26 +658,15 @@ lemma performASIDPoolInvocation_corres: using assms apply (clarsimp simp: perform_asid_pool_invocation_def performASIDPoolInvocation_def) apply (cases ap, simp add: asid_pool_invocation_map_def) - apply (rule corres_guard_imp) - apply (rule corres_split[OF getSlotCap_corres[OF refl] _ get_cap_wp getSlotCap_wp]) - apply (rule corres_assert_gen_asm_l, rule corres_assert_gen_asm_l) - apply (rule_tac F="is_pt_cap pt_cap" in corres_gen_asm) - apply (rule corres_split[OF updateCap_same_master]) - apply (clarsimp simp: is_cap_simps update_map_data_def) - apply (rule corres_split[OF copy_global_mappings_corres]) - apply (clarsimp simp: is_cap_simps) - apply (unfold store_asid_pool_entry_def)[1] - apply (rule corres_split[where r'="\pool pool'. 
pool = pool' \ ucast"]) - apply (simp cong: corres_weak_cong) - apply (rule corres_rel_imp) - apply (rule getObject_ASIDPool_corres[OF refl]) - apply simp - apply (simp cong: corres_weak_cong) - apply (rule setObject_ASIDPool_corres) - apply (rule ext) - apply (clarsimp simp: inv_def is_cap_simps ucast_up_inj) - apply (wp getASID_wp)+ - apply (wpsimp wp: set_cap_typ_at hoare_drop_imps|strengthen valid_arch_state_global_arch_objs)+ + (* The fastforce is needed for the side conditions of setObject_ASIDPool_corres used at the end. + Guarded by "match" to not slow down the rest too much. *) + apply (corres' \match conclusion in "f' = inv f x \ g" for f' f x g \ + \fastforce simp: inv_def is_cap_simps ucast_up_inj\\ + corres: getSlotCap_corres corres_assert_gen_asm_l updateCap_same_master + simp: update_map_data_def cap.is_ArchObjectCap_def arch_cap.is_PageTableCap_def + liftM_def store_asid_pool_entry_def) + apply (wpsimp wp: set_cap_typ_at hoare_drop_imps get_cap_wp + | strengthen valid_arch_state_global_arch_objs)+ apply (clarsimp simp: valid_apinv_def cte_wp_at_caps_of_state is_cap_simps cap_master_cap_simps update_map_data_def in_omonad) apply (drule (1) caps_of_state_valid_cap) diff --git a/proof/refine/RISCV64/orphanage/Orphanage.thy b/proof/refine/RISCV64/orphanage/Orphanage.thy index 1307c892db..46f6622f12 100644 --- a/proof/refine/RISCV64/orphanage/Orphanage.thy +++ b/proof/refine/RISCV64/orphanage/Orphanage.thy @@ -449,7 +449,7 @@ lemma rescheduleRequired_no_orphans [wp]: \ \rv s. no_orphans s \" unfolding rescheduleRequired_def apply (wp tcbSchedEnqueue_no_orphans hoare_vcg_all_lift ssa_no_orphans | wpc | clarsimp)+ - apply (wps tcbSchedEnqueue_nosch, wp static_imp_wp) + apply (wps tcbSchedEnqueue_nosch, wp hoare_weak_lift_imp) apply (rename_tac word t p) apply (rule_tac P="word = t" in hoare_gen_asm) apply (wp hoare_disjI1 | clarsimp)+ @@ -461,7 +461,7 @@ lemma rescheduleRequired_almost_no_orphans [wp]: \ \rv s. almost_no_orphans tcb_ptr s \" unfolding rescheduleRequired_def apply (wp tcbSchedEnqueue_almost_no_orphans_lift hoare_vcg_all_lift | wpc | clarsimp)+ - apply (wps tcbSchedEnqueue_nosch, wp static_imp_wp) + apply (wps tcbSchedEnqueue_nosch, wp hoare_weak_lift_imp) apply (rename_tac word t p) apply (rule_tac P="word = t" in hoare_gen_asm) apply (wp hoare_disjI1 | clarsimp)+ @@ -1078,7 +1078,7 @@ proof - apply (rule_tac Q="\_ s. (t = candidate \ ksCurThread s = candidate) \ (t \ candidate \ sch_act_not t s)" in hoare_post_imp) - apply (wpsimp wp: stt_nosch static_imp_wp)+ + apply (wpsimp wp: stt_nosch hoare_weak_lift_imp)+ apply (fastforce dest!: in_all_active_tcb_ptrsD simp: all_queued_tcb_ptrs_def comp_def) done @@ -1207,7 +1207,7 @@ lemma possibleSwitchTo_almost_no_orphans [wp]: \ \rv s. no_orphans s \" unfolding possibleSwitchTo_def by (wp rescheduleRequired_valid_queues'_weak tcbSchedEnqueue_almost_no_orphans - ssa_almost_no_orphans static_imp_wp + ssa_almost_no_orphans hoare_weak_lift_imp | wpc | clarsimp | wp (once) hoare_drop_imp)+ @@ -1920,7 +1920,7 @@ lemma writereg_no_orphans: unfolding invokeTCB_def performTransfer_def postModifyRegisters_def apply simp apply (rule hoare_pre) - by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' static_imp_wp + by (wp hoare_vcg_if_lift hoare_vcg_conj_lift restart_invs' hoare_weak_lift_imp | strengthen invs_valid_queues' | clarsimp simp: invs'_def valid_state'_def dest!: global'_no_ex_cap )+ lemma copyreg_no_orphans: @@ -1930,8 +1930,8 @@ lemma copyreg_no_orphans: \ \rv s. 
no_orphans s \" unfolding invokeTCB_def performTransfer_def postModifyRegisters_def apply simp - apply (wp hoare_vcg_if_lift static_imp_wp) - apply (wp static_imp_wp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) + apply (wp hoare_weak_lift_imp hoare_vcg_conj_lift hoare_drop_imp mapM_x_wp' restart_invs' restart_no_orphans asUser_no_orphans suspend_nonz_cap_to_tcb | strengthen invs_valid_queues' | wpc | simp add: if_apply_def2)+ apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) @@ -1943,7 +1943,7 @@ lemma settlsbase_no_orphans: \ \rv s. no_orphans s \" unfolding invokeTCB_def performTransfer_def apply simp - apply (wp hoare_vcg_if_lift static_imp_wp) + apply (wp hoare_vcg_if_lift hoare_weak_lift_imp) apply (wpsimp wp: hoare_vcg_imp_lift' mapM_x_wp' asUser_no_orphans)+ done @@ -2009,19 +2009,19 @@ lemma tc_no_orphans: apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial - threadSet_cap_to' hoare_vcg_all_lift static_imp_wp | clarsimp simp: inQ_def)+)[2] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (cases mcp; clarsimp simp: pred_conj_def option.splits[where P="\x. x s" for s]) apply ((wp case_option_wp threadSet_no_orphans threadSet_invs_trivial setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] - threadSet_cap_to' hoare_vcg_all_lift static_imp_wp | clarsimp simp: inQ_def)+)[3] + threadSet_cap_to' hoare_vcg_all_lift hoare_weak_lift_imp | clarsimp simp: inQ_def)+)[3] apply ((simp only: simp_thms cong: conj_cong | wp cteDelete_deletes cteDelete_invs' cteDelete_sch_act_simple case_option_wp[where m'="return ()", OF setPriority_no_orphans return_inv,simplified] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] checkCap_inv[where P=no_orphans] checkCap_inv[where P="tcb_at' a"] threadSet_cte_wp_at' hoare_vcg_all_lift_R hoare_vcg_all_lift threadSet_no_orphans - hoare_vcg_const_imp_lift_R static_imp_wp hoare_drop_imp threadSet_ipcbuffer_invs + hoare_vcg_const_imp_lift_R hoare_weak_lift_imp hoare_drop_imp threadSet_ipcbuffer_invs | strengthen invs_valid_queues' | (simp add: locateSlotTCB_def locateSlotBasic_def objBits_def objBitsKO_def tcbIPCBufferSlot_def tcb_cte_cases_def, @@ -2096,7 +2096,7 @@ lemma performPageInvocation_no_orphans [wp]: apply (simp add: performPageInvocation_def cong: page_invocation.case_cong) apply (rule hoare_pre) - apply (wp mapM_x_wp' mapM_wp' static_imp_wp | wpc | clarsimp)+ + apply (wp mapM_x_wp' mapM_wp' hoare_weak_lift_imp | wpc | clarsimp)+ done lemma performASIDControlInvocation_no_orphans [wp]: @@ -2150,13 +2150,13 @@ lemma performASIDControlInvocation_no_orphans [wp]: \\reply. no_orphans\" apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) - apply (wp static_imp_wp | clarsimp)+ + apply (wp hoare_weak_lift_imp | clarsimp)+ apply (rule_tac Q="\rv s. 
no_orphans s" in hoare_post_imp) apply (clarsimp simp: no_orphans_def all_active_tcb_ptrs_def is_active_tcb_ptr_def all_queued_tcb_ptrs_def) apply (wp | clarsimp simp:placeNewObject_def2)+ apply (wp createObjects'_wp_subst)+ - apply (wp static_imp_wp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ + apply (wp hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap'[where sz= pageBits] getSlotCap_wp | simp)+ apply (strengthen invs_pspace_aligned' invs_pspace_distinct' invs_valid_pspace') apply (clarsimp simp:conj_comms) apply (wp deleteObjects_invs'[where idx = idx and d=False] diff --git a/proof/refine/X64/ArchAcc_R.thy b/proof/refine/X64/ArchAcc_R.thy index d4dfa37733..df214412a0 100644 --- a/proof/refine/X64/ArchAcc_R.thy +++ b/proof/refine/X64/ArchAcc_R.thy @@ -1411,11 +1411,11 @@ lemma copy_global_mappings_corres [@lift_corres_args, corres]: (copyGlobalMappings pm)" (is "corres _ ?apre _ _ _") unfolding copy_global_mappings_def copyGlobalMappings_def objBits_simps archObjSize_def pptr_base_def apply (fold word_size_bits_def) - apply corressimp + apply corresKsimp apply (rule_tac P="page_map_l4_at global_pm and ?apre" and P'="page_map_l4_at' skimPM and page_map_l4_at' pm" in corresK_mapM_x[OF order_refl]) - apply (corressimp simp: objBits_def mask_def wp: get_pde_wp getPDE_wp)+ + apply (corresKsimp simp: objBits_def mask_def wp: get_pde_wp getPDE_wp)+ apply(rule conjI) subgoal by (auto intro!: page_map_l4_pml4e_atI page_map_l4_pml4e_atI' simp: page_bits_def le_less_trans ptTranslationBits_def) diff --git a/proof/refine/X64/Arch_R.thy b/proof/refine/X64/Arch_R.thy index fd8c8b8b44..395b554113 100644 --- a/proof/refine/X64/Arch_R.thy +++ b/proof/refine/X64/Arch_R.thy @@ -1321,7 +1321,7 @@ lemma perform_port_inv_corres: apply (clarsimp simp: perform_io_port_invocation_def performX64PortInvocation_def archinv_relation_def ioport_invocation_map_def) apply (case_tac x; clarsimp) - apply (corressimp corres: port_in_corres simp: ioport_data_relation_def) + apply (corresKsimp corres: port_in_corres simp: ioport_data_relation_def) by (auto simp: no_fail_in8 no_fail_in16 no_fail_in32 no_fail_out8 no_fail_out16 no_fail_out32) @@ -1480,13 +1480,13 @@ lemma performASIDControlInvocation_tcb_at': apply (rule hoare_name_pre_state) apply (clarsimp simp: performASIDControlInvocation_def split: asidcontrol_invocation.splits) apply (clarsimp simp: valid_aci'_def cte_wp_at_ctes_of cong: conj_cong) - apply (wp static_imp_wp |simp add:placeNewObject_def2)+ - apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp static_imp_wp)+ + apply (wp hoare_weak_lift_imp |simp add:placeNewObject_def2)+ + apply (wp createObjects_orig_obj_at2' updateFreeIndex_pspace_no_overlap' getSlotCap_wp hoare_weak_lift_imp)+ apply (clarsimp simp: projectKO_opts_defs) apply (strengthen st_tcb_strg' [where P=\]) apply (wp deleteObjects_invs_derivatives[where p="makePoolParent aci"] hoare_vcg_ex_lift deleteObjects_cte_wp_at'[where d=False] - deleteObjects_st_tcb_at'[where p="makePoolParent aci"] static_imp_wp + deleteObjects_st_tcb_at'[where p="makePoolParent aci"] hoare_weak_lift_imp updateFreeIndex_pspace_no_overlap' deleteObject_no_overlap[where d=False])+ apply (case_tac ctea) apply (clarsimp) @@ -1894,7 +1894,7 @@ lemma performASIDControlInvocation_st_tcb_at': hoare_vcg_ex_lift deleteObjects_cte_wp_at' deleteObjects_invs_derivatives deleteObjects_st_tcb_at' - static_imp_wp + hoare_weak_lift_imp | simp add: placeNewObject_def2)+ apply (case_tac ctea) apply (clarsimp) @@ 
-1950,7 +1950,7 @@ crunch cte_wp_at': "Arch.finaliseCap" "cte_wp_at' P p" lemma invs_asid_table_strengthen': "invs' s \ asid_pool_at' ap s \ asid \ 2 ^ asid_high_bits - 1 \ invs' (s\ksArchState := - x64KSASIDTable_update (\_. (x64KSASIDTable \ ksArchState) s(asid \ ap)) (ksArchState s)\)" + x64KSASIDTable_update (\_. ((x64KSASIDTable \ ksArchState) s)(asid \ ap)) (ksArchState s)\)" apply (clarsimp simp: invs'_def valid_state'_def) apply (rule conjI) apply (clarsimp simp: valid_global_refs'_def global_refs'_def) @@ -2041,7 +2041,7 @@ lemma performASIDControlInvocation_invs' [wp]: updateFreeIndex_caps_no_overlap'' updateFreeIndex_descendants_of2 updateFreeIndex_caps_overlap_reserved - updateCap_cte_wp_at_cases static_imp_wp + updateCap_cte_wp_at_cases hoare_weak_lift_imp getSlotCap_wp)+ apply (clarsimp simp:conj_comms ex_disj_distrib is_aligned_mask | strengthen invs_valid_pspace' invs_pspace_aligned' diff --git a/proof/refine/X64/CNodeInv_R.thy b/proof/refine/X64/CNodeInv_R.thy index e15856b5ac..7e73232588 100644 --- a/proof/refine/X64/CNodeInv_R.thy +++ b/proof/refine/X64/CNodeInv_R.thy @@ -4906,7 +4906,7 @@ lemma cteSwap_iflive'[wp]: simp only: if_live_then_nonz_cap'_def imp_conv_disj ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift - hoare_vcg_ex_lift updateCap_cte_wp_at_cases static_imp_wp)+ + hoare_vcg_ex_lift updateCap_cte_wp_at_cases hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -5831,7 +5831,7 @@ lemma cteSwap_cte_wp_cteCap: apply simp apply (wp hoare_drop_imps)[1] apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - getCTE_wp' hoare_vcg_all_lift static_imp_wp)+ + getCTE_wp' hoare_vcg_all_lift hoare_weak_lift_imp)+ apply simp apply (clarsimp simp: o_def) done @@ -5845,7 +5845,7 @@ lemma capSwap_cte_wp_cteCap: apply(simp add: capSwapForDelete_def) apply(wp) apply(rule cteSwap_cte_wp_cteCap) - apply(wp getCTE_wp getCTE_cte_wp_at static_imp_wp)+ + apply(wp getCTE_wp getCTE_cte_wp_at hoare_weak_lift_imp)+ apply(clarsimp) apply(rule conjI) apply(simp add: cte_at_cte_wp_atD) @@ -6383,7 +6383,7 @@ proof (induct arbitrary: P p rule: finalise_spec_induct2) apply clarsimp apply (case_tac "cteCap rv", simp_all add: isCap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp | simp | wp (once) isFinal[where x=sl])+ + apply (wp isFinalCapability_inv hoare_weak_lift_imp | simp | wp (once) isFinal[where x=sl])+ apply (wp getCTE_wp') apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule conjI, clarsimp simp: removeable'_def) @@ -7167,14 +7167,14 @@ next apply simp apply (wp replace_cap_invs final_cap_same_objrefs set_cap_cte_wp_at set_cap_cte_cap_wp_to - hoare_vcg_const_Ball_lift static_imp_wp + hoare_vcg_const_Ball_lift hoare_weak_lift_imp | simp add: conj_comms | erule finalise_cap_not_reply_master [simplified])+ apply (elim conjE, strengthen exI[mk_strg I], strengthen asm_rl[where psi="(cap_relation cap cap')" for cap cap', mk_strg I E]) apply (wp make_zombie_invs' updateCap_cap_to' updateCap_cte_wp_at_cases - hoare_vcg_ex_lift static_imp_wp) + hoare_vcg_ex_lift hoare_weak_lift_imp) apply clarsimp apply (drule_tac cap=a in cap_relation_removables, clarsimp, assumption+) @@ -7216,7 +7216,7 @@ next apply (clarsimp dest!: isCapDs simp: cte_wp_at_ctes_of) apply (case_tac "cteCap rv'", auto simp add: isCap_simps is_cap_simps final_matters'_def)[1] - apply (wp isFinalCapability_inv static_imp_wp + apply (wp isFinalCapability_inv 
hoare_weak_lift_imp | simp add: is_final_cap_def conj_comms cte_wp_at_eq_simp)+ apply (rule isFinal[where x="cte_map slot"]) apply (wp get_cap_wp| simp add: conj_comms)+ @@ -7357,7 +7357,7 @@ next apply (rule updateCap_corres) apply simp apply (simp add: is_cap_simps) - apply (rule_tac Q="\rv. cte_at' (cte_map ?target)" in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (cte_map ?target)" in hoare_post_add) apply (wp, (wp getCTE_wp)+) apply (clarsimp simp: cte_wp_at_ctes_of) apply (rule no_fail_pre, wp, simp) @@ -8537,7 +8537,7 @@ lemma cteMove_iflive'[wp]: ex_nonz_cap_to'_def) apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift hoare_vcg_ex_lift updateCap_cte_wp_at_cases - getCTE_wp static_imp_wp)+ + getCTE_wp hoare_weak_lift_imp)+ apply (clarsimp simp: cte_wp_at_ctes_of) apply (drule(1) if_live_then_nonz_capE') apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) @@ -8742,7 +8742,7 @@ lemma cteMove_cte_wp_at: \\_ s. cte_wp_at' (\c. Q (cteCap c)) ptr s\" unfolding cteMove_def apply (fold o_def) - apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp static_imp_wp|simp add: o_def)+ + apply (wp updateCap_cte_wp_at_cases updateMDB_weak_cte_wp_at getCTE_wp hoare_weak_lift_imp|simp add: o_def)+ apply (clarsimp simp: cte_wp_at_ctes_of) done diff --git a/proof/refine/X64/CSpace1_R.thy b/proof/refine/X64/CSpace1_R.thy index 6f751f138d..9d060788b7 100644 --- a/proof/refine/X64/CSpace1_R.thy +++ b/proof/refine/X64/CSpace1_R.thy @@ -833,7 +833,7 @@ lemma setCTE_tcb_in_cur_domain': done lemma setCTE_ctes_of_wp [wp]: - "\\s. P (ctes_of s (p \ cte))\ + "\\s. P ((ctes_of s) (p \ cte))\ setCTE p cte \\rv s. P (ctes_of s)\" by (simp add: setCTE_def ctes_of_setObject_cte) @@ -938,7 +938,7 @@ lemma cteInsert_weak_cte_wp_at: \\uu. cte_wp_at'(\c. P (cteCap c)) p\" unfolding cteInsert_def error_def updateCap_def setUntypedCapAsFull_def apply (simp add: bind_assoc split del: if_split) - apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at static_imp_wp | simp)+ + apply (wp setCTE_weak_cte_wp_at updateMDB_weak_cte_wp_at hoare_weak_lift_imp | simp)+ apply (wp getCTE_ctes_wp)+ apply (clarsimp simp: isCap_simps split:if_split_asm| rule conjI)+ done diff --git a/proof/refine/X64/CSpace_R.thy b/proof/refine/X64/CSpace_R.thy index eb02e8ac8c..8917fedb54 100644 --- a/proof/refine/X64/CSpace_R.thy +++ b/proof/refine/X64/CSpace_R.thy @@ -2319,7 +2319,7 @@ proof - let ?c2 = "(CTE capability.NullCap (MDB 0 0 bool1 bool2))" let ?C = "(modify_map (modify_map - (modify_map (ctes_of s(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest + (modify_map ((ctes_of s)(dest \ CTE cap (MDB 0 0 bool1 bool2))) dest (cteMDBNode_update (\a. MDB word1 src (isCapRevocable cap src_cap) (isCapRevocable cap src_cap)))) src (cteMDBNode_update (mdbNext_update (\_. dest)))) word1 (cteMDBNode_update (mdbPrev_update (\_. dest))))" @@ -3044,7 +3044,7 @@ definition cap_ioports' newcap - cap_ioports' oldcap \ issued_ioports' (ksArchState s)" lemma setCTE_arch_ctes_of_wp [wp]: - "\\s. P (ksArchState s) (ctes_of s (p \ cte))\ + "\\s. P (ksArchState s) ((ctes_of s)(p \ cte))\ setCTE p cte \\rv s. 
P (ksArchState s) (ctes_of s)\" apply (simp add: setCTE_def ctes_of_setObject_cte) diff --git a/proof/refine/X64/Detype_R.thy b/proof/refine/X64/Detype_R.thy index 94fb2cbc52..199c85a4dd 100644 --- a/proof/refine/X64/Detype_R.thy +++ b/proof/refine/X64/Detype_R.thy @@ -125,7 +125,7 @@ lemma deleteObjects_def2: then None else gsCNodes s x \); stateAssert ksASIDMapSafe [] od" - apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def) + apply (simp add: deleteObjects_def is_aligned_mask[symmetric] unless_def deleteGhost_def) apply (rule bind_eqI, rule ext) apply (rule bind_eqI, rule ext) apply (simp add: bind_assoc[symmetric]) @@ -2002,13 +2002,13 @@ lemma cte_wp_at_top: apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply (clarsimp simp:cte_check_def) apply (simp add:alignCheck_def bind_def alignError_def fail_def return_def objBits_simps magnitudeCheck_def in_monad is_aligned_mask - when_def split:option.splits) + when_def unless_def split:option.splits) apply (intro conjI impI allI,simp_all add:not_le) apply (simp add:typeError_def fail_def cte_check_def split:Structures_H.kernel_object.splits)+ @@ -2643,7 +2643,7 @@ lemma storePDE_det: "ko_wp_at' ((=) (KOArch (KOPDE pde))) ptr s \ storePDE ptr (new_pde::X64_H.pde) s = modify - (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPDE new_pde)))) s" + (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPDE new_pde)))) s" apply (clarsimp simp:ko_wp_at'_def storePDE_def split_def bind_def gets_def return_def get_def setObject_def @@ -2796,7 +2796,7 @@ lemma cte_wp_at_modify_pde: atLeastAtMost_iff shows "\ksPSpace s ptr' = Some (KOArch (KOPDE pde)); pspace_aligned' s;cte_wp_at' \ ptr s\ - \ cte_wp_at' \ ptr (s\ksPSpace := ksPSpace s(ptr' \ (KOArch (KOPDE pde')))\)" + \ cte_wp_at' \ ptr (s\ksPSpace := (ksPSpace s)(ptr' \ (KOArch (KOPDE pde')))\)" apply (simp add:cte_wp_at_obj_cases_mask obj_at'_real_def) apply (frule(1) pspace_alignedD') apply (elim disjE) @@ -3006,7 +3006,7 @@ lemma storePML4E_det: "ko_wp_at' ((=) (KOArch (KOPML4E pml4e))) ptr s \ storePML4E ptr (new_pml4e::X64_H.pml4e) s = modify - (ksPSpace_update (\_. ksPSpace s(ptr \ KOArch (KOPML4E new_pml4e)))) s" + (ksPSpace_update (\_. (ksPSpace s)(ptr \ KOArch (KOPML4E new_pml4e)))) s" apply (clarsimp simp:ko_wp_at'_def storePML4E_def split_def bind_def gets_def return_def get_def setObject_def @@ -3208,7 +3208,7 @@ lemma cte_wp_at_modify_pml4e: atLeastAtMost_iff shows "\ksPSpace s ptr' = Some (KOArch (KOPML4E pml4e)); pspace_aligned' s;cte_wp_at' \ ptr s\ - \ cte_wp_at' \ ptr (s\ksPSpace := ksPSpace s(ptr' \ (KOArch (KOPML4E pml4e')))\)" + \ cte_wp_at' \ ptr (s\ksPSpace := (ksPSpace s)(ptr' \ (KOArch (KOPML4E pml4e')))\)" apply (simp add:cte_wp_at_obj_cases_mask obj_at'_real_def) apply (frule(1) pspace_alignedD') apply (elim disjE) diff --git a/proof/refine/X64/Finalise_R.thy b/proof/refine/X64/Finalise_R.thy index 6e34b0f220..733a72db45 100644 --- a/proof/refine/X64/Finalise_R.thy +++ b/proof/refine/X64/Finalise_R.thy @@ -1296,7 +1296,7 @@ crunches deletedIRQHandler, getSlotCap, clearUntypedFreeIndex, updateMDB, getCTE end lemma emptySlot_cteCaps_of: - "\\s. P (cteCaps_of s(p \ NullCap))\ + "\\s. P ((cteCaps_of s)(p \ NullCap))\ emptySlot p opt \\rv s. 
P (cteCaps_of s)\" apply (simp add: emptySlot_def case_Null_If) @@ -1646,8 +1646,8 @@ lemma arch_postCapDeletion_corres: lemma postCapDeletion_corres: "cap_relation cap cap' \ corres dc \ \ (post_cap_deletion cap) (postCapDeletion cap')" apply (cases cap; clarsimp simp: post_cap_deletion_def Retype_H.postCapDeletion_def) - apply (corressimp corres: deletedIRQHandler_corres) - by (corressimp corres: arch_postCapDeletion_corres) + apply (corresKsimp corres: deletedIRQHandler_corres) + by (corresKsimp corres: arch_postCapDeletion_corres) lemma set_cap_trans_state: "((),s') \ fst (set_cap c p s) \ ((),trans_state f s') \ fst (set_cap c p (trans_state f s))" @@ -1707,7 +1707,7 @@ lemma emptySlot_corres: defer apply wpsimp+ apply (rule corres_no_failI) - apply (rule no_fail_pre, wp static_imp_wp) + apply (rule no_fail_pre, wp hoare_weak_lift_imp) apply (clarsimp simp: cte_wp_at_ctes_of valid_pspace'_def) apply (clarsimp simp: valid_mdb'_def valid_mdb_ctes_def) apply (rule conjI, clarsimp) @@ -2622,7 +2622,7 @@ crunches finaliseCapTrue_standin, unbindNotification lemma cteDeleteOne_cteCaps_of: "\\s. (cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap)))\ + P ((cteCaps_of s)(p \ NullCap)))\ cteDeleteOne p \\rv s. P (cteCaps_of s)\" apply (simp add: cteDeleteOne_def unless_def split_def) @@ -2994,7 +2994,7 @@ crunch ctes_of[wp]: cancelSignal "\s. P (ctes_of s)" lemma cancelIPC_cteCaps_of: "\\s. (\p. cte_wp_at' (\cte. \final. finaliseCap (cteCap cte) final True \ fail) p s \ - P (cteCaps_of s(p \ NullCap))) \ + P ((cteCaps_of s)(p \ NullCap))) \ P (cteCaps_of s)\ cancelIPC t \\rv s. P (cteCaps_of s)\" @@ -3434,7 +3434,7 @@ lemma cteDeleteOne_invs[wp]: subgoal by auto subgoal by (auto dest!: isCapDs simp: pred_tcb_at'_def obj_at'_def projectKOs ko_wp_at'_def) - apply (wp isFinalCapability_inv getCTE_wp' static_imp_wp + apply (wp isFinalCapability_inv getCTE_wp' hoare_weak_lift_imp | wp (once) isFinal[where x=ptr])+ apply (fastforce simp: cte_wp_at_ctes_of) done @@ -3923,7 +3923,7 @@ definition set_thread_all :: "obj_ref \ Structures_A.tcb \ unit det_ext_monad" where "set_thread_all ptr tcb etcb \ do s \ get; - kh \ return $ kheap s(ptr \ (TCB tcb)); + kh \ return $ (kheap s)(ptr \ (TCB tcb)); ekh \ return $ (ekheap s)(ptr \ etcb); put (s\kheap := kh, ekheap := ekh\) od" diff --git a/proof/refine/X64/InterruptAcc_R.thy b/proof/refine/X64/InterruptAcc_R.thy index df0c445ba4..47d19ccf96 100644 --- a/proof/refine/X64/InterruptAcc_R.thy +++ b/proof/refine/X64/InterruptAcc_R.thy @@ -112,7 +112,7 @@ lemma preemptionPoint_inv: shows "\P\ preemptionPoint \\_. 
P\" using assms apply (simp add: preemptionPoint_def setWorkUnits_def getWorkUnits_def modifyWorkUnits_def) apply (wpc - | wp whenE_wp hoare_seq_ext [OF _ select_inv] alternative_wp hoare_drop_imps + | wp whenE_wp hoare_seq_ext [OF _ select_inv] hoare_drop_imps | simp)+ done diff --git a/proof/refine/X64/Interrupt_R.thy b/proof/refine/X64/Interrupt_R.thy index 5ccf200744..95f5d11448 100644 --- a/proof/refine/X64/Interrupt_R.thy +++ b/proof/refine/X64/Interrupt_R.thy @@ -743,7 +743,7 @@ lemma timerTick_corres: apply (simp add:decDomainTime_def) apply wp apply (wp|wpc|unfold Let_def|simp)+ - apply (wp static_imp_wp threadSet_timeslice_invs threadSet_valid_queues threadSet_valid_queues' + apply (wp hoare_weak_lift_imp threadSet_timeslice_invs threadSet_valid_queues threadSet_valid_queues' threadSet_pred_tcb_at_state threadSet_weak_sch_act_wf tcbSchedAppend_valid_objs' rescheduleRequired_weak_sch_act_wf tcbSchedAppend_valid_queues| simp)+ apply (strengthen sch_act_wf_weak) diff --git a/proof/refine/X64/InvariantUpdates_H.thy b/proof/refine/X64/InvariantUpdates_H.thy index 523019315d..25a95f3d15 100644 --- a/proof/refine/X64/InvariantUpdates_H.thy +++ b/proof/refine/X64/InvariantUpdates_H.thy @@ -16,7 +16,7 @@ lemma ps_clear_domE[elim?]: lemma ps_clear_upd: "ksPSpace s y = Some v \ - ps_clear x n (ksPSpace_update (\a. ksPSpace s(y \ v')) s') = ps_clear x n s" + ps_clear x n (ksPSpace_update (\a. (ksPSpace s)(y \ v')) s') = ps_clear x n s" by (rule iffI | clarsimp elim!: ps_clear_domE | fastforce)+ lemmas ps_clear_updE[elim] = iffD2[OF ps_clear_upd, rotated] diff --git a/proof/refine/X64/Ipc_R.thy b/proof/refine/X64/Ipc_R.thy index b8bd0758fb..4c31d2ec32 100644 --- a/proof/refine/X64/Ipc_R.thy +++ b/proof/refine/X64/Ipc_R.thy @@ -320,7 +320,7 @@ lemma cteInsert_cte_wp_at: cteInsert cap src dest \\uu. cte_wp_at' (\c. P (cteCap c)) p\" apply (simp add: cteInsert_def) - apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp static_imp_wp + apply (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp hoare_weak_lift_imp | clarsimp simp: comp_def | unfold setUntypedCapAsFull_def)+ apply (drule cte_at_cte_wp_atD) @@ -364,7 +364,7 @@ lemma cteInsert_weak_cte_wp_at3: else cte_wp_at' (\c. P (cteCap c)) p s\ cteInsert cap src dest \\uu. cte_wp_at' (\c. 
P (cteCap c)) p\" - by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' static_imp_wp + by (wp updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases getCTE_wp' hoare_weak_lift_imp | clarsimp simp: comp_def cteInsert_def | unfold setUntypedCapAsFull_def | auto simp: cte_wp_at'_def dest!: imp)+ @@ -584,7 +584,7 @@ lemma cteInsert_cte_cap_to': apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of) apply (rule_tac x = "cref" in exI) apply (rule conjI) @@ -627,7 +627,7 @@ lemma cteInsert_weak_cte_wp_at2: apply (rule hoare_use_eq_irq_node' [OF cteInsert_ksInterruptState]) apply (clarsimp simp:cteInsert_def) apply (wp hoare_vcg_ex_lift updateMDB_weak_cte_wp_at updateCap_cte_wp_at_cases - setUntypedCapAsFull_cte_wp_at getCTE_wp static_imp_wp) + setUntypedCapAsFull_cte_wp_at getCTE_wp hoare_weak_lift_imp) apply (clarsimp simp:cte_wp_at_ctes_of weak) apply auto done @@ -660,11 +660,11 @@ lemma transferCapsToSlots_presM: apply (wp eb hoare_vcg_const_Ball_lift hoare_vcg_const_imp_lift | assumption | wpc)+ apply (rule cteInsert_assume_Null) - apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' static_imp_wp) + apply (wp x hoare_vcg_const_Ball_lift cteInsert_cte_cap_to' hoare_weak_lift_imp) apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift static_imp_wp)+ + apply (wp hoare_vcg_const_Ball_lift hoare_weak_lift_imp)+ apply (rule cteInsert_weak_cte_wp_at2,clarsimp) - apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at static_imp_wp + apply (wp hoare_vcg_const_Ball_lift cteInsert_cte_wp_at hoare_weak_lift_imp deriveCap_derived_foo)+ apply (thin_tac "\slots. PROP P slots" for P) apply (clarsimp simp: cte_wp_at_ctes_of remove_rights_def @@ -1069,7 +1069,7 @@ lemma transferCaps_corres: apply (rule corres_rel_imp, rule transferCapsToSlots_corres, simp_all add: split_def)[1] apply (case_tac info, simp) - apply (wp hoare_vcg_all_lift get_rs_cte_at static_imp_wp + apply (wp hoare_vcg_all_lift get_rs_cte_at hoare_weak_lift_imp | simp only: ball_conj_distrib)+ apply (simp add: cte_map_def tcb_cnode_index_def split_def) apply (clarsimp simp: valid_pspace'_def valid_ipc_buffer_ptr'_def2 @@ -1502,7 +1502,7 @@ lemma doNormalTransfer_corres: hoare_valid_ipc_buffer_ptr_typ_at' copyMRs_typ_at' hoare_vcg_const_Ball_lift lookupExtraCaps_length | simp add: if_apply_def2)+) - apply (wp static_imp_wp | strengthen valid_msg_length_strengthen)+ + apply (wp hoare_weak_lift_imp | strengthen valid_msg_length_strengthen)+ apply clarsimp apply auto done @@ -2244,7 +2244,7 @@ lemma doReplyTransfer_corres: apply simp apply (fold dc_def, rule possibleSwitchTo_corres) apply simp - apply (wp static_imp_wp static_imp_conj_wp set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' + apply (wp hoare_weak_lift_imp hoare_weak_lift_imp_conj set_thread_state_runnable_weak_valid_sched_action sts_st_tcb_at' sts_st_tcb' sts_valid_queues | simp | force simp: valid_sched_def valid_sched_action_def valid_tcb_state'_def)+ apply (rule corres_guard_imp) apply (rule setThreadState_corres) @@ -2344,15 +2344,15 @@ lemma setupCallerCap_corres: tcb_cnode_index_def cte_level_bits_def) apply (simp add: cte_map_def tcbCallerSlot_def tcb_cnode_index_def cte_level_bits_def) - apply (rule_tac Q="\rv. 
cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (receiver + 2 ^ cte_level_bits * tcbCallerSlot)" + in hoare_post_add) apply (wp, (wp getSlotCap_wp)+) apply blast apply (rule no_fail_pre, wp) apply (clarsimp simp: cte_wp_at'_def cte_at'_def) - apply (rule_tac Q="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" - in valid_prove_more) + apply (rule_tac R="\rv. cte_at' (sender + 2 ^ cte_level_bits * tcbReplySlot)" + in hoare_post_add) apply (wp, (wp getCTE_wp')+) apply blast apply (rule no_fail_pre, wp) @@ -2409,7 +2409,7 @@ lemma possibleSwitchTo_weak_sch_act_wf[wp]: bitmap_fun_defs) apply (wp rescheduleRequired_weak_sch_act_wf weak_sch_act_wf_lift_linear[where f="tcbSchedEnqueue t"] - getObject_tcb_wp static_imp_wp + getObject_tcb_wp hoare_weak_lift_imp | wpc)+ apply (clarsimp simp: obj_at'_def projectKOs weak_sch_act_wf_def ps_clear_def tcb_in_cur_domain'_def) done @@ -2628,7 +2628,7 @@ lemmas setMessageInfo_typ_ats[wp] = typ_at_lifts [OF setMessageInfo_typ_at'] declare tl_drop_1[simp] crunch cur[wp]: cancel_ipc "cur_tcb" - (wp: select_wp crunch_wps simp: crunch_simps) + (wp: crunch_wps simp: crunch_simps) crunch valid_objs'[wp]: asUser "valid_objs'" @@ -2777,7 +2777,7 @@ lemma possibleSwitchTo_sch_act[wp]: possibleSwitchTo t \\rv s. sch_act_wf (ksSchedulerAction s) s\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadSet_sch_act setQueue_sch_act threadGet_wp + apply (wp hoare_weak_lift_imp threadSet_sch_act setQueue_sch_act threadGet_wp | simp add: unless_def | wpc)+ apply (auto simp: obj_at'_def projectKOs tcb_in_cur_domain'_def) done @@ -2798,7 +2798,7 @@ lemma possibleSwitchTo_ksQ': possibleSwitchTo t \\_ s. t' \ set (ksReadyQueues s p)\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp + apply (wp hoare_weak_lift_imp rescheduleRequired_ksQ' tcbSchedEnqueue_ksQ threadGet_wp | wpc | simp split del: if_split)+ apply (auto simp: obj_at'_def) @@ -2810,7 +2810,7 @@ lemma possibleSwitchTo_valid_queues'[wp]: possibleSwitchTo t \\rv. valid_queues'\" apply (simp add: possibleSwitchTo_def curDomain_def bitmap_fun_defs) - apply (wp static_imp_wp threadGet_wp | wpc | simp)+ + apply (wp hoare_weak_lift_imp threadGet_wp | wpc | simp)+ apply (auto simp: obj_at'_def) done @@ -3771,7 +3771,7 @@ lemma completeSignal_invs: \ ((\y. ntfnBoundTCB ntfn = Some y) \ ex_nonz_cap_to' ntfnptr s) \ ntfnptr \ ksIdleThread s" in hoare_strengthen_post) - apply ((wp hoare_vcg_ex_lift static_imp_wp | wpc | simp add: valid_ntfn'_def)+)[1] + apply ((wp hoare_vcg_ex_lift hoare_weak_lift_imp | wpc | simp add: valid_ntfn'_def)+)[1] apply (clarsimp simp: obj_at'_def state_refs_of'_def typ_at'_def ko_wp_at'_def projectKOs split: option.splits) apply (blast dest: ntfn_q_refs_no_bound_refs') apply wp @@ -3990,7 +3990,7 @@ lemma rai_invs'[wp]: \ \ep = ActiveNtfn\ apply (simp add: invs'_def valid_state'_def) apply (rule hoare_pre) - apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts static_imp_wp + apply (wp valid_irq_node_lift sts_valid_objs' typ_at_lifts hoare_weak_lift_imp asUser_urz | simp add: valid_ntfn'_def)+ apply (clarsimp simp: pred_tcb_at' valid_pspace'_def) @@ -4455,7 +4455,7 @@ lemma sendSignal_st_tcb'_Running: sendSignal ntfnptr bdg \\_. st_tcb_at' (\st. 
st = Running \ P st) t\" apply (simp add: sendSignal_def) - apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp static_imp_wp + apply (wp sts_st_tcb_at'_cases cancelIPC_st_tcb_at' gts_wp' getNotification_wp hoare_weak_lift_imp | wpc | clarsimp simp: pred_tcb_at')+ done diff --git a/proof/refine/X64/KHeap_R.thy b/proof/refine/X64/KHeap_R.thy index 4fd7fb09bf..199dd740ef 100644 --- a/proof/refine/X64/KHeap_R.thy +++ b/proof/refine/X64/KHeap_R.thy @@ -1019,8 +1019,8 @@ lemma setEndpoint_corres: corres dc (ep_at ptr) (ep_at' ptr) (set_endpoint ptr e) (setEndpoint ptr e')" apply (simp add: set_simple_ko_def setEndpoint_def is_ep_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ep obj_at_simps objBits_defs partial_inv_def) lemma setNotification_corres: @@ -1028,8 +1028,8 @@ lemma setNotification_corres: corres dc (ntfn_at ptr) (ntfn_at' ptr) (set_notification ptr ae) (setNotification ptr ae')" apply (simp add: set_simple_ko_def setNotification_def is_ntfn_def[symmetric]) - apply (corres_search search: setObject_other_corres[where P="\_. True"]) - apply (corressimp wp: get_object_ret get_object_wp)+ + apply (corresK_search search: setObject_other_corres[where P="\_. True"]) + apply (corresKsimp wp: get_object_ret get_object_wp)+ by (fastforce simp: is_ntfn obj_at_simps objBits_defs partial_inv_def) lemma no_fail_getNotification [wp]: @@ -2114,21 +2114,21 @@ lemma valid_globals_cte_wpD': lemma dmo_aligned'[wp]: "\pspace_aligned'\ doMachineOp f \\_. pspace_aligned'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_distinct'[wp]: "\pspace_distinct'\ doMachineOp f \\_. pspace_distinct'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done lemma dmo_valid_objs'[wp]: "\valid_objs'\ doMachineOp f \\_. valid_objs'\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp done @@ -2136,7 +2136,7 @@ lemma dmo_inv': assumes R: "\P. \P\ f \\_. P\" shows "\P\ doMachineOp f \\_. P\" apply (simp add: doMachineOp_def split_def) - apply (wp select_wp) + apply wp apply clarsimp apply (drule in_inv_by_hoareD [OF R]) apply simp diff --git a/proof/refine/X64/LevityCatch.thy b/proof/refine/X64/LevityCatch.thy index c56bd48e87..9ba1e50687 100644 --- a/proof/refine/X64/LevityCatch.thy +++ b/proof/refine/X64/LevityCatch.thy @@ -8,6 +8,7 @@ theory LevityCatch imports "BaseRefine.Include" "Lib.LemmaBucket" + "Lib.Corres_Method" begin (* Try again, clagged from Include *) diff --git a/proof/refine/X64/Refine.thy b/proof/refine/X64/Refine.thy index 3d7ab23cb8..ddce64cc1c 100644 --- a/proof/refine/X64/Refine.thy +++ b/proof/refine/X64/Refine.thy @@ -280,7 +280,7 @@ lemma kernel_entry_invs: thread_set_ct_running thread_set_not_state_valid_sched hoare_vcg_disj_lift ct_in_state_thread_state_lift thread_set_no_change_tcb_state call_kernel_domain_time_inv_det_ext call_kernel_domain_list_inv_det_ext - static_imp_wp + hoare_weak_lift_imp | clarsimp simp add: tcb_cap_cases_def active_from_running)+ done @@ -296,18 +296,18 @@ definition lemma do_user_op_valid_list:"\valid_list\ do_user_op f tc \\_. 
valid_list\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_valid_sched:"\valid_sched\ do_user_op f tc \\_. valid_sched\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_sched_act: "\\s. P (scheduler_action s)\ do_user_op f tc \\_ s. P (scheduler_action s)\" unfolding do_user_op_def - apply (wp select_wp | simp add: split_def)+ + apply (wp | simp add: split_def)+ done lemma do_user_op_invs2: @@ -419,8 +419,8 @@ lemma kernelEntry_invs': (\s. 0 < ksDomainTime s) and valid_domain_list' \" apply (simp add: kernelEntry_def) apply (wp ckernel_invs callKernel_domain_time_left - threadSet_invs_trivial threadSet_ct_running' select_wp - TcbAcc_R.dmo_invs' static_imp_wp + threadSet_invs_trivial threadSet_ct_running' + TcbAcc_R.dmo_invs' hoare_weak_lift_imp doMachineOp_sch_act_simple callKernel_domain_time_left | clarsimp simp: user_memory_update_def no_irq_def tcb_at_invs' @@ -498,7 +498,7 @@ lemma doUserOp_invs': (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and (\s. 0 < ksDomainTime s) and valid_domain_list'\" apply (simp add: doUserOp_def split_def ex_abs_def) - apply (wp device_update_invs' select_wp + apply (wp device_update_invs' | (wp (once) dmo_invs', wpsimp simp: no_irq_modify device_memory_update_def user_memory_update_def))+ apply (clarsimp simp: user_memory_update_def simpler_modify_def @@ -632,7 +632,7 @@ lemma entry_corres: apply (rule hoare_strengthen_post, rule ckernel_invs, simp add: invs'_def cur_tcb'_def) apply (wp thread_set_invs_trivial thread_set_ct_running threadSet_invs_trivial threadSet_ct_running' - select_wp thread_set_not_state_valid_sched static_imp_wp + thread_set_not_state_valid_sched hoare_weak_lift_imp hoare_vcg_disj_lift ct_in_state_thread_state_lift | simp add: tcb_cap_cases_def ct_in_state'_def thread_set_no_change_tcb_state | (wps, wp threadSet_st_tcb_at2) )+ @@ -800,7 +800,7 @@ lemma domain_list_rel_eq: by (clarsimp simp: state_relation_def) crunch valid_objs': doUserOp, checkActiveIRQ valid_objs' - (wp: crunch_wps select_wp) + (wp: crunch_wps) lemma ckernel_invariant: "ADT_H uop \ full_invs'" diff --git a/proof/refine/X64/Retype_R.thy b/proof/refine/X64/Retype_R.thy index 8aee07791d..01585f15bf 100644 --- a/proof/refine/X64/Retype_R.thy +++ b/proof/refine/X64/Retype_R.thy @@ -1520,7 +1520,7 @@ lemma retype_region_ext_modify_kheap_futz: apply (simp add: modify_def[symmetric]) done -lemmas retype_region_ext_modify_kheap_futz' = fun_cong[OF arg_cong[where f=NonDetMonad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] +lemmas retype_region_ext_modify_kheap_futz' = fun_cong[OF arg_cong[where f=Nondet_Monad.bind, OF retype_region_ext_modify_kheap_futz[symmetric]], simplified bind_assoc] lemma foldr_upd_app_if_eta_futz: "foldr (\p ps. ps(p \ f p)) as = (\g x. 
if x \ set as then Some (f x) else g x)" @@ -2603,7 +2603,6 @@ lemma update_gs_ksMachineState_update_swap: declare hoare_in_monad_post[wp del] declare univ_get_wp[wp del] -declare result_in_set_wp[wp del] crunch valid_arch_state'[wp]: copyGlobalMappings "valid_arch_state'" (wp: crunch_wps) @@ -4555,7 +4554,7 @@ proof - apply (simp add: ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def) apply (rule hoare_pre) apply (wps a b c d) - apply (wp static_imp_wp e' hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp e' hoare_vcg_disj_lift) apply (auto simp: obj_at'_def ct_in_state'_def projectKOs st_tcb_at'_def) done qed @@ -5545,7 +5544,7 @@ lemma corres_retype_region_createNewCaps: APIType_map2_def arch_default_cap_def) apply fastforce+ \ \PML4\ - apply (corressimp corres: corres_retype[where ty="Inr PML4Object" and 'a=pml4e and sz=sz, + apply (corresKsimp corres: corres_retype[where ty="Inr PML4Object" and 'a=pml4e and sz=sz, simplified, folded retype_region2_retype_region_PML4Obj] corresK: corresK_mapM_x_list_all2[where I="\xs s. valid_arch_state s \ pspace_aligned s \ valid_etcbs s \ diff --git a/proof/refine/X64/Schedule_R.thy b/proof/refine/X64/Schedule_R.thy index 946b5f0408..c1827abcf3 100644 --- a/proof/refine/X64/Schedule_R.thy +++ b/proof/refine/X64/Schedule_R.thy @@ -10,7 +10,7 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) -declare static_imp_wp[wp_split del] +declare hoare_weak_lift_imp[wp_split del] (* Levity: added (20090713 10:04:12) *) declare sts_rel_idle [simp] @@ -41,7 +41,7 @@ proof - apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) done have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. if rv \ None then \ else P\" - by (wp alternative_wp | simp)+ + by (wp | simp)+ show ?thesis using p apply (induct xs) apply (simp add: y del: dc_simp) @@ -448,7 +448,7 @@ lemma ct_idle_or_in_cur_domain'_lift2: apply (rule hoare_lift_Pf2[where f=ksCurThread]) apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) including no_pre - apply (wp static_imp_wp hoare_vcg_disj_lift) + apply (wp hoare_weak_lift_imp hoare_vcg_disj_lift) apply simp+ done @@ -715,7 +715,7 @@ lemma arch_switchToIdleThread_corres: arch_switch_to_idle_thread Arch.switchToIdleThread" apply (simp add: arch_switch_to_idle_thread_def X64_H.switchToIdleThread_def) - apply (corressimp corres: getIdleThread_corres setVMRoot_corres) + apply (corresKsimp corres: getIdleThread_corres setVMRoot_corres) apply (clarsimp simp: valid_idle_def valid_idle'_def pred_tcb_at_def obj_at_def is_tcb obj_at'_def) done @@ -1360,7 +1360,7 @@ lemma switchToIdleThread_invs_no_cicd': crunch obj_at'[wp]: "Arch.switchToIdleThread" "\s. obj_at' P t s" -declare static_imp_conj_wp[wp_split del] +declare hoare_weak_lift_imp_conj[wp_split del] lemma setCurThread_const: "\\_. P t \ setCurThread t \\_ s. P (ksCurThread s) \" @@ -2144,7 +2144,7 @@ lemma schedule_invs': apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs' chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2 - | wp hoare_disjI2[where Q="\_ s. tcb_in_cur_domain' (ksCurThread s) s"] + | wp hoare_disjI2[where R="\_ s. 
tcb_in_cur_domain' (ksCurThread s) s"] | wp hoare_drop_imp[where f="isHighestPrio d p" for d p] | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def] | strengthen invs'_invs_no_cicd diff --git a/proof/refine/X64/Syscall_R.thy b/proof/refine/X64/Syscall_R.thy index 9d24c6ef80..78aaf0ddcc 100644 --- a/proof/refine/X64/Syscall_R.thy +++ b/proof/refine/X64/Syscall_R.thy @@ -337,7 +337,7 @@ lemma threadSet_tcbDomain_update_sch_act_wf[wp]: apply (simp add: threadSet_def) apply wp apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp hoare_vcg_all_lift)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp hoare_vcg_all_lift)+ apply (rename_tac word) apply (rule_tac Q="\_ s. ksSchedulerAction s = SwitchToThread word \ st_tcb_at' runnable' word s \ tcb_in_cur_domain' word s \ word \ t" @@ -688,7 +688,7 @@ proof - apply (rule hoare_weaken_pre [OF cteInsert_weak_cte_wp_at3]) apply (rule PUC,simp) apply (clarsimp simp: cte_wp_at_ctes_of) - apply (wp hoare_vcg_all_lift static_imp_wp | simp add:ball_conj_distrib)+ + apply (wp hoare_vcg_all_lift hoare_weak_lift_imp | simp add:ball_conj_distrib)+ done qed @@ -807,7 +807,7 @@ lemma doReply_invs[wp]: apply assumption apply (erule cte_wp_at_weakenE') apply (fastforce) - apply (wp sts_invs_minor'' sts_st_tcb' static_imp_wp) + apply (wp sts_invs_minor'' sts_st_tcb' hoare_weak_lift_imp) apply (rule_tac Q="\rv s. invs' s \ sch_act_simple s \ st_tcb_at' awaiting_reply' t s \ t \ ksIdleThread s" @@ -825,7 +825,7 @@ lemma doReply_invs[wp]: apply (erule_tac P="\st. awaiting_reply' st \ activatable' st" in pred_tcb'_weakenE) apply (case_tac st, clarsimp+) - apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 static_imp_wp + apply (wp threadSet_invs_trivial threadSet_st_tcb_at2 hoare_weak_lift_imp | clarsimp simp add: inQ_def)+ apply (rule_tac Q="\_. invs' and tcb_at' t and sch_act_simple and st_tcb_at' awaiting_reply' t" @@ -982,7 +982,7 @@ lemma setDomain_invs': (\y. domain \ maxDomain))\ setDomain ptr domain \\y. invs'\" apply (simp add:setDomain_def ) - apply (wp add: when_wp static_imp_wp static_imp_conj_wp rescheduleRequired_all_invs_but_extra + apply (wp add: when_wp hoare_weak_lift_imp hoare_weak_lift_imp_conj rescheduleRequired_all_invs_but_extra tcbSchedEnqueue_valid_action hoare_vcg_if_lift2) apply (rule_tac Q = "\r s. all_invs_but_sch_extra s \ curThread = ksCurThread s \ (ptr \ curThread \ ct_not_inQ s \ sch_act_wf (ksSchedulerAction s) s \ ct_idle_or_in_cur_domain' s)" @@ -996,7 +996,7 @@ lemma setDomain_invs': prefer 2 apply clarsimp apply assumption - apply (wp static_imp_wp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain + apply (wp hoare_weak_lift_imp threadSet_pred_tcb_no_state threadSet_not_curthread_ct_domain threadSet_tcbDomain_update_ct_not_inQ | simp)+ apply (rule_tac Q = "\r s. 
invs' s \ curThread = ksCurThread s \ sch_act_simple s \ domain \ maxDomain @@ -1182,7 +1182,7 @@ done lemmas set_thread_state_active_valid_sched = set_thread_state_runnable_valid_sched[simplified runnable_eq_active] -(*FIXME: move to NonDetMonadVCG.valid_validE_R *) +(*FIXME: move to Nondet_VCG.valid_validE_R *) lemma handleInvocation_corres: "c \ b \ corres (dc \ dc) @@ -1311,7 +1311,7 @@ lemma hinv_invs'[wp]: apply (simp add: handleInvocation_def split_def ts_Restart_case_helper') apply (wp syscall_valid' setThreadState_nonqueued_state_update rfk_invs' - hoare_vcg_all_lift static_imp_wp) + hoare_vcg_all_lift hoare_weak_lift_imp) apply simp apply (intro conjI impI) apply (wp gts_imp' | simp)+ diff --git a/proof/refine/X64/TcbAcc_R.thy b/proof/refine/X64/TcbAcc_R.thy index ac60ef5285..5f6eff7fa9 100644 --- a/proof/refine/X64/TcbAcc_R.thy +++ b/proof/refine/X64/TcbAcc_R.thy @@ -11,7 +11,6 @@ begin context begin interpretation Arch . (*FIXME: arch_split*) declare if_weak_cong [cong] -declare result_in_set_wp[wp] declare hoare_in_monad_post[wp] declare trans_state_update'[symmetric,simp] declare storeWordUser_typ_at' [wp] @@ -50,7 +49,7 @@ lemma isHighestPrio_def': "isHighestPrio d p = gets (\s. ksReadyQueuesL1Bitmap s d = 0 \ lookupBitmapPriority d s \ p)" unfolding isHighestPrio_def bitmap_fun_defs getHighestPrio_def' apply (rule ext) - apply (clarsimp simp: gets_def bind_assoc return_def NonDetMonad.bind_def get_def + apply (clarsimp simp: gets_def bind_assoc return_def Nondet_Monad.bind_def get_def split: if_splits) done @@ -399,7 +398,7 @@ proof - apply (simp add: return_def thread_set_def gets_the_def assert_def assert_opt_def simpler_gets_def set_object_def get_object_def put_def get_def bind_def) - apply (subgoal_tac "kheap s(t \ TCB tcb) = kheap s", simp) + apply (subgoal_tac "(kheap s)(t \ TCB tcb) = kheap s", simp) apply (simp add: map_upd_triv get_tcb_SomeD)+ done show ?thesis @@ -2352,9 +2351,9 @@ lemma threadSet_queued_sch_act_wf[wp]: split: scheduler_action.split) apply (wp hoare_vcg_conj_lift) apply (simp add: threadSet_def) - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wps setObject_sa_unchanged) - apply (wp static_imp_wp getObject_tcb_wp)+ + apply (wp hoare_weak_lift_imp getObject_tcb_wp)+ apply (clarsimp simp: obj_at'_def) apply (wp hoare_vcg_all_lift hoare_vcg_conj_lift hoare_convert_imp)+ apply (simp add: threadSet_def) @@ -4111,7 +4110,7 @@ lemma possibleSwitchTo_ct_not_inQ: possibleSwitchTo t \\_. 
ct_not_inQ\" (is "\?PRE\ _ \_\") apply (simp add: possibleSwitchTo_def curDomain_def) - apply (wpsimp wp: static_imp_wp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ + apply (wpsimp wp: hoare_weak_lift_imp rescheduleRequired_ct_not_inQ tcbSchedEnqueue_ct_not_inQ threadGet_wp | (rule hoare_post_imp[OF _ rescheduleRequired_sa_cnt], fastforce))+ apply (fastforce simp: obj_at'_def) @@ -4130,7 +4129,7 @@ lemma threadSet_tcbState_update_ct_not_inQ[wp]: apply (clarsimp) apply (rule hoare_conjI) apply (rule hoare_weaken_pre) - apply (wps, wp static_imp_wp) + apply (wps, wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb)+ apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) @@ -4150,7 +4149,7 @@ lemma threadSet_tcbBoundNotification_update_ct_not_inQ[wp]: apply (rule hoare_conjI) apply (rule hoare_weaken_pre) apply wps - apply (wp static_imp_wp) + apply (wp hoare_weak_lift_imp) apply (wp OMG_getObject_tcb) apply (clarsimp simp: comp_def) apply (wp hoare_drop_imp) diff --git a/proof/refine/X64/Tcb_R.thy b/proof/refine/X64/Tcb_R.thy index e915921d09..8cd6e8cbe4 100644 --- a/proof/refine/X64/Tcb_R.thy +++ b/proof/refine/X64/Tcb_R.thy @@ -353,7 +353,7 @@ lemma invokeTCB_WriteRegisters_corres: apply (rule_tac P=\ and P'=\ in corres_inst) apply simp apply (wp+)[2] - apply ((wp static_imp_wp restart_invs' + apply ((wp hoare_weak_lift_imp restart_invs' | strengthen valid_sched_weak_strg einvs_valid_etcbs invs_valid_queues' invs_queues invs_weak_sch_act_wf | clarsimp simp: invs_def valid_state_def valid_sched_def invs'_def valid_state'_def @@ -452,7 +452,7 @@ proof - apply (simp add: frame_registers_def frameRegisters_def) apply (simp add: getRestartPC_def setNextPC_def dc_def[symmetric]) apply (rule Q[OF refl refl]) - apply (wpsimp wp: mapM_x_wp' static_imp_wp)+ + apply (wpsimp wp: mapM_x_wp' hoare_weak_lift_imp)+ apply (rule corres_split_nor) apply (rule corres_when[OF refl]) apply (rule R[OF refl refl]) @@ -462,15 +462,15 @@ proof - apply (rule corres_split[OF corres_when[OF refl rescheduleRequired_corres]]) apply (rule_tac P=\ and P'=\ in corres_inst) apply simp - apply ((solves \wp static_imp_wp\)+) + apply ((solves \wp hoare_weak_lift_imp\)+) apply (rule_tac Q="\_. einvs and tcb_at dest" in hoare_post_imp) apply (clarsimp simp: invs_def valid_sched_weak_strg valid_sched_def) prefer 2 apply (rule_tac Q="\_. 
invs' and tcb_at' dest" in hoare_post_imp) apply (clarsimp simp: invs'_def valid_state'_def invs_weak_sch_act_wf cur_tcb'_def) - apply (wp mapM_x_wp' static_imp_wp | simp)+ - apply ((wp static_imp_wp restart_invs' | wpc | clarsimp simp: if_apply_def2)+)[2] - apply (wp suspend_nonz_cap_to_tcb static_imp_wp | simp add: if_apply_def2)+ + apply (wp mapM_x_wp' hoare_weak_lift_imp | simp)+ + apply ((wp hoare_weak_lift_imp restart_invs' | wpc | clarsimp simp: if_apply_def2)+)[2] + apply (wp suspend_nonz_cap_to_tcb hoare_weak_lift_imp | simp add: if_apply_def2)+ apply (fastforce simp: invs_def valid_state_def valid_pspace_def dest!: idle_no_ex_cap) apply (fastforce simp: invs'_def valid_state'_def dest!: global'_no_ex_cap) @@ -640,7 +640,7 @@ lemma sp_corres2: apply (rule rescheduleRequired_corres) apply (rule possibleSwitchTo_corres) apply ((clarsimp - | wp static_imp_wp hoare_vcg_if_lift hoare_wp_combs gts_wp + | wp hoare_weak_lift_imp hoare_vcg_if_lift hoare_wp_combs gts_wp isRunnable_wp)+)[4] apply (wp hoare_vcg_imp_lift' hoare_vcg_if_lift hoare_vcg_all_lift) apply clarsimp @@ -1635,30 +1635,30 @@ lemma tc_invs': apply (simp only: eq_commute[where a="a"]) apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] apply (rule hoare_walk_assmsE) apply (clarsimp simp: pred_conj_def option.splits [where P="\x. x s" for s]) - apply ((wp case_option_wp threadSet_invs_trivial static_imp_wp setMCPriority_invs' + apply ((wp case_option_wp threadSet_invs_trivial hoare_weak_lift_imp setMCPriority_invs' typ_at_lifts[OF setMCPriority_typ_at'] hoare_vcg_all_lift threadSet_cap_to' | clarsimp simp: inQ_def)+)[2] - apply (wp add: setP_invs' static_imp_wp hoare_vcg_all_lift)+ + apply (wp add: setP_invs' hoare_weak_lift_imp hoare_vcg_all_lift)+ apply (rule case_option_wp_None_return[OF setP_invs'[simplified pred_conj_assoc]]) apply clarsimp apply wpfix apply assumption apply (rule case_option_wp_None_returnOk) - apply (wpsimp wp: static_imp_wp hoare_vcg_all_lift + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak threadSet_invs_trivial2 threadSet_tcb' hoare_vcg_all_lift threadSet_cte_wp_at')+ - apply (wpsimp wp: static_imp_wpE cteDelete_deletes + apply (wpsimp wp: hoare_weak_lift_imp_R cteDelete_deletes hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_invs' cteDelete_typ_at'_lifts)+ apply (assumption | clarsimp cong: conj_cong imp_cong | (rule case_option_wp_None_returnOk) - | wpsimp wp: static_imp_wp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak + | wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] assertDerived_wp_weak hoare_vcg_imp_lift' hoare_vcg_all_lift checkCap_inv[where P="tcb_at' t" for t] checkCap_inv[where P="valid_cap' c" for c] checkCap_inv[where P=sch_act_simple] - hoare_vcg_const_imp_lift_R assertDerived_wp_weak static_imp_wpE cteDelete_deletes + hoare_vcg_const_imp_lift_R assertDerived_wp_weak hoare_weak_lift_imp_R cteDelete_deletes hoare_vcg_all_lift_R hoare_vcg_conj_liftE1 hoare_vcg_const_imp_lift_R hoare_vcg_propE_R cteDelete_invs' cteDelete_typ_at'_lifts cteDelete_sch_act_simple)+ apply (clarsimp simp: tcb_cte_cases_def cte_level_bits_def 
objBits_defs tcbIPCBufferSlot_def) @@ -2035,7 +2035,7 @@ lemma decodeSetPriority_corres: clarsimp simp: decode_set_priority_def decodeSetPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) @@ -2054,7 +2054,7 @@ lemma decodeSetMCPriority_corres: clarsimp simp: decode_set_mcpriority_def decodeSetMCPriority_def) apply (rename_tac auth_cap auth_slot auth_path rest auth_cap' rest') apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) apply (clarsimp simp: newroot_rel_def elim!: is_thread_cap.elims(2)) @@ -2091,7 +2091,7 @@ lemma checkPrio_wp: checkPrio prio auth \ \rv. P \,-" apply (simp add: checkPrio_def) - apply (wp NonDetMonadVCG.whenE_throwError_wp getMCP_wp) + apply (wp Nondet_VCG.whenE_throwError_wp getMCP_wp) by (auto simp add: pred_tcb_at'_def obj_at'_def) lemma checkPrio_lt_ct: @@ -2171,7 +2171,7 @@ lemma decodeSetSchedParams_corres: apply (clarsimp split: list.split simp: list_all2_Cons2) apply (clarsimp simp: list_all2_Cons1 neq_Nil_conv val_le_length_Cons linorder_not_less) apply (rule corres_split_eqrE) - apply corressimp + apply corresKsimp apply (rule corres_split_norE[OF checkPrio_corres]) apply (rule corres_splitEE[OF checkPrio_corres]) apply (rule corres_returnOkTT) @@ -2705,7 +2705,7 @@ lemma restart_makes_simple': \\rv. st_tcb_at' simple' t\" apply (simp add: restart_def) apply (wp sts_st_tcb_at'_cases cancelIPC_simple - cancelIPC_st_tcb_at static_imp_wp | simp)+ + cancelIPC_st_tcb_at hoare_weak_lift_imp | simp)+ apply (rule hoare_strengthen_post [OF isStopped_inv]) prefer 2 apply assumption diff --git a/proof/refine/X64/Untyped_R.thy b/proof/refine/X64/Untyped_R.thy index f15c42d161..475c6f089e 100644 --- a/proof/refine/X64/Untyped_R.thy +++ b/proof/refine/X64/Untyped_R.thy @@ -3292,7 +3292,7 @@ lemma createNewCaps_valid_cap': lemma dmo_ctes_of[wp]: "\\s. P (ctes_of s)\ doMachineOp mop \\rv s. P (ctes_of s)\" - by (simp add: doMachineOp_def split_def | wp select_wp)+ + by (simp add: doMachineOp_def split_def | wp)+ lemma createNewCaps_ranges: "\\s. range_cover ptr sz (APIType_capBits ty us) n \ 0 @@ -3616,7 +3616,7 @@ lemma updateFreeIndex_mdb_simple': and cte_wp_at' :"ctes_of s src = Some cte" "cteCap cte = capability.UntypedCap d ptr sz idx'" and unt_inc' :"untyped_inc' (ctes_of s)" and valid_objs' :"valid_objs' s" - and invp: "mdb_inv_preserve (ctes_of s) (ctes_of s(src \ cteCap_update (\_. capability.UntypedCap d ptr sz idx) cte))" + and invp: "mdb_inv_preserve (ctes_of s) ((ctes_of s)(src \ cteCap_update (\_. UntypedCap d ptr sz idx) cte))" (is "mdb_inv_preserve (ctes_of s) ?ctes") show "untyped_inc' ?ctes" @@ -4145,6 +4145,8 @@ lemma idx_le_new_offs: end +context begin interpretation Arch . (*FIXME: arch_split*) + lemma valid_sched_etcbs[elim!]: "valid_sched_2 queues ekh sa cdom kh ct it \ valid_etcbs_2 ekh kh" by (simp add: valid_sched_def) @@ -4718,6 +4720,8 @@ lemma whenE_reset_resetUntypedCap_invs_etc: crunch ksCurDomain[wp]: updateFreeIndex "\s. 
P (ksCurDomain s)" +end + lemma (in range_cover) funky_aligned: "is_aligned ((ptr && foo) + v * 2 ^ sbit) sbit" apply (rule aligned_add_aligned) diff --git a/proof/refine/X64/VSpace_R.thy b/proof/refine/X64/VSpace_R.thy index 6cb30fd648..2ef1a0379d 100644 --- a/proof/refine/X64/VSpace_R.thy +++ b/proof/refine/X64/VSpace_R.thy @@ -546,7 +546,7 @@ lemma invalidatePageStructureCacheASID_corres' [corres]: "corres dc \ \ (invalidate_page_structure_cache_asid vspace asid) (X64_H.invalidatePageStructureCacheASID vspace' asid')" - by (corressimp simp: invalidate_page_structure_cache_asid_def + by (corresKsimp simp: invalidate_page_structure_cache_asid_def X64_H.invalidatePageStructureCacheASID_def invalidateLocalPageStructureCacheASID_def assms ucast_id diff --git a/proof/sep-capDL/Frame_SD.thy b/proof/sep-capDL/Frame_SD.thy index 861d47b6ab..3c7937e4ad 100644 --- a/proof/sep-capDL/Frame_SD.thy +++ b/proof/sep-capDL/Frame_SD.thy @@ -85,13 +85,13 @@ lemma disjoint_union_diff: by auto lemma intent_reset_update_slots_single: - "intent_reset (update_slots (object_slots obj(slot \ cap)) obj) - = update_slots (object_slots (intent_reset obj)(slot \ cap)) (intent_reset obj)" + "intent_reset (update_slots ((object_slots obj)(slot \ cap)) obj) + = update_slots ((object_slots (intent_reset obj))(slot \ cap)) (intent_reset obj)" by simp lemma object_clean_update_slots_single: - "object_clean (update_slots (object_slots obj(slot \ cap)) obj) - = update_slots (object_slots (object_clean obj)(slot \ reset_cap_asid cap)) (object_clean obj)" + "object_clean (update_slots ((object_slots obj)(slot \ cap)) obj) + = update_slots ((object_slots (object_clean obj))(slot \ reset_cap_asid cap)) (object_clean obj)" by (auto simp: object_clean_def intent_reset_def asid_reset_def update_slots_def object_slots_def fun_eq_iff split: cdl_object.splits) @@ -203,7 +203,7 @@ lemma object_clean_has_slots: lemma set_object_slot_wp_helper: "\\s. <(obj_id, slot) \c - \* R> s \ cdl_objects s obj_id = Some obj \ object_clean obj = object_clean obj'\ - set_object obj_id (update_slots (object_slots obj' (slot \ cap)) obj') + set_object obj_id (update_slots ((object_slots obj') (slot \ cap)) obj') \\rv. <(obj_id, slot) \c cap \* R>\" apply (clarsimp simp: set_object_def sep_any_def) apply wp @@ -230,7 +230,7 @@ lemma set_object_slot_wp: "\\s. <(obj_id, slot) \c - \* R> s \ cdl_objects s obj_id = Some obj \ (\obj'. object_clean obj = object_clean obj' \ - nobj = (update_slots (object_slots obj' (slot \ cap)) obj'))\ + nobj = (update_slots ((object_slots obj') (slot \ cap)) obj'))\ set_object obj_id nobj \\rv. 
<(obj_id, slot) \c cap \* R>\" apply (rule hoare_name_pre_state) @@ -316,7 +316,7 @@ lemma set_cap_wp: apply (case_tac ptr, rename_tac obj_id slot, clarsimp) apply (wp|wpc)+ apply (rule_tac obj = obj in set_object_slot_wp) - apply (wp select_wp |wpc)+ + apply (wp |wpc)+ apply clarsimp apply (clarsimp simp: update_slots_def has_slots_def split: cdl_object.splits) diff --git a/proof/sep-capDL/Helpers_SD.thy b/proof/sep-capDL/Helpers_SD.thy index 79d3127f2b..8d2a1a4634 100644 --- a/proof/sep-capDL/Helpers_SD.thy +++ b/proof/sep-capDL/Helpers_SD.thy @@ -1034,7 +1034,7 @@ lemma derive_cap_wp: "\ P (derived_cap cap) \ derive_cap slot cap \P\, -" apply (clarsimp simp: derive_cap_def derived_cap_def) apply (clarsimp simp: validE_R_def derive_cap_def split:cdl_cap.splits) - apply (safe, (wp alternative_wp alternativeE_wp whenE_wp | + apply (safe, (wp whenE_wp | clarsimp simp: ensure_no_children_def)+ ) done diff --git a/proof/sep-capDL/Lookups_D.thy b/proof/sep-capDL/Lookups_D.thy index 3af21b8165..0fca39865a 100644 --- a/proof/sep-capDL/Lookups_D.thy +++ b/proof/sep-capDL/Lookups_D.thy @@ -7,7 +7,7 @@ theory Lookups_D imports "DSpec.Syscall_D" - "Monads.OptionMonadND" + "Monads.Reader_Option_ND" begin type_synonym 'a lookup = "cdl_state \ 'a option" diff --git a/run_tests b/run_tests index 779d635b1c..38b4ac476d 100755 --- a/run_tests +++ b/run_tests @@ -96,7 +96,7 @@ EXCLUDE["AARCH64"]=[ # To be eliminated/refined as development progresses "ASepSpec", "CKernel", - "BaseRefine", + "CBaseRefine", "Access", # Tools and unrelated content, removed for development @@ -152,15 +152,15 @@ if args.help: returncode = 0 for arch in archs: - print("Testing for L4V_ARCH=%s:" % arch) + features = os.environ.get("L4V_FEATURES", "") + plat = os.environ.get("L4V_PLAT", "") + print(f"Testing for L4V_ARCH='{arch}', L4V_FEATURES='{features}', L4V_PLAT='{plat}':") os.environ["L4V_ARCH"] = arch - # Test Orphanage when L4V_ARCH=ARM; - # we need to set this flag here to test the above equality in the ROOT file. - # To be removed when we finish proving Orphanage for ARM_HYP and X64 + # Provide L4V_ARCH_IS_ARM for Corres_Test in lib/ROOT if arch == "ARM": os.environ["L4V_ARCH_IS_ARM"] = arch - print("Testing Orphanage for ARM") + print("Setting L4V_ARCH_IS_ARM") elif "L4V_ARCH_IS_ARM" in os.environ: del os.environ["L4V_ARCH_IS_ARM"] diff --git a/spec/abstract/AARCH64/ArchDecode_A.thy b/spec/abstract/AARCH64/ArchDecode_A.thy index 5da3795eac..760949487e 100644 --- a/spec/abstract/AARCH64/ArchDecode_A.thy +++ b/spec/abstract/AARCH64/ArchDecode_A.thy @@ -53,7 +53,7 @@ definition arch_decode_irq_control_invocation :: else throwError IllegalOperation)" definition attribs_from_word :: "machine_word \ vm_attributes" where - "attribs_from_word w \ {attr. \w!!0 \ attr = Execute \ \w !! 2 \ attr = Device}" + "attribs_from_word w \ {attr. \w!!0 \ attr = Device \ \w !! 
2 \ attr = Execute}" definition make_user_pte :: "paddr \ vm_attributes \ vm_rights \ vmpage_size \ pte" where "make_user_pte addr attr rights vm_size \ @@ -91,7 +91,7 @@ definition decode_fr_inv_map :: "'z::state_ext arch_decoder" where odE | None \ doE vtop \ returnOk $ vaddr + mask (pageBitsForSize pgsz); - whenE (vtop \ user_vtop) $ throwError $ InvalidArgument 0 + whenE (vtop > user_vtop) $ throwError $ InvalidArgument 0 odE; (level, slot) \ liftE $ gets_the $ pt_lookup_slot pt vaddr \ ptes_of; unlessE (pt_bits_left level = pg_bits) $ @@ -247,7 +247,7 @@ definition decode_vs_inv_flush :: "'z::state_ext arch_decoder" where definition decode_vspace_invocation :: "'z::state_ext arch_decoder" where "decode_vspace_invocation label args cte cap extra_caps \ - if isPageFlushLabel (invocation_type label) + if isVSpaceFlushLabel (invocation_type label) then decode_vs_inv_flush label args cte cap extra_caps else throwError IllegalOperation" diff --git a/spec/abstract/AARCH64/ArchVSpace_A.thy b/spec/abstract/AARCH64/ArchVSpace_A.thy index b9df0bd59a..14804557c4 100644 --- a/spec/abstract/AARCH64/ArchVSpace_A.thy +++ b/spec/abstract/AARCH64/ArchVSpace_A.thy @@ -50,7 +50,6 @@ definition vspace_for_pool :: "obj_ref \ asid \ (obj_ref (* this is what asid_map encodes in ARM/ARM_HYP; getASIDPoolEntry in Haskell *) definition entry_for_asid :: "asid \ 'z::state_ext state \ asid_pool_entry option" where "entry_for_asid asid = do { - oassert (0 < asid); pool_ptr \ pool_for_asid asid; entry_for_pool pool_ptr asid \ asid_pools_of }" @@ -68,6 +67,7 @@ definition update_asid_pool_entry :: definition vspace_for_asid :: "asid \ 'z::state_ext state \ obj_ref option" where "vspace_for_asid asid = do { + oassert (0 < asid); entry \ entry_for_asid asid; oreturn $ ap_vspace entry }" diff --git a/spec/abstract/AARCH64/Arch_A.thy b/spec/abstract/AARCH64/Arch_A.thy index 2fa80e78c5..77c80678e6 100644 --- a/spec/abstract/AARCH64/Arch_A.thy +++ b/spec/abstract/AARCH64/Arch_A.thy @@ -95,6 +95,7 @@ definition perform_pg_inv_map :: old_pte \ get_pte level slot; set_cap (ArchObjectCap cap) ct_slot; store_pte level slot pte; + do_machine_op $ cleanByVA_PoU slot (addrFromPPtr slot); when (old_pte \ InvalidPTE) $ do (asid, vaddr) \ assert_opt $ acap_map_data cap; invalidate_tlb_by_asid_va asid vaddr @@ -115,7 +116,7 @@ definition do_flush :: "flush_type \ vspace_ref \ vspace case type of Clean \ cleanCacheRange_RAM vstart vend pstart | Invalidate \ invalidateCacheRange_RAM vstart vend pstart - | CleanInvalidate \ invalidateCacheRange_RAM vstart vend pstart + | CleanInvalidate \ cleanInvalidateCacheRange_RAM vstart vend pstart | Unify \ do cleanCacheRange_PoU vstart vend pstart; dsb; diff --git a/spec/abstract/AARCH64/Machine_A.thy b/spec/abstract/AARCH64/Machine_A.thy index 590767dcb3..7932435383 100644 --- a/spec/abstract/AARCH64/Machine_A.thy +++ b/spec/abstract/AARCH64/Machine_A.thy @@ -38,7 +38,7 @@ type_synonym asid_len = 16 type_synonym asid_rep_len = asid_len type_synonym asid = "asid_rep_len word" -type_synonym vmid = "16 word" +type_synonym vmid = "8 word" text \ diff --git a/spec/abstract/AARCH64/VCPUAcc_A.thy b/spec/abstract/AARCH64/VCPUAcc_A.thy index 72415a1eb1..9154bd9527 100644 --- a/spec/abstract/AARCH64/VCPUAcc_A.thy +++ b/spec/abstract/AARCH64/VCPUAcc_A.thy @@ -120,7 +120,7 @@ definition vcpu_disable :: "obj_ref option \ (unit,'z::state_ext) s_ hcr \ do_machine_op get_gic_vcpu_ctrl_hcr; vgic_update vr (\vgic. 
vgic\ vgic_hcr := hcr \); vcpu_save_reg vr VCPURegSCTLR; - vcpu_save_reg vr VCPURegACTLR; \ \since FPU enabled\ + vcpu_save_reg vr VCPURegCPACR; \ \since FPU enabled\ do_machine_op isb od | _ \ return (); @@ -171,6 +171,8 @@ definition vcpu_save :: "(obj_ref \ bool) option \ (unit,'z:: "vcpu_save vb \ case vb of Some (vr, active) \ do + do_machine_op dsb; + when active $ do vcpu_save_reg vr VCPURegSCTLR; hcr \ do_machine_op get_gic_vcpu_ctrl_hcr; diff --git a/spec/abstract/ARM/ArchDecode_A.thy b/spec/abstract/ARM/ArchDecode_A.thy index 1dbc7bfc00..beb47ffda9 100644 --- a/spec/abstract/ARM/ArchDecode_A.thy +++ b/spec/abstract/ARM/ArchDecode_A.thy @@ -241,12 +241,12 @@ where definition arch_data_to_obj_type :: "nat \ aobject_type option" where "arch_data_to_obj_type n \ - if n = 0 then Some SmallPageObj - else if n = 1 then Some LargePageObj - else if n = 2 then Some SectionObj - else if n = 3 then Some SuperSectionObj - else if n = 4 then Some PageTableObj - else if n = 5 then Some PageDirectoryObj + if n = 0 then Some PageDirectoryObj + else if n = 1 then Some SmallPageObj + else if n = 2 then Some LargePageObj + else if n = 3 then Some SectionObj + else if n = 4 then Some SuperSectionObj + else if n = 5 then Some PageTableObj else None" definition diff --git a/spec/abstract/ARM_HYP/ArchDecode_A.thy b/spec/abstract/ARM_HYP/ArchDecode_A.thy index a69562a3ec..f06266fe0f 100644 --- a/spec/abstract/ARM_HYP/ArchDecode_A.thy +++ b/spec/abstract/ARM_HYP/ArchDecode_A.thy @@ -259,12 +259,12 @@ where definition arch_data_to_obj_type :: "nat \ aobject_type option" where "arch_data_to_obj_type n \ - if n = 0 then Some SmallPageObj - else if n = 1 then Some LargePageObj - else if n = 2 then Some SectionObj - else if n = 3 then Some SuperSectionObj - else if n = 4 then Some PageTableObj - else if n = 5 then Some PageDirectoryObj + if n = 0 then Some PageDirectoryObj + else if n = 1 then Some SmallPageObj + else if n = 2 then Some LargePageObj + else if n = 3 then Some SectionObj + else if n = 4 then Some SuperSectionObj + else if n = 5 then Some PageTableObj else if n = 6 then Some VCPUObj else None" diff --git a/spec/abstract/ARM_HYP/Init_A.thy b/spec/abstract/ARM_HYP/Init_A.thy index a737025ba8..0d25ded079 100644 --- a/spec/abstract/ARM_HYP/Init_A.thy +++ b/spec/abstract/ARM_HYP/Init_A.thy @@ -77,8 +77,8 @@ definition tcb_bound_notification = None, tcb_mcpriority = minBound, tcb_arch = init_arch_tcb - \) - (us_global_pd_ptr \ us_global_pd)" + \, + us_global_pd_ptr \ us_global_pd)" definition "init_cdt \ Map.empty" diff --git a/spec/abstract/CSpace_A.thy b/spec/abstract/CSpace_A.thy index f999c0283d..2823c837b7 100644 --- a/spec/abstract/CSpace_A.thy +++ b/spec/abstract/CSpace_A.thy @@ -15,7 +15,7 @@ imports SchedContext_A IpcCancel_A ArchCSpace_A - "Monads.NonDetMonadLemmas" + "Monads.Nondet_Lemmas" "HOL-Library.Prefix_Order" begin diff --git a/spec/abstract/KHeap_A.thy b/spec/abstract/KHeap_A.thy index 9d8ef1da1b..54d4c24aa7 100644 --- a/spec/abstract/KHeap_A.thy +++ b/spec/abstract/KHeap_A.thy @@ -41,7 +41,7 @@ where kobj <- get_object ptr; assert (a_type kobj = a_type obj); s \ get; - put (s\kheap := kheap s(ptr \ obj)\) + put (s\kheap := (kheap s)(ptr \ obj)\) od" diff --git a/spec/abstract/X64/Machine_A.thy b/spec/abstract/X64/Machine_A.thy index 5cbdbe852c..67cccb9218 100644 --- a/spec/abstract/X64/Machine_A.thy +++ b/spec/abstract/X64/Machine_A.thy @@ -13,7 +13,7 @@ chapter "x64 Machine Instantiation" theory Machine_A imports - "Monads.NonDetMonad" + "Monads.Nondet_Monad" 
"ExecSpec.MachineTypes" "ExecSpec.MachineOps" begin diff --git a/spec/capDL/Monads_D.thy b/spec/capDL/Monads_D.thy index 48fe35b826..f605e45e61 100644 --- a/spec/capDL/Monads_D.thy +++ b/spec/capDL/Monads_D.thy @@ -11,8 +11,8 @@ theory Monads_D imports Types_D - Monads.In_Monad - Monads.NonDetMonadVCG + Monads.Nondet_In_Monad + Monads.Nondet_VCG begin (* Kernel state monad *) diff --git a/spec/cspec/c/Makefile b/spec/cspec/c/Makefile index a1e3d9da3a..af8be49cb8 100644 --- a/spec/cspec/c/Makefile +++ b/spec/cspec/c/Makefile @@ -32,7 +32,7 @@ CONFIG_THY := ../../machine/${L4V_ARCH}/Kernel_Config.thy # called by ../../Makefile config: ${CONFIG_THY} -${CONFIG_THY}: ${KERNEL_CONFIG_ROOT}/.cmake_done +${CONFIG_THY}: ${CONFIG_DONE} ./gen-config-thy.py diff --git a/spec/cspec/c/export-kernel-builds.py b/spec/cspec/c/export-kernel-builds.py index 333d35a1c8..093c5d28ec 100755 --- a/spec/cspec/c/export-kernel-builds.py +++ b/spec/cspec/c/export-kernel-builds.py @@ -64,16 +64,22 @@ def get_l4v_paths(l4v_arch: str) -> L4vPaths: return L4vPaths(kernel_mk=kernel_mk, c_pp=c_pp, c_functions=c_functions) +def path_suffix(opt_suffix: Optional[str]) -> str: + return f'-{opt_suffix}' if opt_suffix else '' + + class ExportConfig(NamedTuple): export_root: Path l4v_arch: str l4v_features: Optional[str] + l4v_plat: Optional[str] l4v_paths: L4vPaths manifest: Optional[Path] def config_name(self, optimisation: str) -> str: - features = f'-{self.l4v_features}' if self.l4v_features else '' - return f'{self.l4v_arch}{features}{optimisation}' + features = path_suffix(self.l4v_features) + plat = path_suffix(self.l4v_plat) + return f'{self.l4v_arch}{features}{plat}{optimisation}' def export_path(self, optimisation: str) -> Path: return self.export_root / self.config_name(optimisation) @@ -124,6 +130,7 @@ def do_export(self, optimisation: str) -> None: with open(export_dir / 'config.env', 'w') as config_env: config_env.write(f'L4V_ARCH={self.l4v_arch}\n') config_env.write(f'L4V_FEATURES={self.l4v_features or ""}\n') + config_env.write(f'L4V_PLAT={self.l4v_plat or ""}\n') config_env.write(f'CONFIG_OPTIMISATION={optimisation}\n') @@ -167,6 +174,7 @@ def parse_args() -> ExportCommand: config = ExportConfig(export_root=args.export_root, l4v_arch=l4v_arch, l4v_features=os.environ.get('L4V_FEATURES'), + l4v_plat=os.environ.get('L4V_PLAT'), l4v_paths=get_l4v_paths(l4v_arch), manifest=args.manifest_xml) diff --git a/spec/cspec/c/kernel.mk b/spec/cspec/c/kernel.mk index 2414dbf9d2..f6a973404c 100644 --- a/spec/cspec/c/kernel.mk +++ b/spec/cspec/c/kernel.mk @@ -26,7 +26,7 @@ ifndef L4V_ARCH $(error L4V_ARCH is not set) endif -SEL4_CONFIG_NAME := ${L4V_ARCH}$(if ${L4V_FEATURES},_${L4V_FEATURES},) +SEL4_CONFIG_NAME := ${L4V_ARCH}$(if ${L4V_FEATURES},_${L4V_FEATURES},)$(if ${L4V_PLAT},_${L4V_PLAT},) ifndef CONFIG CONFIG := ${SOURCE_ROOT}/configs/${SEL4_CONFIG_NAME}_verified.cmake @@ -87,8 +87,14 @@ endif # We avoid this by excluding __pycache__ directories from the kernel dependencies. KERNEL_DEPS := $(shell find ${SOURCE_ROOT} -name .git -prune -o -name __pycache__ -prune -o -type f -print) +# The kernel build generates a large number of files, so we create a dummy file +# .cmake_done-${SEL4_CONFIG_NAME} to represent overall completion for make's +# dependency tracking. The ${SEL4_CONFIG_NAME} part makes sure we rebuild when +# we switch features or platforms. 
+BUILD_DONE = ${KERNEL_BUILD_ROOT}/.cmake_done-${SEL4_CONFIG_NAME} + # Top level rule for rebuilding kernel_all.c_pp -${KERNEL_BUILD_ROOT}/kernel_all.c_pp: ${KERNEL_BUILD_ROOT}/.cmake_done +${KERNEL_BUILD_ROOT}/kernel_all.c_pp: ${BUILD_DONE} cd ${KERNEL_BUILD_ROOT} && ninja kernel_all_pp_wrapper cp -a ${KERNEL_BUILD_ROOT}/kernel_all_pp.c $@ @@ -105,9 +111,7 @@ endif # Initialize the CMake build. We purge the build directory and start again # whenever any of the kernel sources change, so that we can reliably pick up # changes to the build config. -# This step also generates a large number of files, so we create a dummy file -# .cmake_done to represent overall completion for make's dependency tracking. -${KERNEL_BUILD_ROOT}/.cmake_done: ${KERNEL_DEPS} ${CONFIG_DOMAIN_SCHEDULE} ${OVERLAY} +${BUILD_DONE}: ${KERNEL_DEPS} ${CONFIG_DOMAIN_SCHEDULE} ${OVERLAY} @rm -rf ${KERNEL_BUILD_ROOT} @mkdir -p ${KERNEL_BUILD_ROOT} cd ${KERNEL_BUILD_ROOT} && \ @@ -119,7 +123,7 @@ ${KERNEL_BUILD_ROOT}/.cmake_done: ${KERNEL_DEPS} ${CONFIG_DOMAIN_SCHEDULE} ${OVE ${KERNEL_CMAKE_OPTIMISATION} ${KERNEL_CMAKE_EXTRA_OPTIONS} \ ${OVERLAY_OPT} \ -G Ninja ${SOURCE_ROOT} - @touch ${KERNEL_BUILD_ROOT}/.cmake_done + @touch ${BUILD_DONE} ifneq ($(L4V_ARCH),X64) @if [ "$$(diff -q ${OVERLAY} ${DEFAULT_OVERLAY})" ]; then \ echo "++ Used custom overlay for $(L4V_ARCH)"; \ @@ -134,7 +138,8 @@ ${UMM_TYPES}: ${KERNEL_BUILD_ROOT}/kernel_all.c_pp # This target generates config files and headers only. It does not invoke # the C tool chain or preprocessor. We force CMake to skip tests for these, # so that ASpec and ExecSpec can be built with fewer dependencies. -${KERNEL_CONFIG_ROOT}/.cmake_done: ${KERNEL_DEPS} gen-config-thy.py ${OVERLAY} +CONFIG_DONE = ${KERNEL_CONFIG_ROOT}/.cmake_done-${SEL4_CONFIG_NAME} +${CONFIG_DONE}: ${KERNEL_DEPS} gen-config-thy.py ${OVERLAY} @rm -rf ${KERNEL_CONFIG_ROOT} @mkdir -p ${KERNEL_CONFIG_ROOT} cd ${KERNEL_CONFIG_ROOT} && \ @@ -144,7 +149,7 @@ ${KERNEL_CONFIG_ROOT}/.cmake_done: ${KERNEL_DEPS} gen-config-thy.py ${OVERLAY} ${OVERLAY_OPT} \ -G Ninja ${SOURCE_ROOT} cd ${KERNEL_CONFIG_ROOT} && ninja gen_config/kernel/gen_config.json - @touch ${KERNEL_CONFIG_ROOT}/.cmake_done + @touch ${CONFIG_DONE} ifneq ($(L4V_ARCH),X64) @if [ "$$(diff -q ${OVERLAY} ${DEFAULT_OVERLAY})" ]; then \ echo "++ Used custom overlay for $(L4V_ARCH)"; \ diff --git a/spec/design/m-skel/AARCH64/MachineTypes.thy b/spec/design/m-skel/AARCH64/MachineTypes.thy index ed9ce47936..98c96196f3 100644 --- a/spec/design/m-skel/AARCH64/MachineTypes.thy +++ b/spec/design/m-skel/AARCH64/MachineTypes.thy @@ -8,9 +8,9 @@ chapter "AARCH64 Machine Types" theory MachineTypes imports Word_Lib.WordSetup - Monads.Empty_Fail - Monads.No_Fail - Monads.OptionMonadND + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail + Monads.Reader_Option_ND Lib.HaskellLib_H Platform begin diff --git a/spec/design/m-skel/ARM/MachineTypes.thy b/spec/design/m-skel/ARM/MachineTypes.thy index 1db06db111..296c98ae27 100644 --- a/spec/design/m-skel/ARM/MachineTypes.thy +++ b/spec/design/m-skel/ARM/MachineTypes.thy @@ -9,9 +9,9 @@ chapter "ARM Machine Types" theory MachineTypes imports Word_Lib.WordSetup - Monads.Empty_Fail - Monads.No_Fail - Monads.OptionMonadND + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail + Monads.Reader_Option_ND Setup_Locale Platform begin diff --git a/spec/design/m-skel/ARM_HYP/MachineTypes.thy b/spec/design/m-skel/ARM_HYP/MachineTypes.thy index 77e6f4fc88..7e895ec27b 100644 --- a/spec/design/m-skel/ARM_HYP/MachineTypes.thy +++ 
b/spec/design/m-skel/ARM_HYP/MachineTypes.thy @@ -9,9 +9,9 @@ chapter \ARM\_HYP Machine Types\ theory MachineTypes imports Word_Lib.WordSetup - Monads.Empty_Fail - Monads.No_Fail - Monads.OptionMonadND + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail + Monads.Reader_Option_ND Setup_Locale Platform begin diff --git a/spec/design/m-skel/RISCV64/MachineTypes.thy b/spec/design/m-skel/RISCV64/MachineTypes.thy index 554924eb1f..91d1364102 100644 --- a/spec/design/m-skel/RISCV64/MachineTypes.thy +++ b/spec/design/m-skel/RISCV64/MachineTypes.thy @@ -9,9 +9,9 @@ chapter "RISCV 64bit Machine Types" theory MachineTypes imports Word_Lib.WordSetup - Monads.Empty_Fail - Monads.No_Fail - Monads.OptionMonadND + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail + Monads.Reader_Option_ND Lib.HaskellLib_H Platform begin diff --git a/spec/design/m-skel/X64/MachineTypes.thy b/spec/design/m-skel/X64/MachineTypes.thy index ea19e28b9c..e8465d4e15 100644 --- a/spec/design/m-skel/X64/MachineTypes.thy +++ b/spec/design/m-skel/X64/MachineTypes.thy @@ -9,9 +9,9 @@ chapter "x86-64bit Machine Types" theory MachineTypes imports Word_Lib.WordSetup - Monads.Empty_Fail - Monads.No_Fail - Monads.OptionMonadND + Monads.Nondet_Empty_Fail + Monads.Nondet_No_Fail + Monads.Reader_Option_ND Lib.HaskellLib_H Platform begin diff --git a/spec/design/skel/AARCH64/ArchIntermediate_H.thy b/spec/design/skel/AARCH64/ArchIntermediate_H.thy index fc109989b5..016a51f40c 100644 --- a/spec/design/skel/AARCH64/ArchIntermediate_H.thy +++ b/spec/design/skel/AARCH64/ArchIntermediate_H.thy @@ -25,10 +25,15 @@ private abbreviation (input) od)" private abbreviation (input) - "createNewTableCaps regionBase numObjects tableBits objectProto cap initialiseMappings \ (do + "createNewTableCaps regionBase numObjects ptType objectProto cap initialiseMappings \ (do + tableBits \ return (ptBits ptType); tableSize \ return (tableBits - objBits objectProto); addrs \ createObjects regionBase numObjects (injectKO objectProto) tableSize; pts \ return (map (PPtr \ fromPPtr) addrs); + modify (\ks. ks \ksArchState := + ksArchState ks \gsPTTypes := (\addr. + if addr `~elem~` map fromPPtr addrs then Just ptType + else gsPTTypes (ksArchState ks) addr)\\); initialiseMappings pts; return $ map (\pt. cap pt Nothing) pts od)" @@ -45,11 +50,11 @@ defs Arch_createNewCaps_def: | HugePageObject \ createNewFrameCaps regionBase numObjects dev (2 * ptTranslationBits NormalPT_T) ARMHugePage | VSpaceObject \ - createNewTableCaps regionBase numObjects (ptBits VSRootPT_T) (makeObject::pte) + createNewTableCaps regionBase numObjects VSRootPT_T (makeObject::pte) (\base addr. PageTableCap base VSRootPT_T addr) (\pts. return ()) | PageTableObject \ - createNewTableCaps regionBase numObjects (ptBits NormalPT_T) (makeObject::pte) + createNewTableCaps regionBase numObjects NormalPT_T (makeObject::pte) (\base addr. PageTableCap base NormalPT_T addr) (\pts. 
return ()) | VCPUObject \ (do diff --git a/spec/design/skel/AARCH64/ArchPSpace_H.thy b/spec/design/skel/AARCH64/ArchPSpace_H.thy new file mode 100644 index 0000000000..5ec65834fe --- /dev/null +++ b/spec/design/skel/AARCH64/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming AARCH64_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/AARCH64.hs + +end (* context Arch *) + +end diff --git a/spec/design/skel/ARM/ArchPSpace_H.thy b/spec/design/skel/ARM/ArchPSpace_H.thy new file mode 100644 index 0000000000..af3737ffb7 --- /dev/null +++ b/spec/design/skel/ARM/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming ARM_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/ARM.hs + +end (* context Arch *) + +end diff --git a/spec/design/skel/ARM/ArchTypes_H.thy b/spec/design/skel/ARM/ArchTypes_H.thy index 04887e2c72..e82300474d 100644 --- a/spec/design/skel/ARM/ArchTypes_H.thy +++ b/spec/design/skel/ARM/ArchTypes_H.thy @@ -34,12 +34,12 @@ interpretation Arch . definition enum_object_type: "enum_class.enum \ map APIObjectType (enum_class.enum :: apiobject_type list) @ - [SmallPageObject, + [PageDirectoryObject, + SmallPageObject, LargePageObject, SectionObject, SuperSectionObject, - PageTableObject, - PageDirectoryObject + PageTableObject ]" definition diff --git a/spec/design/skel/ARM_HYP/ArchPSpace_H.thy b/spec/design/skel/ARM_HYP/ArchPSpace_H.thy new file mode 100644 index 0000000000..38644c9b44 --- /dev/null +++ b/spec/design/skel/ARM_HYP/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming ARM_HYP_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/ARM.hs + +end (* context Arch *) + +end diff --git a/spec/design/skel/ARM_HYP/ArchTypes_H.thy b/spec/design/skel/ARM_HYP/ArchTypes_H.thy index 807b774830..b80c0e045e 100644 --- a/spec/design/skel/ARM_HYP/ArchTypes_H.thy +++ b/spec/design/skel/ARM_HYP/ArchTypes_H.thy @@ -34,12 +34,12 @@ interpretation Arch . definition enum_object_type: "enum_class.enum \ map APIObjectType (enum_class.enum :: apiobject_type list) @ - [SmallPageObject, + [PageDirectoryObject, + SmallPageObject, LargePageObject, SectionObject, SuperSectionObject, PageTableObject, - PageDirectoryObject, VCPUObject ]" diff --git a/spec/design/skel/PSpaceFuns_H.thy b/spec/design/skel/PSpaceFuns_H.thy index 81b5db6334..cb9a95cee5 100644 --- a/spec/design/skel/PSpaceFuns_H.thy +++ b/spec/design/skel/PSpaceFuns_H.thy @@ -10,6 +10,7 @@ theory PSpaceFuns_H imports ObjectInstances_H FaultMonad_H + ArchPSpace_H "Lib.DataMap" begin @@ -22,6 +23,9 @@ requalify_consts loadWord end +requalify_consts (in Arch) + deleteGhost + definition deleteRange :: "( machine_word , 'a ) DataMap.map \ machine_word \ nat \ ( machine_word , 'a ) DataMap.map" where "deleteRange m ptr bits \ let inRange = (\ x. 
x && ((- mask bits) - 1) = fromPPtr ptr) in diff --git a/spec/design/skel/RISCV64/ArchPSpace_H.thy b/spec/design/skel/RISCV64/ArchPSpace_H.thy new file mode 100644 index 0000000000..d692e99c12 --- /dev/null +++ b/spec/design/skel/RISCV64/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming RISCV64_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/RISCV64.hs + +end (* context Arch *) + +end diff --git a/spec/design/skel/X64/ArchPSpace_H.thy b/spec/design/skel/X64/ArchPSpace_H.thy new file mode 100644 index 0000000000..0c9b7e8c3e --- /dev/null +++ b/spec/design/skel/X64/ArchPSpace_H.thy @@ -0,0 +1,20 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +(* Arch-specific ghost update functions for physical memory *) + +theory ArchPSpace_H +imports + ObjectInstances_H +begin + +context Arch begin global_naming X64_H + +#INCLUDE_HASKELL SEL4/Model/PSpace/X64.hs + +end (* context Arch *) + +end diff --git a/spec/haskell/Makefile b/spec/haskell/Makefile index bb85ec6768..6891ae7051 100644 --- a/spec/haskell/Makefile +++ b/spec/haskell/Makefile @@ -31,6 +31,13 @@ GHC_DEV_OPTS=--ghc-options="" all: build-arm build-riscv +# build targets by L4V_ARCH: +ARM: build-arm +ARM_HYP: build-arm-hyp-nosmmu +X64: build-x64 +RISCV64: build-riscv +AARCH64: build-aarch64 + sandbox: .stack-work build-arm: sandbox $(BOOT_FILES) diff --git a/spec/haskell/SEL4.cabal b/spec/haskell/SEL4.cabal index c7eefa8020..4253b84d89 100644 --- a/spec/haskell/SEL4.cabal +++ b/spec/haskell/SEL4.cabal @@ -12,13 +12,13 @@ build-type: Custom license: GPL-2.0-only author: Philip Derrin et. al., NICTA synopsis: Executable specification for the seL4 Kernel -tested-with: GHC == 9.0.2 +tested-with: GHC == 9.2.8 homepage: http://sel4.systems/ custom-setup setup-depends: - base == 4.15.*, - Cabal == 3.4.1.0 + base == 4.16.*, + Cabal == 3.6.3.* Flag FFI description: Include the C language bindings @@ -47,7 +47,7 @@ Flag ArchAArch64 Library exposed-modules: SEL4 SEL4.Machine.Target - build-depends: mtl==2.2.*, base==4.15.*, array, containers, transformers + build-depends: mtl==2.2.*, base==4.16.*, array, containers, transformers if flag(FFI) -- FFIBindings currently relies on POSIX signal handlers. 
This could @@ -126,6 +126,7 @@ Library SEL4.Object.Instances.ARM SEL4.Object.TCB.ARM SEL4.Model.StateData.ARM + SEL4.Model.PSpace.ARM SEL4.Machine.RegisterSet.ARM SEL4.Machine.Hardware.ARM @@ -149,6 +150,7 @@ Library SEL4.Object.VCPU.ARM SEL4.Object.TCB.ARM SEL4.Model.StateData.ARM + SEL4.Model.PSpace.ARM SEL4.Machine.RegisterSet.ARM SEL4.Machine.Hardware.ARM @@ -170,6 +172,7 @@ Library SEL4.Object.IOPort.X64 SEL4.Object.TCB.X64 SEL4.Model.StateData.X64 + SEL4.Model.PSpace.X64 SEL4.Machine.RegisterSet.X64 SEL4.Machine.Hardware.X64 @@ -191,6 +194,7 @@ Library SEL4.Object.Instances.RISCV64 SEL4.Object.TCB.RISCV64 SEL4.Model.StateData.RISCV64 + SEL4.Model.PSpace.RISCV64 SEL4.Machine.RegisterSet.RISCV64 SEL4.Machine.Hardware.RISCV64 @@ -213,6 +217,7 @@ Library SEL4.Object.VCPU.AARCH64 SEL4.Object.TCB.AARCH64 SEL4.Model.StateData.AARCH64 + SEL4.Model.PSpace.AARCH64 SEL4.Machine.RegisterSet.AARCH64 SEL4.Machine.Hardware.AARCH64 @@ -226,6 +231,8 @@ Library -fno-warn-unrecognised-pragmas -fno-warn-unused-binds -fno-warn-unused-imports -fno-warn-unused-matches + -fno-warn-incomplete-record-updates + -fno-warn-incomplete-uni-patterns cpp-options: -- set via Setup.hs hook diff --git a/spec/haskell/src/SEL4/Kernel/VSpace/AARCH64.hs b/spec/haskell/src/SEL4/Kernel/VSpace/AARCH64.hs index 6a6228ab59..4a4ba85bfd 100644 --- a/spec/haskell/src/SEL4/Kernel/VSpace/AARCH64.hs +++ b/spec/haskell/src/SEL4/Kernel/VSpace/AARCH64.hs @@ -128,7 +128,8 @@ isPagePTE (PagePTE {}) = True isPagePTE _ = False getPPtrFromPTE :: PTE -> PPtr PTE -getPPtrFromPTE pte = ptrFromPAddr $ pteBaseAddress pte +getPPtrFromPTE pte = + ptrFromPAddr (if isPagePTE pte then pteBaseAddress pte else ptePPN pte `shiftL` pageBits) -- how many bits there are left to be translated at a given level (0 = bottom -- level). This counts the bits being translated by the levels below the current one, so @@ -406,7 +407,7 @@ loadVMID asid = do maybeEntry <- getASIDPoolEntry asid case maybeEntry of Just (ASIDPoolVSpace vmID ptr) -> return vmID - _ -> error ("loadVMID: no entry for asid") + _ -> fail "loadVMID: no entry for asid" invalidateASID :: ASID -> Kernel () invalidateASID = updateASIDPoolEntry (\entry -> Just $ entry { apVMID = Nothing }) @@ -469,6 +470,7 @@ checkVSpaceRoot :: Capability -> Int -> KernelF SyscallError (PPtr PTE, ASID) checkVSpaceRoot vspaceCap argNo = case vspaceCap of ArchObjectCap (PageTableCap { + capPTType = VSRootPT_T, capPTMappedAddress = Just (asid, _), capPTBasePtr = vspace }) -> return (vspace, asid) @@ -541,8 +543,8 @@ decodeARMFrameInvocationMap cte cap vptr rightsMask attr vspaceCap = do let pgBits = pageBitsForSize frameSize case capFMappedAddress cap of Just (asid', vaddr') -> do - when (asid' /= asid) $ throw $ InvalidCapability 0 - when (vaddr' /= vptr) $ throw $ InvalidArgument 2 + when (asid' /= asid) $ throw $ InvalidCapability 1 + when (vaddr' /= vptr) $ throw $ InvalidArgument 0 Nothing -> do let vtop = vptr + (bit pgBits - 1) when (vtop > pptrUserTop) $ throw $ InvalidArgument 0 @@ -614,7 +616,7 @@ decodeARMPageTableInvocationMap cte cap vptr attr vspaceCap = do oldPTE <- withoutFailure $ getObject slot when (bitsLeft == pageBits || oldPTE /= InvalidPTE) $ throw DeleteFirst let pte = PageTablePTE { - pteBaseAddress = addrFromPPtr (capPTBasePtr cap) } + ptePPN = addrFromPPtr (capPTBasePtr cap) `shiftR` pageBits } let vptr = vptr .&. 
complement (mask bitsLeft) return $ InvokePageTable $ PageTableMap { ptMapCap = ArchObjectCap $ cap { capPTMappedAddress = Just (asid, vptr) }, @@ -713,7 +715,8 @@ decodeARMASIDPoolInvocation label cap@(ASIDPoolCap {}) extraCaps = case vspaceCap of ArchObjectCap (PageTableCap { capPTMappedAddress = Nothing }) -> do - when (not (isVTableRoot vspaceCap) || isJust (capPTMappedAddress cap)) $ + -- C checks for a mapping here, but our case already checks that + when (not (isVTableRoot vspaceCap)) $ throw $ InvalidCapability 1 asidTable <- withoutFailure $ gets (armKSASIDTable . ksArchState) let base = capASIDBase cap diff --git a/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs b/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs index c3042270ce..88cc0de6cf 100644 --- a/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs +++ b/spec/haskell/src/SEL4/Machine/Hardware/AARCH64.hs @@ -336,7 +336,7 @@ data PTE pteDevice :: Bool, pteRights :: VMRights } | PageTablePTE { - pteBaseAddress :: PAddr } + ptePPN :: PAddr } deriving (Show, Eq) {- Simulator callbacks -} diff --git a/spec/haskell/src/SEL4/Model/PSpace.lhs b/spec/haskell/src/SEL4/Model/PSpace.lhs index 6d94d996a9..03c7da30d3 100644 --- a/spec/haskell/src/SEL4/Model/PSpace.lhs +++ b/spec/haskell/src/SEL4/Model/PSpace.lhs @@ -6,6 +6,14 @@ This module contains the data structure and operations for the physical memory model. +\begin{impdetails} + +This module uses the C preprocessor to select a target architecture. + +> {-# LANGUAGE CPP #-} + +\end{impdetails} + > module SEL4.Model.PSpace ( > PSpace, newPSpace, initPSpace, > PSpaceStorable, @@ -22,6 +30,8 @@ This module contains the data structure and operations for the physical memory m % {-# BOOT-EXPORTS: PSpace #PRegion newPSpace #-} > import Prelude hiding (Word) +> import qualified SEL4.Model.PSpace.TARGET as Arch + > import SEL4.Model.StateData > import SEL4.Object.Structures @@ -251,7 +261,7 @@ No type checks are performed when deleting objects; "deleteObjects" simply delet > let ps' = ps { psMap = map' } > modify (\ks -> ks { ksPSpace = ps'}) -Clear the ghost state for user pages and cnodes within the deleted range. +Clear the ghost state for user pages, cnodes, and arch-specific objects within the deleted range. > modify (\ks -> ks { gsUserPages = (\x -> if inRange x > then Nothing else gsUserPages ks x) }) @@ -259,6 +269,7 @@ Clear the ghost state for user pages and cnodes within the deleted range. > "Object deletion would split CNodes." > modify (\ks -> ks { gsCNodes = (\x -> if inRange x > then Nothing else gsCNodes ks x) }) +> Arch.deleteGhost ptr bits > stateAssert ksASIDMapSafe "Object deletion would leave dangling PD pointers" In "deleteObjects" above, we assert "deletionIsSafe"; that is, that there are no pointers to these objects remaining elsewhere in the kernel state. Since we cannot easily check this in the Haskell model, we assume that it is always true; the assertion is strengthened during translation into Isabelle. diff --git a/spec/haskell/src/SEL4/Model/PSpace/AARCH64.hs b/spec/haskell/src/SEL4/Model/PSpace/AARCH64.hs new file mode 100644 index 0000000000..3781b9cf7e --- /dev/null +++ b/spec/haskell/src/SEL4/Model/PSpace/AARCH64.hs @@ -0,0 +1,24 @@ +-- +-- Copyright 2023, Proofcraft Pty Ltd +-- +-- SPDX-License-Identifier: GPL-2.0-only +-- + +-- This module contains architecture-specific code for PSpace, in particular +-- for potential ghost state updates when deleting objects. 
+ +module SEL4.Model.PSpace.AARCH64(deleteGhost) where + +import Prelude hiding (Word) +import SEL4.Model.StateData +import SEL4.Model.StateData.AARCH64 +import SEL4.Machine.RegisterSet + +import Data.Bits + +deleteGhost :: PPtr a -> Int -> Kernel () +deleteGhost ptr bits = do + let inRange = (\x -> x .&. ((- mask bits) - 1) == fromPPtr ptr) + ptTypes <- gets (gsPTTypes . ksArchState) + let ptTypes' = (\x -> if inRange x then Nothing else ptTypes x) + modify (\ks -> ks { ksArchState = (ksArchState ks) { gsPTTypes = ptTypes' } }) diff --git a/spec/haskell/src/SEL4/Model/PSpace/ARM.hs b/spec/haskell/src/SEL4/Model/PSpace/ARM.hs new file mode 100644 index 0000000000..f8ac3839f3 --- /dev/null +++ b/spec/haskell/src/SEL4/Model/PSpace/ARM.hs @@ -0,0 +1,17 @@ +-- +-- Copyright 2023, Proofcraft Pty Ltd +-- +-- SPDX-License-Identifier: GPL-2.0-only +-- + +-- This module contains architecture-specific code for PSpace, in particular +-- for potential ghost state updates when deleting objects. + +module SEL4.Model.PSpace.ARM(deleteGhost) where + +import Prelude hiding (Word) +import SEL4.Model.StateData +import SEL4.Machine.RegisterSet + +deleteGhost :: PPtr a -> Int -> Kernel () +deleteGhost ptr bits = return () diff --git a/spec/haskell/src/SEL4/Model/PSpace/RISCV64.hs b/spec/haskell/src/SEL4/Model/PSpace/RISCV64.hs new file mode 100644 index 0000000000..16e9fc6ecd --- /dev/null +++ b/spec/haskell/src/SEL4/Model/PSpace/RISCV64.hs @@ -0,0 +1,17 @@ +-- +-- Copyright 2023, Proofcraft Pty Ltd +-- +-- SPDX-License-Identifier: GPL-2.0-only +-- + +-- This module contains architecture-specific code for PSpace, in particular +-- for potential ghost state updates when deleting objects. + +module SEL4.Model.PSpace.RISCV64(deleteGhost) where + +import Prelude hiding (Word) +import SEL4.Model.StateData +import SEL4.Machine.RegisterSet + +deleteGhost :: PPtr a -> Int -> Kernel () +deleteGhost ptr bits = return () diff --git a/spec/haskell/src/SEL4/Model/PSpace/X64.hs b/spec/haskell/src/SEL4/Model/PSpace/X64.hs new file mode 100644 index 0000000000..b07a1a4f06 --- /dev/null +++ b/spec/haskell/src/SEL4/Model/PSpace/X64.hs @@ -0,0 +1,17 @@ +-- +-- Copyright 2023, Proofcraft Pty Ltd +-- +-- SPDX-License-Identifier: GPL-2.0-only +-- + +-- This module contains architecture-specific code for PSpace, in particular +-- for potential ghost state updates when deleting objects. + +module SEL4.Model.PSpace.X64(deleteGhost) where + +import Prelude hiding (Word) +import SEL4.Model.StateData +import SEL4.Machine.RegisterSet + +deleteGhost :: PPtr a -> Int -> Kernel () +deleteGhost ptr bits = return () diff --git a/spec/haskell/src/SEL4/Model/StateData/AARCH64.hs b/spec/haskell/src/SEL4/Model/StateData/AARCH64.hs index 832cabbbca..037fd06e5b 100644 --- a/spec/haskell/src/SEL4/Model/StateData/AARCH64.hs +++ b/spec/haskell/src/SEL4/Model/StateData/AARCH64.hs @@ -12,7 +12,7 @@ module SEL4.Model.StateData.AARCH64 where import Prelude hiding (Word) import SEL4.Machine -import SEL4.Machine.Hardware.AARCH64 (PTE(..), config_ARM_PA_SIZE_BITS_40) +import SEL4.Machine.Hardware.AARCH64 (PTE(..), PT_Type, config_ARM_PA_SIZE_BITS_40) import SEL4.Object.Structures.AARCH64 import Data.Array @@ -40,7 +40,8 @@ data KernelState = ARMKernelState { -- used e.g. for user threads with missing or invalid VSpace root armKSGlobalUserVSpace :: PPtr PTE, armHSCurVCPU :: Maybe (PPtr VCPU, Bool), - armKSGICVCPUNumListRegs :: Int + armKSGICVCPUNumListRegs :: Int, + gsPTTypes :: Word -> Maybe PT_Type } -- counting from 0 at bottom, i.e. 
number of levels = maxPTLevel + 1; diff --git a/spec/haskell/src/SEL4/Object/ObjectType/AARCH64.hs b/spec/haskell/src/SEL4/Object/ObjectType/AARCH64.hs index 1d3447f63c..b71e018100 100644 --- a/spec/haskell/src/SEL4/Object/ObjectType/AARCH64.hs +++ b/spec/haskell/src/SEL4/Object/ObjectType/AARCH64.hs @@ -158,6 +158,13 @@ placeNewDataObject regionBase sz isDevice = if isDevice then placeNewObject regionBase UserDataDevice sz else placeNewObject regionBase UserData sz +updatePTType :: PPtr () -> PT_Type -> Kernel () +updatePTType p pt_t = do + ptTypes <- gets (gsPTTypes . ksArchState) + let funupd = (\f x v y -> if y == x then v else f y) + let ptTypes' = funupd ptTypes (fromPPtr p) (Just pt_t) + modify (\ks -> ks { ksArchState = (ksArchState ks) { gsPTTypes = ptTypes' } }) + createObject :: ObjectType -> PPtr () -> Int -> Bool -> Kernel ArchCapability createObject t regionBase _ isDevice = let funupd = (\f x v y -> if y == x then v else f y) in @@ -189,10 +196,12 @@ createObject t regionBase _ isDevice = Arch.Types.PageTableObject -> do let ptSize = ptBits NormalPT_T - objBits (makeObject :: PTE) placeNewObject regionBase (makeObject :: PTE) ptSize + updatePTType regionBase NormalPT_T return $ PageTableCap (pointerCast regionBase) NormalPT_T Nothing Arch.Types.VSpaceObject -> do let ptSize = ptBits VSRootPT_T - objBits (makeObject :: PTE) placeNewObject regionBase (makeObject :: PTE) ptSize + updatePTType regionBase VSRootPT_T return $ PageTableCap (pointerCast regionBase) VSRootPT_T Nothing Arch.Types.VCPUObject -> do placeNewObject regionBase (makeObject :: VCPU) 0 diff --git a/spec/haskell/src/SEL4/Object/Structures.lhs b/spec/haskell/src/SEL4/Object/Structures.lhs index 6c47955694..37a74bf198 100644 --- a/spec/haskell/src/SEL4/Object/Structures.lhs +++ b/spec/haskell/src/SEL4/Object/Structures.lhs @@ -506,7 +506,8 @@ This type is used to represent the required action, if any, of the scheduler nex \item IPC operations may request that the scheduler switch to a specific thread. -> | SwitchToThread (PPtr TCB) +> | SwitchToThread { +> schActTarget :: PPtr TCB } > deriving (Eq, Show) diff --git a/spec/haskell/stack.yaml b/spec/haskell/stack.yaml index 42d98d3d38..91eced2a9e 100644 --- a/spec/haskell/stack.yaml +++ b/spec/haskell/stack.yaml @@ -7,8 +7,8 @@ # We use `stack` only to install GHC and cabal-install, not to build the project. 
# The rest of the build works via cabal -# Stackage LTS Haskell 19.12 (ghc-9.0.2) -resolver: lts-19.12 +# Stackage LTS Haskell 20.25 (ghc-9.2.8) +resolver: lts-20.25 packages: [] extra-deps: [] diff --git a/spec/machine/AARCH64/MachineOps.thy b/spec/machine/AARCH64/MachineOps.thy index 30ec6cbe74..cdef617ec2 100644 --- a/spec/machine/AARCH64/MachineOps.thy +++ b/spec/machine/AARCH64/MachineOps.thy @@ -10,7 +10,7 @@ chapter "Machine Operations" theory MachineOps imports Word_Lib.WordSetup - Monads.NonDetMonad + Monads.Nondet_Monad MachineMonad begin diff --git a/spec/machine/RISCV64/MachineOps.thy b/spec/machine/RISCV64/MachineOps.thy index 7a7ca9d51d..816a3b66ab 100644 --- a/spec/machine/RISCV64/MachineOps.thy +++ b/spec/machine/RISCV64/MachineOps.thy @@ -9,7 +9,7 @@ chapter "Machine Operations" theory MachineOps imports "Word_Lib.WordSetup" - "Monads.NonDetMonad" + "Monads.Nondet_Monad" MachineMonad begin diff --git a/spec/machine/X64/MachineOps.thy b/spec/machine/X64/MachineOps.thy index 649e78743e..6ec113cc06 100644 --- a/spec/machine/X64/MachineOps.thy +++ b/spec/machine/X64/MachineOps.thy @@ -9,7 +9,7 @@ chapter "Machine Operations" theory MachineOps imports "Word_Lib.WordSetup" - "Monads.NonDetMonad" + "Monads.Nondet_Monad" MachineMonad begin diff --git a/spec/tests.xml b/spec/tests.xml index a7f133fa44..b520faa91c 100644 --- a/spec/tests.xml +++ b/spec/tests.xml @@ -39,7 +39,7 @@ - make + make $L4V_ARCH diff --git a/sys-init/DuplicateCaps_SI.thy b/sys-init/DuplicateCaps_SI.thy index 2b23618894..24737e6ba6 100644 --- a/sys-init/DuplicateCaps_SI.thy +++ b/sys-init/DuplicateCaps_SI.thy @@ -256,7 +256,7 @@ lemma distinct_card': (* FIXME, move higher *) lemma distinct_length_filter': "distinct xs \ length [x\xs. P x] = card {x \ set xs. P x}" - by (metis distinct_length_filter set_conj_Int_simp inf_commute) + by (metis distinct_card' distinct_filter set_filter) lemma duplicate_caps_sep_no_rv: "\\si_caps_at t orig_caps spec dev {obj_id. real_object_at obj_id spec} \* diff --git a/tools/autocorres/AutoCorres.thy b/tools/autocorres/AutoCorres.thy index 68f2cca203..704f82628c 100644 --- a/tools/autocorres/AutoCorres.thy +++ b/tools/autocorres/AutoCorres.thy @@ -21,7 +21,7 @@ imports TypHeapSimple HeapLift WordAbstract - "Monads.OptionMonadWP" + "Monads.Reader_Option_VCG" "Eisbach_Tools.Apply_Trace" AutoCorresSimpset "ML_Utils.MkTermAntiquote" diff --git a/tools/autocorres/CorresXF.thy b/tools/autocorres/CorresXF.thy index 8b052e8c34..7bd6563548 100644 --- a/tools/autocorres/CorresXF.thy +++ b/tools/autocorres/CorresXF.thy @@ -247,9 +247,9 @@ lemma corresXF_join: apply (unfold bindE_def) apply (erule corresXF_simple_join [where P'="\a b s. (case b of Inl r \ a = Inl (E r s) | Inr r \ a = Inr (V r s) \ P' (projr a) r s)"]) apply (simp add: corresXF_simple_def split: sum.splits unit.splits) - apply (clarsimp simp: NonDetMonad.lift_def throwError_def return_def) + apply (clarsimp simp: Nondet_Monad.lift_def throwError_def return_def) apply fastforce - apply (fastforce simp: NonDetMonadVCG.validE_def split: sum.splits cong del: unit.case_cong) + apply (fastforce simp: Nondet_VCG.validE_def split: sum.splits cong del: unit.case_cong) apply simp done @@ -261,9 +261,9 @@ lemma corresXF_except: apply (unfold handleE'_def) apply (erule corresXF_simple_join [where P'="\a b s. 
(case b of Inr r \ a = Inr (V r s) | Inl r \ a = Inl (E r s) \ P' (projl a) r s)"]) apply (simp add: corresXF_simple_def split: sum.splits unit.splits) - apply (clarsimp simp: NonDetMonad.lift_def throwError_def return_def) + apply (clarsimp simp: Nondet_Monad.lift_def throwError_def return_def) apply fastforce - apply (clarsimp simp: NonDetMonadVCG.validE_def split: sum.splits cong del: unit.case_cong) + apply (clarsimp simp: Nondet_VCG.validE_def split: sum.splits cong del: unit.case_cong) apply simp done @@ -566,13 +566,13 @@ lemma corresXF_while: apply (subst (asm) corresXF_simple_corresXF[symmetric]) apply atomize apply (erule_tac x="projr x" in allE) - apply (clarsimp simp: corresXF_simple_def NonDetMonad.lift_def throwError_def return_def + apply (clarsimp simp: corresXF_simple_def Nondet_Monad.lift_def throwError_def return_def split: sum.splits) apply (clarsimp simp: cond_match split: sum.splits) apply (clarsimp simp: lift_def split: sum.splits) apply (cut_tac pred_inv [unfolded validE_def, simplified lift_def]) apply (erule hoare_chain) - apply (monad_eq simp: NonDetMonad.lift_def whileLoopE_def split: sum.splits) + apply (monad_eq simp: Nondet_Monad.lift_def whileLoopE_def split: sum.splits) apply monad_eq apply (clarsimp simp: pred_imply split: sum.splits) apply (clarsimp simp: init_match pred_imply) diff --git a/tools/autocorres/L1Defs.thy b/tools/autocorres/L1Defs.thy index 76c24ebf5f..346223fa4f 100644 --- a/tools/autocorres/L1Defs.thy +++ b/tools/autocorres/L1Defs.thy @@ -320,7 +320,7 @@ lemma L1corres_prepend_unknown_var': apply (monad_eq simp: Bex_def) apply metis apply (subst L1_init_def) - apply (wp del: hoare_vcg_prop) + apply (wpsimp wp_del: hoare_vcg_prop) done lemma L1_catch_seq_join: "no_throw \ A \ L1_seq A (L1_catch B C) = (L1_catch (L1_seq A B) C)" @@ -334,7 +334,7 @@ lemma no_throw_L1_init [simp]: "no_throw P (L1_init f)" apply (rule no_throw_bindE [where B=\]) apply simp apply simp - apply wp + apply wpsimp done lemma L1corres_prepend_unknown_var: diff --git a/tools/autocorres/L1Peephole.thy b/tools/autocorres/L1Peephole.thy index 3914b66be0..8614d80043 100644 --- a/tools/autocorres/L1Peephole.thy +++ b/tools/autocorres/L1Peephole.thy @@ -58,7 +58,7 @@ lemma L1_fail_propagate_catch [L1opt]: "(L1_seq (L1_catch L R) L1_fail) = (L1_catch (L1_seq L L1_fail) (L1_seq R L1_fail))" unfolding L1_defs apply (clarsimp simp: bindE_def handleE'_def handleE_def bind_assoc) - apply (rule arg_cong [where f="NonDetMonad.bind L"]) + apply (rule arg_cong [where f="Nondet_Monad.bind L"]) apply (fastforce split: sum.splits simp: throwError_def) done diff --git a/tools/autocorres/L1Valid.thy b/tools/autocorres/L1Valid.thy index fdd0b33451..1ac15209a6 100644 --- a/tools/autocorres/L1Valid.thy +++ b/tools/autocorres/L1Valid.thy @@ -29,7 +29,7 @@ lemma L1_spec_wp [wp]: "\ \s. \t. (s, t) \ f \ \s. \x. P () (f (\_. x) s) \ L1_init f \ P \, \ Q \" apply (unfold L1_init_def) - apply (wp select_wp) + apply wp apply fastforce done diff --git a/tools/autocorres/L2Opt.thy b/tools/autocorres/L2Opt.thy index 93f2d01f24..eb4ea9cc52 100644 --- a/tools/autocorres/L2Opt.thy +++ b/tools/autocorres/L2Opt.thy @@ -152,7 +152,7 @@ lemma monad_equiv_guard_conj [L2flow]: lemma monad_equiv_unknown [L2flow]: "monad_equiv P (L2_unknown name) (L2_unknown name) (\r s. P s) (\_ _. 
False)" apply (clarsimp simp: monad_equiv_def L2_defs) - apply (wp select_wp) + apply wp apply force done diff --git a/tools/autocorres/L2Peephole.thy b/tools/autocorres/L2Peephole.thy index 68a2eeab24..6fbfb7a060 100644 --- a/tools/autocorres/L2Peephole.thy +++ b/tools/autocorres/L2Peephole.thy @@ -56,8 +56,8 @@ lemma L2_unknown_bind [L2opt]: apply (rule ext) apply (clarsimp simp: L2_seq_def L2_unknown_def) apply (clarsimp simp: liftE_def select_def bindE_def) - apply (clarsimp simp: NonDetMonad.lift_def bind_def) - apply (clarsimp simp: NonDetMonad.bind_def split_def) + apply (clarsimp simp: Nondet_Monad.lift_def bind_def) + apply (clarsimp simp: Nondet_Monad.bind_def split_def) apply (rule prod_eqI) apply (rule set_eqI) apply (clarsimp) @@ -126,7 +126,7 @@ lemma L2_fail_propagate_catch [L2opt]: apply (clarsimp simp: bindE_def) apply (clarsimp simp: handleE'_def handleE_def) apply (clarsimp simp: bind_assoc) - apply (rule arg_cong [where f="NonDetMonad.bind L"]) + apply (rule arg_cong [where f="Nondet_Monad.bind L"]) apply (rule ext)+ apply (clarsimp split: sum.splits) apply (clarsimp simp: throwError_def) diff --git a/tools/autocorres/MonadMono.thy b/tools/autocorres/MonadMono.thy index be9e698a62..0463d06834 100644 --- a/tools/autocorres/MonadMono.thy +++ b/tools/autocorres/MonadMono.thy @@ -12,8 +12,8 @@ theory MonadMono imports NonDetMonadEx - Monads.WhileLoopRulesCompleteness - Monads.OptionMonadWP + Monads.Nondet_While_Loop_Rules_Completeness + Monads.Reader_Option_VCG begin (* @@ -152,7 +152,7 @@ lemma monad_mono_step_bindE: apply (unfold bindE_def) apply (rule monad_mono_step_bind) apply simp - apply (monad_eq simp: monad_mono_step_def NonDetMonad.lift_def + apply (monad_eq simp: monad_mono_step_def Nondet_Monad.lift_def split: sum.splits) done diff --git a/tools/autocorres/NonDetMonadEx.thy b/tools/autocorres/NonDetMonadEx.thy index f855ec12a6..3b54035cad 100644 --- a/tools/autocorres/NonDetMonadEx.thy +++ b/tools/autocorres/NonDetMonadEx.thy @@ -11,12 +11,12 @@ theory NonDetMonadEx imports "Word_Lib.WordSetup" - "Monads.NonDetMonadVCG" - "Monads.Monad_Equations" - "Monads.More_NonDetMonadVCG" - "Monads.No_Throw" - "Monads.No_Fail" - "Monads.OptionMonadND" + "Monads.Nondet_VCG" + "Monads.Nondet_Monad_Equations" + "Monads.Nondet_More_VCG" + "Monads.Nondet_No_Throw" + "Monads.Nondet_No_Fail" + "Monads.Reader_Option_ND" begin (* @@ -276,7 +276,7 @@ lemma whileLoop_to_fold: (\r. return (Q r)) i s) = return (if P i \ x then fold (\i r. (Q r)) [unat (P i) ..< unat x] i else i) s" (is "?LHS s = return (?RHS x) s") - apply (subst OptionMonadND.gets_the_return [symmetric]) + apply (subst Reader_Option_ND.gets_the_return [symmetric]) apply (subst gets_the_whileLoop) apply (rule gets_the_to_return) apply (subst owhile_to_fold) diff --git a/tools/autocorres/TypHeapSimple.thy b/tools/autocorres/TypHeapSimple.thy index 074ec1932c..d5e303c10a 100644 --- a/tools/autocorres/TypHeapSimple.thy +++ b/tools/autocorres/TypHeapSimple.thy @@ -512,7 +512,7 @@ lemma simple_lift_field_update': and xf_xfu: "fg_cons xf xfu" and cl: "simple_lift hp ptr = Some z" shows "(simple_lift (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = - simple_lift hp(ptr \ xfu val z)" + (simple_lift hp)(ptr \ xfu val z)" (is "?LHS = ?RHS") proof (rule ext) fix p @@ -581,7 +581,7 @@ lemma simple_lift_field_update: and xf_xfu: "fg_cons xf (xfu o (\x _. x))" and cl: "simple_lift hp ptr = Some z" shows "(simple_lift (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = - simple_lift hp(ptr \ xfu (\_. 
val) z)" + (simple_lift hp)(ptr \ xfu (\_. val) z)" (is "?LHS = ?RHS") apply (insert fl [unfolded field_ti_def]) apply (clarsimp split: option.splits) diff --git a/tools/autocorres/TypeStrengthen.thy b/tools/autocorres/TypeStrengthen.thy index 314d370dec..bdebf9aff5 100644 --- a/tools/autocorres/TypeStrengthen.thy +++ b/tools/autocorres/TypeStrengthen.thy @@ -14,7 +14,7 @@ theory TypeStrengthen imports L2Defs - "Monads.OptionMonadND" + "Monads.Reader_Option_ND" ExecConcrete begin diff --git a/tools/autocorres/tests/examples/FactorialTest.thy b/tools/autocorres/tests/examples/FactorialTest.thy index b46e21e9c1..de0314c599 100644 --- a/tools/autocorres/tests/examples/FactorialTest.thy +++ b/tools/autocorres/tests/examples/FactorialTest.thy @@ -10,7 +10,7 @@ Termination for recursive functions. theory FactorialTest imports "AutoCorres.AutoCorres" - "Monads.OptionMonadWP" + "Monads.Reader_Option_VCG" begin external_file "factorial.c" diff --git a/tools/autocorres/utils.ML b/tools/autocorres/utils.ML index 6059cb8055..cdb45d5394 100644 --- a/tools/autocorres/utils.ML +++ b/tools/autocorres/utils.ML @@ -459,7 +459,7 @@ fun term_fold_map_top f x = *) fun simp_map f = Context.map_proof ( - Local_Theory.declaration {syntax = false, pervasive = false} ( + Local_Theory.declaration {syntax = false, pervasive = false, pos = @{here}} ( K (Simplifier.map_ss f))) |> Context.proof_map diff --git a/tools/c-parser/Simpl/AlternativeSmallStep.thy b/tools/c-parser/Simpl/AlternativeSmallStep.thy index dfc7cfcfd1..c06998995b 100644 --- a/tools/c-parser/Simpl/AlternativeSmallStep.thy +++ b/tools/c-parser/Simpl/AlternativeSmallStep.thy @@ -33,7 +33,7 @@ begin text \ -This is the small-step semantics, which is described and used in my PhD-thesis \cite{Schirmer-PhD}. +This is the small-step semantics, which is described and used in my PhD-thesis \<^cite>\"Schirmer-PhD"\. It decomposes the statement into a list of statements and finally executes the head. So the redex is always the head of the list. The equivalence between termination (based on the big-step semantics) and the absence of infinite computations in diff --git a/tools/c-parser/Simpl/ROOT b/tools/c-parser/Simpl/ROOT index 9bcdd6de64..404fb59db6 100644 --- a/tools/c-parser/Simpl/ROOT +++ b/tools/c-parser/Simpl/ROOT @@ -1,6 +1,6 @@ chapter AFP -session Simpl (AFP) = HOL + +session Simpl = HOL + options [timeout = 600] sessions "HOL-Library" diff --git a/tools/c-parser/Simpl/UserGuide.thy b/tools/c-parser/Simpl/UserGuide.thy index 82b629d2f5..cfeb2e1a08 100644 --- a/tools/c-parser/Simpl/UserGuide.thy +++ b/tools/c-parser/Simpl/UserGuide.thy @@ -222,7 +222,7 @@ for procedure calls (that creates the proper @{term init}, @{term return} and @{term result} functions on the fly) and creates locales and statespaces to reason about the procedure. The purpose of locales is to set up logical contexts to support modular reasoning. Locales can be seen as freeze-dried proof contexts that -get alive as you setup a new lemma or theorem (\cite{Ballarin-04-locales}). +get alive as you setup a new lemma or theorem (\<^cite>\"Ballarin-04-locales"\). The locale the user deals with is named \Square_impl\. It defines the procedure name (internally @{term "Square_'proc"}), the procedure body (named \Square_body\) and the statespaces for parameters and local and @@ -537,7 +537,7 @@ the lookup of variable \x\ in the state \\\. The approach to specify procedures on lists -basically follows \cite{MehtaN-CADE03}. From the pointer structure +basically follows \<^cite>\"MehtaN-CADE03"\. 
From the pointer structure in the heap we (relationally) abstract to HOL lists of references. Then we can specify further properties on the level of HOL lists, rather then on the heap. The basic abstractions are: @@ -795,7 +795,7 @@ since the lists are already uniquely determined by the relational abstraction: \ text \ -The next contrived example is taken from \cite{Homeier-95-vcg}, to illustrate +The next contrived example is taken from \<^cite>\"Homeier-95-vcg"\, to illustrate a more complex termination criterion for mutually recursive procedures. The procedures do not calculate anything useful. @@ -1534,7 +1534,7 @@ procedures init' (|p) = subsubsection \Extending State Spaces\ text \ The records in Isabelle are -extensible \cite{Nipkow-02-hol,NaraschewskiW-TPHOLs98}. In principle this can be exploited +extensible \<^cite>\"Nipkow-02-hol" and "NaraschewskiW-TPHOLs98"\. In principle this can be exploited during verification. The state space can be extended while we we add procedures. But there is one major drawback: \begin{itemize} diff --git a/tools/c-parser/Simpl/hoare.ML b/tools/c-parser/Simpl/hoare.ML index 0fb153d5af..e69f5545d0 100644 --- a/tools/c-parser/Simpl/hoare.ML +++ b/tools/c-parser/Simpl/hoare.ML @@ -540,7 +540,7 @@ fun dest_Guard (Const (@{const_name Language.com.Guard},_)$f$g$c) = (f,g,c,false fun add_declaration name decl thy = thy |> Named_Target.init [] name - |> Local_Theory.declaration {syntax = false, pervasive = false} decl + |> Local_Theory.declaration {syntax = false, pervasive = false, pos = \<^here>} decl |> Local_Theory.exit |> Proof_Context.theory_of; @@ -939,7 +939,7 @@ fun procedures_definition locname procs thy = val context = Context.Theory thy |> fold (add_parameter_info Morphism.identity (unsuffix proc_deco)) name_pars - |> StateSpace.set_silent true + |> Config.put_generic StateSpace.silent true fun read_body (_, body) = Syntax.read_term (Context.proof_of context) body; @@ -1058,7 +1058,7 @@ fun procedures_definition locname procs thy = ctxt |> Proof_Context.theory_of |> Named_Target.init [] lname - |> Local_Theory.declaration {syntax = false, pervasive = false} parameter_info_decl + |> Local_Theory.declaration {syntax = false, pervasive = false, pos = \<^here>} parameter_info_decl |> (fn lthy => if has_body name then snd (Local_Theory.define (def lthy) lthy) else lthy) diff --git a/tools/c-parser/Simpl/hoare_syntax.ML b/tools/c-parser/Simpl/hoare_syntax.ML index 81906c78de..db3f4046f5 100644 --- a/tools/c-parser/Simpl/hoare_syntax.ML +++ b/tools/c-parser/Simpl/hoare_syntax.ML @@ -880,7 +880,7 @@ fun mk_call_tr ctxt grd Call formals pn pt actuals has_args cont = (lookup_comp ctxt [] name (Bound 1)) arg end; - val _ = if not (StateSpace.get_silent (Context.Proof ctxt)) andalso + val _ = if not (Config.get ctxt StateSpace.silent) andalso ((not fcall andalso length formals <> length actuals) orelse (fcall andalso length formals <> length actuals + 1)) @@ -932,7 +932,7 @@ fun mk_call_tr ctxt grd Call formals pn pt actuals has_args cont = (case res_formals of [(_, n)] => Abs ("s", dummyT, lookup_comp ctxt [] n (Bound 0)) | _ => - if StateSpace.get_silent (Context.Proof ctxt) + if Config.get ctxt StateSpace.silent then Abs ("s", dummyT, lookup_comp ctxt [] "dummy" (Bound 0)) else raise TERM ("call_tr: function " ^ pn ^ "may only have one result parameter", [])); in Call $ init $ pt $ ret $ res $ c end) @@ -999,7 +999,7 @@ fun gen_call_tr prfx dyn grd ctxt p actuals has_args cont = SOME formals => mk_call_tr ctxt grd (Call dyn has_args cont) formals pn pt 
actuals has_args cont | NONE => - if StateSpace.get_silent (Context.Proof ctxt) + if Config.get ctxt StateSpace.silent then mk_call_tr ctxt grd (Call dyn has_args cont) [] pn pt [] has_args cont else raise TERM ("gen_call_tr: procedure " ^ quote pn ^ " not defined", [])) end; diff --git a/tools/c-parser/TypHeapLib.thy b/tools/c-parser/TypHeapLib.thy index ae9f3569a5..06f9232880 100644 --- a/tools/c-parser/TypHeapLib.thy +++ b/tools/c-parser/TypHeapLib.thy @@ -37,7 +37,7 @@ lemma c_guard_clift: lemma clift_heap_update: fixes p :: "'a :: mem_type ptr" - shows "hrs_htd hp \\<^sub>t p \ clift (hrs_mem_update (heap_update p v) hp) = clift hp(p \ v)" + shows "hrs_htd hp \\<^sub>t p \ clift (hrs_mem_update (heap_update p v) hp) = (clift hp)(p \ v)" unfolding hrs_mem_update_def apply (cases hp) apply (simp add: split_def hrs_htd_def) @@ -172,7 +172,7 @@ lemma clift_field_update: and eu: "export_uinfo t = export_uinfo (typ_info_t TYPE('b))" and cl: "clift hp ptr = Some z" shows "(clift (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = - clift hp(ptr \ field_update (field_desc t) (to_bytes_p val) z)" + (clift hp)(ptr \ field_update (field_desc t) (to_bytes_p val) z)" (is "?LHS = ?RHS") proof - have cl': "clift (fst hp, snd hp) ptr = Some z" using cl by simp diff --git a/tools/c-parser/standalone-parser/basics.sml b/tools/c-parser/standalone-parser/basics.sml index ab2f9ca988..2f2a5491d6 100644 --- a/tools/c-parser/standalone-parser/basics.sml +++ b/tools/c-parser/standalone-parser/basics.sml @@ -72,6 +72,9 @@ struct fun K x y = x fun I x = x + fun the_default x (SOME y) = y + | the_default x NONE = x; + end open Basics @@ -145,9 +148,6 @@ struct fun uncurry f (x,y) = f x y - (*union of sets represented as lists: no repetitions*) - fun union eq = List.foldl (uncurry (insert eq)) - fun single x = [x] fun get_first f l = @@ -164,6 +164,4 @@ struct end -infix union - open Library diff --git a/tools/c-parser/standalone-parser/c-parser.mlb b/tools/c-parser/standalone-parser/c-parser.mlb index 18d5e68264..1aa358895a 100644 --- a/tools/c-parser/standalone-parser/c-parser.mlb +++ b/tools/c-parser/standalone-parser/c-parser.mlb @@ -12,6 +12,8 @@ in ../Feedback.ML ../Binaryset.ML basics.sml + library.ML + unsynchronized.ML ../topo_sort.ML ann "nonexhaustiveMatch ignore" diff --git a/tools/c-parser/standalone-parser/library.ML b/tools/c-parser/standalone-parser/library.ML index 378c57d883..f4a4f9b44f 100644 --- a/tools/c-parser/standalone-parser/library.ML +++ b/tools/c-parser/standalone-parser/library.ML @@ -7,13 +7,28 @@ signature LIBRARY = sig + val is_equal: order -> bool + + val build: ('a list -> 'a list) -> 'a list val sort : ('a * 'a -> order) -> 'a list -> 'a list + val foldl: ('a * 'b -> 'a) -> 'a * 'b list -> 'a + + val insert: ('a * 'a -> bool) -> 'a -> 'a list -> 'a list + val remove: ('b * 'a -> bool) -> 'b -> 'a list -> 'a list + val update: ('a * 'a -> bool) -> 'a -> 'a list -> 'a list + val union: ('a * 'a -> bool) -> 'a list -> 'a list -> 'a list + val merge: ('a * 'a -> bool) -> 'a list * 'a list -> 'a list + end structure Library : LIBRARY = struct +fun is_equal ord = ord = EQUAL; + +fun build (f: 'a list -> 'a list) = f []; + (*stable mergesort -- preserves order of equal elements*) fun mergesort unique ord = let @@ -63,4 +78,31 @@ fun mergesort unique ord = fun sort ord = mergesort false ord; +(* (op @) (e, [x1, ..., xn]) ===> ((e @ x1) @ x2) ... 
@ xn + for operators that associate to the left (TAIL RECURSIVE)*) +fun foldl (f: 'a * 'b -> 'a) : 'a * 'b list -> 'a = + let fun itl (e, []) = e + | itl (e, a::l) = itl (f(e, a), l) + in itl end; + +fun insert eq x xs = if member eq xs x then xs else x :: xs; + +fun remove eq x xs = if member eq xs x then filter_out (fn y => eq (x, y)) xs else xs; + +fun update eq x list = + (case list of + [] => [x] + | y :: rest => + if member eq rest x then x :: remove eq x list + else if eq (x, y) then list else x :: list); + +fun union eq = fold (insert eq); + +fun merge eq (xs, ys) = + if pointer_eq (xs, ys) then xs + else if null xs then ys + else fold_rev (insert eq) ys xs; + end + +val is_equal = Library.is_equal diff --git a/tools/c-parser/standalone-parser/tokenizer.mlb b/tools/c-parser/standalone-parser/tokenizer.mlb index 51e7dd06db..a7f01a43bc 100644 --- a/tools/c-parser/standalone-parser/tokenizer.mlb +++ b/tools/c-parser/standalone-parser/tokenizer.mlb @@ -9,6 +9,8 @@ $(SML_LIB)/basis/mlton.mlb (* for pointer equality *) ../Feedback.ML ../Binaryset.ML basics.sml +library.ML +unsynchronized.ML ../topo_sort.ML ann "nonexhaustiveMatch ignore" diff --git a/tools/c-parser/standalone-parser/unsynchronized.ML b/tools/c-parser/standalone-parser/unsynchronized.ML new file mode 100644 index 0000000000..114cec177e --- /dev/null +++ b/tools/c-parser/standalone-parser/unsynchronized.ML @@ -0,0 +1,36 @@ +(* SPDX-License-Identifier: BSD-3-Clause *) +(* SPDX-FileCopyrightText: Markus Wenzel, TU Muenchen *) + +(* Extracted from Isabelle sources (src/Pure/Concurrent/unsynchronized.ML), + reduced to work for mlton *) + +signature UNSYNCHRONIZED = +sig + datatype ref = datatype ref + val := : 'a ref * 'a -> unit + val ! : 'a ref -> 'a + val change: 'a ref -> ('a -> 'a) -> unit + val change_result: 'a ref -> ('a -> 'b * 'a) -> 'b + val inc: int ref -> int + val dec: int ref -> int + val add: int ref -> int -> int +end; + +structure Unsynchronized: UNSYNCHRONIZED = +struct + +(* regular references *) + +datatype ref = datatype ref; + +val op := = op :=; +val ! = !; + +fun change r f = r := f (! r); +fun change_result r f = let val (x, y) = f (! r) in r := y; x end; + +fun inc i = (i := ! i + (1: int); ! i); +fun dec i = (i := ! i - (1: int); ! i); +fun add i n = (i := ! i + (n: int); ! i); + +end; diff --git a/tools/c-parser/umm_heap/StructSupport.thy b/tools/c-parser/umm_heap/StructSupport.thy index 1eff974eb8..a18f00d3b6 100644 --- a/tools/c-parser/umm_heap/StructSupport.thy +++ b/tools/c-parser/umm_heap/StructSupport.thy @@ -541,8 +541,8 @@ lemma lift_t_hrs_mem_update_fld: Some (adjust_ti (typ_info_t TYPE('b)) xf (xfu \ (\x _. x)), m')" and xf_xfu: "fg_cons xf (xfu \ (\x _. x))" and cl: "lift_t g hp ptr = Some z" - shows "(lift_t g (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = - lift_t g hp(ptr \ xfu (\_. val) z)" + shows "((lift_t g) (hrs_mem_update (heap_update (Ptr &(ptr\f)) val) hp)) = + (lift_t g hp)(ptr \ xfu (\_. val) z)" (is "?LHS = ?RHS") proof - let ?ati = "adjust_ti (typ_info_t TYPE('b)) xf (xfu \ (\x _. x))" @@ -566,8 +566,8 @@ proof - qed also - have "\ = lift_t g hp(ptr \ update_ti_t (adjust_ti (typ_info_t TYPE('b)) xf (xfu \ (\x _. x))) - (to_bytes_p val) z)" + have "\ = (lift_t g hp)(ptr \ update_ti_t (adjust_ti (typ_info_t TYPE('b)) xf (xfu \ (\x _. 
x))) + (to_bytes_p val) z)" by (simp add: cl eui fl super_field_update_lookup) also have "\ = ?RHS" using xf_xfu diff --git a/tools/c-parser/umm_heap/TypHeap.thy b/tools/c-parser/umm_heap/TypHeap.thy index 9bd4fa5c62..1378b095db 100644 --- a/tools/c-parser/umm_heap/TypHeap.thy +++ b/tools/c-parser/umm_heap/TypHeap.thy @@ -1799,7 +1799,7 @@ lemma field_names_same: lemma lift_t_heap_update: "d,g \\<^sub>t p \ lift_t g (heap_update p v h,d) = - (lift_t g (h,d) (p \ (v::'a::mem_type)))" + ((lift_t g (h,d)) (p \ (v::'a::mem_type)))" apply(subst lift_t_sub_field_update) apply fast apply(simp add: sub_typ_proper_def)
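
The ghost page-table bookkeeping introduced in this patch (gsPTTypes, updatePTType, deleteGhost) follows a single idiom: the ghost state is a total function from machine words to optional table types, updated point-wise when a page table is created and cleared over an aligned region when one is deleted. Below is a minimal, self-contained Haskell sketch of that idiom outside the Kernel monad. The names PTType, emptyPTTypes, recordPTType and clearRegion are illustrative only and do not appear in the patch, and the alignment test is written complement (2^bits - 1) where the spec writes the equivalent (- mask bits) - 1 on fixed-width words.

-- Sketch of the gsPTTypes ghost-state idiom; illustrative names, not kernel code.
import Data.Bits (complement, (.&.))
import Data.Word (Word64)

data PTType = NormalPT | VSRootPT deriving (Eq, Show)

-- the ghost table: which addresses currently hold a page table, and of which kind
type PTTypes = Word64 -> Maybe PTType

emptyPTTypes :: PTTypes
emptyPTTypes = const Nothing

-- point-wise function update, the `funupd` idiom used in createObject/updatePTType
funupd :: Eq a => (a -> b) -> a -> b -> (a -> b)
funupd f x v = \y -> if y == x then v else f y

-- analogue of updatePTType: record the type of a newly created table
recordPTType :: Word64 -> PTType -> PTTypes -> PTTypes
recordPTType p t f = funupd f p (Just t)

-- analogue of deleteGhost: forget every entry inside the 2^bits-aligned region at ptr
clearRegion :: Word64 -> Int -> PTTypes -> PTTypes
clearRegion ptr bits f =
  \x -> if x .&. complement (2 ^ bits - 1) == ptr then Nothing else f x

main :: IO ()
main = do
  let f  = recordPTType 0x9000 NormalPT (recordPTType 0x4000 VSRootPT emptyPTTypes)
      f' = clearRegion 0x4000 12 f        -- delete the 4K object at 0x4000
  print (f 0x4000, f' 0x4000, f' 0x9000)  -- (Just VSRootPT, Nothing, Just NormalPT)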
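
The list-as-set helpers added to the standalone parser's library.ML (insert, remove, union, merge) all maintain duplicate-free lists under a caller-supplied equality. The following is a rough Haskell rendering of the same operations for readers less familiar with SML; it is an illustrative translation only, the primed names are hypothetical, and the pointer-equality shortcut in the ML merge is omitted.

-- Illustrative translation of the library.ML list-as-set helpers;
-- `eq` plays the role of the ML equality predicate (curried here).
insertEq :: (a -> a -> Bool) -> a -> [a] -> [a]
insertEq eq x xs = if any (eq x) xs then xs else x : xs

removeEq :: (a -> a -> Bool) -> a -> [a] -> [a]
removeEq eq x = filter (not . eq x)

-- unionEq eq xs ys: fold the elements of xs into ys, left to right,
-- mirroring ML's `fold (insert eq)`; e.g. unionEq (==) [1,2,3] [3,4] == [2,1,3,4]
unionEq :: (a -> a -> Bool) -> [a] -> [a] -> [a]
unionEq eq xs ys = foldl (\acc x -> insertEq eq x acc) ys xs

-- mergeEq eq (xs, ys): keep xs and add the missing elements of ys,
-- mirroring `fold_rev (insert eq) ys xs`
mergeEq :: (a -> a -> Bool) -> ([a], [a]) -> [a]
mergeEq eq (xs, ys)
  | null xs   = ys
  | otherwise = foldr (insertEq eq) xs ys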