diff --git a/.github/workflows/verifast-negative.yml b/.github/workflows/verifast-negative.yml index 0b5ad23947c33..9980025f4dd76 100644 --- a/.github/workflows/verifast-negative.yml +++ b/.github/workflows/verifast-negative.yml @@ -30,16 +30,16 @@ jobs: - name: Install VeriFast run: | cd ~ - curl -OL https://github.com/verifast/verifast/releases/download/25.07/verifast-25.07-linux.tar.gz - # https://github.com/verifast/verifast/attestations/8998468 - echo '48d2c53b4a6e4ba6bf03bd6303dbd92a02bfb896253c06266b29739c78bad23b verifast-25.07-linux.tar.gz' | shasum -a 256 -c - tar xf verifast-25.07-linux.tar.gz + curl -OL https://github.com/verifast/verifast/releases/download/25.08/verifast-25.08-linux.tar.gz + # https://github.com/verifast/verifast/attestations/10123891 + echo '1e40019d6add91bf72141c86f4007f2fe1eef67f453cb7fb8f1f5ab7d31d509f verifast-25.08-linux.tar.gz' | shasum -a 256 -c + tar xf verifast-25.08-linux.tar.gz - name: Install the Rust toolchain used by VeriFast run: rustup toolchain install nightly-2025-04-09 - name: Run VeriFast Verification run: | - export PATH=~/verifast-25.07/bin:$PATH + export PATH=~/verifast-25.08/bin:$PATH cd verifast-proofs bash check-verifast-proofs-negative.sh diff --git a/.github/workflows/verifast.yml b/.github/workflows/verifast.yml index 80f388296ff07..41e5aad7ec92d 100644 --- a/.github/workflows/verifast.yml +++ b/.github/workflows/verifast.yml @@ -27,17 +27,17 @@ jobs: - name: Install VeriFast run: | cd ~ - curl -OL https://github.com/verifast/verifast/releases/download/25.07/verifast-25.07-linux.tar.gz - # https://github.com/verifast/verifast/attestations/8998468 - echo '48d2c53b4a6e4ba6bf03bd6303dbd92a02bfb896253c06266b29739c78bad23b verifast-25.07-linux.tar.gz' | shasum -a 256 -c - tar xf verifast-25.07-linux.tar.gz + curl -OL https://github.com/verifast/verifast/releases/download/25.08/verifast-25.08-linux.tar.gz + # https://github.com/verifast/verifast/attestations/10123891 + echo '1e40019d6add91bf72141c86f4007f2fe1eef67f453cb7fb8f1f5ab7d31d509f verifast-25.08-linux.tar.gz' | shasum -a 256 -c + tar xf verifast-25.08-linux.tar.gz - name: Install the Rust toolchain used by VeriFast run: rustup toolchain install nightly-2025-04-09 - name: Run VeriFast Verification run: | - export PATH=~/verifast-25.07/bin:$PATH + export PATH=~/verifast-25.08/bin:$PATH cd verifast-proofs bash check-verifast-proofs.sh diff --git a/verifast-proofs/README.md b/verifast-proofs/README.md index ee3b573810552..096d3e32e9061 100644 --- a/verifast-proofs/README.md +++ b/verifast-proofs/README.md @@ -2,8 +2,18 @@ This directory contains [VeriFast](../doc/src/tools/verifast.md) proofs for (currently a very, very small) part of the standard library. -> [!NOTE] -> TL;DR: If the VeriFast CI action fails because of a failing diff, please run `verifast-proofs/patch-verifast-proofs.sh` to fix the problem. +Specifically, it currently contains the following proofs: + +- Partial proof of [LinkedList](alloc/collections/linked_list.rs/) +- Partial proof of [RawVec](alloc/raw_vec/mod.rs/) + +See each proof's accompanying README for a tour of the proof and applicable caveats. + +## Maintaining the proofs + + If the VeriFast CI action fails because of a failing diff, please run `cd verifast-proofs; ./patch-verifast-proofs.sh` to fix the problem. + +## `-skip_specless_fns` VeriFast supports selecting the code to verify on a function-by-function basis. 
By default, when given a `.rs` file VeriFast will try to verify [semantic well-typedness](https://verifast.github.io/verifast/rust-reference/non-unsafe-funcs.html) of all non-`unsafe` functions in that file (and in any submodules), and will require that the user provide specifications for all `unsafe` functions, which it will then verify against those specifications. However, when given the `-skip_specless_fns` command-line flag, VeriFast will skip all functions for which the user did not provide a specification. diff --git a/verifast-proofs/alloc/collections/linked_list.rs/README.md b/verifast-proofs/alloc/collections/linked_list.rs/README.md index afe1e25813643..912bed7e9192f 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs/README.md +++ b/verifast-proofs/alloc/collections/linked_list.rs/README.md @@ -382,7 +382,7 @@ fn pop_front_node<'a>(&'a mut self) -> Option, &'a A>> //@ open [?f]ref_initialized_::(alloc_ref1)(); let alloc_ref = &self.alloc; - r = match head { + r = match head { None => { //@ close [f]ref_initialized_::(alloc_ref)(); //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); @@ -572,13 +572,12 @@ closes it back up afterwards. First of all, this proof was performed with the following VeriFast command-line flags: - `-skip_specless_fns`: VeriFast ignores the functions that do not have a `req` or `ens` clause. - `-ignore_unwind_paths`: This proof ignores code that is reachable only when unwinding. -- `-allow_assume`: This proof uses a number of `assume` ghost statements and `assume_correct` clauses. These must be carefully audited. +- `-allow_assume`: This proof uses a number of `assume` ghost statements and `assume_correct` clauses. These must be carefully audited. Specifically, this proof uses `assume` statements to assume that the lifetime of the allocator used by the LinkedList value equals `'static`, i.e. this proof only applies if the global allocator or another allocator that lasts forever is used. Secondly, since VeriFast uses the `rustc` frontend, which assumes a particular target architecture, VeriFast's results hold only for the used Rust toolchain's target architecture. When VeriFast reports "0 errors found" for a Rust program, it always reports the targeted architecture as well (e.g. `0 errors found (2149 statements verified) (target: x86_64-unknown-linux-gnu (LP64))`). Thirdly, VeriFast has a number of [known unsoundnesses](https://github.com/verifast/verifast/issues?q=is%3Aissue+is%3Aopen+label%3Aunsoundness) (reasons why VeriFast might in some cases incorrectly accept a program), including the following: - VeriFast does not yet fully verify compliance with Rust's [pointer aliasing rules](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). - VeriFast does not yet properly verify compliance of custom type interpretations with Rust's [variance](https://doc.rust-lang.org/reference/subtyping.html#variance) rules. -- The current standard library specifications do not [prevent an allocated memory block from outliving its allocator](https://github.com/verifast/verifast/issues/829). This is sound only if the global allocator is used. Fourthly, unlike foundational tools such as [RefinedRust](https://plv.mpi-sws.org/refinedrust/), VeriFast has not itself been verified, so there are undoubtedly also unknown unsoundnesses. 
Such unsoundnesses might exist in VeriFast's [symbolic execution engine](https://github.com/model-checking/verify-rust-std/issues/213#issuecomment-2531006855) [itself](https://github.com/model-checking/verify-rust-std/issues/213#issuecomment-2534922580) or in its [prelude](https://github.com/verifast/verifast/tree/master/bin/rust) (definitions and axioms automatically imported at the start of every verification run) or in the [specifications](https://github.com/verifast/verifast/blob/master/bin/rust/std/lib.rsspec) it uses for the Rust standard library functions called by the program being verified. diff --git a/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs b/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs index 2c0bd376f37c4..d2c31320b4df8 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs +++ b/verifast-proofs/alloc/collections/linked_list.rs/verified/linked_list.rs @@ -25,7 +25,7 @@ use crate::boxed::Box; //@ use std::alloc::{alloc_id_t, alloc_block_in, Layout, Global, Allocator}; //@ use std::option::{Option, Option::None, Option::Some}; -//@ use std::ptr::{NonNull, NonNull_ptr}; +//@ use std::ptr::NonNull; //@ use std::boxed::Box_in; /*@ @@ -171,10 +171,10 @@ pred Nodes(alloc_id: alloc_id_t, n: Option>>, prev: Option>()) &*& struct_Node_padding(NonNull_ptr(n_)) &*& - (*NonNull_ptr(n_)).prev |-> prev &*& - (*NonNull_ptr(n_)).next |-> ?next0 &*& - pointer_within_limits(&(*NonNull_ptr(n_)).element) == true &*& + alloc_block_in(alloc_id, n_.as_ptr() as *u8, Layout::new::>()) &*& struct_Node_padding(n_.as_ptr()) &*& + (*n_.as_ptr()).prev |-> prev &*& + (*n_.as_ptr()).next |-> ?next0 &*& + pointer_within_limits(&(*n_.as_ptr()).element) == true &*& Nodes(alloc_id, next0, n, last, next, ?nodes0) &*& nodes == cons(n_, nodes0) }; @@ -204,11 +204,11 @@ lem Nodes_split_off_last(n: Option>>) req Nodes::(?alloc_id, n, ?prev, ?last, ?next, ?nodes) &*& n != next; ens last == Option::Some(?last_) &*& Nodes::(alloc_id, n, prev, ?last1, last, ?nodes0) &*& - alloc_block_in(alloc_id, NonNull_ptr(last_) as *u8, Layout::new_::>()) &*& - (*NonNull_ptr(last_)).prev |-> last1 &*& - (*NonNull_ptr(last_)).next |-> next &*& - pointer_within_limits(&(*NonNull_ptr(last_)).element) == true &*& - struct_Node_padding(NonNull_ptr(last_)) &*& + alloc_block_in(alloc_id, last_.as_ptr() as *u8, Layout::new::>()) &*& + (*last_.as_ptr()).prev |-> last1 &*& + (*last_.as_ptr()).next |-> next &*& + pointer_within_limits(&(*last_.as_ptr()).element) == true &*& + struct_Node_padding(last_.as_ptr()) &*& nodes == append(nodes0, [last_]); { open Nodes::(alloc_id, n, prev, last, next, nodes); @@ -226,11 +226,11 @@ lem Nodes_split_off_last(n: Option>>) lem Nodes_append_one_(head: Option>>) req Nodes::(?alloc_id, head, ?prev, ?last, Option::Some(?n), ?nodes1) &*& - alloc_block_in(alloc_id, NonNull_ptr(n) as *u8, Layout::new_::>()) &*& - (*NonNull_ptr(n)).prev |-> last &*& - (*NonNull_ptr(n)).next |-> ?next &*& - pointer_within_limits(&(*NonNull_ptr(n)).element) == true &*& - struct_Node_padding(NonNull_ptr(n)) &*& + alloc_block_in(alloc_id, n.as_ptr() as *u8, Layout::new::>()) &*& + (*n.as_ptr()).prev |-> last &*& + (*n.as_ptr()).next |-> ?next &*& + pointer_within_limits(&(*n.as_ptr()).element) == true &*& + struct_Node_padding(n.as_ptr()) &*& Nodes(alloc_id, next, Option::Some(n), ?tail, None, ?nodes2); ens Nodes(alloc_id, head, prev, Option::Some(n), next, append(nodes1, [n])) &*& Nodes(alloc_id, next, Option::Some(n), tail, None, nodes2); @@ 
-258,7 +258,7 @@ lem Nodes_append(n: Option>>) if n == n2 { } else { assert n == Option::Some(?n_); - Nodes_append((*NonNull_ptr(n_)).next); + Nodes_append((*n_.as_ptr()).next); close Nodes::(alloc_id, n, prev, tail, None, append(nodes1, nodes2)); } } @@ -271,7 +271,7 @@ lem Nodes_append_(n: Option>>) if n == n2 { } else { assert n == Option::Some(?n_); - Nodes_append_((*NonNull_ptr(n_)).next); + Nodes_append_((*n_.as_ptr()).next); open Nodes(alloc_id, n4, n5, tail, None, nodes3); close Nodes(alloc_id, n4, n5, tail, None, nodes3); close Nodes::(alloc_id, n, prev, n3, n4, append(nodes1, nodes2)); @@ -279,19 +279,19 @@ lem Nodes_append_(n: Option>>) } lem Nodes_append__(n: Option>>) - req Nodes::(?alloc_id, n, ?prev, ?n1, ?n2, ?nodes1) &*& Nodes::(alloc_id, n2, n1, ?n3, Option::Some(?n4), ?nodes2) &*& (*NonNull_ptr(n4)).prev |-> n3; - ens Nodes::(alloc_id, n, prev, n3, Some(n4), append(nodes1, nodes2)) &*& (*NonNull_ptr(n4)).prev |-> n3; + req Nodes::(?alloc_id, n, ?prev, ?n1, ?n2, ?nodes1) &*& Nodes::(alloc_id, n2, n1, ?n3, Option::Some(?n4), ?nodes2) &*& (*n4.as_ptr()).prev |-> n3; + ens Nodes::(alloc_id, n, prev, n3, Some(n4), append(nodes1, nodes2)) &*& (*n4.as_ptr()).prev |-> n3; { open Nodes::(alloc_id, n, prev, n1, n2, nodes1); if n == n2 { } else { assert n == Option::Some(?n_); - Nodes_append__((*NonNull_ptr(n_)).next); + Nodes_append__((*n_.as_ptr()).next); close Nodes::(alloc_id, n, prev, n3, Some(n4), append(nodes1, nodes2)); } } -pred_ctor elem_fbc(t: thread_id_t)(node: NonNull>) = (*NonNull_ptr(node)).element |-> ?elem &*& .own(t, elem); +pred_ctor elem_fbc(t: thread_id_t)(node: NonNull>) = (*node.as_ptr()).element |-> ?elem &*& .own(t, elem); pred >.own(t, ll) = Allocator(t, ll.alloc, ?alloc_id) &*& @@ -329,7 +329,7 @@ lem LinkedList_elems_send(t0: thread_id_t, t1: thread_id_t) nil => {} cons(n, nodes0) => { open elem_fbc::(t0)(n); - Send::send(t0, t1, (*NonNull_ptr(n)).element); + Send::send(t0, t1, (*n.as_ptr()).element); close elem_fbc::(t1)(n); LinkedList_elems_send::(t0, t1); } @@ -354,10 +354,10 @@ pred Nodes1(alloc_id: alloc_id_t, n: Option>>, prev: Option n == Option::Some(n_) &*& - alloc_block_in(alloc_id, NonNull_ptr(n_) as *u8, Layout::new_::>()) &*& struct_Node_padding(NonNull_ptr(n_)) &*& - (*NonNull_ptr(n_)).prev |-> prev &*& - (*NonNull_ptr(n_)).next |-> ?next0 &*& - pointer_within_limits(&(*NonNull_ptr(n_)).element) == true &*& + alloc_block_in(alloc_id, n_.as_ptr() as *u8, Layout::new::>()) &*& struct_Node_padding(n_.as_ptr()) &*& + (*n_.as_ptr()).prev |-> prev &*& + (*n_.as_ptr()).next |-> ?next0 &*& + pointer_within_limits(&(*n_.as_ptr()).element) == true &*& Nodes1(alloc_id, next0, n, last, next, nodes0, ?prevs0, ?nexts0) &*& prevs == cons(prev, prevs0) &*& nexts == cons(next0, nexts0) @@ -385,7 +385,7 @@ lem Nodes1_append(head: Option>>) match nodes1 { nil => {} cons(n, nodes10) => { - Nodes1_append::((*NonNull_ptr(n)).next); + Nodes1_append::((*n.as_ptr()).next); close [f]Nodes1::(alloc_id, head, prev, tail, next, append(nodes1, nodes2), append(prevs1, prevs2), append(nexts1, nexts2)); } } @@ -452,7 +452,7 @@ pred_ctor LinkedList_frac_borrow_content(alloc_id: alloc_id_t, l: *LinkedL inductive LinkedList_share_info = LinkedList_share_info(alloc_id: alloc_id_t, head: Option>>, tail: Option>>, nodes: list>>, prevs: list>>>, nexts: list>>>); -pred_ctor elem_share(k: lifetime_t, t: thread_id_t)(node: NonNull>) = [_](.share(k, t, &(*NonNull_ptr(node)).element)); +pred_ctor elem_share(k: lifetime_t, t: thread_id_t)(node: NonNull>) = [_](.share(k, t, 
&(*node.as_ptr()).element)); pred >.share(k, t, l) = exists(LinkedList_share_info(?alloc_id, ?head, ?tail, ?nodes, ?prevs, ?nexts)) &*& @@ -463,7 +463,7 @@ pred >.share(k, t, l) = fix elem_fbcs(t: thread_id_t, nodes: list>>) -> pred() { match nodes { nil => True, - cons(n, nodes0) => sep(.full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs(t, nodes0)) + cons(n, nodes0) => sep(.full_borrow_content(t, &(*n.as_ptr()).element), elem_fbcs(t, nodes0)) } } @@ -491,9 +491,9 @@ lem LinkedList_share_full(k: lifetime_t, t: thread_id_t, l: *LinkedList { iter(nodes_left0); open elem_fbc::(t)(n); - close_full_borrow_content::(t, &(*NonNull_ptr(n)).element); - close sep(.full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs(t, nodes_left0))(); - assert sep(.full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs(t, nodes_left0)) == elem_fbcs::(t, nodes_left); + close_full_borrow_content::(t, &(*n.as_ptr()).element); + close sep(.full_borrow_content(t, &(*n.as_ptr()).element), elem_fbcs(t, nodes_left0))(); + assert sep(.full_borrow_content(t, &(*n.as_ptr()).element), elem_fbcs(t, nodes_left0)) == elem_fbcs::(t, nodes_left); } } } @@ -517,9 +517,9 @@ lem LinkedList_share_full(k: lifetime_t, t: thread_id_t, l: *LinkedList { open True(); } cons(n, nodes_left0) => { - open sep(.full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs::(t, nodes_left0))(); + open sep(.full_borrow_content(t, &(*n.as_ptr()).element), elem_fbcs::(t, nodes_left0))(); iter(nodes_left0); - open_full_borrow_content::(t, &(*NonNull_ptr(n)).element); + open_full_borrow_content::(t, &(*n.as_ptr()).element); close elem_fbc::(t)(n); } } @@ -552,8 +552,8 @@ lem LinkedList_share_full(k: lifetime_t, t: thread_id_t, l: *LinkedList(k, t)); } cons(n, nodes_left0) => { - full_borrow_split_m(k, .full_borrow_content(t, &(*NonNull_ptr(n)).element), elem_fbcs::(t, nodes_left0)); - share_full_borrow_m::(k, t, &(*NonNull_ptr(n)).element); + full_borrow_split_m(k, .full_borrow_content(t, &(*n.as_ptr()).element), elem_fbcs::(t, nodes_left0)); + share_full_borrow_m::(k, t, &(*n.as_ptr()).element); iter(nodes_left0); close elem_share::(k, t)(n); close foreach(nodes_left, elem_share::(k, t)); @@ -590,7 +590,7 @@ lem LinkedList_share_mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l } cons(n, nodes0) => { open elem_share::(k, t)(n); - share_mono::(k, k1, t, &(*NonNull_ptr(n)).element); + share_mono::(k, k1, t, &(*n.as_ptr()).element); close elem_share::(k1, t)(n); iter(); } @@ -740,7 +740,7 @@ lem LinkedList_sync(t1: thread_id_t) nil => {} cons(n, nodes0) => { open elem_share::(k, t0)(n); - Sync::sync::(k, t0, t1, &(*NonNull_ptr(n)).element); + Sync::sync::(k, t0, t1, &(*n.as_ptr()).element); close elem_share::(k, t1)(n); iter(); } @@ -839,7 +839,7 @@ lem Iter_send<'a, T>(t1: thread_id_t) nil => {} cons(n, nodes0) => { open elem_share::(k, t0)(n); - Sync::sync::(k, t0, t1, &(*NonNull_ptr(n)).element); + Sync::sync::(k, t0, t1, &(*n.as_ptr()).element); close elem_share::(k, t1)(n); iter(); } @@ -966,13 +966,13 @@ impl LinkedList { /*@ req thread_token(?t) &*& *self |-> ?self0 &*& Allocator(t, self0.alloc, ?alloc_id) &*& Nodes(alloc_id, self0.head, None, self0.tail, None, ?nodes) &*& length(nodes) == self0.len &*& - *NonNull_ptr(node) |-> ?n &*& alloc_block_in(alloc_id, NonNull_ptr(node) as *u8, Layout::new_::>()); + *node.as_ptr() |-> ?n &*& alloc_block_in(alloc_id, node.as_ptr() as *u8, Layout::new::>()); @*/ /*@ ens thread_token(t) &*& *self |-> ?self1 &*& Allocator(t, self1.alloc, alloc_id) &*& Nodes(alloc_id, self1.head, 
None, self1.tail, None, cons(node, nodes)) &*& self1.len == 1 + length(nodes) &*& - (*NonNull_ptr(node)).element |-> n.element; + (*node.as_ptr()).element |-> n.element; @*/ { // This method takes care not to create mutable references to whole nodes, @@ -1001,7 +1001,7 @@ impl LinkedList { self.len += 1; //@ close_points_to(self); //@ assert *self |-> ?self1; - //@ points_to_limits(&(*NonNull_ptr(node)).element); + //@ points_to_limits(&(*node.as_ptr()).element); //@ close Nodes(alloc_id, node_1, None, self1.tail, None, cons(node, nodes)); } } @@ -1080,12 +1080,12 @@ impl LinkedList { //@ open Nodes(alloc_id, ?head1, None, ?tail1, None, ?nodes1); //@ open foreach(nodes1, elem_fbc::(t)); //@ open elem_fbc::(t)(node); - //@ borrow_points_to_at_lft(alloc_id.lft, NonNull_ptr(node)); - //@ leak points_to_at_lft_end_token(alloc_id.lft, NonNull_ptr(node)); - let node = Box::from_raw_in(node.as_ptr(), &*alloc_ref); + //@ borrow_points_to_at_lft(alloc_id.lft, node.as_ptr()); + //@ leak points_to_at_lft_end_token(alloc_id.lft, node.as_ptr()); + let node = Box::from_raw_in/*@::, &'a A>@*/(node.as_ptr(), &*alloc_ref); //@ close [f]ref_initialized_::(alloc_ref)(); //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); - //@ std::boxed::Box_separate_contents(&node_1); + //@ std::boxed::Box_separate_contents::, &'a A>(&node_1); //@ assert std::boxed::Box_minus_contents_in(_, _, _, _, _, ?contents_ptr); //@ assume(alloc_id.lft == 'static); //@ produce_lifetime_token_static(); @@ -1179,20 +1179,20 @@ impl LinkedList { /*@ req (*self).head |-> ?head &*& (*self).tail |-> ?tail &*& Nodes::(?alloc_id, head, None, ?prev_, Some(node), ?nodes1) &*& - alloc_block_in(alloc_id, NonNull_ptr(node) as *u8, Layout::new_::>()) &*& - (*NonNull_ptr(node)).next |-> ?next_ &*& - (*NonNull_ptr(node)).prev |-> prev_ &*& - struct_Node_padding(NonNull_ptr(node)) &*& + alloc_block_in(alloc_id, node.as_ptr() as *u8, Layout::new::>()) &*& + (*node.as_ptr()).next |-> ?next_ &*& + (*node.as_ptr()).prev |-> prev_ &*& + struct_Node_padding(node.as_ptr()) &*& Nodes::(alloc_id, next_, Some(node), tail, None, ?nodes2) &*& (*self).len |-> length(nodes1) + 1 + length(nodes2); @*/ /*@ ens (*self).head |-> ?head1 &*& (*self).tail |-> ?tail1 &*& Nodes::(alloc_id, head1, None, prev_, next_, nodes1) &*& - alloc_block_in(alloc_id, NonNull_ptr(node) as *u8, Layout::new_::>()) &*& - (*NonNull_ptr(node)).next |-> next_ &*& - (*NonNull_ptr(node)).prev |-> prev_ &*& - struct_Node_padding(NonNull_ptr(node)) &*& + alloc_block_in(alloc_id, node.as_ptr() as *u8, Layout::new::>()) &*& + (*node.as_ptr()).next |-> next_ &*& + (*node.as_ptr()).prev |-> prev_ &*& + struct_Node_padding(node.as_ptr()) &*& Nodes::(alloc_id, next_, prev_, tail1, None, nodes2) &*& (*self).len |-> length(nodes1) + length(nodes2); @*/ @@ -1395,7 +1395,7 @@ impl LinkedList { head: second_part_head, tail: second_part_tail, len: self.len - at, - alloc: Allocator_clone__VeriFast_wrapper(&self.alloc), + alloc: Allocator_clone__VeriFast_wrapper/*@::@*/(&self.alloc), marker: PhantomData, }; } @@ -1422,7 +1422,7 @@ impl LinkedList { { //@ let_lft 'a = k; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - alloc = Allocator_clone__VeriFast_wrapper(&self.alloc); + alloc = Allocator_clone__VeriFast_wrapper/*@::@*/(&self.alloc); } //@ end_lifetime(k); //@ std::alloc::end_ref_Allocator_at_lifetime::(); @@ -1465,7 +1465,7 @@ fn mem_take_usize__VeriFast_wrapper(dest: &mut usize) -> usize unsafe fn NonNull_from_ref_mut__VeriFast_wrapper(value: &mut T) -> NonNull //@ req true; -//@ 
ens NonNull_ptr(result) == value; +//@ ens result.as_ptr() == value; //@ assume_correct { NonNull::from(value) @@ -2136,9 +2136,9 @@ impl LinkedList { //@ let_lft 'b = k; //@ std::alloc::init_ref_Allocator_at_lifetime::<'b, A>(alloc_ref); //@ close drop_perm::>(false, True, t, node0); - let node = Box::new_in(node0, &self.alloc); + let node = Box::new_in/*@::, &'b A>@*/(node0, &self.alloc); //@ open drop_perm::>(false, True, t, node0); - node_ptr = NonNull_from_ref_mut__VeriFast_wrapper(Box::leak(node)); + node_ptr = NonNull_from_ref_mut__VeriFast_wrapper(Box::leak/*@::, &'b A, 'static>@*/(node)); } //@ end_lifetime(k); //@ std::alloc::end_ref_Allocator_at_lifetime::(); @@ -2147,7 +2147,7 @@ impl LinkedList { //@ assert Allocator(_, _, ?alloc_id); //@ assume(alloc_id.lft == 'static); //@ produce_lifetime_token_static(); - //@ open_points_to_at_lft(NonNull_ptr(node_ptr)); + //@ open_points_to_at_lft(node_ptr.as_ptr()); //@ leak close_points_to_at_lft_token(_, _, _, _); //@ assert Nodes(alloc_id, ll0.head, None, ll0.tail, None, ?nodes); // SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc and leaked @@ -2163,7 +2163,7 @@ impl LinkedList { foreach(nodes, elem_fbc(t)) &*& self1.len == 1 + length(nodes); close Ctx(); - close_full_borrow_content::(t, &(*NonNull_ptr(node_ptr_)).element); + close_full_borrow_content::(t, &(*node_ptr_.as_ptr()).element); produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, .full_borrow_content(t, r), klong, >.full_borrow_content(t, self))() { open Ctx(); open_full_borrow_content::(t, r); @@ -2341,7 +2341,7 @@ impl LinkedList { unsafe { //@ let_lft 'a = k; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - alloc1 = Allocator_clone__VeriFast_wrapper(&self.alloc); + alloc1 = Allocator_clone__VeriFast_wrapper/*@::@*/(&self.alloc); } //@ end_lifetime(k); //@ std::alloc::end_ref_Allocator_at_lifetime::(); @@ -2358,7 +2358,7 @@ impl LinkedList { unsafe { //@ let_lft 'a = k; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - alloc2 = Allocator_clone__VeriFast_wrapper(&self.alloc); + alloc2 = Allocator_clone__VeriFast_wrapper/*@::@*/(&self.alloc); } //@ end_lifetime(k); //@ std::alloc::end_ref_Allocator_at_lifetime::(); @@ -2700,7 +2700,7 @@ impl<'a, T> Iterator for Iter<'a, T> { //@ close [f0]Nodes1::(alloc_id, self0.head, prev, self0.tail, next, nodes, prevs, nexts); //@ close [f0]Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)(); //@ close_frac_borrow(f0, Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)); - //@ let node_ref = precreate_ref(NonNull_ptr(node)); + //@ let node_ref = precreate_ref(node.as_ptr()); //@ open_ref_init_perm_Node(node_ref); //@ produce_type_interp::(); //@ open foreach(_, _); @@ -2720,7 +2720,7 @@ impl<'a, T> Iterator for Iter<'a, T> { //@ close_ref_initialized_Node(node_ref); //@ open [1 - f]ref_padding_initialized_::>(node_ref)(); //@ let node0 = node; - //@ note(pointer_within_limits(&(*ref_origin(NonNull_ptr(node0))).element)); + //@ note(pointer_within_limits(&(*ref_origin(node0.as_ptr())).element)); let node = &*node.as_ptr(); let len = *len_ref; //@ produce_limits(len); @@ -2733,15 +2733,15 @@ impl<'a, T> Iterator for Iter<'a, T> { pred Ctx() = true; pred Q() = [f]Nodes1(alloc_id, head0, None, prev, 
self0.head, nodes_before, prevs_before, nexts_before) &*& - [f]alloc_block_in(alloc_id, NonNull_ptr(node0) as *u8, Layout::new_::>()) &*& + [f]alloc_block_in(alloc_id, node0.as_ptr() as *u8, Layout::new::>()) &*& [f/2]struct_Node_padding(node_ref) &*& - [f/2]struct_Node_padding(NonNull_ptr(node0)) &*& - [f/2](*node_ref).prev |-> prev &*& ref_end_token_Option_NonNull(&(*node_ref).prev, &(*NonNull_ptr(node0)).prev, f, prev) &*& - [f/2](*node_ref).next |-> nodeNext &*& ref_end_token_Option_NonNull(&(*node_ref).next, &(*NonNull_ptr(node0)).next, f, nodeNext) &*& + [f/2]struct_Node_padding(node0.as_ptr()) &*& + [f/2](*node_ref).prev |-> prev &*& ref_end_token_Option_NonNull(&(*node_ref).prev, &(*node0.as_ptr()).prev, f, prev) &*& + [f/2](*node_ref).next |-> nodeNext &*& ref_end_token_Option_NonNull(&(*node_ref).next, &(*node0.as_ptr()).next, f, nodeNext) &*& [f]ref_initialized(node_ref) &*& [1-f]ref_initialized(&(*node_ref).next) &*& [1-f]ref_initialized(&(*node_ref).prev) &*& - ref_padding_end_token(node_ref, NonNull_ptr(node0), f/2) &*& + ref_padding_end_token(node_ref, node0.as_ptr(), f/2) &*& [1-f]ref_padding_initialized::>(node_ref) &*& [f]Nodes1(alloc_id, nodeNext, self0.head, self0.tail, next, tail(nodes), tail(prevs), tail(nexts)) &*& [f]Nodes1(alloc_id, next, self0.tail, tail0, None, nodes_after, prevs_after, nexts_after); @@ -2769,7 +2769,7 @@ impl<'a, T> Iterator for Iter<'a, T> { open [?f1]Iter_frac_borrow_content::(alloc_id, head0, self0.head, prev, self0.tail, next, tail0, nodes_before, nodes, nodes_after, prevs_before, prevs, prevs_after, nexts_before, nexts, nexts_after)(); open Nodes1::(alloc_id, self0.head, prev, self0.tail, next, nodes, prevs, nexts); close [f1]Nodes1::(alloc_id, self1.head, self0.head, self0.head, self1.head, [], [], []); - assert self0.head == Option::Some(node) &*& [f1](*NonNull_ptr(node)).next |-> self1.head; + assert self0.head == Option::Some(node) &*& [f1](*node.as_ptr()).next |-> self1.head; close [f1]Nodes1::(alloc_id, self0.head, prev, self0.head, self1.head, [node], [prev], [self1.head]); Nodes1_append::(head0); close [f1]Iter_frac_borrow_content::(alloc_id, head0, self1.head, self0.head, self1.tail, next, tail0, append(nodes_before, [node]), tail(nodes), nodes_after, append(prevs_before, [prev]), tail(prevs), prevs_after, append(nexts_before, [self1.head]), tail(nexts), nexts_after)(); @@ -2866,14 +2866,14 @@ impl<'a, T> Iterator for IterMut<'a, T> { *self |-> self0 &*& result == Option::None } else { self0.head == Option::Some(?head) &*& - alloc_block_in(alloc_id, NonNull_ptr(head) as *u8, Layout::new_::>()) &*& struct_Node_padding(NonNull_ptr(head)) &*& - (*NonNull_ptr(head)).prev |-> prev &*& - (*NonNull_ptr(head)).next |-> ?next0 &*& - pointer_within_limits(&(*NonNull_ptr(head)).element) == true &*& + alloc_block_in(alloc_id, head.as_ptr() as *u8, Layout::new::>()) &*& struct_Node_padding(head.as_ptr()) &*& + (*head.as_ptr()).prev |-> prev &*& + (*head.as_ptr()).next |-> ?next0 &*& + pointer_within_limits(&(*head.as_ptr()).element) == true &*& Nodes(alloc_id, next0, Option::Some(head), self0.tail, next, ?nodes0) &*& nodes == cons(head, nodes0) &*& *self |-> ?self1 &*& self1.head == next0 &*& self1.tail == self0.tail &*& self1.len == self0.len - 1 &*& - result == Option::Some(&(*NonNull_ptr(head)).element) + result == Option::Some(&(*head.as_ptr()).element) }; @*/ //@ safety_proof { assume(false); } @@ -2924,10 +2924,10 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { *self |-> self0 &*& result == Option::None } else { self0.tail == 
Option::Some(?tail) &*& - alloc_block_in(alloc_id, NonNull_ptr(tail) as *u8, Layout::new_::>()) &*& struct_Node_padding(NonNull_ptr(tail)) &*& - (*NonNull_ptr(tail)).prev |-> ?prev0 &*& - (*NonNull_ptr(tail)).next |-> next &*& - pointer_within_limits(&(*NonNull_ptr(tail)).element) == true &*& + alloc_block_in(alloc_id, tail.as_ptr() as *u8, Layout::new::>()) &*& struct_Node_padding(tail.as_ptr()) &*& + (*tail.as_ptr()).prev |-> ?prev0 &*& + (*tail.as_ptr()).next |-> next &*& + pointer_within_limits(&(*tail.as_ptr()).element) == true &*& Nodes(alloc_id, self0.head, prev, prev0, self0.tail, ?nodes0) &*& nodes == append(nodes0, [tail]) &*& *self |-> ?self1 &*& self1.head == self0.head &*& self1.tail == prev0 &*& self1.len == self0.len - 1 @@ -3489,9 +3489,9 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { open elem_fbc::(t1)(current); produce_type_interp::(); if t1 != t { - Send::send(t1, t, (*NonNull_ptr(current)).element); + Send::send(t1, t, (*current.as_ptr()).element); } - close_full_borrow_content::(t, &(*NonNull_ptr(current)).element); + close_full_borrow_content::(t, &(*current.as_ptr()).element); assert (*list).alloc |-> ?alloc &*& Allocator::(t1, alloc, alloc_id) &*& @@ -3513,11 +3513,11 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { (*list).len |-> length(nodes1) + length(nodes2) &*& index == length(nodes1) &*& struct_LinkedList_padding(list); - produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, .full_borrow_content(t, &(*NonNull_ptr(current)).element), klongb2, CursorMut_fbc::(t1, ghost_cell_id, self0.list))() { + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, .full_borrow_content(t, &(*current.as_ptr()).element), klongb2, CursorMut_fbc::(t1, ghost_cell_id, self0.list))() { open Ctx(); - open_full_borrow_content::(t, &(*NonNull_ptr(current)).element); + open_full_borrow_content::(t, &(*current.as_ptr()).element); if t1 != t { - Send::send(t, t1, (*NonNull_ptr(current)).element); + Send::send(t, t1, (*current.as_ptr()).element); } leak type_interp::(); close elem_fbc::(t1)(current); @@ -3525,14 +3525,14 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { close CursorMut_fbc::(t1, ghost_cell_id, self0.list)(); } { close Ctx(); - close_full_borrow_strong(klongb2, CursorMut_fbc::(t1, ghost_cell_id, self0.list), .full_borrow_content(t, &(*NonNull_ptr(current)).element)); - full_borrow_mono(klongb2, 'b, .full_borrow_content(t, &(*NonNull_ptr(current)).element)); + close_full_borrow_strong(klongb2, CursorMut_fbc::(t1, ghost_cell_id, self0.list), .full_borrow_content(t, &(*current.as_ptr()).element)); + full_borrow_mono(klongb2, 'b, .full_borrow_content(t, &(*current.as_ptr()).element)); } } } @*/ - //@ close_ref_mut_own::<'b, T>(t, &(*NonNull_ptr(current)).element); - //@ close >.own(t, Some(&(*NonNull_ptr(current)).element)); + //@ close_ref_mut_own::<'b, T>(t, &(*current.as_ptr()).element); + //@ close >.own(t, Some(&(*current.as_ptr()).element)); Some(&mut (*current.as_ptr()).element) } } @@ -3715,7 +3715,9 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { self.list.unlink_node(unlinked_node); /*@ if t1 != t { + produce_type_interp::(); std::alloc::Allocator_send(t, (*self0.list).alloc); + leak type_interp::(); } @*/ //@ std::alloc::close_Allocator_full_borrow_content_(t, &(*self0.list).alloc); @@ -3730,10 +3732,10 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { { //@ let_lft 'b = k; //@ std::alloc::close_Allocator_ref::<'b, A>(t, alloc_ref); - //@ borrow_points_to_at_lft(alloc_id.lft, NonNull_ptr(unlinked_node)); + //@ borrow_points_to_at_lft(alloc_id.lft, 
unlinked_node.as_ptr()); //@ leak points_to_at_lft_end_token(_, _); let unlinked_node = Box::from_raw_in/*@::, &'b A>@*/(unlinked_node.as_ptr(), &self.list.alloc); - r = Some(Box_into_inner_with_ref_Allocator__VeriFast_wrapper(unlinked_node).element); // Some(unlinked_node_.element) + r = Some(Box_into_inner_with_ref_Allocator__VeriFast_wrapper/*@::, A, 'b>@*/(unlinked_node).element); // Some(unlinked_node_.element) } //@ close [f]ref_initialized_::(alloc_ref)(); //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); @@ -3744,7 +3746,9 @@ impl<'a, T, A: Allocator> CursorMut<'a, T, A> { //@ ghost_cell_mutate(ghost_cell_id, pair(self0.index, current1)); /*@ if t1 != t { + produce_type_interp::(); std::alloc::Allocator_send(t1, (*self0.list).alloc); + leak type_interp::(); assert r == Option::Some(?e); produce_type_interp::(); Send::send(t1, t, e); @@ -4076,9 +4080,9 @@ where { //@ let_lft 'b = k; //@ std::alloc::close_Allocator_ref::<'b, A>(t, alloc_ref); - //@ borrow_points_to_at_lft(alloc_id.lft, NonNull_ptr(node)); + //@ borrow_points_to_at_lft(alloc_id.lft, node.as_ptr()); //@ leak points_to_at_lft_end_token(_, _); - r = Some(Box_into_inner_with_ref_Allocator__VeriFast_wrapper(Box::from_raw_in/*@::, &'b A>@*/(node.as_ptr(), &self.list.alloc)).element); + r = Some(Box_into_inner_with_ref_Allocator__VeriFast_wrapper/*@::, A, 'b>@*/(Box::from_raw_in/*@::, &'b A>@*/(node.as_ptr(), &self.list.alloc)).element); } //@ close [f]ref_initialized_::(alloc_ref)(); //@ close_frac_borrow(f, ref_initialized_(alloc_ref)); diff --git a/verifast-proofs/alloc/raw_vec/mod.rs/README.md b/verifast-proofs/alloc/raw_vec/mod.rs/README.md index 4f90c960ab74e..9e47cb7e53413 100644 --- a/verifast-proofs/alloc/raw_vec/mod.rs/README.md +++ b/verifast-proofs/alloc/raw_vec/mod.rs/README.md @@ -1,6 +1,1027 @@ # Partial verification of RawVec with VeriFast -With certain [caveats](#caveats), this proof proves functional correctness as well as [soundness](https://doc.rust-lang.org/nomicon/working-with-unsafe.html) of RawVec associated functions `new_in`, `with_capacity_in`, `try_reserve`, `try_reserve_exact`, and `drop`, as well as functional correctness of RawVec associated functions `into_box`, `from_raw_parts_in`, `from_nonnull_in`, `ptr`, and `capacity`, RawVecInner associated functions `new_in`, `with_capacity_in`, `try_allocate_in`, `from_raw_parts_in`, `from_nonnull_in`, `ptr`, `non_null`, `capacity`, `current_memory`, `try_reserve`, `try_reserve_exact`, `needs_to_grow`, `set_ptr_and_cap`, `grow_amortized`, `grow_exact`, `shrink`, `shrink_unchecked`, and `deallocate`, and functions `capacity_overflow`, `new_cap`, `finish_grow`, `handle_error`, `alloc_guard`, and `layout_array`. 
+With certain [caveats](#caveats), this proof proves functional correctness as well as [soundness](https://doc.rust-lang.org/nomicon/working-with-unsafe.html) of RawVec associated functions `new_in`, `with_capacity_in`, `ptr`, `capacity`, `try_reserve`, `try_reserve_exact`, and `drop`, RawVecInner associated functions `new_in`, `with_capacity_in`, `try_allocate_in`, `ptr`, `non_null`, `capacity`, and `needs_to_grow`, and functions `capacity_overflow`, `handle_error`, `alloc_guard`, and `layout_array`, as well as functional correctness of RawVec associated functions `into_box`, `from_raw_parts_in`, `from_nonnull_in`, RawVecInner associated functions `from_raw_parts_in`, `from_nonnull_in`, `current_memory`, `try_reserve`, `try_reserve_exact`, `set_ptr_and_cap`, `grow_amortized`, `grow_exact`, `shrink`, `shrink_unchecked`, and `deallocate`, and functions `new_cap` and `finish_grow`. + +## What is RawVec? + +RawVec is an internal abstraction used in the implementation of Rust's Vec and VecDeque data structures. RawVec's job is to take care of the allocation, deallocation, growing, and shrinking of the buffer used by Vec and VecDeque to store the elements. It hides the complexities of correctly dealing with a zero-sized element type, with a zero-capacity buffer, with possible arithmetic overflows, etc. It does not call the allocator for zero-size buffers. + +RawVec is defined as follows: + +```rust +type Cap = core::num::niche_types::UsizeNoHighBit; + +struct RawVecInner { + ptr: Unique, + cap: Cap, + alloc: A, +} + +pub(crate) struct RawVec { + inner: RawVecInner, + _marker: PhantomData, +} +``` + +RawVecInner is like RawVec, but only generic over the allocator, not the element type. Therefore, all RawVecInner methods take the element layout as a parameter. Having this separation reduces the amount of code that needs to be monomorphized. + +The `cap` field is a `UsizeNoHighBit`, which is like a `usize` except that its high bit must always be 0. In other words, its value must always be between 0 and `isize::MAX`. This allows the compiler to use the high bit for niche optimizations. The `cap` field's value is used only for non-zero-size element types; if T is a ZST, the field value is ignored and the logical capacity is `usize::MAX`. + +## Tour of the proof + +### Predicates RawVecInner and RawVec + +The core definitions of the proof are the following: + +``` +fix logical_capacity(cap: UsizeNoHighBit, elem_size: usize) -> usize { + if elem_size == 0 { usize::MAX } else { cap.as_inner() } +} + +pred RawVecInner(t: thread_id_t, self: RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = + Allocator(t, self.alloc, alloc_id) &*& + capacity == logical_capacity(self.cap, elemLayout.size()) &*& + ptr == self.ptr.as_non_null_ptr().as_ptr() &*& + ptr as usize % elemLayout.align() == 0 &*& + pointer_within_limits(ptr) == true &*& + if capacity * elemLayout.size() == 0 { + true + } else { + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr, allocLayout) + }; + +pred RawVec(t: thread_id_t, self: RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = + RawVecInner(t, self.inner, Layout::new::, alloc_id, ?ptr_, capacity) &*& ptr == ptr_ as *T; +``` + +These are three VeriFast *ghost declarations*. +- The first defines *fixpoint function* `logical_capacity`. A fixpoint function is a pure mathematical function; it must always terminate, must not have side-effects, and must not depend on the state of memory. 
+- The second defines *predicate* `RawVecInner`. A predicate is a named, parameterized *separation logic* assertion. ([Separation logic](https://en.wikipedia.org/wiki/Separation_logic) is a logic for reasoning about ownership.) Abstractly speaking, assertion `RawVecInner(t, self, elemLayout, alloc_id, ptr, capacity)` asserts exclusive ownership of RawVecInner value `self`, accessible to thread `t` (in case allocator type A is not Send), valid with respect to element layout `elemLayout`, using the allocator identified by `alloc_id`, with buffer pointer `ptr` and logical capacity `capacity`. (Note that a value `alloc` of a type A : Allocator is not a good identifier for the allocator, since `alloc` and `&alloc` are different values that denote the same allocator. This is why VeriFast introduces the separate notion of *allocator identifiers*.) Concretely, among other things, the body of the predicate asserts exclusive ownership of the `A` value `self.alloc`, accessible to thread `t` and denoting the allocator identified by `alloc_id`; it also asserts that the pointer is properly aligned for the element layout and, if the buffer size is nonzero, exclusive permission (denoted by the `alloc_block_in` predicate) to deallocate or reallocate (grow or shrink) the allocation at `ptr`, whose layout is given by pure function `Layout::repeat`. The notation `?allocLayout` denotes a *binding occurrence* of logical variable `allocLayout`. This variable is bound at this point through pattern matching against the result of `elemLayout.repeat(capacity)`. The `&*&` operator denotes *separating conjunction*; it asserts the resources asserted by the left-hand side and, *separately*, the resources asserted by the right-hand side. For example, `alloc_block_in(alloc_id, ptr, allocLayout) &*& alloc_block_in(alloc_id, ptr, allocLayout)` is unsatisfiable because it asserts *two* copies of resource `alloc_block_in(alloc_id, ptr, allocLayout)` and there can only ever be one of these, since it denotes exclusive permission to deallocate or reallocate the allocation. +- The third defines predicate `RawVec`. Abstractly speaking, assertion `RawVec(t, self, alloc_id, ptr, capacity)` asserts exclusive ownership of RawVec value `self`, accessible to thread `t`, using the allocator identified by `alloc_id`, with buffer pointer `ptr` and logical capacity `capacity`. It is defined trivially in terms of predicate `RawVecInner`. + +### RawVecInner assocated function `new_in` + +```rust +impl RawVecInner { + const fn new_in(alloc: A, align: Alignment) -> Self + /*@ + req exists::(?elemSize) &*& + thread_token(?t) &*& + Allocator(t, alloc, ?alloc_id) &*& + std::alloc::is_valid_layout(elemSize, align.as_nonzero().get()) == true; + @*/ + /*@ + ens thread_token(t) &*& + RawVecInner(t, result, Layout::from_size_align(elemSize, align.as_nonzero().get()), alloc_id, ?ptr, ?capacity) &*& + array_at_lft_(alloc_id.lft, ptr, capacity * elemSize, _) &*& + capacity * elemSize == 0; + @*/ + //@ on_unwind_ens false; + //@ safety_proof { ... } + { + let ptr = Unique::from_non_null(NonNull::without_provenance(align.as_nonzero())); + // `cap: 0` means "unallocated". zero-sized types are ignored. 
+ let cap = ZERO_CAP; + let r = Self { ptr, cap, alloc }; + //@ div_rem_nonneg_unique(align.as_nonzero().get(), align.as_nonzero().get(), 1, 0); + //@ let layout = Layout::from_size_align(elemSize, align.as_nonzero().get()); + //@ close RawVecInner(t, r, layout, alloc_id, _, _); + r + } +} +``` + +This function creates and returns a RawVecInner value, accessible to the current thread `t` and valid with respect to an element layout with the given alignment and any element size `elemSize` that, together with the given alignment, constitutes a [valid layout](https://doc.rust-lang.org/beta/std/alloc/struct.Layout.html) (as expressed by fixpoint function [`is_valid_layout`](https://github.com/verifast/verifast/blob/b6b92b53bb70832774c327ba09d1914a0063a659/bin/rust/std/lib.rsspec#L703)). It uses the allocator denoted by `alloc`. The precondition (given by the `req` clause) asserts the `thread_token` for the current thread `t`, which denotes exclusive permission to access resources that are only accessible to thread `t`. Furthermore, the precondition also asserts ownership of `alloc`. The `thread_token` is also asserted by the postcondition (given by the `ens` clause), but ownership of `alloc` is not. It follows that the caller loses ownership of `alloc`, but gains ownership of the RawVecInner value. The postcondition also asserts an `array_at_lft_` resource. It denotes ownership of a possibly uninitialized region of memory at `ptr` of size `capacity * elemSize`, but only until the end of the allocator's lifetime `alloc_id.lft`. The size is always zero in this case, so this resource is really an empty box, but asserting it here allows the caller to treat the case of a zero-size allocation uniformly with the case of a nonzero-size one. + +This function never unwinds, so its unwind postcondition (given by the `on_unwind_ens` clause) is `false`. We did not in fact verify unwind paths (we invoked VeriFast with the `-ignore_unwind_paths` option), so we will ignore unwind postconditions in the remainder of this tour. + +The proof calls lemma [`div_rem_nonneg_unique`](https://github.com/verifast/verifast/blob/d44166ebe72b2846b1a4464dc0ad52fe69c647a9/bin/rust/prelude_core.rsspec#L67) to help VeriFast see that `ptr` is properly aligned for the given alignment. It then uses the `close` ghost command to wrap the resources described by predicate `RawVecInner` into a `RawVecInner` *chunk* in VeriFast's symbolic heap. In this case, the only resource described by this predicate is the ownership of `alloc`. In this case, therefore, the `close` command *consumes* the `Allocator(t, alloc, alloc_id)` chunk from the symbolic heap and *produces* a `RawVecInner(t, r, layout, alloc_id, ptr_, capacity)` chunk, for some values for `ptr_` and `capacity` which VeriFast derives from the definition of `RawVecInner`. At this point, VeriFast also checks that the facts asserted by the `RawVecInner` predicate hold. + +For every Rust function not marked as `unsafe`, VeriFast generates a specification (i.e. a precondition and a postcondition) that expresses the function's *semantic well-typedness*, which guarantees that calling this function from safe code cannot cause undefined behavior. By default, VeriFast checks that the specification written by the user trivially implies the generated specification. For function `new_in`, however, that is not the case. In that case, the user must provide a `safety_proof` clause proving that the written specification implies the generated specification. 
We postpone the topic of semantic well-typedness for now and will discuss these safety proofs later.
+
+### Function `layout_array`
+
+The definition of function `layout_array` in the Rust standard library is as follows:
+
+```rust
+fn layout_array(cap: usize, elem_layout: Layout) -> Result {
+    elem_layout.repeat(cap).map(|(layout, _pad)| layout).map_err(|_| CapacityOverflow.into())
+}
+```
+
+The verified version is as follows:
+
+```rust
+fn layout_array(cap: usize, elem_layout: Layout) -> Result
+//@ req thread_token(currentThread);
+/*@
+ens thread_token(currentThread) &*&
+    match result {
+        Result::Ok(layout) => elem_layout.repeat(cap) == some(pair(layout, ?stride)),
+        Result::Err(err) => .own(currentThread, err)
+    };
+@*/
+//@ safety_proof { ... }
+{
+    let r = match elem_layout.repeat(cap) {
+        Ok(info) => Ok(info.0),
+        Err(err) => Err(err)
+    };
+    let r2 = match r {
+        Ok(l) => Ok(l),
+        Err(err) => {
+            let e = CapacityOverflow;
+            //@ close .own(currentThread, e);
+            Err(e.into())
+        }
+    };
+    r2
+}
+```
+
+Since VeriFast does not currently support verifying code that involves lambda expressions, we inlined the calls of `Result::map` and `Result::map_err`. We developed a tool called `refinement-checker`, which ships with VeriFast. We use it to check that the original version of `raw_vec` *refines* the verified version, which means that every behavior of the original version is also a behavior of the verified version. Therefore, if we can prove through verification that the verified version has no undefined behavior, then it follows that neither does the original version.
+
+The postcondition performs a case analysis on the result of the function. If the function returns `Err(err)`, the postcondition asserts *ownership* of TryReserveError value `err` accessible to the current thread, using notation `.own(currentThread, err)`.
+
+The function calls [`>::into`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L1697), whose precondition asserts `.own(currentThread, e)`. This chunk is created using the `close` ghost command. TryReserveErrorKind is an [enum](https://doc.rust-lang.org/std/collections/enum.TryReserveErrorKind.html); its ownership predicate (automatically generated by VeriFast) simply asserts ownership of the relevant constructor's fields. Since CapacityOverflow has no fields, in this case closing the predicate does not consume anything.
+
+Since function `layout_array` is not marked as `unsafe`, VeriFast generates a specification for it that expresses its semantic well-typedness. The generated specification mostly just asserts ownership of the arguments in the precondition, and ownership of the result in the postcondition. In this case, it is as follows:
+```rust
+fn layout_array(cap: usize, elem_layout: Layout) -> Result
+//@ req thread_token(currentThread) &*& .own(currentThread, elem_layout);
+//@ ens thread_token(currentThread) &*& >.own(currentThread, result);
+```
+(As an optimization, VeriFast skips asserting ownership of types like `usize` whose ownership it knows to be trivial.)
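+
+Before turning to the safety proof, here is a small worked instance of the written postcondition (illustrative only and not part of the proof; it assumes a 64-bit target and takes `elem_layout` to be the layout of `u32`, with size 4 and alignment 4):
+
+```
+// cap == 10:
+//   elem_layout.repeat(10) == some(pair(layout, stride))
+//   with layout.size() == 40, layout.align() == 4, stride == 4,
+//   so layout_array returns Ok(layout) and the Ok branch of the postcondition holds.
+// cap so large that 4 * cap > isize::MAX:
+//   Layout::repeat fails, so layout_array returns Err(CapacityOverflow.into()) and
+//   ownership of the TryReserveError value passes to the caller, as the Err branch requires.
+```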
+ +VeriFast cannot automatically prove that the written specification implies the generated specification, so we provide a `safety_proof` clause: +```rust +/*@ +safety_proof { + leak .own(_t, elem_layout); + let result = call(); + match result { + Result::Ok(layout) => { std::alloc::close_Layout_own(_t, layout); } + Result::Err(e) => {} + } + close >.own(_t, result); +} +@*/ +``` +A safety proof is a block of ghost commands, one of which must be `call()`. VeriFast verifies the block against the generated specification, using the written specification to symbolically execute the `call()`. + +The safety proof for function `layout_array` first leaks the ownership of `elem_layout`. Ownership of a Layout value does not assert any resources, but unfortunately VeriFast does not currently realize that automatically so without the `leak` command it would complain that the proof might leak resources. + +After performing the `call()`, the proof must produce ownership of the Result value. It does so using the `close` command, but first it must produce ownership of the Result constructor's field. In the `Err` case, it gets it directly from the `call()`'s postcondition, but in the `Ok` case it must create it by calling lemma [`std::alloc::close_Layout_own`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L691). + +### RawVecInner associated function `try_allocate_in` + +#### Specification + +The proof for this function is rather large. Let's first focus on the specification: + +```rust +enum AllocInit { + Uninitialized, + Zeroed, +} + +impl RawVecInner { + fn try_allocate_in( + capacity: usize, + init: AllocInit, + alloc: A, + elem_layout: Layout, + ) -> Result + /*@ + req thread_token(?t) &*& + Allocator(t, alloc, ?alloc_id) &*& + t == currentThread; + @*/ + /*@ + ens thread_token(t) &*& + match result { + Result::Ok(v) => + RawVecInner(t, v, elem_layout, alloc_id, ?ptr, ?capacity_) &*& + capacity <= capacity_ &*& + match init { + raw_vec::AllocInit::Uninitialized => + array_at_lft_(alloc_id.lft, ptr, ?n, _) &*& + elem_layout.size() % elem_layout.align() != 0 || n == capacity_ * elem_layout.size(), + raw_vec::AllocInit::Zeroed => + array_at_lft(alloc_id.lft, ptr, ?n, ?bs) &*& + elem_layout.size() % elem_layout.align() != 0 || n == capacity_ * elem_layout.size() &*& + forall(bs, (eq)(0)) == true + }, + Result::Err(e) => .own(t, e) + }; + @*/ + //@ safety_proof { ... } +``` +The postcondition asserts that, if the function succeeds, it returns a RawVecInner value with a capacity that is at least the requested capacity. Furthermore, it returns ownership of a region of memory whose size `n` equals the product of the capacity and the element size, at least if the given element layout's size is a multiple of its alignment. (If that is not the case, the block will be larger, but since the only client of RawVecInner only passes element layouts that satisfy this property, we did not bother to specify that.) 
If an uninitialized buffer was requested, the region will be possibly uninitialized, as expressed by predicate `array_at_lft_` (notice the trailing underscore), and if a zeroed buffer was requested, the region will be initialized (as expressed by predicate `array_at_lft`) with a list of bytes `bs` that satisfies `forall(bs, (eq)(0)) == true`, expressed using fixpoint functions [`forall`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/list.rsspec#L156) and [`eq`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/prelude_core_core.rsspec#L5) defined in the VeriFast prelude. + +#### First part + +Let's go over the proof part by part. The first part calls `layout_array` to compute the layout for the allocation: +```rust +{ + //@ std::alloc::Layout_inv(elem_layout); + + // We avoid `unwrap_or_else` here because it bloats the amount of + // LLVM IR generated. + let layout = match layout_array(capacity, elem_layout) { + Ok(layout) => layout, + Err(_) => { + //@ leak .own(_, _); + //@ std::alloc::Allocator_to_own(alloc); + //@ close .own(currentThread, std::collections::TryReserveErrorKind::CapacityOverflow); + return Err(CapacityOverflow.into()) + }, + }; +``` +The proof first calls [`std::alloc::Layout_inv`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L716) to make VeriFast aware of the fact that a Layout is always valid, meaning, roughly speaking, that its size is at most `isize::MAX`. + +If `layout_array` returns an error, `alloc` will get dropped, which VeriFast treats like a call of [`std::ptr::drop_in_place`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L488). This call requires `.own(currentThread, alloc)`, but all we have (from our precondition) is `Allocator(currentThread, alloc, alloc_id)`. We convert one into the other using lemma [`std::alloc::Allocator_to_own`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L842). + +#### Second part + +The second part handles the case of a zero-size layout: +```rust +//@ let elemLayout = elem_layout; +//@ let layout_ = layout; +//@ assert elemLayout.repeat(capacity) == some(pair(layout_, ?stride)); +//@ std::alloc::Layout_repeat_some(elemLayout, capacity); +//@ mul_mono_l(elemLayout.size(), stride, capacity); +// Don't allocate here because `Drop` will not deallocate when `capacity` is 0. +if layout.size() == 0 { + let elem_layout_alignment = elem_layout.alignment(); + //@ close exists(elem_layout.size()); + let r = Self::new_in(alloc, elem_layout_alignment); + //@ RawVecInner_inv2::(); + //@ assert RawVecInner(_, _, _, _, ?ptr_, ?capacity_); + //@ mul_mono_l(0, capacity, elem_layout.size()); + //@ mul_zero(capacity, elem_layout.size()); + /*@ + match init { + raw_vec::AllocInit::Uninitialized => { close array_at_lft_(alloc_id.lft, ptr_, 0, []); } + raw_vec::AllocInit::Zeroed => { close array_at_lft(alloc_id.lft, ptr_, 0, []); } + } + @*/ + return Ok(r); +} +``` +The proof starts by loading `elem_layout` and `layout` into ghost variables. This is necessary to be able mention them +in the subsequent `assert` command, because `elem_layout` and `layout` have their address taken, so loading them accesses +the VeriFast symbolic heap, and expressions that access the symbolic heap are not allowed inside assertions. 
+ +Next, the proof uses an `assert` command to bind `layout`'s stride (the offset between subsequent array elements) to ghost variable `stride`. +The stride is equal to the element size, except if the element size is not a multiple of the element alignment. +It then calls lemma [`std::alloc::Layout_repeat_some`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L732) +to make VeriFast aware of the properties of the stride, including that it is at least the element size. +It then calls lemma [`mul_mono_l`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/prelude_core.rsspec#L55) +to derive that `elemLayout.size() * capacity <= stride * capacity`. + +If the layout size is zero, the code calls `Self::new_in`. Its precondition requires an [`exists`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/prelude_core.rsspec#L29) chunk, so the proof creates it using a `close` command. After the +`new_in` call, it calls lemma `RawVecInner_inv2`, defined near the top of the `raw_vec.rs` proof as follows: +``` +lem RawVecInner_inv2() + req RawVecInner::(?t, ?self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens RawVecInner::(t, self_, elemLayout, alloc_id, ptr, capacity) &*& + pointer_within_limits(ptr) == true &*& ptr as usize % elemLayout.align() == 0 &*& + 0 <= capacity &*& capacity <= usize::MAX &*& + if elemLayout.size() == 0 { capacity == usize::MAX } else { capacity <= isize::MAX }; +{ + open RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); + std::num::niche_types::UsizeNoHighBit_inv(self_.cap); + close RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); +} +``` +A lemma is like a regular Rust function, except that it is defined inside an annotation, and VeriFast checks that it has no side-effects and terminates. +The proof of the lemma uses an `open` command, which consumes the specified predicate chunk and produces the predicate's definition. In this case, the +point of the `open` command is to make VeriFast aware of the facts asserted by the predicate. Any resources produced by the `open` command are consumed +again by the subsequent `close` command. + +#### Third part + +Next, the function allocates a block of memory with the computed layout: +```rust +let result = match init { + AllocInit::Uninitialized => { + let r; + //@ let alloc_ref = precreate_ref(&alloc); + //@ let k = begin_lifetime(); + unsafe { + //@ let_lft 'a = k; + //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); + r = alloc.allocate(layout); + //@ leak Allocator(_, _, _); + } + //@ end_lifetime(k); + //@ std::alloc::end_ref_Allocator_at_lifetime::(); + r + } + AllocInit::Zeroed => { ... } +}; +``` +Here, the proof is complicated by the fact that VeriFast verifies compliance with Rust's rule saying that memory pointed to by a shared reference must not be mutated while the shared reference is active. VeriFast does so by, first of all, treating the shared reference as denoting a *different place* from the original place from which the shared reference was created. While both places have the same address, they have different *provenances*. It follows that any separation logic permissions the proof has for accessing the original place do not apply to the new place. Secondly, VeriFast forces the proof to *precreate* and *initialize* the new shared reference. 
Initializing generally means producing permission to perform accesses through the shared reference, and taking away permission to mutate the original place (except in the presence of interior mutability). Finally, *ending* a shared reference removes all permissions to perform accesses through the shared reference and restores permission to mutate the original place. + +Since Allocator method [`allocate`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L982) takes a shared reference to the Allocator value, the line `r = alloc.allocate(layout);` implicitly creates a shared reference to variable `alloc`. VeriFast treats a shared reference creation like a call of function [`create_ref`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/aliasing.rsspec#L87). That function's precondition requires a `ref_precreated_token`, which can only be obtained using lemma +[`precreate_ref`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/aliasing.rsspec#L77). This lemma also produces a `ref_init_perm`, which the proof needs to initialize the new shared reference. The proof performs the initialization using lemma [`init_ref_Allocator_at_lifetime`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L908), which produces the `ref_initialized` resource required by `create_ref`, but also consumes `Allocator::(currentThread, alloc, alloc_id)` and produces `Allocator::<&'a A>(currentThread, alloc_ref, alloc_id)`, for some lifetime variable `'a`. It also produces a token that allows the proof to recover the original `Allocator` chunk using lemma [`end_ref_Allocator_at_lifetime`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L913) after lifetime `'a` has ended. The proof first uses [`begin_lifetime`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L71) to create a semantic lifetime, and then uses ghost declaration `let_lft` to introduce a lifetime variable `'a` at the level of the type system, bound to semantic lifetime `k`, whose scope is the enclosing block. After the block, the proof uses [`end_lifetime`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L76) to obtain the `lifetime_dead_token` needed by `end_ref_Allocator_at_lifetime`. + +#### Fourth part + +The final part of the `try_allocate_in` proof involves no new concepts: +```rust + let ptr = match result { + Ok(ptr) => ptr, + Err(_) => { + //@ std::alloc::Allocator_to_own(alloc); + let err1 = AllocError { layout, non_exhaustive: () }; + //@ std::alloc::close_Layout_own(currentThread, layout); + //@ close_tuple_0_own(currentThread); + //@ close .own(currentThread, err1); + return Err(err1.into()) + } + }; + + // Allocators currently return a `NonNull<[u8]>` whose length + // matches the size requested. If that ever changes, the capacity + // here should change to `ptr.len() / size_of::()`. 
+ /*@ + if elem_layout.size() % elem_layout.align() == 0 { + div_rem_nonneg(elem_layout.size(), elem_layout.align()); + div_rem_nonneg(stride, elem_layout.align()); + if elem_layout.size() / elem_layout.align() < stride / elem_layout.align() { + mul_mono_l(elem_layout.size() / elem_layout.align() + 1, stride / elem_layout.align(), elem_layout.align()); + } else { + if elem_layout.size() / elem_layout.align() > stride / elem_layout.align() { + mul_mono_l(stride / elem_layout.align() + 1, elem_layout.size() / elem_layout.align(), elem_layout.align()); + assert false; + } + } + assert stride == elem_layout.size(); + } + @*/ + /*@ + if elem_layout.size() == 0 { + div_rem_nonneg_unique(elem_layout.size(), elem_layout.align(), 0, 0); + assert false; + } + @*/ + //@ mul_mono_l(1, elem_layout.size(), capacity); + let res = Self { + ptr: Unique::from(ptr.cast()), + cap: unsafe { Cap::new_unchecked(capacity) }, + alloc, + }; + //@ std::alloc::alloc_block_in_aligned(ptr.ptr.as_ptr()); + //@ close RawVecInner(t, res, elem_layout, alloc_id, ptr.ptr.as_ptr(), _); + Ok(res) +} +``` + +### Predicate `RawVecInner_share_` + +Rust knows two types of ownership: *exclusive ownership*, as in the case of a non-borrowed value or a mutable reference; and *shared ownership*, as in the case of a shared reference. +Correspondingly, predicate `RawVecInner(t, self, elemLayout, alloc_id, ptr, capacity)` denotes *exclusive ownership* of a RawVecInner value `self`, and predicate `RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity)` denotes *shared ownership* with lifetime `k` of the place of type RawVecInner at `l` and the value it stores. +Shared references are Copy and therefore duplicable; correspondingly, `RawVecInner_share_` is duplicable as well. + +``` +pred_ctor RawVecInner_frac_borrow_content(l: *RawVecInner, elemLayout: Layout, ptr: *u8, capacity: usize)(;) = + struct_RawVecInner_padding(l) &*& + (*l).ptr |-> ?u &*& + (*l).cap |-> ?cap &*& + capacity == logical_capacity(cap, elemLayout.size()) &*& + ptr == u.as_non_null_ptr().as_ptr() &*& + ptr as usize % elemLayout.align() == 0 &*& + pointer_within_limits(ptr) == true &*& + if capacity * elemLayout.size() == 0 { + true + } else { + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) + }; + +pred RawVecInner_share_(k: lifetime_t, t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = + [_]std::alloc::Allocator_share(k, t, &(*l).alloc, alloc_id) &*& + [_]frac_borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)) &*& ptr != 0; +``` + +Predicate `RawVecInner_share_` uses two predicates in its definition: +- `[_]Allocator_share::(k, t, l, alloc_id)` denotes shared ownership of the place of type A at `l` and the Allocator value stored at that place, at lifetime `k`, accessible to thread `t`, denoting allocator `alloc_id`. The `[_]` prefix indicates that this is a *dummy fraction*, which means it is duplicable. VeriFast automatically duplicates a dummy fraction when it is consumed, so that it is never actually removed from the symbolic heap. +- `[_]frac_borrow(k, p)` denotes a *fractured borrow* of the resources asserted by *predicate value* `p` at lifetime `k`. Owning a fractured borrow allows the owner to obtain *fractional ownership* of payload `p` until the end of lifetime `k`. Fractional ownership of a memory region is sufficient for reading, but writing requires exclusive ownership. 
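The behavior of dummy fractions can be illustrated with a minimal sketch (the predicate `p` below is a placeholder, not part of this proof): because consuming a dummy fraction never removes the chunk from the symbolic heap, a single `[_]p()` chunk suffices to prove a postcondition that asserts it twice.

```
pred p() = true; // placeholder predicate

lem duplicate_p()
    req [_]p();
    ens [_]p() &*& [_]p();
{
    // Nothing to do: when VeriFast consumes the first [_]p() conjunct of the postcondition,
    // it duplicates the dummy fraction instead of removing it, so the second conjunct
    // can be consumed from the same chunk.
}
```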
+ +Predicate `RawVecInner_share_` specifies the payload of the fractured borrow by means of *predicate constructor* `RawVecInner_frac_borrow_content`. A predicate constructor has two parameter lists. Applying a predicate constructor to arguments for the first parameter list yields a *predicate value* which can be passed as an argument to a higher-order predicate such as `frac_borrow`. + +`RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)()` asserts ownership of the struct padding at `l` and the `ptr` and `cap` fields at `l`, the latter expressed using *points-to assertions*: assertion `place |-> value` asserts ownership of place `place` and furthermore asserts that it currently stores value `value`. + +### Lemma `share_RawVecInner` + +``` +pred RawVecInner_share_end_token(k: lifetime_t, t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = + borrow_end_token(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)) &*& + borrow_end_token(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)) &*& + if capacity * elemLayout.size() == 0 { + true + } else { + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr, allocLayout) + }; + +lem share_RawVecInner(k: lifetime_t, l: *RawVecInner) + nonghost_callers_only + req [?q]lifetime_token(k) &*& + *l |-> ?self_ &*& + RawVecInner(?t, self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens [q]lifetime_token(k) &*& + [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity) &*& + RawVecInner_share_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); +{ + open RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); + close RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + full_borrow_into_frac(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + std::alloc::close_Allocator_full_borrow_content_(t, &(*l).alloc); + borrow(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)); + std::alloc::share_Allocator_full_borrow_content_(k, t, &(*l).alloc, alloc_id); + close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + close RawVecInner_share_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); +} + +lem end_share_RawVecInner(l: *RawVecInner) + nonghost_callers_only + req RawVecInner_share_end_token(?k, ?t, l, ?elemLayout, ?alloc_id, ?ptr, ?capacity) &*& [_]lifetime_dead_token(k); + ens *l |-> ?self_ &*& RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); +{ + open RawVecInner_share_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); + borrow_end(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)); + std::alloc::open_Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id); + borrow_end(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + close RawVecInner(t, *l, elemLayout, alloc_id, ptr, capacity); +} +``` + +Lemma `share_RawVecInner` converts exclusive ownership of a place of type RawVecInner, and the value it stores, to shared ownership at a lifetime `k`. This is possible only if the lifetime is alive, as evidenced by the existence of the `lifetime_token` for `k`. 
Specifically, the caller must show a *fraction* `q` (a real number greater than zero and not greater than one) of the lifetime token for `k`. (The lifetime token for a lifetime is produced by [`begin_lifetime`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L71) and consumed by [`end_lifetime`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L76).) The lemma also produces a `RawVecInner_share_end_token` that the caller can pass to lemma `end_share_RawVecInner` after `k` has ended to recover exclusive ownership. + +The proof first uses the `open` command to replace the `RawVecInner` chunk in the symbolic heap by its contents. It then uses the `close` command to create a chunk whose predicate is the predicate value `RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)`. It then calls [`borrow`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L117) to create a *full borrow* at lifetime `k` with payload `RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)`. A full borrow denotes exclusive ownership of the payload, but only until the end of the lifetime. This lemma also produces a `borrow_end_token` that allows the caller to recover the payload free and clear after the lifetime has ended. The proof then uses [`full_borrow_into_frac`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L262) to turn the full borrow into a fractured borrow. + +It then uses lemma [`close_Allocator_full_borrow_content`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L852) to wrap ownership of the `alloc` field, and the Allocator value it stores, into a chunk whose predicate is `Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)`. It then creates a full borrow with this predicate value as its payload. Then it uses [`share_Allocator_full_borrow_content`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L860) to produce the `Allocator_share` chunk that the proof needs in order to create the `RawVecInner_share_` chunk. After creating this chunk as well as the `RawVecInner_share_end_token` chunk, the proof finishes by using the `leak` command to turn full ownership of the `RawVecInner_share_` chunk into a *dummy fraction chunk* `[_]RawVecInner_share_(...)`. (This command is called `leak` because it silences the leak error that VeriFast normally generates if chunks are left in the symbolic heap at the end of a function, after consuming the postcondition. Specifically: VeriFast generates a leak error only if *non-dummy-fraction* chunks are left in the symbolic heap.) + +Lemma `end_share_RawVecInner` uses lemmas [`borrow_end`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L122) and [`open_Allocator_full_borrow_content_`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/std/lib.rsspec#L856) to recover exclusive ownership of the RawVecInner object. 
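Schematically, a caller brackets a region that needs only shared access as follows (a condensed sketch, with `v` standing for a local RawVecInner variable; the concrete instance is the `with_capacity_in` proof shown further below):

```rust
//@ let k = begin_lifetime();
//@ share_RawVecInner(k, &v);    // exclusive ownership of `v` becomes shared ownership, plus an end token
// ... code that needs only shared access to `v`, at lifetime `k` ...
//@ end_lifetime(k);             // yields the lifetime_dead_token for `k`
//@ end_share_RawVecInner(&v);   // consumes the end token; exclusive ownership of `v` is recovered
```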
+ +### RawVecInner associated function `capacity` + +```rust +impl RawVecInner { + const fn capacity(&self, elem_size: usize) -> usize + /*@ + req [_]RawVecInner_share_(?k, ?t, self, ?elem_layout, ?alloc_id, ?ptr, ?capacity) &*& + [?q]lifetime_token(k); + @*/ + //@ ens [q]lifetime_token(k) &*& elem_size != elem_layout.size() || result == capacity; + //@ safety_proof { ... } + { + //@ open RawVecInner_share_(k, t, self, elem_layout, alloc_id, ptr, capacity); + //@ open_frac_borrow(k, RawVecInner_frac_borrow_content(self, elem_layout, ptr, capacity), q); + //@ open [?f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); + let r = + if elem_size == 0 { usize::MAX } else { self.cap.as_inner() }; + //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); + //@ close_frac_borrow(f, RawVecInner_frac_borrow_content(self, elem_layout, ptr, capacity)); + r + } +} +``` + +This function's precondition asserts shared ownership of `self` at some lifetime `k`, and a fraction `q` of the lifetime token for `k`. The postcondition asserts the latter as well. (The function does not bother to return the shared ownership; the caller can duplicate it anyway.) + +The proof uses lemma [`open_frac_borrow`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L275) to obtain fractional ownership at some fraction `f` of the fractured borrow's payload. This is sufficient to read (but not write) the `cap` field. Notice that `open_frac_borrow` consumes a fraction, with coefficient given by its third argument, of the lifetime token for `k`. Indeed, accessing a fractured borrow is possible only while its lifetime is alive. Since the proof must return to the caller the fraction `q` of the lifetime token that it received from the caller, it is forced to call lemma [`close_frac_borrow`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L280) to recover the lifetime token fraction. This lemma consumes the payload fraction `f` that was produced by `open_frac_borrow`. + +### Lemma `init_ref_RawVecInner_` + +We first look at this lemma's specification, and then at the two parts of its proof. + +``` +lem init_ref_RawVecInner_(l: *RawVecInner) + nonghost_callers_only + req ref_init_perm(l, ?l0) &*& + [_]RawVecInner_share_(?k, ?t, l0, ?elemLayout, ?alloc_id, ?ptr, ?capacity) &*& + [?q]lifetime_token(k); + ens [q]lifetime_token(k) &*& + [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity) &*& + [_]frac_borrow(k, ref_initialized_(l)); +``` + +This lemma allows the caller to initialize a precreated shared reference `l` to a RawVecInner place at `l0`. (Remember that initializing a shared reference means enabling read access through the shared reference, and disabling write access to the original place.) Given shared ownership of `l0`, it produces shared ownership at `l`. It also produces proof that the shared reference has been initialized, and will remain so for the duration of lifetime `k`, in the form of a `ref_initialized(l)` token wrapped into a predicate value of type `pred(;)` using the [`ref_initialized_`](https://github.com/verifast/verifast/blob/c01806aa35cb4efb20b721a41611406acf0d784c/bin/rust/aliasing.rsspec#L40) predicate constructor, which in turn is wrapped into a fractured borrow at lifetime `k`. 
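At a call site, the lemma's two outputs are typically used as follows (a condensed sketch, with `l0` the original RawVecInner place; the concrete instances are `needs_to_grow` and `with_capacity_in` below): the `RawVecInner_share_` chunk satisfies the callee's precondition, and the fractured `ref_initialized_` borrow is opened around the implicit `create_ref` call and then closed again to recover the lifetime token fraction.

```rust
//@ let l = precreate_ref(l0);                      // precreate the shared reference to the place at l0
//@ init_ref_RawVecInner_(l);                       // produces [_]RawVecInner_share_(k, t, l, ...) and
//@                                                 // [_]frac_borrow(k, ref_initialized_(l))
//@ open_frac_borrow(k, ref_initialized_(l), q/2);
//@ open [?f]ref_initialized_::<RawVecInner<A>>(l)();
// ... the operation that creates a shared reference through `l`, e.g. a method call ...
//@ close [f]ref_initialized_::<RawVecInner<A>>(l)();
//@ close_frac_borrow(f, ref_initialized_(l));
```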
+ +Initializing the shared reference means, among other things, *consuming* fractional ownership of the `(*l0).ptr` and `(*l0).cap` fields. But ownership of these fields is being borrowed at some lifetime. Crucially, when the lifetime ends, the resources that were borrowed must again be available to the lender. Therefore, consuming borrowed resources is allowed only if the consumption is *reversible*. More specifically, if one opens a fractured borrow using lemma [`open_frac_borrow`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L275), the *same* payload predicate that was produced by `open_frac_borrow` will be consumed by [`close_frac_borrow`](https://github.com/verifast/verifast/blob/2e7dd7a6d1aef2c9ffe7a1cedcbe28463f02440b/bin/rust/rust_belt/lifetime_logic.rsspec#L280) when the lifetime token fraction is recovered. It follows that the pair of lemmas `open_frac_borrow`/`close_frac_borrow` is not suitable for the `init_ref_RawVecInner` proof. Instead, we use the more flexible pair [`open_frac_borrow_strong_`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L346)/[`close_frac_borrow_strong_`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L355). Crucially, the payload predicate `Q` consumed by `close_frac_borrow_strong_` need *not* be the same as the predicate `P` produced by `open_frac_borrow_strong_`, provided that a *restoring lemma* can be proven that converts `Q` back into `P`. Conceptually, the lifetime logic infrastructure will call this lemma when the lifetime ends to restore the resources to the form that the lender expects. + +#### First part + +We will now look at the part of the proof that initializes the shared reference and prepares the new payload for the fractured borrow. Then we will look at the part that proves the restoring lemma, closes the fractured borrow, and produces the lemma's postcondition. 
+ +``` +{ + open_ref_init_perm_RawVecInner(l); + open RawVecInner_share_(k, t, l0, elemLayout, alloc_id, ptr, capacity); + std::alloc::init_ref_Allocator_share(k, t, &(*l).alloc); + frac_borrow_sep(k, RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)); + open_frac_borrow_strong_( + k, + sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)), + q); + open [?f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc))(); + open [f]RawVecInner_frac_borrow_content::(l0, elemLayout, ptr, capacity)(); + open [f]ref_initialized_::(&(*l).alloc)(); + let ptr_ = (*l0).ptr; + let cap_ = (*l0).cap; + init_ref_readonly(&(*l).ptr, 1/2); + init_ref_readonly(&(*l).cap, 1/2); + init_ref_padding_RawVecInner(l, 1/2); + { + pred P() = ref_padding_initialized(l); + close [1 - f]P(); + close_ref_initialized_RawVecInner(l); + open P(); + } + close [f/2]RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + close scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + close [f]ref_initialized_::>(l)(); + close scaledp(f, ref_initialized_(l))(); + close sep_(scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l)))(); +``` + +First, the proof converts the `ref_init_perm` for the RawVecInner object into a separate `ref_init_perm` for each of the fields (`ptr`, `cap`, and `alloc`), as well as a `ref_padding_init_perm`, giving permission to initialize the struct's padding bytes. For each struct S defined by the program, VeriFast introduces a lemma `open_ref_init_perm_S`, which performs this conversion. + +Then, it calls lemma [`init_ref_Allocator_share`](https://github.com/verifast/verifast/blob/c01806aa35cb4efb20b721a41611406acf0d784c/bin/rust/std/lib.rsspec#L881) to initialize the `alloc` field. This produces a `[_]frac_borrow(k, ref_initialized_(&(*l).alloc))` chunk, proving that this field has now been initialized, and will remain so until the end of lifetime `k`. Next, the proof calls lemma [`frac_borrow_sep`](https://github.com/verifast/verifast/blob/c01806aa35cb4efb20b721a41611406acf0d784c/bin/rust/rust_belt/lifetime_logic.rsspec#L318) to join the `[_]frac_borrow(k, RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity))` chunk (obtained from the `RawVecInner_share_` chunk) and the `[_]frac_borrow(k, ref_initialized_(&(*l).alloc))` chunk into one `[_]frac_borrow(k, sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)))` chunk (expressed using predicate constructor [`sep_`](https://github.com/verifast/verifast/blob/c01806aa35cb4efb20b721a41611406acf0d784c/bin/rust/rust_belt/lifetime_logic.rsspec#L312)). This is necessary because we need both of these payloads together to obtain the (fraction of a) `ref_initialized(l)` token that we need to produce (wrapped in a `frac_borrow`) to show that the RawVecInner object has been initialized. The proof then calls [`open_frac_borrow_strong_`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L346) to get fractional access at a fraction `f` to the combined payload, in a way that will allow the proof to specify a *different* payload predicate when closing the borrow back up. 
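The effect of `frac_borrow_sep` on the symbolic heap can be summarized as follows (schematic; `p1` and `p2` stand for arbitrary predicate values of type `pred(;)`):

```
// before:  [_]frac_borrow(k, p1)  &*&  [_]frac_borrow(k, p2)
frac_borrow_sep(k, p1, p2);
// after:   [_]frac_borrow(k, sep_(p1, p2))
// Opening the combined borrow yields the same fraction [?f] of p1() and p2() at once,
// which is what this proof needs to combine [f]ref_initialized(&(*l).alloc) with the
// field chunks at l0 into a single [f]ref_initialized(l) chunk.
```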
+ +After opening the payload predicates, the proof stores the values of the `ptr` and `cap` fields into ghost variables, for use in assertions in the second part of the proof. (Expressions that access resources are not allowed inside assertions.) Then, the proof uses lemma [`init_ref_readonly`](https://github.com/verifast/verifast/blob/c01806aa35cb4efb20b721a41611406acf0d784c/bin/rust/aliasing.rsspec#L45) to initialize the `ptr` and `cap` fields, and lemma `init_ref_padding_RawVecInner` to initialize the padding. (Such a lemma is introduced by VeriFast for each struct defined by the program.) Both lemmas take as an argument a coefficient C (which must be greater than 0 and less than 1) such that if a fraction `f` of the ownership of the original place is available, a fraction `C*f` is transferred to the reference being initialized. This enables read access through the new reference, and disables write access to the original place. + +At this point, the proof has the following resources (among others): `ref_initialized(&(*l).ptr)`, `ref_initialized(&(*l).cap)`, `[f]ref_initialized(&(*l).alloc)`, and `ref_padding_initialized(l)`. It then uses `close_ref_initialized_RawVecInner` to consume `[f]ref_initialized(&(*l).ptr)`, `[f]ref_initialized(&(*l).cap)`, `[f]ref_initialized(&(*l).alloc)`, and `[f]ref_padding_initialized(l)` and produce `[f]ref_initialized(l)`. (Such a lemma is introduced by VeriFast for each struct defined by the program.) However, this lemma takes the coefficient for the fractions to be produced and consumed from the coefficient of the `ref_padding_initialized` chunk it finds in the symbolic heap. Therefore, the proof needs to *hide* a fraction `1 - f` of the `ref_padding_initialized` chunk from this lemma. It does so by introducing a local predicate `P` and temporarily wrapping that fraction inside that predicate. + +The proof then wraps up the payloads to be consumed when closing the fractured borrow into the appropriate payload predicates, but scaled by appropriate coefficients using predicate constructor [`scaledp`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L322).
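The hiding idiom used for the `ref_padding_initialized` chunk is worth seeing in isolation (schematic; `c` is a placeholder chunk and `lemma_that_scans_for_c` a placeholder lemma, neither is part of this proof):

```
{
    pred Hidden() = c();       // local predicate wrapping the chunk to be hidden
    close [1 - f]Hidden();     // stash the fraction the next lemma must not see
    lemma_that_scans_for_c();  // picks up whatever fraction of c() is still visible, i.e. [f]c()
    open Hidden();             // recover the hidden [1 - f]c() afterwards
}
```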
+ +#### Second part + +``` + { + pred Ctx() = + ref_padding_end_token(l, l0, f/2) &*& [f/2]struct_RawVecInner_padding(l0) &*& [1 - f]ref_padding_initialized(l) &*& + ref_readonly_end_token(&(*l).ptr, &(*l0).ptr, f/2) &*& [f/2](*l0).ptr |-> ptr_ &*& [1 - f]ref_initialized(&(*l).ptr) &*& + ref_readonly_end_token(&(*l).cap, &(*l0).cap, f/2) &*& [f/2](*l0).cap |-> cap_ &*& [1 - f]ref_initialized(&(*l).cap); + close Ctx(); + produce_lem_ptr_chunk restore_frac_borrow( + Ctx, + sep_(scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l))), + f, + sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)))() { + open sep_(scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l)))(); + open scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + open scaledp(f, ref_initialized_(l))(); + open ref_initialized_::>(l)(); + open Ctx(); + open_ref_initialized_RawVecInner(l); + end_ref_readonly(&(*l).ptr); + end_ref_readonly(&(*l).cap); + end_ref_padding_RawVecInner(l); + close [f]RawVecInner_frac_borrow_content::(l0, elemLayout, ptr, capacity)(); + close [f]ref_initialized_::(&(*l).alloc)(); + close [f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc))(); + } { + close_frac_borrow_strong_(); + } + } + full_borrow_into_frac(k, sep_(scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l)))); + frac_borrow_split(k, scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l))); + frac_borrow_implies_scaled(k, f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + frac_borrow_implies_scaled(k, f, ref_initialized_(l)); + close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); +} +``` + +The proof is now ready to call [`close_frac_borrow_strong_`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L355), except that it still needs to prove the *restoring lemma* that shows that the new payload predicate can be converted back into the original payload predicate when the lifetime ends. More accurately, `close_frac_borrow_strong_` consumes the new payload predicate `Q` as well as a *context* predicate `Ctx`; the restoring lemma must show that `Ctx() &*& Q()` can be converted back into `[f]P()` where `P` is original payload predicate and `f` is the fraction of `P` that was produced by `open_frac_borrow_strong_`. The context predicate captures the resources that are not needed for the new payload but that will be needed to convert the new payload predicate back into the old payload predicate. The present proof uses a local predicate `Ctx` for this context predicate. + +Lemma `close_frac_borrow_strong_` requires the restoring lemma to be proven. More specifically, it requires an `is_restore_frac_borrow` chunk, which serves as evidence that a lemma satisfying *lemma type* [`restore_frac_borrow`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L351) has been proven. More generally, an `is_T` predicate is introduced by VeriFast for each *lemma type* `T`. 
An `is_T` chunk can be produced using a `produce_lem_ptr_chunk T(lemTypeArgs)(lemParams) { Proof } { Client }` ghost command, specifying arguments `lemTypeArgs` for the lemma type parameters, parameter names `lemParams` for the lemma parameters, and two blocks of ghost code: a block `Proof` that proves a lemma that satisfies the lemma type, and a block `Client` that can use the `is_T` chunk. The `is_T` chunk is produced at the start of `Client` and consumed at the end. (Consuming the `is_T` chunk at the end of `Client` is necessary for preventing infinite recursion through lemma pointers.) + +The proof of the restoring lemma opens the payload predicates and the context predicate. It then calls `open_ref_initialized_RawVecInner` to turn the `[f]ref_initialized(l)` chunk back into chunks for the fields and for the padding. It then uses `end_ref_readonly` and `end_ref_padding_RawVecInner` to transfer the fractional ownership at `l` back to `l0`. At that point the original payload predicates can be closed up. + +The `Client` block of the `produce_lem_ptr_chunk` ghost command simply calls `close_frac_borrow_strong_`. This lemma produces a full borrow, which is turned into a fractured borrow using [`full_borrow_into_frac`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L262). The resulting fractured borrow is split into two using [`frac_borrow_split`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L314). At this point, the proof has two fractured borrows, each of whose payloads is scaled by some coefficient. The coefficients are dropped using lemma [`frac_borrow_implies_scaled`](https://github.com/verifast/verifast/blob/e829d5aaa295ed63b278e86ce694914f983f2d65/bin/rust/rust_belt/lifetime_logic.rsspec#L324). + +### RawVecInner associated function `needs_to_grow` + +The original code for this function is as follows: +```rust +impl RawVecInner { + fn needs_to_grow(&self, len: usize, additional: usize, elem_layout: Layout) -> bool { + additional > self.capacity(elem_layout.size()).wrapping_sub(len) + } +} +``` + +The verified version is as follows: +```rust + fn needs_to_grow(&self, len: usize, additional: usize, elem_layout: Layout) -> bool + /*@ + req [_]RawVecInner_share_(?k, ?t, self, ?elemLayout, ?alloc_id, ?ptr, ?capacity) &*& + [?qa]lifetime_token(k); + @*/ + //@ ens [qa]lifetime_token(k) &*& elem_layout != elemLayout || result == (additional > std::num::wrapping_sub_usize(capacity, len)); + //@ safety_proof { ... } + { + //@ let self_ref = precreate_ref(self); + //@ init_ref_RawVecInner_(self_ref); + //@ open_frac_borrow(k, ref_initialized_(self_ref), qa/2); + //@ open [?f]ref_initialized_::>(self_ref)(); + let r = additional > unsafe { &*(self as *const RawVecInner) }.capacity(elem_layout.size()).wrapping_sub(len); + //@ close [f]ref_initialized_::>(self_ref)(); + //@ close_frac_borrow(f, ref_initialized_(self_ref)); + r + } +} +``` + +This function takes a shared reference to a RawVecInner object and calls method `capacity` on it. The original code simply uses `self` as the target expression for the method call; the verified code instead uses `unsafe { &*(self as *const RawVecInner) }`. These two expressions are equivalent (as verified by `refinement-checker`); it's just that the former triggers special treatment by VeriFast for implicit reborrows which is sometimes convenient but not in this case. 
(Specifically, it causes VeriFast to treat the reborrow as a call of lemma [`reborrow_ref_implicit`](https://github.com/verifast/verifast/blob/b6b92b53bb70832774c327ba09d1914a0063a659/bin/rust/rust_belt/aliasing.rsspec#L6), which requires a `[_]frac_borrow(?k, ref_initialized_(x))` resource, where `x` denotes the place being reborrowed, i.e. `self` in this case. However, we do not have this resource available; we only have such a resource for the new shared reference `self_ref` created by the reborrow.) + +The proof precreates a reborrow and initializes it using lemma `init_ref_RawVecInner_` (discussed above). It then opens the fractured borrow containing the `ref_initialized(self_ref)` chunk to satisfy the [`create_ref`](https://github.com/verifast/verifast/blob/b6b92b53bb70832774c327ba09d1914a0063a659/bin/rust/aliasing.rsspec#L87) call substituted by VeriFast for the reborrow operation. + +### RawVecInner associated function `with_capacity_in` + +```rust +impl RawVecInner { + fn with_capacity_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self + /*@ + req thread_token(?t) &*& + Allocator(t, alloc, ?alloc_id) &*& + t == currentThread; + @*/ + /*@ + ens thread_token(t) &*& + RawVecInner(t, result, elem_layout, alloc_id, ?ptr, ?capacity_) &*& + array_at_lft_(alloc_id.lft, ptr, ?n, _) &*& + elem_layout.size() % elem_layout.align() != 0 || n == elem_layout.size() * capacity_ &*& + capacity <= capacity_; + @*/ + //@ safety_proof { ... } + { + match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout) { + Ok(mut this) => { + unsafe { + // Make it more obvious that a subsequent Vec::reserve(capacity) will not allocate. + //@ let k = begin_lifetime(); + //@ share_RawVecInner(k, &this); + //@ let this_ref = precreate_ref(&this); + //@ init_ref_RawVecInner_(this_ref); + //@ open_frac_borrow(k, ref_initialized_(this_ref), 1/2); + //@ open [?f]ref_initialized_::>(this_ref)(); + let needs_to_grow = this.needs_to_grow(0, capacity, elem_layout); + //@ close [f]ref_initialized_::>(this_ref)(); + //@ close_frac_borrow(f, ref_initialized_(this_ref)); + //@ end_lifetime(k); + //@ end_share_RawVecInner(&this); + //@ open_points_to(&this); + + hint::assert_unchecked(!needs_to_grow); + } + this + } + Err(err) => handle_error(err), + } + } +} +``` + +This function calls `try_allocate_in` and then creates a shared reference to the resulting RawVecInner object and calls method `needs_to_grow` on it. The proof creates a lifetime, uses lemma `share_RawVecInner` (discussed above) to temporarily (for the duration of lifetime `k`) convert exclusive ownership of `this` into shared ownership of `this`, then precreates shared reference `this_ref` to `this` and initializes it with lemma `init_ref_RawVecInner_` (discussed above), then opens the fractured borrow containing the evidence that `this_ref` has been initialized, then calls `needs_to_grow`, then closes the fractured borrow back up, ends the lifetime, and calls `end_share_RawVecInner` (discussed above) to recover exclusive ownership of `this`. + +The proof then calls ghost command `open_points_to` to turn the single `points_to` chunk expressing ownership of the `this` variable into a separate one for each field of `this` (plus one expressing ownership of the struct padding). This is needed because the subsequent `this` expression loads from this variable, and when loading from a variable of a struct type whose definition is known, VeriFast expects the individual field chunks, not the single chunk for the entire struct. 
While VeriFast attempts to perform this conversion automatically in some cases, unfortunately this automation logic does not yet cover all cases. + +### RawVec associated function `with_capacity_in` + +```rust +impl RawVec { + pub(crate) fn with_capacity_in(capacity: usize, alloc: A) -> Self + //@ req thread_token(?t) &*& Allocator(t, alloc, ?alloc_id) &*& t == currentThread; + /*@ + ens thread_token(t) &*& + RawVec(t, result, alloc_id, ?ptr, ?capacity_) &*& + array_at_lft_(alloc_id.lft, ptr, capacity_, _) &*& + capacity <= capacity_; + @*/ + /*@ + safety_proof { + std::alloc::open_Allocator_own(alloc); + let result = call(); + close >.own(_t, result); + } + @*/ + { + //@ size_align::(); + let r = Self { + inner: RawVecInner::with_capacity_in(capacity, alloc, T::LAYOUT), + _marker: PhantomData, + }; + //@ close RawVec(t, r, alloc_id, ?ptr, ?capacity_); + //@ u8s_at_lft__to_array_at_lft_(ptr, capacity_); + r + } +} +``` + +Recall the definition of predicate `RawVec`: +``` +pred RawVec(t: thread_id_t, self: RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = + RawVecInner(t, self.inner, Layout::new::, alloc_id, ?ptr_, capacity) &*& ptr == ptr_ as *T; +``` + +This function's proof first calls lemma [`size_align`](https://github.com/verifast/verifast/blob/b6b92b53bb70832774c327ba09d1914a0063a659/bin/rust/prelude_core.rsspec#L51) to produce the fact that the size of a Rust type is always a multiple of its alignment. This satisfies a precondition of `RawVecInner::with_capacity_in`. After calling that function, it wraps the `RawVecInner` chunk into a `RawVec` chunk and finally calls lemma [`u8s_at_lft__to_array_at_lft_`](https://github.com/verifast/verifast/blob/b6b92b53bb70832774c327ba09d1914a0063a659/bin/rust/rust_belt/points_to_at_lifetime.rsspec#L80) to convert the `array_at_lft_::(alloc_id.lft, ptr as *u8, capacity * T::LAYOUT.size(), _)` chunk into an `array_at_lft_::(alloc_id.lft, ptr, capacity, _)` chunk. (Since `ptr` is of type `*T`, VeriFast infers the type argument of the `array_at_lft_` assertion in `RawVec::with_capacity_in`'s postcondition.) + +This proof also proves semantic well-typedness of this function. Its generated specification is as follows: +```rust +fn with_capacity_in(capacity: usize, alloc: A) -> Self +//@ req thread_token(?t) &*& .own(t, alloc) &*& t == currentThread; +//@ ens thread_token(t) &*& >.own(t, result); +``` + +The proof defines `RawVec.own` as follows: +``` +pred >.own(t, self_) = + RawVec(t, self_, ?alloc_id, ?ptr, ?capacity) &*& array_at_lft_(alloc_id.lft, ptr, capacity, _); +``` + +That is, we define ownership of a RawVec value, as far as Rust's type system is concerned, to include not just ownership of the allocator and permission to reallocate and deallocate the allocation (if any), but also ownership of the allocated memory region itself. This is necessary because dropping a RawVec deallocates the allocation, and VeriFast treats dropping a value like a call of [`std::ptr::drop_in_place`](https://github.com/verifast/verifast/blob/b6b92b53bb70832774c327ba09d1914a0063a659/bin/rust/std/lib.rsspec#L488), which requires only `.own`.
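The consequence of this choice can be seen in a small hypothetical client (the function below and its contract are illustrative, not part of the proof): any context in which a RawVec value goes out of scope must hand over the full `own` chunk, including the buffer.

```rust
fn consume_raw_vec<T, A: Allocator>(v: RawVec<T, A>)
//@ req thread_token(?t) &*& t == currentThread &*& <RawVec<T, A>>.own(t, v);
//@ ens thread_token(t);
{
    // `v` is dropped here. VeriFast verifies the implicit drop like a call of
    // std::ptr::drop_in_place, which consumes <RawVec<T, A>>.own(t, v); since RawVec's
    // drop method deallocates the buffer, that chunk must include the array_at_lft_
    // ownership of the allocated region, exactly as defined above.
}
```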
+ +The safety proof, as given in the `safety_proof` clause, is straightforward: first, the `.own` chunk is turned into an `Allocator` chunk using lemma [`open_Allocator_own`](https://github.com/verifast/verifast/blob/b6b92b53bb70832774c327ba09d1914a0063a659/bin/rust/std/lib.rsspec#L846), then the implementation proof is called, and then the RawVec and `array_at_lft_` chunks produced by that proof are bundled up into a `>.own` chunk using the `close` ghost command. + +#### `>.own` proof obligations + +Defining the `.own` predicate for a struct S defined by the program being verified generally entails a number of proof obligations. Specifically, if S is Send, VeriFast looks for a lemma `S_send` that proves that +`own(t, v)` is insensitive to the thread identifier `t`: + +``` +lem RawVec_send(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Send(typeid(RawVec)) == true &*& RawVec_own::(?t0, ?v); + ens type_interp::() &*& type_interp::() &*& RawVec_own::(t1, v); +{ + open >.own(t0, v); + open RawVec(t0, v, ?alloc_id, ?ptr, ?capacity); + RawVecInner_send_::(t1); + close RawVec(t1, v, alloc_id, ptr, capacity); + close >.own(t1, v); +} +``` + +This proof uses auxiliary lemma `RawVecInner_send_`, which in turn uses [`Allocator_send`](https://github.com/verifast/verifast/blob/735de6650f65d138ad180359d8cdf368369be5d2/bin/rust/std/lib.rsspec#L838): +``` +lem RawVecInner_send_(t1: thread_id_t) + req type_interp::() &*& is_Send(typeid(A)) == true &*& RawVecInner::(?t0, ?self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& RawVecInner::(t1, self_, elemLayout, alloc_id, ptr, capacity); +{ + open RawVecInner(t0, self_, elemLayout, alloc_id, ptr, capacity); + std::alloc::Allocator_send(t1, self_.alloc); + close RawVecInner(t1, self_, elemLayout, alloc_id, ptr, capacity); +} +``` + +Furthermore, since Rust considers type `RawVec` to be *covariant* in T and A, VeriFast looks for a lemma `RawVec_own_mono` that proves that `>.own(t, v)` is preserved by weakening of T and A. However, the treatment of this proof obligation is [work in progress](https://github.com/verifast/verifast/issues/610). + +(Also, if a struct S does not implement Drop and its field types are not understood by VeriFast to be trivially droppable, VeriFast looks for a lemma `S_drop` that proves that `.own(t, v)` implies ownership of the non-trivially-droppable fields of `v`. This is necessary for the safety of dropping an S value. However, `RawVec` does implement Drop; the safety of dropping a RawVec value follows from the safety of its `drop` method.) 
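For a struct that does not implement Drop, the shape of such a drop lemma is simple; here is a minimal hypothetical instance (the struct `Holder`, its `own` predicate, and the lemma are all illustrative, not part of this proof):

```
// Hypothetical: struct Holder<A: Allocator> { alloc: A }
// with  pred <Holder<A>>.own(t, self_) = <A>.own(t, self_.alloc);

lem Holder_drop<A: Allocator>()
    req Holder_own::<A>(?t, ?v);
    ens <A>.own(t, v.alloc);
{
    open Holder_own::<A>(t, v);
}
```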
+ +### RawVec associated function `capacity` + +```rust +impl RawVec { + pub(crate) const fn capacity(&self) -> usize + //@ req [_]RawVec_share_(?k, ?t, self, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); + //@ ens [q]lifetime_token(k) &*& result == capacity; + /*@ + safety_proof { + open >.share(?k, _t, self); + call(); + } + @*/ + { + //@ open RawVec_share_(k, t, self, alloc_id, ptr, capacity); + //@ let inner_ref = precreate_ref(&(*self).inner); + //@ init_ref_RawVecInner_(inner_ref); + //@ open_frac_borrow(k, ref_initialized_(inner_ref), q/2); + //@ open [?f]ref_initialized_::>(inner_ref)(); + let r = self.inner.capacity(size_of::()); + //@ close [f]ref_initialized_::>(inner_ref)(); + //@ close_frac_borrow(f, ref_initialized_(inner_ref)); + r + } +} +``` + +This function requires shared ownership of a RawVec value, expressed using predicate `RawVec_share_`, which is a trivial wrapper around `RawVecInner_share_` (discussed above): +``` +pred RawVec_share_(k: lifetime_t, t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = + [_]RawVecInner_share_(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); +``` + +This proof includes a `safety_proof` clause that proves semantic well-typedness, i.e. compliance with the following generated specification: +```rust +pub(crate) const fn capacity(&self) -> usize +//@ req thread_token(?_t) &*& [?q]lifetime_token(?k) &*& [_]>.share(k, _t, self) &*& _t == currentThread; +//@ ens thread_token(_t) &*& [q]lifetime_token(k); +``` +which can be obtained from the following more uniform specification by unfolding `<&'a RawVec>.own(_t, self)`: +```rust +pub(crate) const fn capacity<'a>(&'a self) -> usize +//@ req thread_token(?_t) &*& [?q]lifetime_token('a) &*& <&'a RawVec>.own(_t, self); +//@ ens thread_token(_t) &*& [q]lifetime_token('a); +``` +Indeed, exclusive ownership of a shared reference is defined as shared ownership of the referent: `<&'a T>.own(t, l) = [_].share('a, t, l)`. + +The proof defines the meaning of shared ownership of a RawVec object as follows: +``` +pred >.share(k, t, l) = [_]RawVec_share_(k, t, l, ?alloc_id, ?ptr, ?capacity); +``` + +#### `>.share` proof obligations + +If a proof defines the `.share` predicate for a struct S, a number of proof obligations are imposed upon it. 
First of all, VeriFast looks for a lemma `S_share_full` that proves that ownership of a +full borrow of the ownership of a RawVec object can be converted into shared ownership of the object: +``` +lem RawVec_share_full(k: lifetime_t, t: thread_id_t, l: *RawVec) + req type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& full_borrow(k, RawVec_full_borrow_content::(t, l)) &*& [?q]lifetime_token(k) &*& ref_origin(l) == l; + ens type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& [_]RawVec_share::(k, t, l) &*& [q]lifetime_token(k); +{ + let klong = open_full_borrow_strong_m(k, RawVec_full_borrow_content::(t, l), q); + open RawVec_full_borrow_content::(t, l)(); + let self_ = *l; + open >.own(t, self_); + open RawVec(t, self_, ?alloc_id, ?ptr, ?capacity); + open RawVecInner(t, self_.inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + { + pred Ctx() = + if capacity * std::mem::size_of::() == 0 { + true + } else { + Layout::new::().repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr as *u8, allocLayout) + } &*& + array_at_lft_(alloc_id.lft, ptr, capacity, _); + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity)), klong, RawVec_full_borrow_content(t, l))() { + open Ctx(); + open sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity))(); + std::alloc::open_Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id); + open RawVecInner_frac_borrow_content::(&(*l).inner, Layout::new::(), ptr as *u8, capacity)(); + close RawVecInner(t, (*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + close RawVec(t, *l, alloc_id, ptr, capacity); + close >.own(t, *l); + close RawVec_full_borrow_content::(t, l)(); + } { + close Ctx(); + std::alloc::close_Allocator_full_borrow_content_(t, &(*l).inner.alloc); + close RawVecInner_frac_borrow_content::(&(*l).inner, Layout::new::(), ptr as *u8, capacity)(); + close sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity))(); + close_full_borrow_strong_m(klong, RawVec_full_borrow_content(t, l), sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity))); + full_borrow_mono(klong, k, sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity))); + full_borrow_split_m(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity)); + } + } + std::alloc::share_Allocator_full_borrow_content_m(k, t, &(*l).inner.alloc, alloc_id); + full_borrow_into_frac_m(k, RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity)); + close RawVecInner_share_::(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + leak RawVecInner_share_::(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + close RawVec_share_::(k, t, l, alloc_id, ptr, capacity); + leak RawVec_share_::(k, t, l, alloc_id, ptr, capacity); + close RawVec_share::(k, t, l); + leak RawVec_share::(k, t, l); +} 
+``` + +Secondly, VeriFast looks for a lemma `S_share_mono` that proves that shared ownership is preserved by weakening (i.e. shortening) of the lifetime. (Indeed, `.share(k, t, l)` asserts shared ownership of the object at `l` until the end of lifetime `k`, but does *not* assert that shared ownership necessarily ends when `k` ends.) + +``` +lem RawVec_share_mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVec) + req type_interp::() &*& type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]RawVec_share::(k, t, l); + ens type_interp::() &*& type_interp::() &*& [_]RawVec_share::(k1, t, l); +{ + open RawVec_share::(k, t, l); + RawVec_share__mono(k, k1, t, l); + close RawVec_share::(k1, t, l); + leak RawVec_share::(k1, t, l); +} + +lem RawVec_share__mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVec) + req type_interp::() &*& type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]RawVec_share_::(k, t, l, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& type_interp::() &*& [_]RawVec_share_::(k1, t, l, alloc_id, ptr, capacity); +{ + open RawVec_share_(k, t, l, alloc_id, ptr, capacity); + RawVecInner_share__mono(k, k1, t, &(*l).inner); + close RawVec_share_(k1, t, l, alloc_id, ptr, capacity); + leak RawVec_share_(k1, t, l, alloc_id, ptr, capacity); +} + +lem RawVecInner_share__mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVecInner) + req type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]RawVecInner_share_::(k, t, l, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& [_]RawVecInner_share_::(k1, t, l, elemLayout, alloc_id, ptr, capacity); +{ + open [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + std::alloc::Allocator_share_mono::(k, k1, t, &(*l).alloc); + frac_borrow_mono(k, k1, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + close RawVecInner_share_::(k1, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_::(k1, t, l, elemLayout, alloc_id, ptr, capacity); +} +``` + +Finally, if S is Sync, VeriFast looks for a lemma `S_sync` proving that `.share(k, t, l)` is insensitive to the thread identifier `t`: +``` +lem RawVec_sync(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Sync(typeid(RawVec)) == true &*& [_]RawVec_share::(?k, ?t0, ?l); + ens type_interp::() &*& type_interp::() &*& [_]RawVec_share::(k, t1, l); +{ + open RawVec_share::(k, t0, l); + RawVec_sync_::(t1); + close RawVec_share::(k, t1, l); + leak RawVec_share::(k, t1, l); +} + +lem RawVec_sync_(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Sync(typeid(RawVec)) == true &*& [_]RawVec_share_::(?k, ?t0, ?l, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& type_interp::() &*& [_]RawVec_share_::(k, t1, l, alloc_id, ptr, capacity); +{ + open RawVec_share_::(k, t0, l, alloc_id, ptr, capacity); + RawVecInner_sync_::(t1); + close RawVec_share_::(k, t1, l, alloc_id, ptr, capacity); + leak RawVec_share_::(k, t1, l, alloc_id, ptr, capacity); +} + +lem RawVecInner_sync_(t1: thread_id_t) + req type_interp::() &*& is_Sync(typeid(A)) == true &*& [_]RawVecInner_share_::(?k, ?t0, ?l, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& [_]RawVecInner_share_::(k, t1, l, elemLayout, alloc_id, ptr, capacity); +{ + open RawVecInner_share_(k, t0, l, elemLayout, alloc_id, ptr, capacity); + std::alloc::Allocator_sync::(t1); + close RawVecInner_share_(k, t1, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_(k, t1, l, elemLayout, alloc_id, ptr, capacity); 
+} +``` + +(Actually, VeriFast should also check that `.share` is consistent with the *variance* of type S; that is, it should look for a lemma that proves that `.share` is preserved by weakening of S. Checking for such a proof obligation is [future work](https://github.com/verifast/verifast/issues/610).) + +### RawVec method `drop` + +```rust +unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec { + /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. + fn drop(&mut self) + //@ req thread_token(?t) &*& t == currentThread &*& >.full_borrow_content(t, self)(); + //@ ens thread_token(t) &*& (*self).inner |-> ?inner &*& >.own(t, inner); + { + //@ open >.full_borrow_content(t, self)(); + //@ open >.own(t, *self); + //@ open RawVec(t, *self, ?alloc_id, ?ptr, ?capacity); + //@ array_at_lft__to_u8s_at_lft_(ptr, capacity); + //@ size_align::(); + // SAFETY: We are in a Drop impl, self.inner will not be used again. + unsafe { self.inner.deallocate(T::LAYOUT) } + } +} +``` + +(Remember that `.full_borrow_content(t, l)()` is defined as `*l |-> ?v &*& .own(t, v)`, i.e. ownership of the place `l` and the value it stores.) + +This function calls `RawVecInner::deallocate`, which the proof verifies against the following specification: +```rust +impl RawVecInner { + unsafe fn deallocate(&mut self, elem_layout: Layout) + /*@ + req thread_token(?t) &*& + *self |-> ?self0 &*& + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr_, ?capacity) &*& + elem_layout.size() % elem_layout.align() == 0 &*& + array_at_lft_(alloc_id.lft, ptr_, capacity * elem_layout.size(), _); + @*/ + //@ ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); +} +``` + +The written specification for `drop` matches its generated specification. More generally, the generated specification for a `Drop::drop` method requires ownership of the object being dropped, and ensures ownership +of each field subobject. (The latter reflects the fact that after calling `drop` on a struct value, Rust drops each of its fields.) + +For this specification to hold, we are forced to define `>.own` such that it does *not* assert permission to deallocate or reallocate the allocation, nor ownership of the allocated memory region itself: +``` +pred >.own(t, self_) = + .own(t, self_.alloc) &*& + RawVecInner0(self_, ?elemLayout, ?ptr, ?capacity); + +pred RawVecInner0(self: RawVecInner, elemLayout: Layout, ptr: *u8, capacity: usize) = + capacity == logical_capacity(self.cap, elemLayout.size()) &*& + ptr == self.ptr.as_non_null_ptr().as_ptr() &*& + ptr as usize % elemLayout.align() == 0 &*& + pointer_within_limits(ptr) == true &*& + if capacity * elemLayout.size() == 0 { + true + } else { + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) + }; +``` +This predicate asserts only ownership of the allocator, as well as the facts expressing validity of the RawVecInner object. ## Caveats @@ -9,7 +1030,7 @@ First of all, this proof was performed with the following VeriFast command-line - `-ignore_unwind_paths`: This proof ignores code that is reachable only when unwinding. - `-allow_assume`: This proof uses a number of `assume` ghost statements and `assume_correct` clauses. These must be carefully audited. -Secondly, since VeriFast uses the `rustc` frontend, which assumes a particular target architecture, VeriFast's results hold only for the used Rust toolchain's target architecture. When VeriFast reports "0 errors found" for a Rust program, it always reports the targeted architecture as well (e.g. 
`0 errors found (2149 statements verified) (target: x86_64-unknown-linux-gnu (LP64))`). +Secondly, since VeriFast uses the `rustc` frontend, which assumes a particular target architecture and particular [configuration options](https://doc.rust-lang.org/reference/conditional-compilation.html#set-configuration-options) (such as [`debug_assertions`](https://doc.rust-lang.org/reference/conditional-compilation.html#debug_assertions)), VeriFast's results hold only for the used Rust toolchain's target architecture and the configuration options used. When VeriFast reports "0 errors found" for a Rust program, it always reports the targeted architecture as well (e.g. `0 errors found (2149 statements verified) (target: x86_64-unknown-linux-gnu (LP64))`). Thirdly, VeriFast has a number of [known unsoundnesses](https://github.com/verifast/verifast/issues?q=is%3Aissue+is%3Aopen+label%3Aunsoundness) (reasons why VeriFast might in some cases incorrectly accept a program), including the following: - VeriFast does not yet fully verify compliance with Rust's [pointer aliasing rules](https://doc.rust-lang.org/reference/behavior-considered-undefined.html). diff --git a/verifast-proofs/alloc/raw_vec/mod.rs/verified/raw_vec.rs b/verifast-proofs/alloc/raw_vec/mod.rs/verified/raw_vec.rs index 9accd346de30a..07e33add35cea 100644 --- a/verifast-proofs/alloc/raw_vec/mod.rs/verified/raw_vec.rs +++ b/verifast-proofs/alloc/raw_vec/mod.rs/verified/raw_vec.rs @@ -67,8 +67,7 @@ enum AllocInit { type Cap = core::num::niche_types::UsizeNoHighBit; -//@ fix Cap_new_(n: usize) -> UsizeNoHighBit { UsizeNoHighBit::new_(n) } -//@ fix Cap_as_inner_(cap: UsizeNoHighBit) -> usize { UsizeNoHighBit::as_inner_(cap) } +//@ fix Cap::new(n: usize) -> UsizeNoHighBit { UsizeNoHighBit::new(n) } const ZERO_CAP: Cap = unsafe { Cap::new_unchecked(0) }; @@ -77,7 +76,7 @@ const ZERO_CAP: Cap = unsafe { Cap::new_unchecked(0) }; /// # Safety: cap must be <= `isize::MAX`. 
unsafe fn new_cap(cap: usize) -> Cap //@ req std::mem::size_of::() == 0 || cap <= isize::MAX; -//@ ens result == if std::mem::size_of::() == 0 { Cap_new_(0) } else { Cap_new_(cap) }; +//@ ens result == if std::mem::size_of::() == 0 { Cap::new(0) } else { Cap::new(cap) }; //@ on_unwind_ens false; { if T::IS_ZST { ZERO_CAP } else { unsafe { Cap::new_unchecked(cap) } } @@ -132,84 +131,112 @@ struct RawVecInner { /*@ fix logical_capacity(cap: UsizeNoHighBit, elem_size: usize) -> usize { - if elem_size == 0 { usize::MAX } else { Cap_as_inner_(cap) } + if elem_size == 0 { usize::MAX } else { cap.as_inner() } } -pred RawVecInner0(alloc_id: alloc_id_t, u: Unique, cap: UsizeNoHighBit, elemLayout: Layout; ptr: *u8, capacity: usize) = - capacity == logical_capacity(cap, Layout::size_(elemLayout)) &*& - ptr == NonNull_ptr(Unique::non_null_(u)) &*& - ptr != 0 &*& - ptr as usize % Layout::align_(elemLayout) == 0 &*& +pred RawVecInner(t: thread_id_t, self: RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = + Allocator(t, self.alloc, alloc_id) &*& + capacity == logical_capacity(self.cap, elemLayout.size()) &*& + ptr == self.ptr.as_non_null_ptr().as_ptr() &*& + ptr as usize % elemLayout.align() == 0 &*& pointer_within_limits(ptr) == true &*& - if capacity * Layout::size_(elemLayout) == 0 { + if capacity * elemLayout.size() == 0 { true } else { - capacity * Layout::size_(elemLayout) <= isize::MAX - isize::MAX % Layout::align_(elemLayout) &*& - alloc_block_in(alloc_id, ptr, Layout::from_size_align_(capacity * Layout::size_(elemLayout), Layout::align_(elemLayout))) + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr, allocLayout) }; -pred RawVecInner(t: thread_id_t, self: RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = - Allocator(t, self.alloc, alloc_id) &*& - RawVecInner0(alloc_id, self.ptr, self.cap, elemLayout, ptr, capacity); - -pred >.own(t, self_) = .own(t, self_.alloc); +lem RawVecInner_send_(t1: thread_id_t) + req type_interp::() &*& is_Send(typeid(A)) == true &*& RawVecInner::(?t0, ?self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& RawVecInner::(t1, self_, elemLayout, alloc_id, ptr, capacity); +{ + open RawVecInner(t0, self_, elemLayout, alloc_id, ptr, capacity); + std::alloc::Allocator_send(t1, self_.alloc); + close RawVecInner(t1, self_, elemLayout, alloc_id, ptr, capacity); +} + +pred RawVecInner0(self: RawVecInner, elemLayout: Layout, ptr: *u8, capacity: usize) = + capacity == logical_capacity(self.cap, elemLayout.size()) &*& + ptr == self.ptr.as_non_null_ptr().as_ptr() &*& + ptr as usize % elemLayout.align() == 0 &*& + pointer_within_limits(ptr) == true &*& + if capacity * elemLayout.size() == 0 { + true + } else { + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) + }; + +pred >.own(t, self_) = + .own(t, self_.alloc) &*& + RawVecInner0(self_, ?elemLayout, ?ptr, ?capacity); lem RawVecInner_drop() - req raw_vec::RawVecInner_own::(?_t, ?_v); + req RawVecInner_own::(?_t, ?_v); ens std::ptr::Unique_own::(_t, _v.ptr) &*& std::num::niche_types::UsizeNoHighBit_own(_t, _v.cap) &*& .own(_t, _v.alloc); { open RawVecInner_own::(_t, _v); + open RawVecInner0(_, _, _, _); std::ptr::close_Unique_own::(_t, _v.ptr); std::num::niche_types::close_UsizeNoHighBit_own(_t, _v.cap); } lem RawVecInner_own_mono() - req type_interp::() &*& type_interp::() &*& raw_vec::RawVecInner_own::(?t, ?v) &*& is_subtype_of::() == true; - ens type_interp::() &*& 
type_interp::() &*& raw_vec::RawVecInner_own::(t, raw_vec::RawVecInner:: { ptr: upcast(v.ptr), cap: upcast(v.cap), alloc: upcast(v.alloc) }); + req type_interp::() &*& type_interp::() &*& RawVecInner_own::(?t, ?v) &*& is_subtype_of::() == true; + ens type_interp::() &*& type_interp::() &*& RawVecInner_own::(t, RawVecInner:: { ptr: upcast(v.ptr), cap: upcast(v.cap), alloc: upcast(v.alloc) }); { - assume(false); + assume(false); // https://github.com/verifast/verifast/issues/610 } lem RawVecInner_send(t1: thread_id_t) - req type_interp::() &*& is_Send(typeid(A)) == true &*& raw_vec::RawVecInner_own::(?t0, ?v); - ens type_interp::() &*& raw_vec::RawVecInner_own::(t1, v); + req type_interp::() &*& is_Send(typeid(A)) == true &*& RawVecInner_own::(?t0, ?v); + ens type_interp::() &*& RawVecInner_own::(t1, v); { open RawVecInner_own::(t0, v); Send::send::(t0, t1, v.alloc); close RawVecInner_own::(t1, v); } -lem RawVecInner_inv() +lem_auto RawVecInner_inv() req RawVecInner::(?t, ?self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); - ens RawVecInner::(t, self_, elemLayout, alloc_id, ptr, capacity) &*& ptr != 0 &*& ptr as usize % Layout::align_(elemLayout) == 0 &*& 0 <= capacity &*& capacity <= usize::MAX; + ens RawVecInner::(t, self_, elemLayout, alloc_id, ptr, capacity) &*& + ptr != 0 &*& ptr as usize % elemLayout.align() == 0 &*& + 0 <= capacity &*& capacity <= usize::MAX; { open RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); - open RawVecInner0(alloc_id, self_.ptr, self_.cap, elemLayout, ptr, capacity); std::num::niche_types::UsizeNoHighBit_inv(self_.cap); - close RawVecInner0(alloc_id, self_.ptr, self_.cap, elemLayout, ptr, capacity); close RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); } lem RawVecInner_inv2() req RawVecInner::(?t, ?self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); - ens RawVecInner::(t, self_, elemLayout, alloc_id, ptr, capacity) &*& pointer_within_limits(ptr) == true &*& ptr as usize % Layout::align_(elemLayout) == 0 &*& 0 <= capacity &*& capacity <= usize::MAX &*& if Layout::size_(elemLayout) == 0 { capacity == usize::MAX } else { capacity <= isize::MAX }; + ens RawVecInner::(t, self_, elemLayout, alloc_id, ptr, capacity) &*& + pointer_within_limits(ptr) == true &*& ptr as usize % elemLayout.align() == 0 &*& + 0 <= capacity &*& capacity <= usize::MAX &*& + if elemLayout.size() == 0 { capacity == usize::MAX } else { capacity <= isize::MAX }; { open RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); - open RawVecInner0(alloc_id, self_.ptr, self_.cap, elemLayout, ptr, capacity); std::num::niche_types::UsizeNoHighBit_inv(self_.cap); - close RawVecInner0(alloc_id, self_.ptr, self_.cap, elemLayout, ptr, capacity); close RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); } -pred_ctor RawVecInner_frac_borrow_content(l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize)(;) = +pred_ctor RawVecInner_frac_borrow_content(l: *RawVecInner, elemLayout: Layout, ptr: *u8, capacity: usize)(;) = struct_RawVecInner_padding(l) &*& - (*l).ptr |-> ?ptr_ &*& - (*l).cap |-> ?cap_ &*& - RawVecInner0(alloc_id, ptr_, cap_, elemLayout, ptr, capacity); + (*l).ptr |-> ?u &*& + (*l).cap |-> ?cap &*& + capacity == logical_capacity(cap, elemLayout.size()) &*& + ptr == u.as_non_null_ptr().as_ptr() &*& + ptr as usize % elemLayout.align() == 0 &*& + pointer_within_limits(ptr) == true &*& + if capacity * elemLayout.size() == 0 { + true + } else { + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) + }; pred 
RawVecInner_share_(k: lifetime_t, t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = [_]std::alloc::Allocator_share(k, t, &(*l).alloc, alloc_id) &*& - [_]frac_borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)) &*& ptr != 0; + [_]frac_borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)) &*& ptr != 0; lem RawVecInner_share__inv() req [_]RawVecInner_share_::(?k, ?t, ?l, ?elemLayout, ?alloc_id, ?ptr, ?capacity); @@ -218,21 +245,50 @@ lem RawVecInner_share__inv() open RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); } +lem RawVecInner_share__mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVecInner) + req type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]RawVecInner_share_::(k, t, l, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& [_]RawVecInner_share_::(k1, t, l, elemLayout, alloc_id, ptr, capacity); +{ + open [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + std::alloc::Allocator_share_mono::(k, k1, t, &(*l).alloc); + frac_borrow_mono(k, k1, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + close RawVecInner_share_::(k1, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_::(k1, t, l, elemLayout, alloc_id, ptr, capacity); +} + +lem RawVecInner_sync_(t1: thread_id_t) + req type_interp::() &*& is_Sync(typeid(A)) == true &*& [_]RawVecInner_share_::(?k, ?t0, ?l, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& [_]RawVecInner_share_::(k, t1, l, elemLayout, alloc_id, ptr, capacity); +{ + open RawVecInner_share_(k, t0, l, elemLayout, alloc_id, ptr, capacity); + std::alloc::Allocator_sync::(t1); + close RawVecInner_share_(k, t1, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_(k, t1, l, elemLayout, alloc_id, ptr, capacity); +} + pred RawVecInner_share_end_token(k: lifetime_t, t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = borrow_end_token(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)) &*& - borrow_end_token(k, RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)); + borrow_end_token(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)) &*& + if capacity * elemLayout.size() == 0 { + true + } else { + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr, allocLayout) + }; lem share_RawVecInner(k: lifetime_t, l: *RawVecInner) nonghost_callers_only - req [?q]lifetime_token(k) &*& *l |-> ?self_ &*& RawVecInner(?t, self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); - ens [q]lifetime_token(k) &*& [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity) &*& RawVecInner_share_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); + req [?q]lifetime_token(k) &*& + *l |-> ?self_ &*& + RawVecInner(?t, self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens [q]lifetime_token(k) &*& + [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity) &*& + RawVecInner_share_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); { - RawVecInner_inv::(); open RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); - open_points_to(l); - close RawVecInner_frac_borrow_content::(l, elemLayout, alloc_id, ptr, capacity)(); - borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)); - full_borrow_into_frac(k, RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, 
capacity)); + close RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + full_borrow_into_frac(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); std::alloc::close_Allocator_full_borrow_content_(t, &(*l).alloc); borrow(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)); std::alloc::share_Allocator_full_borrow_content_(k, t, &(*l).alloc, alloc_id); @@ -249,24 +305,97 @@ lem end_share_RawVecInner(l: *RawVecInner) open RawVecInner_share_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); borrow_end(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)); std::alloc::open_Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id); - borrow_end(k, RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)); - open RawVecInner_frac_borrow_content::(l, elemLayout, alloc_id, ptr, capacity)(); + borrow_end(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); close RawVecInner(t, *l, elemLayout, alloc_id, ptr, capacity); } -lem init_ref_RawVecInner(l: *RawVecInner) +lem init_ref_RawVecInner_(l: *RawVecInner) nonghost_callers_only - req ref_init_perm(l, ?l0) &*& [_]RawVecInner_share_(?k, ?t, l0, ?elemLayout, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); - ens [q]lifetime_token(k) &*& [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity) &*& [_]frac_borrow(k, ref_initialized_(l)); + req ref_init_perm(l, ?l0) &*& + [_]RawVecInner_share_(?k, ?t, l0, ?elemLayout, ?alloc_id, ?ptr, ?capacity) &*& + [?q]lifetime_token(k); + ens [q]lifetime_token(k) &*& + [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity) &*& + [_]frac_borrow(k, ref_initialized_(l)); { open_ref_init_perm_RawVecInner(l); open RawVecInner_share_(k, t, l0, elemLayout, alloc_id, ptr, capacity); std::alloc::init_ref_Allocator_share(k, t, &(*l).alloc); - frac_borrow_sep(k, RawVecInner_frac_borrow_content(l0, elemLayout, alloc_id, ptr, capacity), ref_initialized_(&(*l).alloc)); - let klong = open_frac_borrow_strong(k, sep_(RawVecInner_frac_borrow_content(l0, elemLayout, alloc_id, ptr, capacity), ref_initialized_(&(*l).alloc)), q); - open [?f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, alloc_id, ptr, capacity), ref_initialized_(&(*l).alloc))(); - open [f]RawVecInner_frac_borrow_content::(l0, elemLayout, alloc_id, ptr, capacity)(); - assert [f]RawVecInner0(alloc_id, ?ptr_, ?cap_, elemLayout, ptr, capacity); + frac_borrow_sep(k, RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)); + open_frac_borrow_strong_( + k, + sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)), + q); + open [?f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc))(); + open [f]RawVecInner_frac_borrow_content::(l0, elemLayout, ptr, capacity)(); + open [f]ref_initialized_::(&(*l).alloc)(); + let ptr_ = (*l0).ptr; + let cap_ = (*l0).cap; + init_ref_readonly(&(*l).ptr, 1/2); + init_ref_readonly(&(*l).cap, 1/2); + init_ref_padding_RawVecInner(l, 1/2); + { + pred P() = ref_padding_initialized(l); + close [1 - f]P(); + close_ref_initialized_RawVecInner(l); + open P(); + } + close [f/2]RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + close scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + close 
[f]ref_initialized_::>(l)(); + close scaledp(f, ref_initialized_(l))(); + close sep_(scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l)))(); + + { + pred Ctx() = + ref_padding_end_token(l, l0, f/2) &*& [f/2]struct_RawVecInner_padding(l0) &*& [1 - f]ref_padding_initialized(l) &*& + ref_readonly_end_token(&(*l).ptr, &(*l0).ptr, f/2) &*& [f/2](*l0).ptr |-> ptr_ &*& [1 - f]ref_initialized(&(*l).ptr) &*& + ref_readonly_end_token(&(*l).cap, &(*l0).cap, f/2) &*& [f/2](*l0).cap |-> cap_ &*& [1 - f]ref_initialized(&(*l).cap); + close Ctx(); + produce_lem_ptr_chunk restore_frac_borrow( + Ctx, + sep_(scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l))), + f, + sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)))() { + open sep_(scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l)))(); + open scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + open scaledp(f, ref_initialized_(l))(); + open ref_initialized_::>(l)(); + open Ctx(); + open_ref_initialized_RawVecInner(l); + end_ref_readonly(&(*l).ptr); + end_ref_readonly(&(*l).cap); + end_ref_padding_RawVecInner(l); + close [f]RawVecInner_frac_borrow_content::(l0, elemLayout, ptr, capacity)(); + close [f]ref_initialized_::(&(*l).alloc)(); + close [f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc))(); + } { + close_frac_borrow_strong_(); + } + } + full_borrow_into_frac(k, sep_(scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l)))); + frac_borrow_split(k, scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l))); + frac_borrow_implies_scaled(k, f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + frac_borrow_implies_scaled(k, f, ref_initialized_(l)); + close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); +} + +lem init_ref_RawVecInner_m(l: *RawVecInner) + req type_interp::() &*& atomic_mask(Nlft) &*& ref_init_perm(l, ?l0) &*& [_]RawVecInner_share_(?k, ?t, l0, ?elemLayout, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); + ens type_interp::() &*& atomic_mask(Nlft) &*& [q]lifetime_token(k) &*& [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity) &*& [_]frac_borrow(k, ref_initialized_(l)); +{ + open_ref_init_perm_RawVecInner(l); + open RawVecInner_share_(k, t, l0, elemLayout, alloc_id, ptr, capacity); + std::alloc::init_ref_Allocator_share_m(k, t, &(*l).alloc); + frac_borrow_sep(k, RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)); + let klong = open_frac_borrow_strong_m(k, sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)), q); + open [?f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc))(); + open [f]RawVecInner_frac_borrow_content::(l0, elemLayout, ptr, capacity)(); + let ptr_ = (*l0).ptr; + let cap_ = (*l0).cap; open [f]ref_initialized_::(&(*l).alloc)(); std::ptr::init_ref_Unique(&(*l).ptr, 1/2); std::num::niche_types::init_ref_UsizeNoHighBit(&(*l).cap, 1/2); @@ -282,67 +411,212 @@ lem init_ref_RawVecInner(l: 
*RawVecInner) [f/2]ref_initialized(&(*l).alloc) &*& ref_padding_end_token(l, l0, f/2) &*& [f/2]struct_RawVecInner_padding(l0) &*& [1 - f/2]ref_padding_initialized(l) &*& std::ptr::end_ref_Unique_token(&(*l).ptr, &(*l0).ptr, f/2) &*& [f/2](*l0).ptr |-> ptr_ &*& [1 - f/2]ref_initialized(&(*l).ptr) &*& - std::num::niche_types::end_ref_UsizeNoHighBit_token(&(*l).cap, &(*l0).cap, f/2) &*& [f/2](*l0).cap |-> cap_ &*& [1 - f/2]ref_initialized(&(*l).cap) &*& - [f/2]RawVecInner0(alloc_id, ptr_, cap_, elemLayout, ptr, capacity); - produce_lem_ptr_chunk frac_borrow_convert_strong(Ctx, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity))), klong, f, sep_(RawVecInner_frac_borrow_content(l0, elemLayout, alloc_id, ptr, capacity), ref_initialized_(&(*l).alloc)))() { - open scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)))(); - open sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity))(); + std::num::niche_types::end_ref_UsizeNoHighBit_token(&(*l).cap, &(*l0).cap, f/2) &*& [f/2](*l0).cap |-> cap_ &*& [1 - f/2]ref_initialized(&(*l).cap); + produce_lem_ptr_chunk frac_borrow_convert_strong(Ctx, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))), klong, f, sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)))() { + open scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))(); + open sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); open ref_initialized_::>(l)(); - open RawVecInner_frac_borrow_content::(l, elemLayout, alloc_id, ptr, capacity)(); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); open_ref_initialized_RawVecInner(l); open Ctx(); std::ptr::end_ref_Unique(&(*l).ptr); std::num::niche_types::end_ref_UsizeNoHighBit(&(*l).cap); end_ref_padding_RawVecInner(l); - merge_fractions RawVecInner0(alloc_id, ptr_, cap_, elemLayout, _, _); - close [f]RawVecInner_frac_borrow_content::(l0, elemLayout, alloc_id, ptr, capacity)(); + close [f]RawVecInner_frac_borrow_content::(l0, elemLayout, ptr, capacity)(); close [f]ref_initialized_::(&(*l).alloc)(); - close [f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, alloc_id, ptr, capacity), ref_initialized_(&(*l).alloc))(); + close [f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc))(); } { close Ctx(); close [f/2]ref_initialized_::>(l)(); - close [f/2]RawVecInner_frac_borrow_content::(l, elemLayout, alloc_id, ptr, capacity)(); - close [f/2]sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity))(); - close scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)))(); - close_frac_borrow_strong(klong, sep_(RawVecInner_frac_borrow_content(l0, elemLayout, alloc_id, ptr, capacity), ref_initialized_(&(*l).alloc)), scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)))); - full_borrow_mono(klong, k, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)))); + close [f/2]RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + close [f/2]sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + 
close scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))(); + close_frac_borrow_strong_m(); + full_borrow_mono(klong, k, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))); } } - full_borrow_into_frac(k, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)))); - frac_borrow_implies_scaled(k, f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity))); - frac_borrow_split(k, ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, alloc_id, ptr, capacity)); + full_borrow_into_frac_m(k, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))); + frac_borrow_implies_scaled(k, f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))); + frac_borrow_split(k, ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); } +pred >.share(k, t, l) = [_]RawVecInner_share_(k, t, l, _, _, _, _); + +lem RawVecInner_share_mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVecInner) + req type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]RawVecInner_share::(k, t, l); + ens type_interp::() &*& [_]RawVecInner_share::(k1, t, l); +{ + open RawVecInner_share::(k, t, l); + RawVecInner_share__mono(k, k1, t, l); + close RawVecInner_share::(k1, t, l); + leak RawVecInner_share::(k1, t, l); +} + +lem RawVecInner_share_full(k: lifetime_t, t: thread_id_t, l: *RawVecInner) + req type_interp::() &*& atomic_mask(MaskTop) &*& full_borrow(k, RawVecInner_full_borrow_content::(t, l)) &*& [?q]lifetime_token(k) &*& ref_origin(l) == l; + ens type_interp::() &*& atomic_mask(MaskTop) &*& [_]RawVecInner_share::(k, t, l) &*& [q]lifetime_token(k); +{ + let klong = open_full_borrow_strong_m(k, RawVecInner_full_borrow_content(t, l), q); + open RawVecInner_full_borrow_content::(t, l)(); + open >.own(t, *l); + std::alloc::open_Allocator_own((*l).alloc); + assert Allocator(_, _, ?alloc_id); + open RawVecInner0(_, ?elemLayout, ?ptr, ?capacity); + { + pred Ctx() = true; + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), klong, RawVecInner_full_borrow_content(t, l))() { + open Ctx(); + open sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + std::alloc::open_Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + std::alloc::Allocator_to_own((*l).alloc); + close RawVecInner0(*l, elemLayout, ptr, capacity); + close >.own(t, *l); + close RawVecInner_full_borrow_content::(t, l)(); + } { + close Ctx(); + std::alloc::close_Allocator_full_borrow_content_(t, &(*l).alloc); + close RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + close sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + close_full_borrow_strong_m(klong, RawVecInner_full_borrow_content(t, l), sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), 
RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))); + full_borrow_mono(klong, k, sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))); + full_borrow_split_m(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + } + } + std::alloc::share_Allocator_full_borrow_content_m(k, t, &(*l).alloc, alloc_id); + full_borrow_into_frac_m(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + close RawVecInner_share_::(k, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_::(k, t, l, elemLayout, alloc_id, ptr, capacity); + close RawVecInner_share::(k, t, l); + leak RawVecInner_share::(k, t, l); +} + +lem init_ref_RawVecInner(l: *RawVecInner) + req type_interp::() &*& atomic_mask(Nlft) &*& ref_init_perm(l, ?l0) &*& [_]RawVecInner_share::(?k, ?t, l0) &*& [?q]lifetime_token(k); + ens type_interp::() &*& atomic_mask(Nlft) &*& [q]lifetime_token(k) &*& [_]RawVecInner_share::(k, t, l) &*& [_]frac_borrow(k, ref_initialized_(l)); +{ + open RawVecInner_share::(k, t, l0); + open_ref_init_perm_RawVecInner(l); + open RawVecInner_share_(k, t, l0, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + std::alloc::init_ref_Allocator_share_m(k, t, &(*l).alloc); + frac_borrow_sep(k, RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)); + let klong = open_frac_borrow_strong_m(k, sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)), q); + open [?f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc))(); + open [f]RawVecInner_frac_borrow_content::(l0, elemLayout, ptr, capacity)(); + let ptr_ = (*l0).ptr; + let cap_ = (*l0).cap; + open [f]ref_initialized_::(&(*l).alloc)(); + std::ptr::init_ref_Unique(&(*l).ptr, 1/2); + std::num::niche_types::init_ref_UsizeNoHighBit(&(*l).cap, 1/2); + init_ref_padding_RawVecInner(l, 1/2); + { + pred P() = ref_padding_initialized(l); + close [1 - f/2]P(); + close_ref_initialized_RawVecInner(l); + open P(); + } + { + pred Ctx() = + [f/2]ref_initialized(&(*l).alloc) &*& + ref_padding_end_token(l, l0, f/2) &*& [f/2]struct_RawVecInner_padding(l0) &*& [1 - f/2]ref_padding_initialized(l) &*& + std::ptr::end_ref_Unique_token(&(*l).ptr, &(*l0).ptr, f/2) &*& [f/2](*l0).ptr |-> ptr_ &*& [1 - f/2]ref_initialized(&(*l).ptr) &*& + std::num::niche_types::end_ref_UsizeNoHighBit_token(&(*l).cap, &(*l0).cap, f/2) &*& [f/2](*l0).cap |-> cap_ &*& [1 - f/2]ref_initialized(&(*l).cap); + produce_lem_ptr_chunk frac_borrow_convert_strong(Ctx, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))), klong, f, sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc)))() { + open scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))(); + open sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + open ref_initialized_::>(l)(); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + open_ref_initialized_RawVecInner(l); + open Ctx(); + std::ptr::end_ref_Unique(&(*l).ptr); + std::num::niche_types::end_ref_UsizeNoHighBit(&(*l).cap); + end_ref_padding_RawVecInner(l); + close [f]RawVecInner_frac_borrow_content::(l0, elemLayout, ptr, capacity)(); + close [f]ref_initialized_::(&(*l).alloc)(); + close 
[f]sep_(RawVecInner_frac_borrow_content(l0, elemLayout, ptr, capacity), ref_initialized_(&(*l).alloc))(); + } { + close Ctx(); + close [f/2]ref_initialized_::>(l)(); + close [f/2]RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + close [f/2]sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))(); + close scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))(); + close_frac_borrow_strong_m(); + full_borrow_mono(klong, k, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))); + } + } + full_borrow_into_frac_m(k, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))); + frac_borrow_implies_scaled(k, f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))); + frac_borrow_split(k, ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + close RawVecInner_share::(k, t, l); + leak RawVecInner_share(k, t, l); +} + +lem RawVecInner_sync(t1: thread_id_t) + req type_interp::() &*& is_Sync(typeid(A)) == true &*& [_]RawVecInner_share::(?k, ?t0, ?l); + ens type_interp::() &*& [_]RawVecInner_share::(k, t1, l); +{ + open RawVecInner_share::(k, t0, l); + RawVecInner_sync_::(t1); + close RawVecInner_share::(k, t1, l); + leak RawVecInner_share(k, t1, l); +} + @*/ /*@ pred RawVec(t: thread_id_t, self: RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = - RawVecInner(t, self.inner, Layout::new_::, alloc_id, ?ptr_, capacity) &*& ptr == ptr_ as *T; + RawVecInner(t, self.inner, Layout::new::, alloc_id, ?ptr_, capacity) &*& ptr == ptr_ as *T; pred >.own(t, self_) = RawVec(t, self_, ?alloc_id, ?ptr, ?capacity) &*& array_at_lft_(alloc_id.lft, ptr, capacity, _); lem RawVec_own_mono() - req type_interp::() &*& type_interp::() &*& type_interp::() &*& type_interp::() &*& raw_vec::RawVec_own::(?t, ?v) &*& is_subtype_of::() == true &*& is_subtype_of::() == true; - ens type_interp::() &*& type_interp::() &*& type_interp::() &*& type_interp::() &*& raw_vec::RawVec_own::(t, raw_vec::RawVec:: { inner: upcast(v.inner) }); + req type_interp::() &*& type_interp::() &*& type_interp::() &*& type_interp::() &*& RawVec_own::(?t, ?v) &*& is_subtype_of::() == true &*& is_subtype_of::() == true; + ens type_interp::() &*& type_interp::() &*& type_interp::() &*& type_interp::() &*& RawVec_own::(t, RawVec:: { inner: upcast(v.inner) }); { - assume(false); + assume(false); // https://github.com/verifast/verifast/issues/610 } lem RawVec_send(t1: thread_id_t) - req type_interp::() &*& type_interp::() &*& is_Send(typeid(T)) == true &*& raw_vec::RawVec_own::(?t0, ?v); - ens type_interp::() &*& type_interp::() &*& raw_vec::RawVec_own::(t1, v); + req type_interp::() &*& type_interp::() &*& is_Send(typeid(RawVec)) == true &*& RawVec_own::(?t0, ?v); + ens type_interp::() &*& type_interp::() &*& RawVec_own::(t1, v); { - assume(false); // TODO! 
+ open >.own(t0, v); + open RawVec(t0, v, ?alloc_id, ?ptr, ?capacity); + RawVecInner_send_::(t1); + close RawVec(t1, v, alloc_id, ptr, capacity); + close >.own(t1, v); } pred RawVec_share_(k: lifetime_t, t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = - [_]RawVecInner_share_(k, t, &(*l).inner, Layout::new_::(), alloc_id, ptr as *u8, capacity); + [_]RawVecInner_share_(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + +lem RawVec_share__mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVec) + req type_interp::() &*& type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]RawVec_share_::(k, t, l, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& type_interp::() &*& [_]RawVec_share_::(k1, t, l, alloc_id, ptr, capacity); +{ + open RawVec_share_(k, t, l, alloc_id, ptr, capacity); + RawVecInner_share__mono(k, k1, t, &(*l).inner); + close RawVec_share_(k1, t, l, alloc_id, ptr, capacity); + leak RawVec_share_(k1, t, l, alloc_id, ptr, capacity); +} + +lem RawVec_sync_(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Sync(typeid(RawVec)) == true &*& [_]RawVec_share_::(?k, ?t0, ?l, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& type_interp::() &*& [_]RawVec_share_::(k, t1, l, alloc_id, ptr, capacity); +{ + open RawVec_share_::(k, t0, l, alloc_id, ptr, capacity); + RawVecInner_sync_::(t1); + close RawVec_share_::(k, t1, l, alloc_id, ptr, capacity); + leak RawVec_share_::(k, t1, l, alloc_id, ptr, capacity); +} pred RawVec_share_end_token(k: lifetime_t, t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = - RawVecInner_share_end_token(k, t, &(*l).inner, Layout::new_::(), alloc_id, ptr as *u8, capacity); + RawVecInner_share_end_token(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); lem share_RawVec(k: lifetime_t, l: *RawVec) nonghost_callers_only @@ -368,14 +642,14 @@ lem end_share_RawVec(l: *RawVec) close RawVec(t, *l, alloc_id, ptr, capacity); } -lem init_ref_RawVec(l: *RawVec) +lem init_ref_RawVec_(l: *RawVec) nonghost_callers_only req ref_init_perm(l, ?l0) &*& [_]RawVec_share_(?k, ?t, l0, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); ens [q]lifetime_token(k) &*& [_]RawVec_share_(k, t, l, alloc_id, ptr, capacity) &*& [_]frac_borrow(k, ref_initialized_(l)); { open RawVec_share_(k, t, l0, alloc_id, ptr, capacity); open_ref_init_perm_RawVec(l); - init_ref_RawVecInner(&(*l).inner); + init_ref_RawVecInner_(&(*l).inner); close RawVec_share_(k, t, l, alloc_id, ptr, capacity); leak RawVec_share_(k, t, l, alloc_id, ptr, capacity); @@ -402,6 +676,112 @@ lem init_ref_RawVec(l: *RawVec) } } +pred >.share(k, t, l) = [_]RawVec_share_(k, t, l, ?alloc_id, ?ptr, ?capacity); + +lem RawVec_share_mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVec) + req type_interp::() &*& type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]RawVec_share::(k, t, l); + ens type_interp::() &*& type_interp::() &*& [_]RawVec_share::(k1, t, l); +{ + open RawVec_share::(k, t, l); + RawVec_share__mono(k, k1, t, l); + close RawVec_share::(k1, t, l); + leak RawVec_share::(k1, t, l); +} + +lem RawVec_share_full(k: lifetime_t, t: thread_id_t, l: *RawVec) + req type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& full_borrow(k, RawVec_full_borrow_content::(t, l)) &*& [?q]lifetime_token(k) &*& ref_origin(l) == l; + ens type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& [_]RawVec_share::(k, t, l) &*& [q]lifetime_token(k); +{ + let klong = 
open_full_borrow_strong_m(k, RawVec_full_borrow_content::(t, l), q); + open RawVec_full_borrow_content::(t, l)(); + let self_ = *l; + open >.own(t, self_); + open RawVec(t, self_, ?alloc_id, ?ptr, ?capacity); + open RawVecInner(t, self_.inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + { + pred Ctx() = + if capacity * std::mem::size_of::() == 0 { + true + } else { + Layout::new::().repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr as *u8, allocLayout) + } &*& + array_at_lft_(alloc_id.lft, ptr, capacity, _); + produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity)), klong, RawVec_full_borrow_content(t, l))() { + open Ctx(); + open sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity))(); + std::alloc::open_Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id); + open RawVecInner_frac_borrow_content::(&(*l).inner, Layout::new::(), ptr as *u8, capacity)(); + close RawVecInner(t, (*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + close RawVec(t, *l, alloc_id, ptr, capacity); + close >.own(t, *l); + close RawVec_full_borrow_content::(t, l)(); + } { + close Ctx(); + std::alloc::close_Allocator_full_borrow_content_(t, &(*l).inner.alloc); + close RawVecInner_frac_borrow_content::(&(*l).inner, Layout::new::(), ptr as *u8, capacity)(); + close sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity))(); + close_full_borrow_strong_m(klong, RawVec_full_borrow_content(t, l), sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity))); + full_borrow_mono(klong, k, sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity))); + full_borrow_split_m(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).inner.alloc, alloc_id), RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity)); + } + } + std::alloc::share_Allocator_full_borrow_content_m(k, t, &(*l).inner.alloc, alloc_id); + full_borrow_into_frac_m(k, RawVecInner_frac_borrow_content(&(*l).inner, Layout::new::(), ptr as *u8, capacity)); + close RawVecInner_share_::(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + leak RawVecInner_share_::(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + close RawVec_share_::(k, t, l, alloc_id, ptr, capacity); + leak RawVec_share_::(k, t, l, alloc_id, ptr, capacity); + close RawVec_share::(k, t, l); + leak RawVec_share::(k, t, l); +} + +lem RawVec_sync(t1: thread_id_t) + req type_interp::() &*& type_interp::() &*& is_Sync(typeid(RawVec)) == true &*& [_]RawVec_share::(?k, ?t0, ?l); + ens type_interp::() &*& type_interp::() &*& [_]RawVec_share::(k, t1, l); +{ + open RawVec_share::(k, t0, l); + RawVec_sync_::(t1); + close RawVec_share::(k, t1, l); + leak RawVec_share::(k, t1, l); +} + +lem init_ref_RawVec(l: *RawVec) + req type_interp::() &*& type_interp::() &*& atomic_mask(Nlft) &*& ref_init_perm(l, ?l0) &*& [_]RawVec_share::(?k, ?t, l0) &*& [?q]lifetime_token(k); + ens type_interp::() &*& 
type_interp::() &*& atomic_mask(Nlft) &*& [q]lifetime_token(k) &*& [_]RawVec_share::(k, t, l) &*& [_]frac_borrow(k, ref_initialized_(l)); +{ + open RawVec_share::(k, t, l0); + open RawVec_share_(k, t, l0, ?alloc_id, ?ptr, ?capacity); + open_ref_init_perm_RawVec(l); + init_ref_RawVecInner_m(&(*l).inner); + close RawVec_share_(k, t, l, alloc_id, ptr, capacity); + leak RawVec_share_(k, t, l, alloc_id, ptr, capacity); + close RawVec_share::(k, t, l); + leak RawVec_share::(k, t, l); + + let klong = open_frac_borrow_strong_m(k, ref_initialized_(&(*l).inner), q); + open [?f]ref_initialized_::>(&(*l).inner)(); + close_ref_initialized_RawVec(l, f); + close [f]ref_initialized_::>(l)(); + { + pred Ctx() = true; + produce_lem_ptr_chunk frac_borrow_convert_strong(Ctx, scaledp(f, ref_initialized_(l)), klong, f, ref_initialized_(&(*l).inner))() { + open Ctx(); + open scaledp(f, ref_initialized_(l))(); + open ref_initialized_::>(l)(); + open_ref_initialized_RawVec(l); + close [f]ref_initialized_::>(&(*l).inner)(); + } { + close Ctx(); + close scaledp(f, ref_initialized_(l))(); + close_frac_borrow_strong_m(); + full_borrow_mono(klong, k, scaledp(f, ref_initialized_(l))); + full_borrow_into_frac_m(k, scaledp(f, ref_initialized_(l))); + frac_borrow_implies_scaled(k, f, ref_initialized_(l)); + } + } +} + @*/ impl RawVec { @@ -502,11 +882,10 @@ impl RawVec { @*/ { //@ close exists(std::mem::size_of::()); - //@ std::alloc::Layout_inv(Layout::new_::()); + //@ std::alloc::Layout_inv(Layout::new::()); //@ std::alloc::is_valid_layout_size_of_align_of::(); - //@ std::ptr::Alignment_as_nonzero__new_(std::mem::align_of::()); + //@ std::ptr::Alignment_as_nonzero_new(std::mem::align_of::()); let r = Self { inner: RawVecInner::new_in(alloc, Alignment::of::()), _marker: PhantomData }; - //@ RawVecInner_inv::(); //@ close RawVec::(t, r, alloc_id, ?ptr, ?capacity); //@ u8s_at_lft__to_array_at_lft_(ptr, capacity); r @@ -519,7 +898,12 @@ impl RawVec { #[track_caller] pub(crate) fn with_capacity_in(capacity: usize, alloc: A) -> Self //@ req thread_token(?t) &*& Allocator(t, alloc, ?alloc_id) &*& t == currentThread; - //@ ens thread_token(t) &*& RawVec(t, result, alloc_id, ?ptr, ?capacity_) &*& array_at_lft_(alloc_id.lft, ptr, capacity_, _) &*& capacity <= capacity_; + /*@ + ens thread_token(t) &*& + RawVec(t, result, alloc_id, ?ptr, ?capacity_) &*& + array_at_lft_(alloc_id.lft, ptr, capacity_, _) &*& + capacity <= capacity_; + @*/ /*@ safety_proof { std::alloc::open_Allocator_own(alloc); @@ -533,7 +917,6 @@ impl RawVec { inner: RawVecInner::with_capacity_in(capacity, alloc, T::LAYOUT), _marker: PhantomData, }; - //@ RawVecInner_inv::(); //@ close RawVec(t, r, alloc_id, ?ptr, ?capacity_); //@ u8s_at_lft__to_array_at_lft_(ptr, capacity_); r @@ -583,7 +966,7 @@ impl RawVec { //@ let k = begin_lifetime(); //@ share_RawVec(k, &self); //@ let self_ref = precreate_ref(&self); - //@ init_ref_RawVec(self_ref); + //@ init_ref_RawVec_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); let capacity = self.capacity(); @@ -605,7 +988,7 @@ impl RawVec { //@ close_points_to(&me); //@ share_RawVec(k0, &me); //@ let me_ref0 = precreate_ref(&me); - //@ init_ref_RawVec(me_ref0); + //@ init_ref_RawVec_(me_ref0); //@ open_frac_borrow(k0, ref_initialized_(me_ref0), 1/2); //@ open [?f0]ref_initialized_::>(me_ref0)(); let me_ref = > as core::ops::Deref>::deref(&me); @@ -629,7 +1012,8 @@ impl RawVec { //@ std::mem::array_at_lft__to_array_at_lft_MaybeUninit(slice.ptr as *T); //@ open 
RawVec(_, _, _, _, _); //@ open RawVecInner(_, _, _, _, _, _); - //@ open RawVecInner0(_, _, _, _, _, _); + //@ size_align::(); + //@ if len * std::mem::size_of::() != 0 { std::alloc::Layout_repeat_some_size_aligned(Layout::new::(), len); } Box::from_raw_in(slice, alloc) } } @@ -653,8 +1037,8 @@ impl RawVec { if capacity * std::mem::size_of::() == 0 { true } else { - capacity * std::mem::size_of::() <= isize::MAX - isize::MAX % std::mem::align_of::() &*& - alloc_block_in(alloc_id, ptr as *u8, Layout::from_size_align_(capacity * std::mem::size_of::(), std::mem::align_of::())) + Layout::new::().repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr as *u8, allocLayout) }; @*/ //@ ens RawVec(t, result, alloc_id, ptr, ?capacity_) &*& capacity <= capacity_; @@ -662,15 +1046,22 @@ impl RawVec { // SAFETY: Precondition passed to the caller unsafe { let ptr = ptr.cast(); - //@ std::alloc::Layout_inv(Layout::new_::()); + //@ std::alloc::Layout_inv(Layout::new::()); /*@ if 1 <= std::mem::size_of::() { - div_rem_nonneg(isize::MAX, std::mem::align_of::()); - mul_mono_l(1, std::mem::size_of::(), capacity); + if capacity != 0 { + mul_zero(capacity, std::mem::size_of::()); + assert Layout::new::().repeat(capacity) == some(pair(?allocLayout, ?stride)); + std::alloc::Layout_repeat_some(Layout::new::(), capacity); + div_rem_nonneg(isize::MAX, std::mem::align_of::()); + mul_mono_l(1, std::mem::size_of::(), capacity); + mul_mono_l(std::mem::size_of::(), stride, capacity); + std::alloc::Layout_inv(allocLayout); + } } @*/ let capacity = new_cap::(capacity); - //@ close exists(Layout::new_::()); + //@ close exists(Layout::new::()); let r = Self { inner: RawVecInner::from_raw_parts_in(ptr, capacity, alloc), _marker: PhantomData, @@ -689,30 +1080,34 @@ impl RawVec { pub(crate) unsafe fn from_nonnull_in(ptr: NonNull, capacity: usize, alloc: A) -> Self /*@ req Allocator(?t, alloc, ?alloc_id) &*& - NonNull_ptr(ptr) as usize % std::mem::align_of::() == 0 &*& - pointer_within_limits(NonNull_ptr(ptr)) == true &*& + ptr.as_ptr() as usize % std::mem::align_of::() == 0 &*& + pointer_within_limits(ptr.as_ptr()) == true &*& if capacity * std::mem::size_of::() == 0 { true } else { - capacity * std::mem::size_of::() <= isize::MAX - isize::MAX % std::mem::align_of::() &*& - alloc_block_in(alloc_id, NonNull_ptr(ptr) as *u8, Layout::from_size_align_(capacity * std::mem::size_of::(), std::mem::align_of::())) + Layout::new::().repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr.as_ptr() as *u8, allocLayout) }; @*/ - //@ ens RawVec(t, result, alloc_id, NonNull_ptr(ptr), ?capacity_) &*& capacity <= capacity_; + //@ ens RawVec(t, result, alloc_id, ptr.as_ptr(), ?capacity_) &*& capacity <= capacity_; { // SAFETY: Precondition passed to the caller unsafe { let ptr = ptr.cast(); - //@ std::ptr::NonNull_ptr_nonnull(ptr); - //@ std::alloc::Layout_inv(Layout::new_::()); + //@ std::alloc::Layout_inv(Layout::new::()); /*@ - if 1 <= std::mem::size_of::() { + if 1 <= std::mem::size_of::() && capacity != 0 { + mul_zero(capacity, std::mem::size_of::()); + assert Layout::new::().repeat(capacity) == some(pair(?allocLayout, ?stride)); + std::alloc::Layout_repeat_some(Layout::new::(), capacity); + std::alloc::Layout_inv(allocLayout); div_rem_nonneg(isize::MAX, std::mem::align_of::()); mul_mono_l(1, std::mem::size_of::(), capacity); + mul_mono_l(std::mem::size_of::(), stride, capacity); } @*/ let capacity = new_cap::(capacity); - //@ close exists(Layout::new_::()); + //@ close 
exists(Layout::new::()); let r = Self { inner: RawVecInner::from_nonnull_in(ptr, capacity, alloc), _marker: PhantomData }; //@ close RawVec(t, r, alloc_id, _, _); r @@ -724,13 +1119,18 @@ impl RawVec { /// be careful. #[inline] pub(crate) const fn ptr(&self) -> *mut T - //@ req [_]RawVec_share_(?k, ?t, self, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k) &*& [_]frac_borrow(k, ref_initialized_(self)); + //@ req [_]RawVec_share_(?k, ?t, self, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); //@ ens [q]lifetime_token(k) &*& result == ptr; - //@ safety_proof { assume(false); } + /*@ + safety_proof { + open >.share(?k, _t, self); + call(); + } + @*/ { //@ open RawVec_share_(k, t, self, alloc_id, ptr, capacity); //@ let inner_ref = precreate_ref(&(*self).inner); - //@ init_ref_RawVecInner(inner_ref); + //@ init_ref_RawVecInner_(inner_ref); //@ open_frac_borrow(k, ref_initialized_(inner_ref), q/2); //@ open [?f]ref_initialized_::>(inner_ref)(); let r = self.inner.ptr(); @@ -751,11 +1151,16 @@ impl RawVec { pub(crate) const fn capacity(&self) -> usize //@ req [_]RawVec_share_(?k, ?t, self, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); //@ ens [q]lifetime_token(k) &*& result == capacity; - //@ safety_proof { assume(false); } + /*@ + safety_proof { + open >.share(?k, _t, self); + call(); + } + @*/ { //@ open RawVec_share_(k, t, self, alloc_id, ptr, capacity); //@ let inner_ref = precreate_ref(&(*self).inner); - //@ init_ref_RawVecInner(inner_ref); + //@ init_ref_RawVecInner_(inner_ref); //@ open_frac_borrow(k, ref_initialized_(inner_ref), q/2); //@ open [?f]ref_initialized_::>(inner_ref)(); let r = self.inner.capacity(size_of::()); @@ -857,7 +1262,6 @@ impl RawVec { /*@ match r { Result::Ok(u) => { - RawVecInner_inv::(); close RawVec(t, self1, alloc_id, ?ptr1, ?capacity1); u8s_at_lft__to_array_at_lft_(ptr1, capacity1); } @@ -945,7 +1349,6 @@ impl RawVec { /*@ match r { Result::Ok(u) => { - RawVecInner_inv::(); close RawVec(t, self1, alloc_id, ?ptr1, ?capacity1); u8s_at_lft__to_array_at_lft_(ptr1, capacity1); } @@ -979,13 +1382,14 @@ impl RawVec { unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec { /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. fn drop(&mut self) - //@ req thread_token(?t) &*& t == currentThread &*& RawVec_full_borrow_content::(t, self)(); + //@ req thread_token(?t) &*& t == currentThread &*& >.full_borrow_content(t, self)(); //@ ens thread_token(t) &*& (*self).inner |-> ?inner &*& >.own(t, inner); { - //@ open RawVec_full_borrow_content::(t, self)(); + //@ open >.full_borrow_content(t, self)(); //@ open >.own(t, *self); //@ open RawVec(t, *self, ?alloc_id, ?ptr, ?capacity); //@ array_at_lft__to_u8s_at_lft_(ptr, capacity); + //@ size_align::(); // SAFETY: We are in a Drop impl, self.inner will not be used again. 
unsafe { self.inner.deallocate(T::LAYOUT) } } @@ -994,22 +1398,53 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec { impl RawVecInner { #[inline] const fn new_in(alloc: A, align: Alignment) -> Self - //@ req exists::(?elemSize) &*& thread_token(?t) &*& Allocator(t, alloc, ?alloc_id) &*& std::alloc::is_valid_layout(elemSize, NonZero::get_(Alignment::as_nonzero_(align))) == true; - //@ ens thread_token(t) &*& RawVecInner(t, result, Layout::from_size_align_(elemSize, NonZero::get_(Alignment::as_nonzero_(align))), alloc_id, ?ptr, ?capacity) &*& array_at_lft_(alloc_id.lft, ptr, capacity * elemSize, _) &*& capacity * elemSize == 0; + /*@ + req exists::(?elemSize) &*& + thread_token(?t) &*& + Allocator(t, alloc, ?alloc_id) &*& + std::alloc::is_valid_layout(elemSize, align.as_nonzero().get()) == true; + @*/ + /*@ + ens thread_token(t) &*& + RawVecInner(t, result, Layout::from_size_align(elemSize, align.as_nonzero().get()), alloc_id, ?ptr, ?capacity) &*& + array_at_lft_(alloc_id.lft, ptr, capacity * elemSize, _) &*& + capacity * elemSize == 0; + @*/ //@ on_unwind_ens false; - //@ safety_proof { assume(false); } + /*@ + safety_proof { + leak .own(_t, align); + close exists::(0); + std::alloc::open_Allocator_own(alloc); + std::ptr::Alignment_is_power_of_2(align); + if align.as_nonzero().get() <= isize::MAX { + div_rem_nonneg(isize::MAX, align.as_nonzero().get()); + } else { + div_rem_nonneg_unique(isize::MAX, align.as_nonzero().get(), 0, isize::MAX); + } + let result = call(); + open RawVecInner(_t, result, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + std::num::niche_types::UsizeNoHighBit_inv(result.cap); + std::alloc::Layout_inv(elemLayout); + mul_zero(capacity, elemLayout.size()); + assert elemLayout == Layout::from_size_align(0, align.as_nonzero().get()); + std::alloc::Layout_size_Layout_from_size_align(0, align.as_nonzero().get()); + assert elemLayout.size() == 0; + assert capacity * elemLayout.size() == 0; + std::alloc::Allocator_to_own(result.alloc); + close RawVecInner0(result, elemLayout, ptr, capacity); + close >.own(_t, result); + leak array_at_lft_(_, _, _, _); + } + @*/ { let ptr = Unique::from_non_null(NonNull::without_provenance(align.as_nonzero())); // `cap: 0` means "unallocated". zero-sized types are ignored. 
let cap = ZERO_CAP; - //@ let layout = Layout::from_size_align_(elemSize, NonZero::get_(Alignment::as_nonzero_(align))); let r = Self { ptr, cap, alloc }; - //@ div_rem_nonneg_unique(NonZero::get_(Alignment::as_nonzero_(align)), NonZero::get_(Alignment::as_nonzero_(align)), 1, 0); - //@ std::num::NonZero_usize_limits(Alignment::as_nonzero_(align)); - //@ close RawVecInner0(alloc_id, ptr, cap, layout, _, logical_capacity(cap, elemSize)); + //@ div_rem_nonneg_unique(align.as_nonzero().get(), align.as_nonzero().get(), 1, 0); + //@ let layout = Layout::from_size_align(elemSize, align.as_nonzero().get()); //@ close RawVecInner(t, r, layout, alloc_id, _, _); - //@ std::num::NonZero_usize_limits(Alignment::as_nonzero_(align)); - //@ close array::(NonNull_ptr(Unique::non_null_(ptr)), 0, nil); r } @@ -1017,9 +1452,33 @@ impl RawVecInner { #[inline] #[track_caller] fn with_capacity_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self - //@ req thread_token(?t) &*& Allocator(t, alloc, ?alloc_id) &*& t == currentThread &*& Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0; - //@ ens thread_token(t) &*& RawVecInner(t, result, elem_layout, alloc_id, ?ptr, ?capacity_) &*& array_at_lft_(alloc_id.lft, ptr, Layout::size_(elem_layout) * capacity_, _) &*& capacity <= capacity_; - //@ safety_proof { assume(false); } + /*@ + req thread_token(?t) &*& + Allocator(t, alloc, ?alloc_id) &*& + t == currentThread; + @*/ + /*@ + ens thread_token(t) &*& + RawVecInner(t, result, elem_layout, alloc_id, ?ptr, ?capacity_) &*& + array_at_lft_(alloc_id.lft, ptr, ?n, _) &*& + elem_layout.size() % elem_layout.align() != 0 || n == elem_layout.size() * capacity_ &*& + capacity <= capacity_; + @*/ + /*@ + safety_proof { + leak .own(_t, elem_layout); + std::alloc::open_Allocator_own(alloc); + let result = call(); + open RawVecInner(_t, result, elem_layout, ?alloc_id, ?ptr, ?capacity_); + std::alloc::Allocator_to_own(result.alloc); + close RawVecInner0(result, elem_layout, ptr, capacity_); + close >.own(_t, result); + if capacity_ * elem_layout.size() != 0 { + leak alloc_block_in(_, _, _); + } + leak array_at_lft_(_, _, _, _); + } + @*/ { match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout) { Ok(mut this) => { @@ -1028,7 +1487,7 @@ impl RawVecInner { //@ let k = begin_lifetime(); //@ share_RawVecInner(k, &this); //@ let this_ref = precreate_ref(&this); - //@ init_ref_RawVecInner(this_ref); + //@ init_ref_RawVecInner_(this_ref); //@ open_frac_borrow(k, ref_initialized_(this_ref), 1/2); //@ open [?f]ref_initialized_::>(this_ref)(); let needs_to_grow = this.needs_to_grow(0, capacity, elem_layout); @@ -1038,20 +1497,6 @@ impl RawVecInner { //@ end_share_RawVecInner(&this); //@ open_points_to(&this); - //@ assert RawVecInner(t, ?this_, elem_layout, alloc_id, ?ptr, ?capacity_); - //@ assert needs_to_grow == (capacity > std::num::wrapping_sub_usize(capacity_, 0)); - //@ std::num::niche_types::UsizeNoHighBit_inv(this_.cap); - //@ assert 0 <= logical_capacity(this_.cap, Layout::size_(elem_layout)); - //@ assert 0 <= capacity; - //@ std::alloc::Layout_inv(elem_layout); - //@ assert 0 <= Layout::size_(elem_layout); - /*@ - if Layout::size_(elem_layout) == 0 { - } else { - RawVecInner_inv2::(); - std::num::niche_types::UsizeNoHighBit_as_inner__new_(capacity); - } - @*/ hint::assert_unchecked(!needs_to_grow); } this @@ -1085,7 +1530,11 @@ impl RawVecInner { alloc: A, elem_layout: Layout, ) -> Result - //@ req thread_token(?t) &*& Allocator(t, alloc, ?alloc_id) &*& t == currentThread &*& 
Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0; + /*@ + req thread_token(?t) &*& + Allocator(t, alloc, ?alloc_id) &*& + t == currentThread; + @*/ /*@ ens thread_token(t) &*& match result { @@ -1093,13 +1542,41 @@ impl RawVecInner { RawVecInner(t, v, elem_layout, alloc_id, ?ptr, ?capacity_) &*& capacity <= capacity_ &*& match init { - raw_vec::AllocInit::Uninitialized => array_at_lft_(alloc_id.lft, ptr, capacity_ * Layout::size_(elem_layout), _), - raw_vec::AllocInit::Zeroed => array_at_lft(alloc_id.lft, ptr, capacity_ * Layout::size_(elem_layout), ?bs) &*& forall(bs, (eq)(0)) == true + raw_vec::AllocInit::Uninitialized => + array_at_lft_(alloc_id.lft, ptr, ?n, _) &*& + elem_layout.size() % elem_layout.align() != 0 || n == capacity_ * elem_layout.size(), + raw_vec::AllocInit::Zeroed => + array_at_lft(alloc_id.lft, ptr, ?n, ?bs) &*& + elem_layout.size() % elem_layout.align() != 0 || n == capacity_ * elem_layout.size() &*& + forall(bs, (eq)(0)) == true }, Result::Err(e) => .own(t, e) }; @*/ - //@ safety_proof { assume(false); } + /*@ + safety_proof { + leak .own(_t, init) &*& .own(_t, elem_layout); + std::alloc::open_Allocator_own(alloc); + let result = call(); + match result { + Result::Ok(r) => { + open RawVecInner(_t, r, elem_layout, ?alloc_id, ?ptr, ?capacity_); + if capacity_ * elem_layout.size() != 0 { + leak alloc_block_in(_, _, _); + } + std::alloc::Allocator_to_own(r.alloc); + close RawVecInner0(r, elem_layout, ptr, capacity_); + close >.own(_t, r); + match init { + raw_vec::AllocInit::Uninitialized => { leak array_at_lft_(_, _, _, _); } + raw_vec::AllocInit::Zeroed => { leak array_at_lft(_, _, _, _); } + } + } + Result::Err(e) => { } + } + close , std::collections::TryReserveError>>.own(_t, result); + } + @*/ { //@ std::alloc::Layout_inv(elem_layout); @@ -1115,22 +1592,26 @@ impl RawVecInner { }, }; + //@ let elemLayout = elem_layout; + //@ let layout_ = layout; + //@ assert elemLayout.repeat(capacity) == some(pair(layout_, ?stride)); + //@ std::alloc::Layout_repeat_some(elemLayout, capacity); + //@ mul_mono_l(elemLayout.size(), stride, capacity); // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. 
if layout.size() == 0 { let elem_layout_alignment = elem_layout.alignment(); - //@ close exists(Layout::size_(elem_layout)); + //@ close exists(elem_layout.size()); let r = Self::new_in(alloc, elem_layout_alignment); //@ RawVecInner_inv2::(); //@ assert RawVecInner(_, _, _, _, ?ptr_, ?capacity_); - //@ mul_zero(capacity, Layout::size_(elem_layout)); - //@ if Layout::size_(elem_layout) != 0 { assert capacity == 0; } + //@ mul_mono_l(0, capacity, elem_layout.size()); + //@ mul_zero(capacity, elem_layout.size()); /*@ match init { raw_vec::AllocInit::Uninitialized => { close array_at_lft_(alloc_id.lft, ptr_, 0, []); } raw_vec::AllocInit::Zeroed => { close array_at_lft(alloc_id.lft, ptr_, 0, []); } } @*/ - //@ std::num::niche_types::UsizeNoHighBit_inv(r.cap); return Ok(r); } @@ -1147,7 +1628,7 @@ impl RawVecInner { unsafe { //@ let_lft 'a = k; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - r = alloc.allocate(layout); + r = alloc.allocate/*@::@*/(layout); //@ leak Allocator(_, _, _); } //@ end_lifetime(k); @@ -1162,7 +1643,7 @@ impl RawVecInner { { //@ let_lft 'a = k; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - r = alloc.allocate_zeroed(layout); + r = alloc.allocate_zeroed/*@::@*/(layout); //@ leak Allocator(_, _, _); } //@ end_lifetime(k); @@ -1185,118 +1666,147 @@ impl RawVecInner { // Allocators currently return a `NonNull<[u8]>` whose length // matches the size requested. If that ever changes, the capacity // here should change to `ptr.len() / size_of::()`. - //@ std::alloc::Layout_inv(elem_layout); - //@ assert 0 <= Layout::size_(elem_layout); - //@ assert Layout::size_(elem_layout) != 0; - //@ mul_mono_l(1, Layout::size_(elem_layout), capacity); - //@ std::alloc::Layout_inv(layout); - //@ assert Layout::size_(layout) == capacity * Layout::size_(elem_layout); - //@ div_rem_nonneg(isize::MAX, Layout::align_(elem_layout)); + /*@ + if elem_layout.size() % elem_layout.align() == 0 { + div_rem_nonneg(elem_layout.size(), elem_layout.align()); + div_rem_nonneg(stride, elem_layout.align()); + if elem_layout.size() / elem_layout.align() < stride / elem_layout.align() { + mul_mono_l(elem_layout.size() / elem_layout.align() + 1, stride / elem_layout.align(), elem_layout.align()); + } else { + if elem_layout.size() / elem_layout.align() > stride / elem_layout.align() { + mul_mono_l(stride / elem_layout.align() + 1, elem_layout.size() / elem_layout.align(), elem_layout.align()); + assert false; + } + } + assert stride == elem_layout.size(); + } + @*/ + /*@ + if elem_layout.size() == 0 { + div_rem_nonneg_unique(elem_layout.size(), elem_layout.align(), 0, 0); + assert false; + } + @*/ + //@ mul_mono_l(1, elem_layout.size(), capacity); let res = Self { ptr: Unique::from(ptr.cast()), cap: unsafe { Cap::new_unchecked(capacity) }, alloc, }; - //@ assert res.ptr == Unique::from_non_null_(NonNull::new_(NonNull_ptr(ptr.ptr))); - //@ std::ptr::NonNull_ptr_nonnull(ptr.ptr); - //@ std::ptr::Unique_non_null__from_non_null_(ptr.ptr); - //@ std::ptr::NonNull_new_ptr(ptr.ptr); - //@ assert Unique::non_null_(res.ptr) == ptr.ptr; - //@ assert Layout::align_(layout) == Layout::align_(elem_layout); - //@ std::alloc::alloc_block_in_aligned(NonNull_ptr(ptr.ptr)); - //@ close RawVecInner0(alloc_id, res.ptr, res.cap, elem_layout, _, _); - //@ close RawVecInner(t, res, elem_layout, alloc_id, NonNull_ptr(ptr.ptr), _); + //@ std::alloc::alloc_block_in_aligned(ptr.ptr.as_ptr()); + //@ close RawVecInner(t, res, elem_layout, alloc_id, ptr.ptr.as_ptr(), _); Ok(res) } #[inline] unsafe 
fn from_raw_parts_in(ptr: *mut u8, cap: Cap, alloc: A) -> Self /*@ - req exists(?elem_layout) &*& + req exists::(?elem_layout) &*& Allocator(?t, alloc, ?alloc_id) &*& ptr != 0 &*& - ptr as usize % Layout::align_(elem_layout) == 0 &*& - if Cap_as_inner_(cap) * Layout::size_(elem_layout) == 0 { + ptr as usize % elem_layout.align() == 0 &*& + if cap.as_inner() * elem_layout.size() == 0 { true } else { - Cap_as_inner_(cap) * Layout::size_(elem_layout) <= isize::MAX - isize::MAX % Layout::align_(elem_layout) &*& - alloc_block_in(alloc_id, ptr, Layout::from_size_align_(Cap_as_inner_(cap) * Layout::size_(elem_layout), Layout::align_(elem_layout))) + elem_layout.repeat(cap.as_inner()) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr, allocLayout) }; @*/ - //@ ens RawVecInner(t, result, elem_layout, alloc_id, ptr, logical_capacity(cap, Layout::size_(elem_layout))); + //@ ens RawVecInner(t, result, elem_layout, alloc_id, ptr, logical_capacity(cap, elem_layout.size())); { let r = Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }; - //@ close RawVecInner0(alloc_id, r.ptr, r.cap, elem_layout, ptr, logical_capacity(cap, Layout::size_(elem_layout))); - //@ close RawVecInner(t, r, elem_layout, alloc_id, ptr, logical_capacity(cap, Layout::size_(elem_layout))); + //@ close RawVecInner(t, r, elem_layout, alloc_id, ptr, logical_capacity(cap, elem_layout.size())); r } #[inline] unsafe fn from_nonnull_in(ptr: NonNull, cap: Cap, alloc: A) -> Self /*@ - req exists(?elem_layout) &*& + req exists::(?elem_layout) &*& Allocator(?t, alloc, ?alloc_id) &*& - NonNull_ptr(ptr) as usize % Layout::align_(elem_layout) == 0 &*& - pointer_within_limits(NonNull_ptr(ptr)) == true &*& - if Cap_as_inner_(cap) * Layout::size_(elem_layout) == 0 { + ptr.as_ptr() as usize % elem_layout.align() == 0 &*& + pointer_within_limits(ptr.as_ptr()) == true &*& + if cap.as_inner() * elem_layout.size() == 0 { true } else { - Cap_as_inner_(cap) * Layout::size_(elem_layout) <= isize::MAX - isize::MAX % Layout::align_(elem_layout) &*& - alloc_block_in(alloc_id, NonNull_ptr(ptr), Layout::from_size_align_(Cap_as_inner_(cap) * Layout::size_(elem_layout), Layout::align_(elem_layout))) + elem_layout.repeat(cap.as_inner()) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr.as_ptr(), allocLayout) }; @*/ - //@ ens RawVecInner(t, result, elem_layout, alloc_id, NonNull_ptr(ptr), logical_capacity(cap, Layout::size_(elem_layout))); + //@ ens RawVecInner(t, result, elem_layout, alloc_id, ptr.as_ptr(), logical_capacity(cap, elem_layout.size())); { - //@ std::ptr::NonNull_ptr_nonnull(ptr); let r = Self { ptr: Unique::from(ptr), cap, alloc }; - //@ close RawVecInner0(alloc_id, r.ptr, r.cap, elem_layout, _, _); //@ close RawVecInner(t, r, elem_layout, alloc_id, _, _); r } #[inline] const fn ptr(&self) -> *mut T - //@ req [_]RawVecInner_share_(?k, ?t, self, ?elem_layout, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k) &*& [_]frac_borrow(k, ref_initialized_(self)); + /*@ + req [_]RawVecInner_share_(?k, ?t, self, ?elem_layout, ?alloc_id, ?ptr, ?capacity) &*& + [?q]lifetime_token(k); + @*/ //@ ens [q]lifetime_token(k) &*& result == ptr as *T; - //@ safety_proof { assume(false); } + /*@ + safety_proof { + open >.share(?k, _t, self); + call(); + } + @*/ { //@ RawVecInner_share__inv::(); - let r = self.non_null::(); + //@ let self_ref = precreate_ref(self); + //@ init_ref_RawVecInner_(self_ref); + //@ open_frac_borrow(k, ref_initialized_(self_ref), q/2); + //@ open [?f]ref_initialized_::>(self_ref)(); + let r 
= unsafe { &*(self as *const RawVecInner) }.non_null::(); + //@ close [f]ref_initialized_::>(self_ref)(); + //@ close_frac_borrow(f, ref_initialized_(self_ref)); r.as_ptr() } #[inline] const fn non_null(&self) -> NonNull //@ req [_]RawVecInner_share_(?k, ?t, self, ?elem_layout, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); - //@ ens [q]lifetime_token(k) &*& result == NonNull::new_(ptr as *T); - //@ safety_proof { assume(false); } + //@ ens [q]lifetime_token(k) &*& result.as_ptr() == ptr as *T; + /*@ + safety_proof { + open >.share(?k, _t, self); + let result = call(); + std::ptr::close_NonNull_own::(_t, result); + } + @*/ { //@ open RawVecInner_share_(k, t, self, elem_layout, alloc_id, ptr, capacity); - //@ open_frac_borrow(k, RawVecInner_frac_borrow_content(self, elem_layout, alloc_id, ptr, capacity), q); - //@ open [?f]RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)(); - //@ open RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, ptr, capacity); + //@ open_frac_borrow(k, RawVecInner_frac_borrow_content(self, elem_layout, ptr, capacity), q); + //@ open [?f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); let r = self.ptr.cast().as_non_null_ptr(); - //@ close [f]RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, ptr, capacity); - //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)(); - //@ close_frac_borrow(f, RawVecInner_frac_borrow_content(self, elem_layout, alloc_id, ptr, capacity)); + //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); + //@ close_frac_borrow(f, RawVecInner_frac_borrow_content(self, elem_layout, ptr, capacity)); r } #[inline] const fn capacity(&self, elem_size: usize) -> usize - //@ req [_]RawVecInner_share_(?k, ?t, self, ?elem_layout, ?alloc_id, ?ptr, ?capacity) &*& elem_size == Layout::size_(elem_layout) &*& [?q]lifetime_token(k); - //@ ens [q]lifetime_token(k) &*& result == capacity; - //@ safety_proof { assume(false); } + /*@ + req [_]RawVecInner_share_(?k, ?t, self, ?elem_layout, ?alloc_id, ?ptr, ?capacity) &*& + [?q]lifetime_token(k); + @*/ + //@ ens [q]lifetime_token(k) &*& elem_size != elem_layout.size() || result == capacity; + /*@ + safety_proof { + open >.share(?k, _t, self); + call(); + } + @*/ { //@ open RawVecInner_share_(k, t, self, elem_layout, alloc_id, ptr, capacity); - //@ open_frac_borrow(k, RawVecInner_frac_borrow_content(self, elem_layout, alloc_id, ptr, capacity), q); - //@ open [?f]RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)(); - //@ open RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, ptr, capacity); + //@ open_frac_borrow(k, RawVecInner_frac_borrow_content(self, elem_layout, ptr, capacity), q); + //@ open [?f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); let r = if elem_size == 0 { usize::MAX } else { self.cap.as_inner() }; - //@ close [f]RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, ptr, capacity); - //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)(); - //@ close_frac_borrow(f, RawVecInner_frac_borrow_content(self, elem_layout, alloc_id, ptr, capacity)); + //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); + //@ close_frac_borrow(f, RawVecInner_frac_borrow_content(self, elem_layout, ptr, capacity)); r } @@ -1307,29 +1817,32 @@ impl RawVecInner { #[inline] fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull, Layout)> - 
//@ req [_]RawVecInner_share_(?k, ?t, self, elem_layout, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); + /*@ + req [_]RawVecInner_share_(?k, ?t, self, elem_layout, ?alloc_id, ?ptr, ?capacity) &*& + [?q]lifetime_token(k) &*& elem_layout.size() % elem_layout.align() == 0; + @*/ /*@ ens [q]lifetime_token(k) &*& - if capacity * Layout::size_(elem_layout) == 0 { + if capacity * elem_layout.size() == 0 { result == Option::None } else { - result == Option::Some(std_tuple_2_::, Layout> {0: NonNull::new_(ptr), 1: Layout::from_size_align_(capacity * Layout::size_(elem_layout), Layout::align_(elem_layout))}) + result == Option::Some(?r) &*& + r.0.as_ptr() == ptr &*& + r.1 == Layout::from_size_align(capacity * elem_layout.size(), elem_layout.align()) }; @*/ //@ on_unwind_ens false; - //@ safety_proof { assume(false); } + //@ safety_proof { assume(false); } // This function should be marked `unsafe`; see https://github.com/rust-lang/rust/pull/145067 { //@ open RawVecInner_share_(k, t, self, elem_layout, alloc_id, ptr, capacity); - //@ open_frac_borrow(k, RawVecInner_frac_borrow_content(self, elem_layout, alloc_id, ptr, capacity), q); - //@ open [?f]RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)(); - //@ open RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, ptr, capacity); + //@ open_frac_borrow(k, RawVecInner_frac_borrow_content(self, elem_layout, ptr, capacity), q); + //@ open [?f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); //@ std::num::niche_types::UsizeNoHighBit_inv((*self).cap); //@ std::alloc::Layout_inv(elem_layout); - //@ mul_zero(capacity, Layout::size_(elem_layout)); + //@ mul_zero(capacity, elem_layout.size()); if elem_layout.size() == 0 || self.cap.as_inner() == 0 { - //@ close [f]RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, ptr, capacity); - //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)(); - //@ close_frac_borrow(f, RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)); + //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); + //@ close_frac_borrow(f, RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)); None } else { // We could use Layout::array here which ensures the absence of isize and usize overflows @@ -1337,15 +1850,18 @@ impl RawVecInner { // has already been allocated so we know it can't overflow and currently Rust does not // support such types. So we can do better by skipping some checks and avoid an unwrap. 
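The `unchecked_mul` in the block below is justified by the precondition `elem_layout.size() % elem_layout.align() == 0` together with the fact that the buffer described here was already allocated with this layout. As a rough, standalone sketch (illustrative only, not the actual `RawVecInner` code), the checked computation that `Layout::array` would otherwise perform looks like this; it also shows the shape that the `repeat`-based ghost assertions rely on (array size `cap * size`, same alignment, stride equal to the element size):

```rust
use std::alloc::Layout;

// Illustrative helper: the checked construction `Layout::array` performs. When the
// element size is a multiple of the alignment, the array layout has size `cap * size`,
// the element's alignment, and a stride equal to the element size.
fn checked_array_layout(elem: Layout, cap: usize) -> Option<(Layout, usize)> {
    assert_eq!(elem.size() % elem.align(), 0); // precondition assumed throughout the proof
    let size = elem.size().checked_mul(cap)?; // `current_memory` can use unchecked_mul here
    let layout = Layout::from_size_align(size, elem.align()).ok()?; // re-checks the isize::MAX bound
    Some((layout, elem.size())) // stride == element size
}

fn main() {
    let elem = Layout::new::<u64>();
    let (layout, stride) = checked_array_layout(elem, 10).unwrap();
    assert_eq!((layout.size(), layout.align(), stride), (80, 8, 8));
    assert!(checked_array_layout(elem, usize::MAX).is_none());
}
```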
unsafe { - //@ is_power_of_2_pos(Layout::align_(elem_layout)); - //@ div_rem_nonneg(isize::MAX, Layout::align_(elem_layout)); + //@ let elemLayout = elem_layout; + //@ assert elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)); + //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, capacity); + //@ std::alloc::Layout_inv(allocLayout); + //@ is_power_of_2_pos(elem_layout.align()); + //@ div_rem_nonneg(isize::MAX, elem_layout.align()); let alloc_size = elem_layout.size().unchecked_mul(self.cap.as_inner()); let layout = Layout::from_size_align_unchecked(alloc_size, elem_layout.align()); let ptr_ = self.ptr.into(); - //@ std::ptr::NonNull_new_ptr(std::ptr::Unique::non_null_((*self).ptr)); - //@ close [f]RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, ptr, capacity); - //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)(); - //@ close_frac_borrow(f, RawVecInner_frac_borrow_content::(self, elem_layout, alloc_id, ptr, capacity)); + //@ std::ptr::NonNull_new_as_ptr((*self).ptr.as_non_null_ptr()); + //@ close [f]RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)(); + //@ close_frac_borrow(f, RawVecInner_frac_borrow_content::(self, elem_layout, ptr, capacity)); Some((ptr_, layout)) } } @@ -1393,28 +1909,31 @@ impl RawVecInner { ) -> Result<(), TryReserveError> /*@ req thread_token(?t) &*& t == currentThread &*& - Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0 &*& + elem_layout.size() % elem_layout.align() == 0 &*& *self |-> ?self0 &*& - RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _); + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _); @*/ /*@ ens thread_token(t) &*& *self |-> ?self1 &*& match result { Result::Ok(u) => - RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& array_at_lft_(alloc_id.lft, ptr1, capacity1 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& + array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), _) &*& len > capacity0 || len + additional <= capacity1, Result::Err(e) => - RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& .own(t, e) }; @*/ - //@ safety_proof { assume(false); } + //@ safety_proof { assume(false); } // This function should be marked `unsafe`; see https://github.com/rust-lang/rust/pull/145067 { //@ let k = begin_lifetime(); //@ share_RawVecInner(k, self); //@ let self_ref = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref); + //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); let needs_to_grow = self.needs_to_grow(len, additional, elem_layout); @@ -1430,7 +1949,7 @@ impl RawVecInner { //@ let k2 = begin_lifetime(); //@ share_RawVecInner(k2, self); //@ let self_ref2 = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref2); + //@ init_ref_RawVecInner_(self_ref2); //@ open_frac_borrow(k2, ref_initialized_(self_ref2), 1/2); //@ open [?f2]ref_initialized_::>(self_ref2)(); let needs_to_grow2 = self.needs_to_grow(len, additional, elem_layout); @@ -1462,28 
+1981,31 @@ impl RawVecInner { ) -> Result<(), TryReserveError> /*@ req thread_token(?t) &*& t == currentThread &*& - Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0 &*& + elem_layout.size() % elem_layout.align() == 0 &*& *self |-> ?self0 &*& - RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _); + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _); @*/ /*@ ens thread_token(t) &*& *self |-> ?self1 &*& match result { Result::Ok(u) => - RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& array_at_lft_(alloc_id.lft, ptr1, capacity1 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& + array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), _) &*& len > capacity0 || len + additional <= capacity1, Result::Err(e) => - RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& .own(t, e) }; @*/ - //@ safety_proof { assume(false); } + //@ safety_proof { assume(false); } // This function should be marked `unsafe`; see https://github.com/rust-lang/rust/pull/145067 { //@ let k = begin_lifetime(); //@ share_RawVecInner(k, self); //@ let self_ref = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref); + //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); let needs_to_grow = self.needs_to_grow(len, additional, elem_layout); @@ -1499,7 +2021,7 @@ impl RawVecInner { //@ let k2 = begin_lifetime(); //@ share_RawVecInner(k2, self); //@ let self_ref2 = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref2); + //@ init_ref_RawVecInner_(self_ref2); //@ open_frac_borrow(k2, ref_initialized_(self_ref2), 1/2); //@ open [?f2]ref_initialized_::>(self_ref2)(); let needs_to_grow2 = self.needs_to_grow(len, additional, elem_layout); @@ -1526,19 +2048,35 @@ impl RawVecInner { #[inline] fn needs_to_grow(&self, len: usize, additional: usize, elem_layout: Layout) -> bool - //@ req [_]RawVecInner_share_(?k, ?t, self, elem_layout, ?alloc_id, ?ptr, ?capacity) &*& [_]frac_borrow(k, ref_initialized_(self)) &*& [?qa]lifetime_token(k); - //@ ens [qa]lifetime_token(k) &*& result == (additional > std::num::wrapping_sub_usize(capacity, len)); - //@ safety_proof { assume(false); } + /*@ + req [_]RawVecInner_share_(?k, ?t, self, ?elemLayout, ?alloc_id, ?ptr, ?capacity) &*& + [?qa]lifetime_token(k); + @*/ + //@ ens [qa]lifetime_token(k) &*& elem_layout != elemLayout || result == (additional > std::num::wrapping_sub_usize(capacity, len)); + /*@ + safety_proof { + leak .own(_t, elem_layout); + open >.share(?k, _t, self); + call(); + } + @*/ { - additional > self.capacity(elem_layout.size()).wrapping_sub(len) + //@ let self_ref = precreate_ref(self); + //@ init_ref_RawVecInner_(self_ref); + //@ open_frac_borrow(k, ref_initialized_(self_ref), qa/2); + //@ open [?f]ref_initialized_::>(self_ref)(); + let r = additional > unsafe { &*(self as *const RawVecInner) }.capacity(elem_layout.size()).wrapping_sub(len); + //@ close [f]ref_initialized_::>(self_ref)(); + //@ close_frac_borrow(f, ref_initialized_(self_ref)); + r } #[inline] unsafe fn 
set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) //@ req (*self).ptr |-> _ &*& (*self).cap |-> _ &*& cap <= isize::MAX; - //@ ens (*self).ptr |-> Unique::from_non_null_::(ptr.ptr) &*& (*self).cap |-> UsizeNoHighBit::new_(cap); + //@ ens (*self).ptr |-> Unique::from_non_null::(ptr.ptr) &*& (*self).cap |-> UsizeNoHighBit::new(cap); { - //@ std::ptr::NonNull_new_ptr(ptr.ptr); + //@ std::ptr::NonNull_new_as_ptr(ptr.ptr); // Allocators currently return a `NonNull<[u8]>` whose length matches // the size requested. If that ever changes, the capacity here should // change to `ptr.len() / size_of::()`. @@ -1554,9 +2092,10 @@ impl RawVecInner { ) -> Result<(), TryReserveError> /*@ req thread_token(?t) &*& t == currentThread &*& - Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0 &*& + elem_layout.size() % elem_layout.align() == 0 &*& *self |-> ?self0 &*& - RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& capacity0 < len + additional; @*/ /*@ @@ -1564,14 +2103,16 @@ impl RawVecInner { *self |-> ?self1 &*& match result { Result::Ok(u) => - RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& array_at_lft_(alloc_id.lft, ptr1, capacity1 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& + array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), _) &*& len + additional <= capacity1, Result::Err(e) => - RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& .own(t, e) }; @*/ - //@ safety_proof { assume(false); } + //@ safety_proof { assume(false); } // This function should be marked `unsafe`; see https://github.com/rust-lang/rust/pull/145067 { // This is ensured by the calling contexts. 
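The `Result::Ok` postcondition stated above for the growth paths (`len > capacity0 || len + additional <= capacity1`) is the contract that callers such as `Vec::try_reserve` ultimately rely on. A minimal, hedged check of the observable behaviour, phrased against the public `Vec` API rather than `RawVecInner` itself:

```rust
// Observable consequence of the verified Ok-postcondition: after a successful reserve,
// the capacity covers `len + additional`.
fn main() {
    let mut v: Vec<u8> = Vec::new();
    v.extend_from_slice(&[1, 2, 3]);
    let additional = 10;
    v.try_reserve(additional).unwrap();
    assert!(v.capacity() >= v.len() + additional);
}
```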
if cfg!(debug_assertions) { //~allow_dead_code // FIXME: The source location associated with a dead `else` branch is the entire `if` statement :-( @@ -1598,11 +2139,12 @@ impl RawVecInner { let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap0); let new_layout = layout_array(cap, elem_layout)?; + //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, cap); //@ let k = begin_lifetime(); //@ share_RawVecInner(k, self); //@ let self_ref = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref); + //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); let current_memory = self.current_memory(elem_layout); @@ -1612,28 +2154,37 @@ impl RawVecInner { //@ end_share_RawVecInner(self); //@ open RawVecInner(t, ?self01, elem_layout, alloc_id, ptr0, capacity0); - //@ open RawVecInner0(alloc_id, ?ptr0_, ?cap0_, elem_layout, ptr0, capacity0); - //@ assert NonNull_ptr(Unique::non_null_(ptr0_)) == ptr0; + //@ let ptr0_ = self01.ptr; + //@ let cap0_ = self01.cap; + //@ assert ptr0_.as_non_null_ptr().as_ptr() == ptr0; //@ std::alloc::Layout_inv(elem_layout); - //@ std::alloc::Layout_size__Layout_from_size_align_(capacity0 * Layout::size_(elem_layout), Layout::align_(elem_layout)); - //@ std::alloc::Layout_align__Layout_from_size_align_(capacity0 * Layout::size_(elem_layout), Layout::align_(elem_layout)); + /*@ + if capacity0 * elem_layout.size() != 0 { + let elemLayout = elem_layout; + assert elemLayout.repeat(capacity0) == some(pair(?allocLayout, ?stride)); + std::alloc::Layout_repeat_some_size_aligned(elemLayout, capacity0); + std::alloc::Layout_inv(allocLayout); + } + @*/ + + //@ std::alloc::Layout_size_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); + //@ std::alloc::Layout_align_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); - //@ if capacity0 * Layout::size_(elem_layout) == 0 { leak ptr0[..0] |-> _; } + //@ if capacity0 * elem_layout.size() == 0 { leak ptr0[..0] |-> _; } //@ std::num::niche_types::UsizeNoHighBit_inv(self01.cap); //@ assert cap >= len + additional; - //@ mul_mono_l(1, Layout::size_(elem_layout), Cap_as_inner_(self01.cap)); - //@ mul_mono_l(1, Layout::size_(elem_layout), cap); - //@ assert Layout::size_(new_layout) == cap * Layout::size_(elem_layout); - //@ assert cap <= Layout::size_(new_layout); - //@ mul_mono_l(Cap_as_inner_(self01.cap), cap, Layout::size_(elem_layout)); + //@ mul_mono_l(1, elem_layout.size(), self01.cap.as_inner()); + //@ mul_mono_l(1, elem_layout.size(), cap); + //@ assert new_layout.size() == cap * elem_layout.size(); + //@ assert cap <= new_layout.size(); + //@ mul_mono_l(self01.cap.as_inner(), cap, elem_layout.size()); //@ open_points_to(self); match core::ops::Try::branch(finish_grow(new_layout, current_memory, &mut self.alloc)) { core::ops::ControlFlow::Break(residual) => { //@ let self1 = *self; - //@ close RawVecInner0(alloc_id, ptr0_, cap0_, elem_layout, ptr0, capacity0); //@ close RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0); core::ops::FromResidual::from_residual(residual) } @@ -1642,15 +2193,13 @@ impl RawVecInner { unsafe { self.set_ptr_and_cap(ptr, cap); //@ let self1 = *self; - //@ std::ptr::NonNull_ptr_nonnull(ptr.ptr); - //@ std::alloc::alloc_block_in_aligned(NonNull_ptr(ptr.ptr)); - //@ std::num::niche_types::UsizeNoHighBit_as_inner__new_(cap); - //@ mul_zero(Layout::size_(elem_layout), cap); - //@ assert 0 <= Cap_as_inner_(self0.cap); - //@ assert 0 <= 
logical_capacity(self0.cap, Layout::size_(elem_layout)); + //@ std::alloc::alloc_block_in_aligned(ptr.ptr.as_ptr()); + //@ std::num::niche_types::UsizeNoHighBit_as_inner_new(cap); + //@ mul_zero(elem_layout.size(), cap); + //@ assert 0 <= self0.cap.as_inner(); + //@ assert 0 <= logical_capacity(self0.cap, elem_layout.size()); //@ assert cap != 0; //@ std::alloc::Layout_inv(new_layout); - //@ close RawVecInner0(alloc_id, Unique::from_non_null_(ptr.ptr), UsizeNoHighBit::new_(cap), elem_layout, NonNull_ptr(ptr.ptr), _); //@ close RawVecInner::(t, self1, elem_layout, alloc_id, _, _); } Ok(()) @@ -1666,9 +2215,10 @@ impl RawVecInner { ) -> Result<(), TryReserveError> /*@ req thread_token(?t) &*& t == currentThread &*& - Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0 &*& + elem_layout.size() % elem_layout.align() == 0 &*& *self |-> ?self0 &*& - RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& capacity0 < len + additional; @*/ /*@ @@ -1676,14 +2226,16 @@ impl RawVecInner { *self |-> ?self1 &*& match result { Result::Ok(u) => - RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& array_at_lft_(alloc_id.lft, ptr1, capacity1 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& + array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), _) &*& len + additional <= capacity1, Result::Err(e) => - RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& .own(t, e) }; @*/ - //@ safety_proof { assume(false); } + //@ safety_proof { assume(false); } // This function should be marked `unsafe`; see https://github.com/rust-lang/rust/pull/145067 { if elem_layout.size() == 0 { // Since we return a capacity of `usize::MAX` when the type size is @@ -1697,11 +2249,12 @@ impl RawVecInner { let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; //@ leak .own(t, std::collections::TryReserveErrorKind::CapacityOverflow); let new_layout = layout_array(cap, elem_layout)?; + //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, cap); //@ let k = begin_lifetime(); //@ share_RawVecInner(k, self); //@ let self_ref = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref); + //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); let current_memory = self.current_memory(elem_layout); @@ -1710,48 +2263,52 @@ impl RawVecInner { //@ end_lifetime(k); //@ end_share_RawVecInner(self); - //@ RawVecInner_inv::(); - //@ open_points_to(self); //@ open RawVecInner(t, *self, elem_layout, alloc_id, ptr0, capacity0); - //@ open RawVecInner0(alloc_id, ?ptr0_, ?cap0_, elem_layout, ptr0, capacity0); - //@ assert NonNull_ptr(Unique::non_null_(ptr0_)) == ptr0; + //@ let ptr0_ = (*self).ptr; + //@ let cap0_ = (*self).cap; + //@ assert ptr0_.as_non_null_ptr().as_ptr() == ptr0; //@ std::alloc::Layout_inv(elem_layout); - //@ std::alloc::Layout_size__Layout_from_size_align_(capacity0 * Layout::size_(elem_layout), Layout::align_(elem_layout)); - //@ 
std::alloc::Layout_align__Layout_from_size_align_(capacity0 * Layout::size_(elem_layout), Layout::align_(elem_layout)); + /*@ + if capacity0 * elem_layout.size() != 0 { + let elemLayout = elem_layout; + assert elemLayout.repeat(capacity0) == some(pair(?allocLayout, ?stride)); + std::alloc::Layout_repeat_some_size_aligned(elemLayout, capacity0); + std::alloc::Layout_inv(allocLayout); + } + @*/ + //@ std::alloc::Layout_size_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); + //@ std::alloc::Layout_align_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); - //@ if capacity0 * Layout::size_(elem_layout) == 0 { leak ptr0[..0] |-> _; } + //@ if capacity0 * elem_layout.size() == 0 { leak ptr0[..0] |-> _; } //@ std::num::niche_types::UsizeNoHighBit_inv(self0.cap); //@ assert cap == len + additional; - //@ mul_mono_l(1, Layout::size_(elem_layout), Cap_as_inner_(self0.cap)); - //@ mul_mono_l(1, Layout::size_(elem_layout), cap); - //@ mul_mono_l(capacity0, cap, Layout::size_(elem_layout)); + //@ mul_mono_l(1, elem_layout.size(), self0.cap.as_inner()); + //@ mul_mono_l(1, elem_layout.size(), cap); + //@ mul_mono_l(capacity0, cap, elem_layout.size()); match core::ops::Try::branch(finish_grow(new_layout, current_memory, &mut self.alloc)) { core::ops::ControlFlow::Break(residual) => { //@ let self1 = *self; - //@ close RawVecInner0(alloc_id, ptr0_, cap0_, elem_layout, ptr0, capacity0); //@ close RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0); core::ops::FromResidual::from_residual(residual) } core::ops::ControlFlow::Continue(ptr) => { // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items unsafe { - //@ assert Layout::size_(new_layout) == Layout::size_(elem_layout) * cap; - //@ mul_mono_l(1, Layout::size_(elem_layout), cap); + //@ assert new_layout.size() == elem_layout.size() * cap; + //@ mul_mono_l(1, elem_layout.size(), cap); self.set_ptr_and_cap(ptr, cap); //@ let self1 = *self; - //@ std::ptr::NonNull_ptr_nonnull(ptr.ptr); - //@ std::alloc::alloc_block_in_aligned(NonNull_ptr(ptr.ptr)); - //@ std::num::niche_types::UsizeNoHighBit_as_inner__new_(cap); - //@ mul_zero(Layout::size_(elem_layout), cap); - //@ assert 0 <= Cap_as_inner_(self0.cap); - //@ assert 0 <= logical_capacity(self0.cap, Layout::size_(elem_layout)); + //@ std::alloc::alloc_block_in_aligned(ptr.ptr.as_ptr()); + //@ std::num::niche_types::UsizeNoHighBit_as_inner_new(cap); + //@ mul_zero(elem_layout.size(), cap); + //@ assert 0 <= self0.cap.as_inner(); + //@ assert 0 <= logical_capacity(self0.cap, elem_layout.size()); //@ assert cap != 0; //@ std::alloc::Layout_inv(new_layout); - //@ close RawVecInner0(alloc_id, Unique::from_non_null_(ptr.ptr), UsizeNoHighBit::new_(cap), elem_layout, NonNull_ptr(ptr.ptr), _); //@ close RawVecInner::(t, self1, elem_layout, alloc_id, _, _); } Ok(()) @@ -1764,28 +2321,31 @@ impl RawVecInner { fn shrink(&mut self, cap: usize, elem_layout: Layout) -> Result<(), TryReserveError> /*@ req thread_token(?t) &*& t == currentThread &*& - Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0 &*& + elem_layout.size() % elem_layout.align() == 0 &*& *self |-> ?self0 &*& - RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _); + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _); @*/ /*@ ens thread_token(t) 
&*& *self |-> ?self1 &*& match result { Result::Ok(u) => - RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& array_at_lft_(alloc_id.lft, ptr1, capacity1 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& + array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), _) &*& cap <= capacity1, Result::Err(e) => - RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& .own(t, e) }; @*/ - //@ safety_proof { assume(false); } + //@ safety_proof { assume(false); } // This function should be marked `unsafe`; see https://github.com/rust-lang/rust/pull/145067 { //@ let k = begin_lifetime(); //@ share_RawVecInner(k, self); //@ let self_ref = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref); + //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); let capacity = self.capacity(elem_layout.size()); @@ -1816,9 +2376,10 @@ impl RawVecInner { ) -> Result<(), TryReserveError> /*@ req thread_token(?t) &*& t == currentThread &*& - Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0 &*& + elem_layout.size() % elem_layout.align() == 0 &*& *self |-> ?self0 &*& - RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& cap <= capacity0; @*/ /*@ @@ -1826,10 +2387,12 @@ impl RawVecInner { *self |-> ?self1 &*& match result { Result::Ok(u) => - RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& array_at_lft_(alloc_id.lft, ptr1, capacity1 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& + array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), _) &*& cap <= capacity1, Result::Err(e) => - RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * Layout::size_(elem_layout), _) &*& + RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& .own(t, e) }; @*/ @@ -1837,7 +2400,7 @@ impl RawVecInner { //@ let k = begin_lifetime(); //@ share_RawVecInner(k, self); //@ let self_ref = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref); + //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); let current_memory = self.current_memory(elem_layout); @@ -1854,11 +2417,18 @@ impl RawVecInner { //@ open_points_to(self); //@ open RawVecInner(t, ?self01, elem_layout, alloc_id, ptr0, capacity0); - //@ open RawVecInner0(alloc_id, self01.ptr, self01.cap, elem_layout, ptr0, capacity0); - //@ assert NonNull_ptr(Unique::non_null_(self01.ptr)) == ptr0; + //@ assert self01.ptr.as_non_null_ptr().as_ptr() == ptr0; //@ std::alloc::Layout_inv(elem_layout); - //@ std::alloc::Layout_size__Layout_from_size_align_(capacity0 * Layout::size_(elem_layout), Layout::align_(elem_layout)); - //@ std::alloc::Layout_align__Layout_from_size_align_(capacity0 * Layout::size_(elem_layout), 
Layout::align_(elem_layout)); + /*@ + if capacity0 * elem_layout.size() != 0 { + let elemLayout = elem_layout; + assert elemLayout.repeat(capacity0) == some(pair(?allocLayout, ?stride)); + std::alloc::Layout_repeat_some_size_aligned(elemLayout, capacity0); + std::alloc::Layout_inv(allocLayout); + } + @*/ + //@ std::alloc::Layout_size_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); + //@ std::alloc::Layout_align_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); // If shrinking to 0, deallocate the buffer. We don't reach this point // for the T::IS_ZST case since current_memory() will have returned @@ -1869,7 +2439,7 @@ impl RawVecInner { unsafe { //@ let_lft 'a = k1; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - self.alloc.deallocate(ptr, layout); + self.alloc.deallocate/*@::@*/(ptr, layout); //@ leak Allocator(_, _, _); }; //@ end_lifetime(k1); @@ -1878,15 +2448,14 @@ impl RawVecInner { unsafe { Unique::new_unchecked(ptr::without_provenance_mut(elem_layout.align())) }; self.cap = ZERO_CAP; //@ let ptr1_ = (*self).ptr; - //@ assert NonNull_ptr(Unique::non_null_(ptr1_)) as usize == Layout::align_(elem_layout); - //@ div_rem_nonneg_unique(Layout::align_(elem_layout), Layout::align_(elem_layout), 1, 0); - //@ close RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, _, _); + //@ assert ptr1_.as_non_null_ptr().as_ptr() as usize == elem_layout.align(); + //@ div_rem_nonneg_unique(elem_layout.align(), elem_layout.align(), 1, 0); //@ close RawVecInner(t, *self, elem_layout, alloc_id, _, _); } else { let ptr = unsafe { // Layout cannot overflow here because it would have // overflowed earlier when capacity was larger. - //@ mul_mono_l(cap, capacity0, Layout::size_(elem_layout)); + //@ mul_mono_l(cap, capacity0, elem_layout.size()); let new_size = elem_layout.size().unchecked_mul(cap); let new_layout = Layout::from_size_align_unchecked(new_size, layout.align()); //@ let alloc_ref = precreate_ref(&(*self).alloc); @@ -1895,7 +2464,7 @@ impl RawVecInner { { //@ let_lft 'a = k1; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - r = self.alloc.shrink(ptr, layout, new_layout); + r = self.alloc.shrink/*@::@*/(ptr, layout, new_layout); //@ leak Allocator(_, _, _); }; //@ end_lifetime(k1); @@ -1904,7 +2473,6 @@ impl RawVecInner { match r { Ok(ptr1) => Ok(ptr1), Err(err) => { - //@ close RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, ptr0, capacity0); //@ close RawVecInner(t, *self, elem_layout, alloc_id, ptr0, capacity0); let e = AllocError { layout: *new_layout_ref, non_exhaustive: () }; //@ std::alloc::close_Layout_own(t, new_layout); @@ -1918,10 +2486,9 @@ impl RawVecInner { unsafe { //@ std::num::niche_types::UsizeNoHighBit_inv(self01.cap); self.set_ptr_and_cap(ptr, cap); - //@ std::ptr::NonNull_ptr_nonnull(ptr_1.ptr); - //@ std::alloc::alloc_block_in_aligned(NonNull_ptr(ptr_1.ptr)); - //@ mul_zero(cap, Layout::size_(elem_layout)); - //@ close RawVecInner0(alloc_id, (*self).ptr, (*self).cap, elem_layout, _, _); + //@ std::alloc::alloc_block_in_aligned(ptr_1.ptr.as_ptr()); + //@ mul_zero(cap, elem_layout.size()); + //@ std::alloc::Layout_repeat_size_aligned_intro(elem_layout, cap); //@ close RawVecInner(t, *self, elem_layout, alloc_id, _, _); } } @@ -1936,14 +2503,20 @@ impl RawVecInner { /// Ideally this function would take `self` by move, but it cannot because it exists to be /// called from a `Drop` impl. 
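As the doc comment below notes, `deallocate` cannot take `self` by value because it exists to be called from a `Drop` impl, which only ever provides `&mut self`. A minimal illustrative sketch of that constraint (hypothetical `Buffer` type, not the actual RawVec code):

```rust
// Why the receiver is `&mut self`: `Drop::drop` cannot move out of `self`, so a
// by-value destructor helper could not be called from it.
struct Buffer;

impl Buffer {
    unsafe fn deallocate(&mut self) { /* free the backing allocation */ }
}

impl Drop for Buffer {
    fn drop(&mut self) {
        unsafe { self.deallocate() };
    }
}

fn main() {
    let _b = Buffer; // dropped at end of scope, running the destructor above
}
```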
unsafe fn deallocate(&mut self, elem_layout: Layout) - //@ req thread_token(?t) &*& *self |-> ?self0 &*& RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr_, ?capacity) &*& array_at_lft_(alloc_id.lft, ptr_, capacity * Layout::size_(elem_layout), _); + /*@ + req thread_token(?t) &*& + *self |-> ?self0 &*& + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr_, ?capacity) &*& + elem_layout.size() % elem_layout.align() == 0 &*& + array_at_lft_(alloc_id.lft, ptr_, capacity * elem_layout.size(), _); + @*/ //@ ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); //@ on_unwind_ens thread_token(t) &*& *self |-> ?self1 &*& >.own(t, self1); { //@ let k = begin_lifetime(); //@ share_RawVecInner(k, self); //@ let self_ref = precreate_ref(self); - //@ init_ref_RawVecInner(self_ref); + //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); let current_memory = self.current_memory(elem_layout); @@ -1953,26 +2526,22 @@ impl RawVecInner { //@ end_share_RawVecInner(self); //@ open_points_to(self); - //@ open RawVecInner(t, ?self01, elem_layout, alloc_id, ptr_, capacity); - //@ open RawVecInner0(alloc_id, ?u, ?cap, elem_layout, _, _); + //@ open RawVecInner(t, _, elem_layout, alloc_id, ptr_, capacity); if let Some((ptr, layout)) = current_memory { //@ let alloc_ref = precreate_ref(&(*self).alloc); //@ let k1 = begin_lifetime(); unsafe { //@ let_lft 'a = k1; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - //@ assert ptr_ == std::ptr::NonNull_ptr(ptr); - //@ std::alloc::Layout_inv(elem_layout); - //@ std::alloc::Layout_size__Layout_from_size_align_(capacity * Layout::size_(elem_layout), Layout::align_(elem_layout)); - //@ assert capacity * Layout::size_(elem_layout) == Layout::size_(layout); - self.alloc.deallocate(ptr, layout); - //@ leak Allocator(_, _, _); + //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, capacity); + //@ assert capacity * elem_layout.size() == layout.size(); + self.alloc.deallocate/*@::@*/(ptr, layout); } //@ end_lifetime(k1); //@ std::alloc::end_ref_Allocator_at_lifetime::(); } - //@ if current_memory == Option::None { leak ptr_[..capacity * Layout::size_(elem_layout)] |-> _; } //@ std::alloc::Allocator_to_own((*self).alloc); + //@ close RawVecInner0(*self, elem_layout, ptr_, capacity); //@ close >.own(t, *self); } } @@ -1993,27 +2562,30 @@ req thread_token(?t) &*& t == currentThread &*& match current_memory { Option::None => true, Option::Some(memory) => - alloc_block_in(alloc_id, NonNull_ptr(memory.0), memory.1) &*& array_at_lft_(alloc_id.lft, NonNull_ptr(memory.0), Layout::size_(memory.1), _) &*& - Layout::size_(memory.1) <= Layout::size_(new_layout) &*& - Layout::align_(memory.1) == Layout::align_(new_layout) + alloc_block_in(alloc_id, memory.0.as_ptr(), memory.1) &*& + array_at_lft_(alloc_id.lft, memory.0.as_ptr(), memory.1.size(), _) &*& + memory.1.size() <= new_layout.size() &*& + memory.1.align() == new_layout.align() }; @*/ /*@ ens thread_token(t) &*& *alloc |-> ?alloc1 &*& Allocator(t, alloc1, alloc_id) &*& match result { Result::Ok(ptr) => - alloc_block_in(alloc_id, NonNull_ptr(ptr.ptr), new_layout) &*& array_at_lft_(alloc_id.lft, NonNull_ptr(ptr.ptr), Layout::size_(new_layout), _) &*& - Layout::size_(new_layout) <= isize::MAX, + alloc_block_in(alloc_id, ptr.ptr.as_ptr(), new_layout) &*& + array_at_lft_(alloc_id.lft, ptr.ptr.as_ptr(), new_layout.size(), _) &*& + new_layout.size() <= isize::MAX, Result::Err(e) => match current_memory { Option::None => 
true, Option::Some(memory) => - alloc_block_in(alloc_id, NonNull_ptr(memory.0), memory.1) &*& array_at_lft_(alloc_id.lft, NonNull_ptr(memory.0), Layout::size_(memory.1), _) + alloc_block_in(alloc_id, memory.0.as_ptr(), memory.1) &*& + array_at_lft_(alloc_id.lft, memory.0.as_ptr(), memory.1.size(), _) } &*& .own(currentThread, e) }; @*/ -//@ safety_proof { assume(false); } +//@ safety_proof { assume(false); } // This function should be marked `unsafe`; see https://github.com/rust-lang/rust/pull/145067 { alloc_guard(new_layout.size())?; @@ -2038,7 +2610,7 @@ ens thread_token(t) &*& *alloc |-> ?alloc1 &*& Allocator(t, alloc1, alloc_id) &* { //@ let_lft 'a = k1; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - r = alloc.grow(ptr, old_layout, new_layout); + r = alloc.grow/*@::@*/(ptr, old_layout, new_layout); //@ leak Allocator(_, _, _); } //@ end_lifetime(k1); @@ -2052,7 +2624,7 @@ ens thread_token(t) &*& *alloc |-> ?alloc1 &*& Allocator(t, alloc1, alloc_id) &* { //@ let_lft 'a = k1; //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref); - r = alloc.allocate(new_layout); + r = alloc.allocate/*@::@*/(new_layout); //@ leak Allocator(_, _, _); } //@ end_lifetime(k1); @@ -2106,7 +2678,16 @@ ens thread_token(currentThread) &*& Result::Err(err) => .own(currentThread, err) }; @*/ -//@ safety_proof { assume(false); } +/*@ +safety_proof { + let result = call(); + match result { + Result::Ok(r) => { tuple_0_eq(r); close_tuple_0_own(_t); } + Result::Err(e) => { } + } + close >.own(_t, result); +} +@*/ { if usize::BITS < 64 && alloc_size > isize::MAX as usize { //~allow_dead_code Err(CapacityOverflow.into()) //~allow_dead_code @@ -2117,17 +2698,25 @@ ens thread_token(currentThread) &*& #[inline] fn layout_array(cap: usize, elem_layout: Layout) -> Result -//@ req thread_token(currentThread) &*& Layout::size_(elem_layout) % Layout::align_(elem_layout) == 0; +//@ req thread_token(currentThread); /*@ ens thread_token(currentThread) &*& match result { - Result::Ok(layout) => - Layout::size_(layout) == cap * Layout::size_(elem_layout) &*& - Layout::align_(layout) == Layout::align_(elem_layout), + Result::Ok(layout) => elem_layout.repeat(cap) == some(pair(layout, ?stride)), Result::Err(err) => .own(currentThread, err) }; @*/ -//@ safety_proof { assume(false); } +/*@ +safety_proof { + leak .own(_t, elem_layout); + let result = call(); + match result { + Result::Ok(layout) => { std::alloc::close_Layout_own(_t, layout); } + Result::Err(e) => {} + } + close >.own(_t, result); +} +@*/ { let r = match elem_layout.repeat(cap) { Ok(info) => Ok(info.0),