diff --git a/.github/workflows/verifast-proof-diff.yml b/.github/workflows/verifast-proof-diff.yml new file mode 100644 index 00000000000..c5ce62da2a3 --- /dev/null +++ b/.github/workflows/verifast-proof-diff.yml @@ -0,0 +1,8 @@ +name: verifast-proof-diff +on: [pull_request] +jobs: + proof_diff: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - run: Test/VeriFast/tasks/vTaskSwitchContext/diff.sh `pwd` diff --git a/.gitignore b/.gitignore index af2ca7b0ab5..c8a67038e1f 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,7 @@ __pycache__/ # Ignore certificate files. *.pem *.crt + +# Ignore OS bookkeeping files +.DS_Store +.vscode/settings.json diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000000..f304bb47270 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,8 @@ +[submodule "verification/verifast/demos/FreeRTOS-SMP-Demos"] + path = Test/VeriFast/tasks/vTaskSwitchContext/demos/FreeRTOS-SMP-Demos + url = https://github.com/Tobias-internship-AWS-2022/FreeRTOS-SMP-Demos.git + branch = verifast +[submodule "verification/verifast/sdks/pico-sdk"] + path = Test/VeriFast/tasks/vTaskSwitchContext/sdks/pico-sdk + url = https://github.com/Tobias-internship-AWS-2022/pico-sdk.git + branch = verifast diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/.gitignore b/Test/VeriFast/tasks/vTaskSwitchContext/.gitignore new file mode 100644 index 00000000000..79f5c6bfb19 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/.gitignore @@ -0,0 +1,8 @@ +# Ignore log files +pp_log + +# Ignore preprocessing output +preprocessed_files + +# Ignore generated stats +stats \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/README.md b/Test/VeriFast/tasks/vTaskSwitchContext/README.md new file mode 100644 index 00000000000..aac8223fe62 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/README.md @@ -0,0 +1,601 @@ +# FreeRTOS VeriFast Proofs +This directory contains an unbounded memory safety and thread 
safety proof +for the core of the task scheduler: `vTaskSwitchContext` + + + +## VeriFast +[VeriFast](https://github.com/verifast/verifast) +is a deductive program verifier for C based on separation logic. +It supports verifying concurrent code and reasoning about complex data structures. + +VeriFast proofs are *unbounded*. +That is, until explicitly specified, it does not assume any bound on the size of the involved data structures. +Hence, proofs give us unbounded guarantees. +In our case, this means that our proof holds for any number of tasks and any size of the involved data structures. + +Reasoning about concurrent code can be tricky because of all the interleavings that can occur. +VeriFast does not assume anything about the occurring interleavings. +Therefore, the proven guarantees hold for every possible interleaving that might occur during runtime. + +Being a deductive verifier, VeriFast requires us to manually write a proof. +In particular, we have to specify what well-formed data structures look like and to annotate the code with proof steps. +It then symbolically executes the annotated code and queries an SMT solver to check the validity of proof steps. + +This directory contains all the specifications and proof steps necessary to check that the scheduler is memory and thread safe. + + + +## Key Result +Informally, the proof guarantees the following: +``` +Proof Assumptions: + - Data structure specification + - Locking discipline + - Contracts abstracting assembly + - FreeRTOS config + - Function contract of `vTaskSwitchContext` + +==> + +Unbounded memory & thread safety guarantees for `vTaskSwitchContext`: + ∀ #tasks. ∀ task interleavings. ∀ interrupt schedules. ∀ data sizes. ∀ cores C1, …, Cn. + vTaskSwitchContext(C1) || … || vTaskSwitchContext(Cn) + => (no memory error ∧ no race condition) +``` + +We have to model certain aspects of the system and our proof assumes that these models are correct (cf. 
`Proof Assumptions` below for a detailed explanation). +In particular, this modeling step includes writing a precondition for `vTaskSwitchContext` that specifies the context in which the function may be called. + +Our proof considers any number of running tasks, any possible task interleavings and any interrupts that might occur during execution. +In particular, it considers any possible size for the involved data structures, since it is an unbounded proof. + +The proof ensures that every concurrent execution of `vTaskSwitchContext` on any cores is memory safe and mutually thread safe. +That is, when we execute multiple instances of the function on different cores, we won't get any memory errors or data races, no matter how these instances interleave or when interrupts occur. + + +# Found Buffer Underflow +During the verification of `vTaskSwitchContext` we found a buffer underflow, fixed it and verified that our fix works. +The guarantees stated in the section above concern the fixed-up code. +We submitted the fix as a pull request: [Fixed buffer underflow in prvSelectHighestPriorityTask. #607](https://github.com/FreeRTOS/FreeRTOS-Kernel/pull/607) + +Our verification target `vTaskSwitchContext` calls the auxiliary function `prvSelectHighestPriorityTask` to choose the task that will be scheduled next. +This works as long as the idle tasks have already been created. +The idle tasks are tasks whose only purpose is to run and do nothing in case there is no other task that can be scheduled. + +However, `prvSelectHighestPriorityTask` can also be called before the idle tasks have been created. +When that happens, the function decrements the global variable `uxTopReadyPriority` to -1. +This variable is supposed to store the highest priority for which we know that there is a ready task. +Priorities start at 0, so -1 is an invalid value. + +During the next regular context switch, `vTaskSwitchContext` calls `prvSelectHighestPriorityTask`. 
+The latter looks at `uxTopReadyPriority` to detect at which priority level it should start its search. +Hence, it accesses the global ready list array at index -1, i.e., `pxReadyTasksLists[ uxCurrentPriority ]`. +This causes a memory error. + +# Proof Directory Structure +``` +FreeRTOS-Kernel +│ +│ +│ +├── *.c files +│ The base directory contains the source files. Note that our proof uses +│ annotated copies of these files located in the proof directory. +│ +│ +├── include +│ Contains the header files. Note that our proof uses annotated copies of +│ these files located in the proof directory. +│ +│ +├── portable +│ └── Thirdparty +│ └── GCC +│ └── RP2040 +│ Contains the Raspberry Pi Pico setup. +│ +│ +├── .github/workflows +│ └── verifast-proof-diff.yml +│ This workflow is triggered on every pull request and checks for +│ potential divergences between the production code and the proof. +│ +│ +└── Test/VeriFast/tasks/vTaskSwitchContext + │ + ├── run-verifast.sh + │ Shell script to check the proof with VeriFast. + │ + ├── run-vfide.sh + │ Shell script to load the proof into the VeriFast IDE. + │ + ├── diff.sh + │ Shell script to flag changes in the production code that potentially + │ break the validity of the VeriFast proof. An empty diff means that the + │ proof and the production code remain in sync. + │ + ├── preprocessing_scripts + │ Contains scripts to preprocess and rewrite the source code. + │ + ├── demos + │ Contains the FreeRTOS SMP demo. Our proofs use some of its + │ configuration files. + │ + ├── include + │ Contains annotated copies of header files residing in + │ 'FreeRTOS-Kernel/include'. These files are annotated with VeriFast + | predicates, lemmas and proof steps. + │ + │ + ├── proof + │ Contains the VeriFast proof files. + │ │ + │ ├── *.h files + │ │ Headers containing VeriFast formalizations and proofs. + │ │ + │ ├── README.md + │ │ Contains overview about proof files. 
+ │ │ + │ ├── single_core_proofs + │ │ Contains the old list formalization and proofs written by + │ │ Aalok Thakkar and Nathan Chong in 2020 for the single-core + │ │ setup. + │ │ + │ └── single_core_proofs_extended + │ Contains new proofs extending the single-core list + │ formalization. + │ + │ + ├── proof_setup + │ Contains config files for the proof. The proof assumes a setup for + │ RP2040. + │ + ├── sdks + │ Contains SDKs referenced by the proof setup. + │ Some files are annotated with VeriFast contracts. + │ + ├── src + │ Contains annotated copies of source files residing in the repository's + │ base directory 'FreeRTOS-Kernel'. The files are annotated with VeriFast + │ predicates, lemmas and proof steps. + │ + └── stats + Contains some statistics about the VeriFast proof. +``` + + + +# Checking the Proof +The proof can be checked by running one of the scripts `run-verifast.sh` and +`run-vfide.sh` residing in this directory (see repo structure above). +Both scripts preprocess the annotated code with Clang and rewrite syntax +VeriFast does not understand into something equivalent. +The result is written to a temporary file (`preprocessed_files/tasks_vf_pp.c`) +before it is processed by VeriFast. +This file contains a copy of all the code and annotations required to check the +proof. +Both scripts expect the command line arguments explained below. + +- #### run-verifast.sh: + Preprocesses the code and proof files and uses the + command-line version of VeriFast to check the resulting proof file. + A call must have the form: + #### run-verifast.sh \ \ + where + - \ is the absolute path to this repository's base directory, + i.e., `FreeRTOS-Kernel` in the repo structure depicted above. + - \ is the absolute path to the VeriFast installation + directory. + +- #### run-vfide.sh: + Preprocesses the code and proof files and loads the resulting proof file into + the VeriFast IDE. 
+ A call must have the form: + #### run-vfide.sh \ \ \[\\] + where + - \ \ are as explained above + - \ is an optional argument specifying the IDE's font size. + + +# Reading the Proof +The most important aspects any reader has to know about before they can understand the proof are the locking discipline and the lock invariants. +We suggest to read the proof in a top-down approach. +That is, the reader should start by reading the documentation and definitions of the most important concepts. +Afterwards, we suggest to continue by reading the most important parts of the verified functions: +The contracts and the loop invariants. +Only once these are understood, we suggest to read the low-level proof annotations in the verified functions (e.g. open/close statements, lemma calls). + +We propose the following order: + 1. The locking discipline, formalized and documented in `proof/port_locking_contracts.h`. + + FreeRTOS uses macros to invoke synchronization mechanisms (activating/deactivating interrupts and acquiring/releasing locks). + The definitions of these macros are port-specific. + The file `proof/port_locking_contracts.h` contains contracts abstracting the port-specific definitions and formalizing the synchronization mechanisms and the locking discipline, e.g., the order in which locks have to be acquired. + + 2. The lock invariants, formalized and documented in `proof/lock_predicates.h`. + + The invariants express which resources the locks and the masking of interrupts protect. + When we acquire a lock or deactivate interrupts, the invariants determine which level of access permissions (i.e. read or write access) we get for the protected resources. + Since the locks protect the ready lists and task control blocks, the invariants reference the ready list and task predicates defined in `proof/ready_list_predicates.h` and `task_predicates.h`. + + 3. The contracts for the functions we verified, i.e., `vTaskSwitchContext` and `prvSelectHighestPriorityTask`, cf. 
`src/tasks.c`. + + 4. The loop invariants in `prvSelectHighestPriorityTask`. + + 5. The low-level proof annotations in `vTaskSwitchContext` and `prvSelectHighestPriorityTask`, e.g., open/close statements and lemma calls. + + + + + + +# Maintaining the Proof +This directory contains annotated copies of FreeRTOS source and header files. +The annotations in these files tell VeriFast which functions it should verify and what the proof looks like. +Including these annotations in the production code would lead to a huge visual burden for developers. +The downside of including them in a separate copy of the code is that the proof and the production code may get out of sync without anyone noticing. + +Therefore, we provide a GitHub workflow to check for potential divergences, cf. +`FreeRTOS-Kernel/.github/workflows/verifast-proof-diff.yml`. +The workflow is triggered on every pull request. +It aggregates and preprocesses the parts of the production code relevant to our proof as well as the annotated copies in this directory. +Afterwards, it computes a diff between both versions and fails if the result is not empty, in which case the diff result will be logged in the GitHub actions log. +An empty diff means that the pull request did not change anything that can affect our proof and the proof remains valid. +A non-empty diff shows which changes in the pull request potentially impact our proof. +In this case, the changes should also be applied to the annotated copies and the proof should be checked again. +If the detected divergence was not a false positive and indeed impacted the proof, the proof will likely require manual repair. + +The diff can also be manually checked by running the command +`diff.sh `, where the argument is the absolute path to the repository's base directory. 
+ + + +# Disclaimer +All scripts and proofs have been tested under OS X 12.6.1 and with the VeriFast nightly build from Dec 31, 2022 (corresponds to commit [9e32b122b54152a2ac75a811aa422d638b56c6ab](https://github.com/verifast/verifast/commit/9e32b122b54152a2ac75a811aa422d638b56c6ab)). + + + +# Proof Assumptions +We have to model certain aspects of the system in order to reason about the task scheduler. +The proof treats these models as assumptions. +Therefore, the proof's correctness relies on the correctness of our models. + + + +- ### FreeRTOS Configuration + The VeriFast proofs assume a setup for the Raspberry Pi Pico, i.e., RP2040, cf. directory `proof_setup`. + We use the config files from the official FreeRTOS SMP demo for the RP2040 and from official RP2040 port. + The most important properties of this configuration are: + - It supports running multiple priorities in parallel on different cores. + - Core affinity is deactivated, i.e., all tasks may be scheduled on any core. + + The Raspberry Pi Pico only has two cores and we want to ensure that our proof does not accidentally rely on the properties that come with this binary setup. + Hence, we changed the number of cores to an arbitrary large number. + + + +- ### Contracts Abstracting Assembly + The port layer of FreeRTOS contains assembly code that is essential for our proof. + In particular, code to mask interrupts and code to acquire and release locks. + VeriFast is a program verifier for C and not designed to handle any kind of assembly. + The port-specific assembly is called via macros with a port-specific definition. + We redefined these macros to call dummy function prototypes instead. + We equipped these prototypes with VeriFast contracts that capture the semantics of the original assembly code, cf. `proof/port_locking_contracts.h`. + This way, VeriFast refers to the contracts to reason about the macro calls and does not have to deal with the assembly code. 
+ + +- ### Data structure specification + VeriFast expects us to specify the memory layout of the data structures accessed by the task scheduler. + In a proof, these specifications tell us what a well-formed instance of a data structure looks like and how we may manipulate it to preserve well-formedness. + + Most notably, the scheduler searches the so called "ready lists", a global array of cyclic doubly linked lists storing tasks of specific priorities that are ready to be scheduled. + Reasoning about this data structure is challenging because it requires heavy reasoning about its complex internals. + + Previously, Aalok Thakkar and Nathan Chong used VeriFast to prove functional correctness of the stand-alone list data structure for a single-core setup, cf. [FreeRTOS Pull Request 836: Update VeriFast proofs](https://github.com/FreeRTOS/FreeRTOS/pull/836). + We reused their formalization and proofs as much as possible. + However, we had to heavily adapt both to tailor them to the needs of the scheduler proof, cf. `Proof Details` below. + + The reused specification resides in `proofs/single_core_proofs/`. + The full ready list array is specified in `proofs/ready_list_predicates.h`. + + +- ### Function Contract of `vTaskSwitchContext` + VeriFast expects every function that it verifies to have a so called "function contract". + These contracts consist of a precondition, also called the "requires clause" and a postcondition, also called the "ensures clause". + The precondition characterizes the context in which the function may be called. + This determines the state in which our proof starts. + The postcondition characterizes the state we want to be in when the function terminates. + + Starting from the precondition, VeriFast symbolically executes the function's code and our annotated proof steps. + The proof succeeds if every step succeeds and if the proof ends in a state that complies with the specified postcondition. 
+ + Hence, the function contract determines *WHAT* we prove. + `vTaskSwitchContext` is called by an interrupt defined in the port layer on some core `C`. + This interrupt masks interrupts on this core and acquires the locks protecting the ready lists. + Therefore, the precondition of `vTaskSwitchContext` states that: + - the function is executed on an arbitrary core `C` + - interrupts on core `C` are deactivated + - the locks protecting the ready lists have been acquired + - that all the relevant global data structures are well-formed + + The postcondition states that all these properties are preserved, which is what the interrupt calling into the scheduler expects. + + + +- ### Locking discipline and lock invariants + FreeRTOS' SMP implementation uses the following synchronization mechanisms: + - Deactivating interrupts: + Some data is only meant to be accessed on a specific core C. + Such data may only be accessed after interrupts on core C have been deactivated. + For instance the global array `pxCurrentTCBs` in `tasks.c` has an entry for + every core. + `pxCurrentTCBs[C]` stores a pointer to the task control block (TCB) of the task running on core C. + Core C is always allowed to read `pxCurrentTCBs[C]`. + However, writing requires the interrupts on core C to be deactivated. + + - task lock: + The task lock is used to protect critical sections and resources from being accessed by multiple tasks simultaneously. + + - ISR lock: + The ISR/ interrupt lock is used to protect critical sections and resources from being accessed by multiple interrupts simultaneously. + + - task lock + ISR lock: + Access to certain resources and critical sections are protected by both the task lock and the ISR lock. + For these, it is crucial that we first acquire the task lock and then the ISR lock. + Likewise, we must release them in opposite order. + Failure to comply with this order may lead to deadlocks. 
+ The resources protected by both locks are the main resources this proof deals with. + These include the ready lists and certain access rights to the tasks' run states. + + #### Lock Invariants + Every synchronization mechanism protects specific data structures and sections of code. + For our proof, we associate every synchronization mechanism `L` with permissions to access the resources it protects. + We do this by defining a so called "lock invariant" `I`. + Besides pure access permissions the invariant can also specify more specific properties, such as that a data structure must be well-formed. + (We call it "lock invariant" even though we also use the same technique to model the masking of interrupts.) + When we acquire lock `L` (or deactivate the interrupts) we produce the lock invariant `I`. + That means, we get the access permissions `I` expresses. + When we release the lock `L` (or reactivate the interrupts), we consume the invariant `I`. + That means that we lose the access permissions granted by `I`. + While we hold the lock, we are free to manipulate the resources it protects (according to the permissions granted by `I`). + However, we have to prove that whatever we do with these resources preserves any guarantees given by the invariant. + For instance, if `I` says a data structure is well-formed then we must prove that our actions preserve well-formedness. + Otherwise, consuming `I` during the release step will fail and consequently the entire proof will fail. + + FreeRTOS uses macros with port-specific definitions to acquire and release locks and to mask and unmask interrupts. + We abstracted these with VeriFast contracts defined in `proof/port_locking_contracts.h`. + The contracts ensure that invoking any synchronization mechanism produces or consumes the corresponding invariant. 
+ The invariants are defined in `proof/lock_predicates.h` + + + + +# Proof Details + +## Context Switches and Ready Lists +Our proof ensures that the context switches performed by `vTaskSwitchContext` are memory and thread safe. +The most difficult part of a context switch is to find a new task that we can schedule. +For that, `vTaskSwitchContext` calls `prvSelectHighestPriorityTask` which searches for the task with the highest priority that can be scheduled. +FreeRTOS maintains a global data structure called the "ready lists". +It is an array `pxReadyTasksLists` with an entry for every priority level that a task might have. +For every such priority level `p`, `pxReadyTasksLists[p]` stores a cyclic doubly linked list containing all tasks of priority level `p` that are ready to be scheduled, including currently running ones. +`prvSelectHighestPriorityTask` searches through these lists in descending order. +That is, in order to verify `vTaskSwitchContext`, we have to reason about the ready lists. + + + +## Reusing the Single-Core List Formalization and Proofs +In 2020 Aalok Thakkar and Nathan Chong verified the functional correctness of the FreeRTOS list API for a single-core setup, cf. [FreeRTOS Pull Request 836: Update VeriFast proofs](https://github.com/FreeRTOS/FreeRTOS/pull/836). +The list API has not been changed during the port of FreeRTOS to SMP. +Ready lists are fully protected by the task and ISR locks, which allows FreeRTOS to continue using the single-core implementation of the list API. + +We reuse the single-core list formalization to model the ready list for each priority level. +However, due to challenges that arise in the scheduler, we had to extend and adapt the existing formalization. + +The single-core list formalization and lemmas that we reuse are located in `proofs/single_core_proofs/scp_list_predicates.h`. +The list API is defined in `include/list.h` and `src/list.c`. +The latter also contains the API proofs. 
+ + + +## Comparing the Original List Proofs and Our Adaptation +As mentioned, we had to heavily adapt the list formalization and proofs to reuse them for the scheduler verification. +Therefore, both `scp_list_predicates.h` and `list.c` contain an updated version of the formalization and proofs used by our context-switch proof and the original version by Aalok Thakkar and Nathan Chong. +The latter is guarded by a preprocessor define `VERIFAST_SINGLE_CORE`. +We can compare both versions by preprocessing both files twice: Once with the define `VERIFAST_SINGLE_CORE`, which yields the original version, and once without which gives us the version used by our proofs. +Afterwards, a diff will show all the adaptations we had to apply. + + + +## List Predicates + +The single-core list formalization defines two main predicates: +- ``` + predicate xLIST_ITEM(struct xLIST_ITEM *n, + TickType_t xItemValue, + struct xLIST_ITEM *pxNext, + struct xLIST_ITEM *pxPrevious, + struct xLIST *pxContainer;) + ``` + Represents a list item of type `xLIST_ITEM`. + The arguments have the following semantics: + - `n`: A pointer to the node whose memory the predicate represents. + - `xItemValue`: The value stored in node `n`. + - `pxNext`: The node's "next" pointer, i.e., `n->pxNext`. + - `pxPrevious`: The node's "previous" pointer, i.e., `n->pxPrevious`. + - `pxContainer`: The doubly linked list containing this node. +- ``` + predicate DLS(struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells, + list vals, + struct xLIST *pxContainer) + ``` + Represents a non-empty doubly linked list segment. + The semantics of the arguments are as follows: + - `n`: The left-most node in the segment. + - `nPrev`: The left-most node's "previous" pointer, i.e., `n->pxPrevious`. + - `mNext`: The right-most node's "next" pointer, i.e., `m->pxNext`. + - `m`: The right-most node. + - `cells`: A VeriFast list storing pointers to all nodes the list contains. 
+ - `vals`: A VeriFast list storing the list nodes' values. + - `pxContainer`: A pointer to list struct. + + The single-core formalization also uses `DLS` not just to represent list segments but also to express unsegmented cyclic linked lists. + In FreeRTOS lists start with a sentinel, called "end". + Using the `DLS` predicate, a cyclic list has the form: + `DLS(end, endPrev, end, endPrev, cells, vals, list)` + + + + +## Issue 1: List Predicates Do Not Expose Tasks +Each node in a ready list points to task control block (TCB) representing a task that is ready to run. +The TCB a node points to is called its "owner". +`prvSelectHighestPriorityTask` iterates through the ready lists and looks at each TCB it finds to determine which task to schedule next. +Hence, it is crucial that we reason about these TCBs. +However, the list predicates depicted above do not expose this information. +Hence, we have to extend the predicate signatures to: +``` + predicate xLIST_ITEM(struct xLIST_ITEM *n, + TickType_t xItemValue, + struct xLIST_ITEM *pxNext, + struct xLIST_ITEM *pxPrevious, + void* pxOwner, + struct xLIST *pxContainer;) +``` +where `pxOwner` is the TCB pointer stored in the represented node +and +``` +predicate DLS(struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells, + list vals, + list owners, + struct xLIST *pxContainer) +``` +where `owners` is a list of all the TCBs pointed to by the list nodes. + +While this change seems simple on a first glance, it forced us to adapt all the list proofs we reuse. + + + +## Issue 2: Model-induced Complexity + +The formalization of doubly-linked list segments induces heavy complexity. +The problem lies in the fact that `DLS` cannot express empty list segments. +This leads to complex case distinctions whenever we access list nodes. +Consequently, our proof becomes very complex and every list access leads to an exponential blow-up of the proof tree. 
+This in turn leads to very bad performance when checking the proof. +We solved this problem by introducing a new representation of a cyclic doubly-linked list as a potentially empty prefix, the node we want to access and a potentially empty suffix: `DLS_prefix(....) &*& xLIST_ITEM(node, ...) &*& DLS_suffix(...)` +We added lemmas that allow us to freely convert between a `DLS` predicate and our new representation. +Thereby, the proof became a lot simpler and it reduced the time needed to check the proof from ~20 minutes to about 12.5 seconds. +The following sections explain the details of the problem and our solution. + +### Iterating through a DLS + +The function `prvSelectHighestPriorityTask` iterates through the ready lists. +Hence, reasoning about it requires us to reason about iteration through memory described by a `DLS` predicate instance. Consider the following scenario: +We have a `DLS` predicate representing our cyclic ready list and a task item pointer `pxTaskItem` which points to an element of this list. + +- `DLS(end, endPrev, end, endPrev, cells, vals, owners, readyList)` +- `mem(pxTaskItem, cells) == true` + +Suppose we want to move the task pointer forward + +- `pxTaskItem2 = pxTaskItem->pxNext` + +In order to verify this line we have to do two things: + +1. Justify the heap access to `pxTaskItem->pxNext` +2. Prove that `pxTaskItem2` points to an element of the list. This is + necessary to reason about any code that uses `pxTaskItem2`. + +We can do this by opening the recursive predicate at the nodes for `pxTaskItem` and `pxTaskItem->next`, for which we can reuse the existing list proof lemmas. +When the right parts of the predicate are exposed, we can prove (1) and (2). +Afterwards, we have to close the predicate again. + + + + + +### Proofs Are Hard + +Proving (1) and (2) forces us to consider many different cases, which leads to complicated proofs. 
+The position of `pxTaskItem` in the list determines how we should open the `DLS` (either by using the existing `split` lemma or with VeriFast’s `open` command) and also how we have to close it at the end of the proof. +Accessing `pxTaskItem->pxNext` introduces more case splits that complicate the proof. +Again, closing the predicate has to account for all the introduced cases. + +Introducing lemmas to open and close the predicate helps us to hide this complexity inside the lemmas. +Thereby, the main proof using these lemmas gets shorter. +However, the next section explains why this approach does not eliminate the complexity. + +Note that proofs for forward iteration cannot be reused for backwards iteration. +Instead the latter requires separate proofs. + + + +### Bad Performance + +As explained above, reasoning about a single statement that moves the item pointer forward or backward introduces many case splits. `prvSelectHighestPriorityTask` contains multiple statements that manipulate the item pointer. +From VeriFast’s perspective, each consecutive proof of such an iteration statement splits up the proof tree further. +In other words: Every iteration statement leads to an exponential blow-up of the sub-proof-tree rooted at this statement. +This is the case even though this part of the code we reason about is linear. + +Introducing lemmas for opening and closing shortens the consecutive iteration proofs significantly, but does not eliminate the case splits. +The reason for this is that the `DLS` predicate cannot express empty segments and depending on the current proof path, the shape of the heap changes. +Our proof has to account for the following possibilities: +- non-empty prefix and no suffix: + ``` + DLS(...) &*& xLIST_ITEM(node, ...) + ``` +- non-empty prefix and non-empty suffix: + ``` + DLS(...) &*& xLIST_ITEM(node, ...) &*& DLS(...) + ``` +- no prefix and non-empty suffix: + ``` + xLIST_ITEM(node, ...) &*& DLS(...) 
+ ``` + +In our proof we know that the ready list we traverse always contains the sentinel and an additional node. +So, we can eliminate the case where both the prefix and the suffix are empty. + +We cannot unify the representation of the proof state as long as we stick to the `DLS` predicate. +Instead the opening lemma’s postcondition and the closing lemma’s precondition must reflect the case split. +Consequently, applying the lemmas in a proof introduces the case splits anyway and consecutive iteration statements/ lemma applications increase the number of proof paths exponentially. +VeriFast requires ~20 min to reason about 4 iteration statements. + + + +### Solution: Introduce new representation for opened DLS + + +The only way to eliminate the case splits in `prvSelectHighestPriorityTask` is to unify the proof state of an opened `DLS` across all proof paths. +We introduce two new predicates that express potentially empty prefixes and suffixes of opened cyclic `DLS`. +With that, we can formalize an opened list in a unified way as + +- `DLS_prefix(....) &*& xLIST_ITEM(pxTaskItem, ...) &*& DLS_suffix(...)` + +Additionally, we write opening and closing lemmas that transform a `DLS` predicate instance into our new representation and back. +The proof state we get after opening the list does not force VeriFast to consider any case splits. +This finally eliminates the complexity induced by the non-empty list model. + +Eliminating these case splits reduces verification time from ~20min to ~12.5s. + +Before we introduced this new list representation, we wrote opening and closing lemmas that used the `DLS` formulation. +It turns out that switching to the new representation does not only simplify the proof state we get after opening, but it also simplifies the opening and closing lemmas, though they remain very complicated. + +The old opening and closing lemmas required switching the SMT solver to Z3, which is much slower than VeriFast's standard SMT solver. 
+The lemmas required heavy reasoning about applications of `list` fixpoint functions and the shape of the inductive `list` datatype.
+VeriFast offers limited capabilities to reason about fixpoint functions (apart from axiomatizing) and the standard SMT solver often has problems reasoning about the shape of results, e.g., assertions of the form `drop(i, vals) == cons(_, _)`.
+The new lemmas' proofs don’t require Z3.
+This allowed us to switch back to VeriFast’s standard SMT solver.
+
+Note that the lemmas still have to consider every possible case internally. That is, the opening and closing lemmas remain complicated.
diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/demos/FreeRTOS-SMP-Demos b/Test/VeriFast/tasks/vTaskSwitchContext/demos/FreeRTOS-SMP-Demos
new file mode 160000
index 00000000000..345437a815d
--- /dev/null
+++ b/Test/VeriFast/tasks/vTaskSwitchContext/demos/FreeRTOS-SMP-Demos
@@ -0,0 +1 @@
+Subproject commit 345437a815defb4d7ccc549d3f04e7ec0883e8ad
diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/diff.sh b/Test/VeriFast/tasks/vTaskSwitchContext/diff.sh
new file mode 100755
index 00000000000..bb00188cf31
--- /dev/null
+++ b/Test/VeriFast/tasks/vTaskSwitchContext/diff.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+
+# This script produces a diff between two versions of 'tasks.c':
+# (i) The production version of the source file and (ii) the verified version.
+# The diff is computed from the preprocessed version of both files which include
+# all code relevant to the proof. That is, any change in a file required
+# by the VeriFast proof will show up in the diff.
+# The diff report will be written to 'stats/diff_report.txt'.
+#
+# This script expects the following arguments:
+# $1 : Absolute path to the base directory of this repository.
+
+
+# Checking validity of command line arguments.
+HELP="false"
+if [ $1 == "-h" ] || [ $1 == "--help" ]; then
+    HELP="true"
+else
+    if [ $# != 1 ] ; then
+        echo Wrong number of arguments. Found $#, expected 1.
+        HELP="true"
+    fi
+
+    if [ ! -d "$1" ]; then
+        echo Directory "$1" does not exist.
+        HELP="true"
+    fi
+fi
+
+if [ "$HELP" != "false" ]; then
+    echo Expected call of the form
+    echo "diff.sh <REPO_BASE_DIR>"
+    echo "where <REPO_BASE_DIR> is the absolute path to the base directory of this repository."
+    exit
+fi
+
+
+# Relative or absolute path to the directory this script and `paths.sh` reside in.
+PREFIX=`dirname $0`
+# Absolute path to the base of this repository.
+REPO_BASE_DIR="$1"
+
+
+# Load functions used to compute paths.
+. "$PREFIX/paths.sh"
+
+
+VF_PROOF_BASE_DIR=`vf_proof_base_dir $REPO_BASE_DIR`
+PP_SCRIPT_DIR=`pp_script_dir $REPO_BASE_DIR`
+PP="$PP_SCRIPT_DIR/preprocess_file_for_diff.sh"
+LOG_DIR=`pp_log_dir $REPO_BASE_DIR`
+STATS_DIR=`stats_dir $REPO_BASE_DIR`
+
+# Unpreprocessed versions of tasks.c
+PROD_TASKS_C=`prod_tasks_c $REPO_BASE_DIR`
+VF_TASKS_C=`vf_annotated_tasks_c $REPO_BASE_DIR`
+
+# Preprocessed versions of tasks.c
+PP_OUT_DIR=`pp_out_dir $REPO_BASE_DIR`
+PP_PROD_TASKS_C=`pp_prod_tasks_c $REPO_BASE_DIR`
+PP_VF_TASKS_C=`pp_vf_tasks_c $REPO_BASE_DIR`
+
+ensure_output_dirs_exist $REPO_BASE_DIR
+
+echo preprocessing production version of 'tasks.c'
+$PP $PROD_TASKS_C $PP_PROD_TASKS_C \
+    "$LOG_DIR/pp_prod_tasks_c_error_report.txt" \
+    $REPO_BASE_DIR $VF_PROOF_BASE_DIR
+
+echo preprocessing verified version of 'tasks.c'
+$PP $VF_TASKS_C $PP_VF_TASKS_C \
+    "$LOG_DIR/pp_vf_tasks_c_error_report.txt" \
+    $REPO_BASE_DIR $VF_PROOF_BASE_DIR
+
+
+echo Computing diff:
+echo
+
+git diff --no-index --ignore-all-space $PP_PROD_TASKS_C $PP_VF_TASKS_C
diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/include/list.h b/Test/VeriFast/tasks/vTaskSwitchContext/include/list.h
new file mode 100644
index 00000000000..5a19ba019ef
--- /dev/null
+++ b/Test/VeriFast/tasks/vTaskSwitchContext/include/list.h
@@ -0,0 +1,450 @@
+/*
+ * FreeRTOS SMP Kernel V202110.00
+ * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* + * This is the list implementation used by the scheduler. While it is tailored + * heavily for the schedulers needs, it is also available for use by + * application code. + * + * list_ts can only store pointers to list_item_ts. Each ListItem_t contains a + * numeric value (xItemValue). Most of the time the lists are sorted in + * descending item value order. + * + * Lists are created already containing one list item. The value of this + * item is the maximum possible that can be stored, it is therefore always at + * the end of the list and acts as a marker. The list member pxHead always + * points to this marker - even though it is at the tail of the list. This + * is because the tail contains a wrap back pointer to the true head of + * the list. 
+ * + * In addition to it's value, each list item contains a pointer to the next + * item in the list (pxNext), a pointer to the list it is in (pxContainer) + * and a pointer to back to the object that contains it. These later two + * pointers are included for efficiency of list manipulation. There is + * effectively a two way link between the object containing the list item and + * the list item itself. + * + * + * \page ListIntroduction List Implementation + * \ingroup FreeRTOSIntro + */ + + +#ifndef LIST_H +#define LIST_H + + +#ifdef VERIFAST + /* Reason for rewrite: + * VeriFast bug: + * Both `#ifdef INC_FREERTOS_H` and its negation `#ifdef INC_FREERTOS_H` + * evaluate to true. See minimal example `define_name`. + */ + #define INC_FREERTOS_H + /* Remember that this header is included indirectly `tasks.c` after it + * includes `FreeRTOS.h`. + */ + // TODO: Remove this work-around once VF has been fixed. +#endif /* VERIFAST */ + +#ifndef INC_FREERTOS_H + #error "FreeRTOS.h must be included before list.h" +#endif + +#ifdef VERIFAST + /* Reason for rewrite: + * VeriFast's normal and context-free preprocessor consume different + * numbers of tokens when expanding `PRIVILEGED_FUNCTION` in this file. + */ + #define PRIVILEGED_FUNCTION +#endif /* VERIFAST */ + +/* + * The list structure members are modified from within interrupts, and therefore + * by rights should be declared volatile. However, they are only modified in a + * functionally atomic way (within critical sections of with the scheduler + * suspended) and are either passed by reference into a function or indexed via + * a volatile variable. Therefore, in all use cases tested so far, the volatile + * qualifier can be omitted in order to provide a moderate performance + * improvement without adversely affecting functional behaviour. 
The assembly + * instructions generated by the IAR, ARM and GCC compilers when the respective + * compiler's options were set for maximum optimisation has been inspected and + * deemed to be as intended. That said, as compiler technology advances, and + * especially if aggressive cross module optimisation is used (a use case that + * has not been exercised to any great extend) then it is feasible that the + * volatile qualifier will be needed for correct optimisation. It is expected + * that a compiler removing essential code because, without the volatile + * qualifier on the list structure members and with aggressive cross module + * optimisation, the compiler deemed the code unnecessary will result in + * complete and obvious failure of the scheduler. If this is ever experienced + * then the volatile qualifier can be inserted in the relevant places within the + * list structures by simply defining configLIST_VOLATILE to volatile in + * FreeRTOSConfig.h (as per the example at the bottom of this comment block). + * If configLIST_VOLATILE is not defined then the preprocessor directives below + * will simply #define configLIST_VOLATILE away completely. + * + * To use volatile list structure members then add the following line to + * FreeRTOSConfig.h (without the quotes): + * "#define configLIST_VOLATILE volatile" + */ +#ifndef configLIST_VOLATILE + #define configLIST_VOLATILE +#endif /* configSUPPORT_CROSS_MODULE_OPTIMISATION */ + +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + +/* Macros that can be used to place known values within the list structures, + * then check that the known values do not get corrupted during the execution of + * the application. These may catch the list data structures being overwritten in + * memory. They will not catch data errors caused by incorrect configuration or + * use of FreeRTOS.*/ +#if ( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 ) + /* Define the macros to do nothing. 
*/ + #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE + #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE + #define listFIRST_LIST_INTEGRITY_CHECK_VALUE + #define listSECOND_LIST_INTEGRITY_CHECK_VALUE + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) + #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) + #define listTEST_LIST_INTEGRITY( pxList ) +#else /* if ( configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES == 0 ) */ + /* Define macros that add new members into the list structures. */ + #define listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue1; + #define listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE TickType_t xListItemIntegrityValue2; + #define listFIRST_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue1; + #define listSECOND_LIST_INTEGRITY_CHECK_VALUE TickType_t xListIntegrityValue2; + +/* Define macros that set the new structure members to known values. */ + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue1 = pdINTEGRITY_CHECK_VALUE + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ) ( pxItem )->xListItemIntegrityValue2 = pdINTEGRITY_CHECK_VALUE + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) ( pxList )->xListIntegrityValue1 = pdINTEGRITY_CHECK_VALUE + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) ( pxList )->xListIntegrityValue2 = pdINTEGRITY_CHECK_VALUE + +/* Define macros that will assert if one of the structure members does not + * contain its expected value. 
*/ + #define listTEST_LIST_ITEM_INTEGRITY( pxItem ) configASSERT( ( ( pxItem )->xListItemIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxItem )->xListItemIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) ) + #define listTEST_LIST_INTEGRITY( pxList ) configASSERT( ( ( pxList )->xListIntegrityValue1 == pdINTEGRITY_CHECK_VALUE ) && ( ( pxList )->xListIntegrityValue2 == pdINTEGRITY_CHECK_VALUE ) ) +#endif /* configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES */ + + +/* + * Definition of the only type of object that a list can contain. + */ +struct xLIST; +struct xLIST_ITEM +{ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + configLIST_VOLATILE TickType_t xItemValue; /*< The value being listed. In most cases this is used to sort the list in descending order. */ + struct xLIST_ITEM * configLIST_VOLATILE pxNext; /*< Pointer to the next ListItem_t in the list. */ + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; /*< Pointer to the previous ListItem_t in the list. */ + void * pvOwner; /*< Pointer to the object (normally a TCB) that contains the list item. There is therefore a two way link between the object containing the list item and the list item itself. */ + struct xLIST * configLIST_VOLATILE pxContainer; /*< Pointer to the list in which this list item is placed (if any). */ + listSECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ +}; +typedef struct xLIST_ITEM ListItem_t; /* For some reason lint wants this as two separate definitions. */ + +struct xMINI_LIST_ITEM +{ + listFIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. 
*/ + configLIST_VOLATILE TickType_t xItemValue; + struct xLIST_ITEM * configLIST_VOLATILE pxNext; + struct xLIST_ITEM * configLIST_VOLATILE pxPrevious; +}; +typedef struct xMINI_LIST_ITEM MiniListItem_t; + +/* + * Definition of the type of queue used by the scheduler. + */ +typedef struct xLIST +{ + listFIRST_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + volatile UBaseType_t uxNumberOfItems; + ListItem_t * configLIST_VOLATILE pxIndex; /*< Used to walk through the list. Points to the last item returned by a call to listGET_OWNER_OF_NEXT_ENTRY (). */ + #ifdef VERIFAST + /* Reason for rewrite: + * This change allows us to reuse the existing single-core list proofs, + * for which an identical rewrite for assumed. + */ + ListItem_t xListEnd; + #else + MiniListItem_t xListEnd; /*< List item that contains the maximum possible item value meaning it is always at the end of the list and is therefore used as a marker. */ + #endif /* VERIFAST */ + listSECOND_LIST_INTEGRITY_CHECK_VALUE /*< Set to a known value if configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ +} List_t; + +/* + * Access macro to set the owner of a list item. The owner of a list item + * is the object (usually a TCB) that contains the list item. + * + * \page listSET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER + * \ingroup LinkedList + */ +#define listSET_LIST_ITEM_OWNER( pxListItem, pxOwner ) ( ( pxListItem )->pvOwner = ( void * ) ( pxOwner ) ) + +/* + * Access macro to get the owner of a list item. The owner of a list item + * is the object (usually a TCB) that contains the list item. + * + * \page listGET_LIST_ITEM_OWNER listSET_LIST_ITEM_OWNER + * \ingroup LinkedList + */ +#define listGET_LIST_ITEM_OWNER( pxListItem ) ( ( pxListItem )->pvOwner ) + +/* + * Access macro to set the value of the list item. In most cases the value is + * used to sort the list in descending order. 
+ * + * \page listSET_LIST_ITEM_VALUE listSET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listSET_LIST_ITEM_VALUE( pxListItem, xValue ) ( ( pxListItem )->xItemValue = ( xValue ) ) + +/* + * Access macro to retrieve the value of the list item. The value can + * represent anything - for example the priority of a task, or the time at + * which a task should be unblocked. + * + * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listGET_LIST_ITEM_VALUE( pxListItem ) ( ( pxListItem )->xItemValue ) + +/* + * Access macro to retrieve the value of the list item at the head of a given + * list. + * + * \page listGET_LIST_ITEM_VALUE listGET_LIST_ITEM_VALUE + * \ingroup LinkedList + */ +#define listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext->xItemValue ) + +/* + * Return the list item at the head of the list. + * + * \page listGET_HEAD_ENTRY listGET_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_HEAD_ENTRY( pxList ) ( ( ( pxList )->xListEnd ).pxNext ) + +/* + * Return the next list item. + * + * \page listGET_NEXT listGET_NEXT + * \ingroup LinkedList + */ +#define listGET_NEXT( pxListItem ) ( ( pxListItem )->pxNext ) + +/* + * Return the list item that marks the end of the list + * + * \page listGET_END_MARKER listGET_END_MARKER + * \ingroup LinkedList + */ +#define listGET_END_MARKER( pxList ) ( ( ListItem_t const * ) ( &( ( pxList )->xListEnd ) ) ) + +/* + * Access macro to determine if a list contains any items. The macro will + * only have the value true if the list is empty. + * + * \page listLIST_IS_EMPTY listLIST_IS_EMPTY + * \ingroup LinkedList + */ +#define listLIST_IS_EMPTY( pxList ) ( ( ( pxList )->uxNumberOfItems == ( UBaseType_t ) 0 ) ? pdTRUE : pdFALSE ) + +/* + * Access macro to return the number of items in the list. + */ +#define listCURRENT_LIST_LENGTH( pxList ) ( ( pxList )->uxNumberOfItems ) + +/* + * Access function to obtain the owner of the next entry in a list. 
+ * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list + * and returns that entry's pxOwner parameter. Using multiple calls to this + * function it is therefore possible to move through every item contained in + * a list. + * + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxTCB pxTCB is set to the address of the owner of the next list item. + * @param pxList The list from which the next item owner is to be returned. + * + * \page listGET_OWNER_OF_NEXT_ENTRY listGET_OWNER_OF_NEXT_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_NEXT_ENTRY( pxTCB, pxList ) \ + { \ + List_t * const pxConstList = ( pxList ); \ + /* Increment the index to the next item and return the item, ensuring */ \ + /* we don't return the marker used at the end of the list. */ \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + if( ( void * ) ( pxConstList )->pxIndex == ( void * ) &( ( pxConstList )->xListEnd ) ) \ + { \ + ( pxConstList )->pxIndex = ( pxConstList )->pxIndex->pxNext; \ + } \ + ( pxTCB ) = ( pxConstList )->pxIndex->pvOwner; \ + } + + + +/* + * Access function to obtain the owner of the first entry in a list. Lists + * are normally sorted in ascending item value order. + * + * This function returns the pxOwner member of the first item in the list. + * The pxOwner parameter of a list item is a pointer to the object that owns + * the list item. In the scheduler this is normally a task control block. + * The pxOwner parameter effectively creates a two way link between the list + * item and its owner. + * + * @param pxList The list from which the owner of the head item is to be + * returned. 
+ * + * \page listGET_OWNER_OF_HEAD_ENTRY listGET_OWNER_OF_HEAD_ENTRY + * \ingroup LinkedList + */ +#define listGET_OWNER_OF_HEAD_ENTRY( pxList ) ( ( &( ( pxList )->xListEnd ) )->pxNext->pvOwner ) + +/* + * Check to see if a list item is within a list. The list item maintains a + * "container" pointer that points to the list it is in. All this macro does + * is check to see if the container and the list match. + * + * @param pxList The list we want to know if the list item is within. + * @param pxListItem The list item we want to know if is in the list. + * @return pdTRUE if the list item is in the list, otherwise pdFALSE. + */ +#define listIS_CONTAINED_WITHIN( pxList, pxListItem ) ( ( ( pxListItem )->pxContainer == ( pxList ) ) ? ( pdTRUE ) : ( pdFALSE ) ) + +/* + * Return the list a list item is contained within (referenced from). + * + * @param pxListItem The list item being queried. + * @return A pointer to the List_t object that references the pxListItem + */ +#define listLIST_ITEM_CONTAINER( pxListItem ) ( ( pxListItem )->pxContainer ) + +/* + * This provides a crude means of knowing if a list has been initialised, as + * pxList->xListEnd.xItemValue is set to portMAX_DELAY by the vListInitialise() + * function. + */ +#define listLIST_IS_INITIALISED( pxList ) ( ( pxList )->xListEnd.xItemValue == portMAX_DELAY ) + +/* + * Must be called before a list is used! This initialises all the members + * of the list structure and inserts the xListEnd item into the list as a + * marker to the back of the list. + * + * @param pxList Pointer to the list being initialised. + * + * \page vListInitialise vListInitialise + * \ingroup LinkedList + */ +void vListInitialise( List_t * const pxList ) PRIVILEGED_FUNCTION; + +/* + * Must be called before a list item is used. This sets the list container to + * null so the item does not think that it is already contained in a list. + * + * @param pxItem Pointer to the list item being initialised. 
+ * + * \page vListInitialiseItem vListInitialiseItem + * \ingroup LinkedList + */ +void vListInitialiseItem( ListItem_t * const pxItem ) PRIVILEGED_FUNCTION; +//@ requires pxItem->pxContainer |-> _; +//@ ensures pxItem->pxContainer |-> 0; + +/* + * Insert a list item into a list. The item will be inserted into the list in + * a position determined by its item value (descending item value order). + * + * @param pxList The list into which the item is to be inserted. + * + * @param pxNewListItem The item that is to be placed in the list. + * + * \page vListInsert vListInsert + * \ingroup LinkedList + */ +void vListInsert( List_t * const pxList, + ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION; + +/* + * Insert a list item into a list. The item will be inserted in a position + * such that it will be the last item within the list returned by multiple + * calls to listGET_OWNER_OF_NEXT_ENTRY. + * + * The list member pxIndex is used to walk through a list. Calling + * listGET_OWNER_OF_NEXT_ENTRY increments pxIndex to the next item in the list. + * Placing an item in a list using vListInsertEnd effectively places the item + * in the list position pointed to by pxIndex. This means that every other + * item within the list will be returned by listGET_OWNER_OF_NEXT_ENTRY before + * the pxIndex parameter again points to the item being inserted. + * + * @param pxList The list into which the item is to be inserted. + * + * @param pxNewListItem The list item to be inserted into the list. + * + * \page vListInsertEnd vListInsertEnd + * \ingroup LinkedList + */ +void vListInsertEnd( List_t * const pxList, + ListItem_t * const pxNewListItem ) PRIVILEGED_FUNCTION; + +/* + * Remove an item from a list. The list item has a pointer to the list that + * it is in, so only the list item need be passed into the function. + * + * @param uxListRemove The item to be removed. The item will remove itself from + * the list pointed to by it's pxContainer parameter. 
+ * + * @return The number of items that remain in the list after the list item has + * been removed. + * + * \page uxListRemove uxListRemove + * \ingroup LinkedList + */ +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) PRIVILEGED_FUNCTION; + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + +#endif /* ifndef LIST_H */ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/include/portable.h b/Test/VeriFast/tasks/vTaskSwitchContext/include/portable.h new file mode 100644 index 00000000000..d9bb2e72089 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/include/portable.h @@ -0,0 +1,245 @@ +/* + * FreeRTOS SMP Kernel V202110.00 + * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/*----------------------------------------------------------- +* Portable layer API. Each function must be defined for each port. +*----------------------------------------------------------*/ + +#ifndef PORTABLE_H +#define PORTABLE_H + +/* Each FreeRTOS port has a unique portmacro.h header file. Originally a + * pre-processor definition was used to ensure the pre-processor found the correct + * portmacro.h file for the port being used. That scheme was deprecated in favour + * of setting the compiler's include path such that it found the correct + * portmacro.h file - removing the need for the constant and allowing the + * portmacro.h file to be located anywhere in relation to the port being used. + * Purely for reasons of backward compatibility the old method is still valid, but + * to make it clear that new projects should not use it, support for the port + * specific constants has been moved into the deprecated_definitions.h header + * file. */ +#include "deprecated_definitions.h" + +/* If portENTER_CRITICAL is not defined then including deprecated_definitions.h + * did not result in a portmacro.h header file being included - and it should be + * included here. In this case the path to the correct portmacro.h header file + * must be set in the compiler's include path. 
*/ +#ifndef portENTER_CRITICAL + #include "portmacro.h" +#endif + +#if portBYTE_ALIGNMENT == 32 + #define portBYTE_ALIGNMENT_MASK ( 0x001f ) +#endif + +#if portBYTE_ALIGNMENT == 16 + #define portBYTE_ALIGNMENT_MASK ( 0x000f ) +#endif + +#if portBYTE_ALIGNMENT == 8 + #define portBYTE_ALIGNMENT_MASK ( 0x0007 ) +#endif + +#if portBYTE_ALIGNMENT == 4 + #define portBYTE_ALIGNMENT_MASK ( 0x0003 ) +#endif + +#if portBYTE_ALIGNMENT == 2 + #define portBYTE_ALIGNMENT_MASK ( 0x0001 ) +#endif + +#if portBYTE_ALIGNMENT == 1 + #define portBYTE_ALIGNMENT_MASK ( 0x0000 ) +#endif + +#ifndef portBYTE_ALIGNMENT_MASK + #error "Invalid portBYTE_ALIGNMENT definition" +#endif + +#ifndef portNUM_CONFIGURABLE_REGIONS + #define portNUM_CONFIGURABLE_REGIONS 1 +#endif + +#ifndef portHAS_STACK_OVERFLOW_CHECKING + #define portHAS_STACK_OVERFLOW_CHECKING 0 +#endif + +#ifndef portARCH_NAME + #define portARCH_NAME NULL +#endif + +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + +#include "mpu_wrappers.h" + +/* + * Setup the stack of a new task so it is ready to be placed under the + * scheduler control. The registers have to be placed on the stack in + * the order that the port expects to find them. 
+ * + */ +#if ( portUSING_MPU_WRAPPERS == 1 ) + #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + #else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + TaskFunction_t pxCode, + void * pvParameters, + BaseType_t xRunPrivileged ) PRIVILEGED_FUNCTION; + #endif +#else /* if ( portUSING_MPU_WRAPPERS == 1 ) */ + #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + StackType_t * pxEndOfStack, + TaskFunction_t pxCode, + void * pvParameters ) PRIVILEGED_FUNCTION; + #else + StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, + TaskFunction_t pxCode, + void * pvParameters ) PRIVILEGED_FUNCTION; + ///@ requires true; + ///@ ensures true; + #endif +#endif /* if ( portUSING_MPU_WRAPPERS == 1 ) */ + +/* Used by heap_5.c to define the start address and size of each memory region + * that together comprise the total FreeRTOS heap space. */ +typedef struct HeapRegion +{ + uint8_t * pucStartAddress; + size_t xSizeInBytes; +} HeapRegion_t; + +/* Used to pass information about the heap out of vPortGetHeapStats(). */ +typedef struct xHeapStats +{ + size_t xAvailableHeapSpaceInBytes; /* The total heap size currently available - this is the sum of all the free blocks, not the largest block that can be allocated. */ + size_t xSizeOfLargestFreeBlockInBytes; /* The maximum size, in bytes, of all the free blocks within the heap at the time vPortGetHeapStats() is called. */ + size_t xSizeOfSmallestFreeBlockInBytes; /* The minimum size, in bytes, of all the free blocks within the heap at the time vPortGetHeapStats() is called. */ + size_t xNumberOfFreeBlocks; /* The number of free memory blocks within the heap at the time vPortGetHeapStats() is called. 
*/ + size_t xMinimumEverFreeBytesRemaining; /* The minimum amount of total free memory (sum of all free blocks) there has been in the heap since the system booted. */ + size_t xNumberOfSuccessfulAllocations; /* The number of calls to pvPortMalloc() that have returned a valid memory block. */ + size_t xNumberOfSuccessfulFrees; /* The number of calls to vPortFree() that has successfully freed a block of memory. */ +} HeapStats_t; + +/* + * Used to define multiple heap regions for use by heap_5.c. This function + * must be called before any calls to pvPortMalloc() - not creating a task, + * queue, semaphore, mutex, software timer, event group, etc. will result in + * pvPortMalloc being called. + * + * pxHeapRegions passes in an array of HeapRegion_t structures - each of which + * defines a region of memory that can be used as the heap. The array is + * terminated by a HeapRegions_t structure that has a size of 0. The region + * with the lowest start address must appear first in the array. + */ +void vPortDefineHeapRegions( const HeapRegion_t * const pxHeapRegions ) PRIVILEGED_FUNCTION; + +/* + * Returns a HeapStats_t structure filled with information about the current + * heap state. + */ +void vPortGetHeapStats( HeapStats_t * pxHeapStats ); + +#ifdef VERIFAST + /* Reason for rewrite: + * VeriFast treats the `malloc` and `free` functions specially, + * in a particular built-in way that cannot be axiomatized within + * VeriFast's specification language. + * + * When `malloc( sizeof(struct S) )` is called for a user defined + * struct `S`, VeriFast instantiates the corresponding + * `malloc_block_S(...)` predicate as well as points-to chunks + * for its fields. + * Reversely, calling `free` cleans up all the predicates instantiated + * by `malloc`. + */ + #define pvPortMalloc malloc + #define vPortFree(ptr) free( (void*) ptr) +#else + /* + * Map to the memory management routines required for the port. 
+ */ + void * pvPortMalloc( size_t xSize ) PRIVILEGED_FUNCTION; + void vPortFree( void * pv ) PRIVILEGED_FUNCTION; +#endif /* VERIFAST */ + + +void vPortInitialiseBlocks( void ) PRIVILEGED_FUNCTION; +size_t xPortGetFreeHeapSize( void ) PRIVILEGED_FUNCTION; +size_t xPortGetMinimumEverFreeHeapSize( void ) PRIVILEGED_FUNCTION; + +#if( configSTACK_ALLOCATION_FROM_SEPARATE_HEAP == 1 ) + void *pvPortMallocStack( size_t xSize ) PRIVILEGED_FUNCTION; + void vPortFreeStack( void *pv ) PRIVILEGED_FUNCTION; +#else + #define pvPortMallocStack pvPortMalloc + #define vPortFreeStack vPortFree +#endif + +/* + * Setup the hardware ready for the scheduler to take control. This generally + * sets up a tick interrupt and sets timers for the correct tick frequency. + */ +BaseType_t xPortStartScheduler( void ) PRIVILEGED_FUNCTION; + +/* + * Undo any hardware/ISR setup that was performed by xPortStartScheduler() so + * the hardware is left in its original condition after the scheduler stops + * executing. + */ +void vPortEndScheduler( void ) PRIVILEGED_FUNCTION; + +/* + * The structures and methods of manipulating the MPU are contained within the + * port layer. + * + * Fills the xMPUSettings structure with the memory region information + * contained in xRegions. + */ +#if ( portUSING_MPU_WRAPPERS == 1 ) + struct xMEMORY_REGION; + void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings, + const struct xMEMORY_REGION * const xRegions, + StackType_t * pxBottomOfStack, + uint32_t ulStackDepth ) PRIVILEGED_FUNCTION; +#endif + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ + +#endif /* PORTABLE_H */ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/include/stack_macros.h b/Test/VeriFast/tasks/vTaskSwitchContext/include/stack_macros.h new file mode 100644 index 00000000000..853aef44062 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/include/stack_macros.h @@ -0,0 +1,206 @@ +/* + * FreeRTOS SMP Kernel V202110.00 + * Copyright (C) 2020 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef STACK_MACROS_H +#define STACK_MACROS_H + +/* + * Call the stack overflow hook function if the stack of the task being swapped + * out is currently overflowed, or looks like it might have overflowed in the + * past. + * + * Setting configCHECK_FOR_STACK_OVERFLOW to 1 will cause the macro to check + * the current stack state only - comparing the current top of stack value to + * the stack limit. Setting configCHECK_FOR_STACK_OVERFLOW to greater than 1 + * will also cause the last few stack bytes to be checked to ensure the value + * to which the bytes were set when the task was created have not been + * overwritten. Note this second test does not guarantee that an overflowed + * stack will always be recognised. 
+ */ + +/*-----------------------------------------------------------*/ + +/* + * portSTACK_LIMIT_PADDING is a number of extra words to consider to be in + * use on the stack. + */ +#ifndef portSTACK_LIMIT_PADDING + #define portSTACK_LIMIT_PADDING 0 +#endif + +#if ( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH < 0 ) ) + +/* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + /* Is the currently saved stack pointer within the stack limit? */ \ + if( pxCurrentTCB->pxTopOfStack <= pxCurrentTCB->pxStack + portSTACK_LIMIT_PADDING ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if ( ( configCHECK_FOR_STACK_OVERFLOW == 1 ) && ( portSTACK_GROWTH > 0 ) ) + +/* Only the current stack state is to be checked. */ + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + \ + /* Is the currently saved stack pointer within the stack limit? 
*/ \ + if( pxCurrentTCB->pxTopOfStack >= pxCurrentTCB->pxEndOfStack - portSTACK_LIMIT_PADDING ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* configCHECK_FOR_STACK_OVERFLOW == 1 */ +/*-----------------------------------------------------------*/ + +#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH < 0 ) ) + + #ifdef VERIFAST + /* Reason for rewrite: + * VeriFast complains about unspecified evaluation order of + * - `pxCurrentTCB->pxStack` + * - `vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName );` + * + */ + #define taskCHECK_FOR_STACK_OVERFLOW() VF__taskCHECK_FOR_STACK_OVERFLOW() + + void VF__taskCHECK_FOR_STACK_OVERFLOW() + /*@ requires TCB_stack_p(?gCurrentTCB, ?ulFreeBytesOnStack) &*& + TCB_criticalNesting_p(gCurrentTCB, ?uxCriticalNesting) &*& + // chunks required by `pxCurrentTCB` aka `xTaskGetCurrentTaskHandle()` + interruptState_p(coreID_f(), ?state) &*& + interruptsDisabled_f(state) == true &*& + pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB); + @*/ + /*@ ensures TCB_stack_p(gCurrentTCB, ulFreeBytesOnStack) &*& + TCB_criticalNesting_p(gCurrentTCB, uxCriticalNesting) &*& + // chunks required by `pxCurrentTCB` aka `xTaskGetCurrentTaskHandle()` + interruptState_p(coreID_f(), state) &*& + interruptsDisabled_f(state) == true &*& + pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB); \ + @*/ \ + { \ + /*@ open TCB_stack_p(gCurrentTCB, ulFreeBytesOnStack); @*/ \ + /*@ assert( stack_p(?pxStack, ?ulStackDepth, ?pxTopOfStack, \ + ?ulFreeBytes, ?ulUsedCells, ?ulUnalignedBytes) ); \ + @*/ \ + /*@ open stack_p(_, _, _, _, _, _); @*/ \ + /* The detour below allows us to skip proving that `ulFreeBytes` \ + * is a multiple of `sizeof(StackType_t)`. 
\ + */ \ + /*@ integers__to_chars(pxTopOfStack+1); @*/ \ + /*@ chars_join((char*) pxStack); @*/ \ + /*@ chars_to_integers_(pxStack, sizeof(StackType_t), false, 4); @*/ \ + TCB_t* tcb0 = pxCurrentTCB; \ + const uint32_t * const pulStack = ( uint32_t * ) tcb0->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + /*@ bool gOverflow = false; @*/ \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + /*@ gOverflow = true; @*/ \ + /*@ integers__to_chars(pxStack); @*/ \ + /*@ chars_join((char*) pxStack); @*/ \ + /*@ chars_split((char*) pxStack, ulFreeBytesOnStack); @*/ \ + /*@ close stack_p(pxStack, ulStackDepth, pxTopOfStack, \ + ulFreeBytes, ulUsedCells, ulUnalignedBytes); \ + @*/ \ + /*@ close TCB_stack_p(gCurrentTCB, ulFreeBytesOnStack); @*/ \ + TCB_t* tcb1 = pxCurrentTCB; \ + TCB_t* tcb2 = pxCurrentTCB; \ + vApplicationStackOverflowHook( ( TaskHandle_t ) tcb1, tcb2->pcTaskName ); \ + } \ + /*@ \ + if(!gOverflow) { \ + integers__to_chars(pxStack); \ + chars_join((char*) pxStack); \ + chars_split((char*) pxStack, ulFreeBytesOnStack); \ + close stack_p(pxStack, ulStackDepth, pxTopOfStack, \ + ulFreeBytes, ulUsedCells, ulUnalignedBytes); \ + close TCB_stack_p(gCurrentTCB, ulFreeBytesOnStack); \ + } \ + @*/ \ + } + #else + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + const uint32_t * const pulStack = ( uint32_t * ) pxCurrentTCB->pxStack; \ + const uint32_t ulCheckValue = ( uint32_t ) 0xa5a5a5a5; \ + \ + if( ( pulStack[ 0 ] != ulCheckValue ) || \ + ( pulStack[ 1 ] != ulCheckValue ) || \ + ( pulStack[ 2 ] != ulCheckValue ) || \ + ( pulStack[ 3 ] != ulCheckValue ) ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + #endif /* VERIFAST */ + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( 
( configCHECK_FOR_STACK_OVERFLOW > 1 ) && ( portSTACK_GROWTH > 0 ) ) + + #define taskCHECK_FOR_STACK_OVERFLOW() \ + { \ + int8_t * pcEndOfStack = ( int8_t * ) pxCurrentTCB->pxEndOfStack; \ + static const uint8_t ucExpectedStackBytes[] = { tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, \ + tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE, tskSTACK_FILL_BYTE }; \ + \ + \ + pcEndOfStack -= sizeof( ucExpectedStackBytes ); \ + \ + /* Has the extremity of the task stack ever been written over? */ \ + if( memcmp( ( void * ) pcEndOfStack, ( void * ) ucExpectedStackBytes, sizeof( ucExpectedStackBytes ) ) != 0 ) \ + { \ + vApplicationStackOverflowHook( ( TaskHandle_t ) pxCurrentTCB, pxCurrentTCB->pcTaskName ); \ + } \ + } + +#endif /* #if( configCHECK_FOR_STACK_OVERFLOW > 1 ) */ +/*-----------------------------------------------------------*/ + +/* Remove stack overflow macro if not being used. */ +#ifndef taskCHECK_FOR_STACK_OVERFLOW + #define taskCHECK_FOR_STACK_OVERFLOW() +#endif + + + +#endif /* STACK_MACROS_H */ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/include/task.h b/Test/VeriFast/tasks/vTaskSwitchContext/include/task.h new file mode 100644 index 00000000000..87ac87e1307 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/include/task.h @@ -0,0 +1,3318 @@ +/* + * FreeRTOS SMP Kernel V202110.00 + * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + +#ifndef INC_TASK_H +#define INC_TASK_H + +#ifdef VERIFAST + /* Reason for rewrite: + * VeriFast bug: + * Both `#ifdef INC_FREERTOS_H` and its negation `#ifdef INC_FREERTOS_H` + * evaluate to true. See minimal example `define_name`. + */ + #define INC_FREERTOS_H + /* Remember that this header is included by `tasks.c` after it includes + * `FreeRTOS.h`. + */ + // TODO: Remove this work-around once VF has been fixed. +#endif /* VERIFAST */ + + +/* Remark: + * Note that the following VF section renders the previous one obsolete. + * However, we keep the above as a reminder until the corresponding bug + * has been fixed. 
+ */ +#ifdef VERIFAST + /* Reason for rewrite: + * Even though in the current verification setup, `FreeRTOS.h` is always + * included before this file is processed, VeriFast does not consider this + * context when processing this file. VeriFast forbids macro expansions to + * depend on a potentially variable context, e.g, `configSTACK_DEPTH_TYPE` + * which expands to 'uint16_t' if and only if `FreeRTOS.h` has been + * included. + */ + #include "FreeRTOS.h" +#endif /* VERIFAST */ + +#ifndef INC_FREERTOS_H + #error "include FreeRTOS.h must appear in source files before include task.h" +#endif + +#include "list.h" + +/* *INDENT-OFF* */ +#ifdef __cplusplus + extern "C" { +#endif +/* *INDENT-ON* */ + +/*----------------------------------------------------------- +* MACROS AND DEFINITIONS +*----------------------------------------------------------*/ + +#define tskKERNEL_VERSION_NUMBER "V10.4.3" +#define tskKERNEL_VERSION_MAJOR 10 +#define tskKERNEL_VERSION_MINOR 4 +#define tskKERNEL_VERSION_BUILD 3 + +/* MPU region parameters passed in ulParameters + * of MemoryRegion_t struct. */ +#define tskMPU_REGION_READ_ONLY ( 1UL << 0UL ) +#define tskMPU_REGION_READ_WRITE ( 1UL << 1UL ) +#define tskMPU_REGION_EXECUTE_NEVER ( 1UL << 2UL ) +#define tskMPU_REGION_NORMAL_MEMORY ( 1UL << 3UL ) +#define tskMPU_REGION_DEVICE_MEMORY ( 1UL << 4UL ) + +/* The direct to task notification feature used to have only a single notification + * per task. Now there is an array of notifications per task that is dimensioned by + * configTASK_NOTIFICATION_ARRAY_ENTRIES. For backward compatibility, any use of the + * original direct to task notification defaults to using the first index in the + * array. */ +#define tskDEFAULT_INDEX_TO_NOTIFY ( 0 ) + +/** + * task. h + * + * Type by which tasks are referenced. For example, a call to xTaskCreate + * returns (via a pointer parameter) an TaskHandle_t variable that can then + * be used as a parameter to vTaskDelete to delete the task. 
+ * + * \defgroup TaskHandle_t TaskHandle_t + * \ingroup Tasks + */ +struct tskTaskControlBlock; /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +typedef struct tskTaskControlBlock * TaskHandle_t; + +/* + * Defines the prototype to which the application task hook function must + * conform. + */ +typedef BaseType_t (* TaskHookFunction_t)( void * ); + +/* Task states returned by eTaskGetState. */ +typedef enum +{ + eRunning = 0, /* A task is querying the state of itself, so must be running. */ + eReady, /* The task being queried is in a ready or pending ready list. */ + eBlocked, /* The task being queried is in the Blocked state. */ + eSuspended, /* The task being queried is in the Suspended state, or is in the Blocked state with an infinite time out. */ + eDeleted, /* The task being queried has been deleted, but its TCB has not yet been freed. */ + eInvalid /* Used as an 'invalid state' value. */ +} eTaskState; + +/* Actions that can be performed when vTaskNotify() is called. */ +typedef enum +{ + eNoAction = 0, /* Notify the task without updating its notify value. */ + eSetBits, /* Set bits in the task's notification value. */ + eIncrement, /* Increment the task's notification value. */ + eSetValueWithOverwrite, /* Set the task's notification value to a specific value even if the previous value has not yet been read by the task. */ + eSetValueWithoutOverwrite /* Set the task's notification value if the previous value has been read by the task. */ +} eNotifyAction; + +/* + * Used internally only. + */ +typedef struct xTIME_OUT +{ + BaseType_t xOverflowCount; + TickType_t xTimeOnEntering; +} TimeOut_t; + +/* + * Defines the memory ranges allocated to the task when an MPU is used. + */ +typedef struct xMEMORY_REGION +{ + void * pvBaseAddress; + uint32_t ulLengthInBytes; + uint32_t ulParameters; +} MemoryRegion_t; + +/* + * Parameters required to create an MPU protected task. 
+ */ +typedef struct xTASK_PARAMETERS +{ + TaskFunction_t pvTaskCode; + const char * pcName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + configSTACK_DEPTH_TYPE usStackDepth; + void * pvParameters; + UBaseType_t uxPriority; + StackType_t * puxStackBuffer; + MemoryRegion_t xRegions[ portNUM_CONFIGURABLE_REGIONS ]; + #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + StaticTask_t * const pxTaskBuffer; + #endif +} TaskParameters_t; + +/* Used with the uxTaskGetSystemState() function to return the state of each task + * in the system. */ +typedef struct xTASK_STATUS +{ + TaskHandle_t xHandle; /* The handle of the task to which the rest of the information in the structure relates. */ + const char * pcTaskName; /* A pointer to the task's name. This value will be invalid if the task was deleted since the structure was populated! */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + UBaseType_t xTaskNumber; /* A number unique to the task. */ + eTaskState eCurrentState; /* The state in which the task existed when the structure was populated. */ + UBaseType_t uxCurrentPriority; /* The priority at which the task was running (may be inherited) when the structure was populated. */ + UBaseType_t uxBasePriority; /* The priority to which the task will return if the task's current priority has been inherited to avoid unbounded priority inversion when obtaining a mutex. Only valid if configUSE_MUTEXES is defined as 1 in FreeRTOSConfig.h. */ + uint32_t ulRunTimeCounter; /* The total run time allocated to the task so far, as defined by the run time stats clock. See https://www.FreeRTOS.org/rtos-run-time-stats.html. Only valid when configGENERATE_RUN_TIME_STATS is defined as 1 in FreeRTOSConfig.h. */ + StackType_t * pxStackBase; /* Points to the lowest address of the task's stack area. 
*/ + configSTACK_DEPTH_TYPE usStackHighWaterMark; /* The minimum amount of stack space that has remained for the task since the task was created. The closer this value is to zero the closer the task has come to overflowing its stack. */ +} TaskStatus_t; + +/* Possible return values for eTaskConfirmSleepModeStatus(). */ +typedef enum +{ + eAbortSleep = 0, /* A task has been made ready or a context switch pended since portSUPPRESS_TICKS_AND_SLEEP() was called - abort entering a sleep mode. */ + eStandardSleep, /* Enter a sleep mode that will not last any longer than the expected idle time. */ + eNoTasksWaitingTimeout /* No tasks are waiting for a timeout so it is safe to enter a sleep mode that can only be exited by an external interrupt. */ +} eSleepModeStatus; + +/** + * Defines the priority used by the idle task. This must not be modified. + * + * \ingroup TaskUtils + */ +#define tskIDLE_PRIORITY ( ( UBaseType_t ) 0U ) + +/** + * Defines affinity to all available cores. + * + */ +#define tskNO_AFFINITY ( ( UBaseType_t ) -1U ) + + + +/** + * task. h + * + * Macro for forcing a context switch. + * + * \defgroup taskYIELD taskYIELD + * \ingroup SchedulerControl + */ +#define taskYIELD() portYIELD() + +/** + * task. h + * + * Macro to mark the start of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! + * + * \defgroup taskENTER_CRITICAL taskENTER_CRITICAL + * \ingroup SchedulerControl + */ +#define taskENTER_CRITICAL() portENTER_CRITICAL() +#define taskENTER_CRITICAL_FROM_ISR() portSET_INTERRUPT_MASK_FROM_ISR() + +/** + * task. h + * + * Macro to mark the end of a critical code region. Preemptive context + * switches cannot occur when in a critical region. + * + * NOTE: This may alter the stack (depending on the portable implementation) + * so must be used with care! 
+ * + * \defgroup taskEXIT_CRITICAL taskEXIT_CRITICAL + * \ingroup SchedulerControl + */ +#define taskEXIT_CRITICAL() portEXIT_CRITICAL() +#define taskEXIT_CRITICAL_FROM_ISR( x ) portCLEAR_INTERRUPT_MASK_FROM_ISR( x ) + +/** + * task. h + * + * Macro to disable all maskable interrupts. + * This also returns what the interrupt state was + * upon being called. This state may subsequently + * be passed to taskRESTORE_INTERRUPTS(). + * + * \defgroup taskDISABLE_INTERRUPTS taskDISABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskDISABLE_INTERRUPTS() portDISABLE_INTERRUPTS() + +/** + * task. h + * + * Macro to enable microcontroller interrupts. + * + * \defgroup taskENABLE_INTERRUPTS taskENABLE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskENABLE_INTERRUPTS() portENABLE_INTERRUPTS() + +/** + * task. h + * + * Macro to restore microcontroller interrupts to + * a previous state. + * + * \defgroup taskRESTORE_INTERRUPTS taskRESTORE_INTERRUPTS + * \ingroup SchedulerControl + */ +#define taskRESTORE_INTERRUPTS(ulState) portRESTORE_INTERRUPTS(ulState) + +/** + * task. h + * + * Macro that determines if it is being called from within an ISR + * or a task. Returns non-zero if it is in an ISR. + * + * \defgroup taskCHECK_IF_IN_ISR taskCHECK_IF_IN_ISR + * \ingroup SchedulerControl + */ +#define taskCHECK_IF_IN_ISR() portCHECK_IF_IN_ISR() + +/* Definitions returned by xTaskGetSchedulerState(). taskSCHEDULER_SUSPENDED is + * 0 to generate more optimal code when configASSERT() is defined as the constant + * is used in assert() statements. 
*/ +#define taskSCHEDULER_SUSPENDED ( ( BaseType_t ) 0 ) +#define taskSCHEDULER_NOT_STARTED ( ( BaseType_t ) 1 ) +#define taskSCHEDULER_RUNNING ( ( BaseType_t ) 2 ) + +/* Check if core value is valid */ +#define taskVALID_CORE_ID( xCoreID ) ( ( BaseType_t ) ( ( 0 <= xCoreID ) && ( xCoreID < configNUM_CORES ) ) ) + +/*----------------------------------------------------------- +* TASK CREATION API +*----------------------------------------------------------*/ + +/** + * task. h + *
+ * BaseType_t xTaskCreate(
+ *                            TaskFunction_t pxTaskCode,
+ *                            const char *pcName,
+ *                            configSTACK_DEPTH_TYPE usStackDepth,
+ *                            void *pvParameters,
+ *                            UBaseType_t uxPriority,
+ *                            TaskHandle_t *pxCreatedTask
+ *                        );
+ * 
+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * https://www.FreeRTOS.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * See xTaskCreateStatic() for a version that does not use any dynamic memory + * allocation. + * + * xTaskCreate() can only be used to create a task that has unrestricted + * access to the entire microcontroller memory map. Systems that include MPU + * support can alternatively create an MPU constrained task using + * xTaskCreateRestricted(). + * + * @param pxTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. Max length defined by configMAX_TASK_NAME_LEN - default + * is 16. + * + * @param usStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 16 bits wide and usStackDepth is defined as 100, 200 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task should run. Systems that + * include MPU support can optionally create tasks in a privileged (system) + * mode by setting bit portPRIVILEGE_BIT of the priority parameter. 
For + * example, to create a privileged task at priority 2 the uxPriority parameter + * should be set to ( 2 | portPRIVILEGE_BIT ). + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: + *
+ * // Task to be created.
+ * void vTaskCode( void * pvParameters )
+ * {
+ *   for( ;; )
+ *   {
+ *       // Task code goes here.
+ *   }
+ * }
+ *
+ * // Function that creates a task.
+ * void vOtherFunction( void )
+ * {
+ * static uint8_t ucParameterToPass;
+ * TaskHandle_t xHandle = NULL;
+ *
+ *   // Create the task, storing the handle.  Note that the passed parameter ucParameterToPass
+ *   // must exist for the lifetime of the task, so in this case is declared static.  If it was just an
+ *   // an automatic stack variable it might no longer exist, or at least have been corrupted, by the time
+ *   // the new task attempts to access it.
+ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, &ucParameterToPass, tskIDLE_PRIORITY, &xHandle );
+ *   configASSERT( xHandle );
+ *
+ *   // Use the handle to delete the task.
+ *   if( xHandle != NULL )
+ *   {
+ *      vTaskDelete( xHandle );
+ *   }
+ * }
+ * 
+ * \defgroup xTaskCreate xTaskCreate + * \ingroup Tasks + */ +#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +#if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * const pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ * TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
+ *                               const char *pcName,
+ *                               uint32_t ulStackDepth,
+ *                               void *pvParameters,
+ *                               UBaseType_t uxPriority,
+ *                               StackType_t *puxStackBuffer,
+ *                               StaticTask_t *pxTaskBuffer );
+ * 
+ * + * Create a new task and add it to the list of tasks that are ready to run. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreate() then both blocks of memory are automatically dynamically + * allocated inside the xTaskCreate() function. (see + * https://www.FreeRTOS.org/a00111.html). If a task is created using + * xTaskCreateStatic() then the application writer must provide the required + * memory. xTaskCreateStatic() therefore allows a task to be created without + * using any dynamic memory allocation. + * + * @param pxTaskCode Pointer to the task entry function. Tasks + * must be implemented to never return (i.e. continuous loop). + * + * @param pcName A descriptive name for the task. This is mainly used to + * facilitate debugging. The maximum length of the string is defined by + * configMAX_TASK_NAME_LEN in FreeRTOSConfig.h. + * + * @param ulStackDepth The size of the task stack specified as the number of + * variables the stack can hold - not the number of bytes. For example, if + * the stack is 32-bits wide and ulStackDepth is defined as 100 then 400 bytes + * will be allocated for stack storage. + * + * @param pvParameters Pointer that will be used as the parameter for the task + * being created. + * + * @param uxPriority The priority at which the task will run. + * + * @param puxStackBuffer Must point to a StackType_t array that has at least + * ulStackDepth indexes - the array will then be used as the task's stack, + * removing the need for the stack to be allocated dynamically. + * + * @param pxTaskBuffer Must point to a variable of type StaticTask_t, which will + * then be used to hold the task's data structures, removing the need for the + * memory to be allocated dynamically. 
+ * + * @return If neither puxStackBuffer nor pxTaskBuffer are NULL, then the task + * will be created and a handle to the created task is returned. If either + * puxStackBuffer or pxTaskBuffer are NULL then the task will not be created and + * NULL is returned. + * + * Example usage: + *
+ *
+ *  // Dimensions of the buffer that the task being created will use as its stack.
+ *  // NOTE:  This is the number of words the stack will hold, not the number of
+ *  // bytes.  For example, if each stack item is 32-bits, and this is set to 100,
+ *  // then 400 bytes (100 * 32-bits) will be allocated.
+ *  #define STACK_SIZE 200
+ *
+ *  // Structure that will hold the TCB of the task being created.
+ *  StaticTask_t xTaskBuffer;
+ *
+ *  // Buffer that the task being created will use as its stack.  Note this is
+ *  // an array of StackType_t variables.  The size of StackType_t is dependent on
+ *  // the RTOS port.
+ *  StackType_t xStack[ STACK_SIZE ];
+ *
+ *  // Function that implements the task being created.
+ *  void vTaskCode( void * pvParameters )
+ *  {
+ *      // The parameter value is expected to be 1 as 1 is passed in the
+ *      // pvParameters value in the call to xTaskCreateStatic().
+ *      configASSERT( ( uint32_t ) pvParameters == 1UL );
+ *
+ *      for( ;; )
+ *      {
+ *          // Task code goes here.
+ *      }
+ *  }
+ *
+ *  // Function that creates a task.
+ *  void vOtherFunction( void )
+ *  {
+ *      TaskHandle_t xHandle = NULL;
+ *
+ *      // Create the task without using any dynamic memory allocation.
+ *      xHandle = xTaskCreateStatic(
+ *                    vTaskCode,       // Function that implements the task.
+ *                    "NAME",          // Text name for the task.
+ *                    STACK_SIZE,      // Stack size in words, not bytes.
+ *                    ( void * ) 1,    // Parameter passed into the task.
+ *                    tskIDLE_PRIORITY,// Priority at which the task is created.
+ *                    xStack,          // Array to use as the task's stack.
+ *                    &xTaskBuffer );  // Variable to hold the task's data structure.
+ *
+ *      // puxStackBuffer and pxTaskBuffer were not NULL, so the task will have
+ *      // been created, and xHandle will be the task's handle.  Use the handle
+ *      // to suspend the task.
+ *      vTaskSuspend( xHandle );
+ *  }
+ * 
+ * \defgroup xTaskCreateStatic xTaskCreateStatic + * \ingroup Tasks + */ +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) PRIVILEGED_FUNCTION; +#endif /* configSUPPORT_STATIC_ALLOCATION */ + +#if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer, + UBaseType_t uxCoreAffinityMask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ * BaseType_t xTaskCreateRestricted( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
+ * 
+ * + * Only available when configSUPPORT_DYNAMIC_ALLOCATION is set to 1. + * + * xTaskCreateRestricted() should only be used in systems that include an MPU + * implementation. + * + * Create a new task and add it to the list of tasks that are ready to run. + * The function parameters define the memory regions and associated access + * permissions allocated to the task. + * + * See xTaskCreateRestrictedStatic() for a version that does not use any + * dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: + *
+ * // Create an TaskParameters_t structure that defines the task to be created.
+ * static const TaskParameters_t xCheckTaskParameters =
+ * {
+ *  vATask,     // pvTaskCode - the function that implements the task.
+ *  "ATask",    // pcName - just a text name for the task to assist debugging.
+ *  100,        // usStackDepth - the stack size DEFINED IN WORDS.
+ *  NULL,       // pvParameters - passed into the task function as the function parameters.
+ *  ( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state.
+ *  cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack.
+ *
+ *  // xRegions - Allocate up to three separate memory regions for access by
+ *  // the task, with appropriate access permissions.  Different processors have
+ *  // different memory alignment requirements - refer to the FreeRTOS documentation
+ *  // for full information.
+ *  {
+ *      // Base address                 Length  Parameters
+ *      { cReadWriteArray,              32,     portMPU_REGION_READ_WRITE },
+ *      { cReadOnlyArray,               32,     portMPU_REGION_READ_ONLY },
+ *      { cPrivilegedOnlyAccessArray,   128,    portMPU_REGION_PRIVILEGED_READ_WRITE }
+ *  }
+ * };
+ *
+ * int main( void )
+ * {
+ * TaskHandle_t xHandle;
+ *
+ *  // Create a task from the const structure defined above.  The task handle
+ *  // is requested (the second parameter is not NULL) but in this case just for
+ *  // demonstration purposes as it's not actually used.
+ *  xTaskCreateRestricted( &xRegTest1Parameters, &xHandle );
+ *
+ *  // Start the scheduler.
+ *  vTaskStartScheduler();
+ *
+ *  // Will only get here if there was insufficient memory to create the idle
+ *  // and/or timer task.
+ *  for( ;; );
+ * }
+ * 
+ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +#if ( portUSING_MPU_WRAPPERS == 1 ) + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ * BaseType_t xTaskCreateRestrictedStatic( TaskParameters_t *pxTaskDefinition, TaskHandle_t *pxCreatedTask );
+ * 
+ * + * Only available when configSUPPORT_STATIC_ALLOCATION is set to 1. + * + * xTaskCreateRestrictedStatic() should only be used in systems that include an + * MPU implementation. + * + * Internally, within the FreeRTOS implementation, tasks use two blocks of + * memory. The first block is used to hold the task's data structures. The + * second block is used by the task as its stack. If a task is created using + * xTaskCreateRestricted() then the stack is provided by the application writer, + * and the memory used to hold the task's data structure is automatically + * dynamically allocated inside the xTaskCreateRestricted() function. If a task + * is created using xTaskCreateRestrictedStatic() then the application writer + * must provide the memory used to hold the task's data structures too. + * xTaskCreateRestrictedStatic() therefore allows a memory protected task to be + * created without using any dynamic memory allocation. + * + * @param pxTaskDefinition Pointer to a structure that contains a member + * for each of the normal xTaskCreate() parameters (see the xTaskCreate() API + * documentation) plus an optional stack buffer and the memory region + * definitions. If configSUPPORT_STATIC_ALLOCATION is set to 1 the structure + * contains an additional member, which is used to point to a variable of type + * StaticTask_t - which is then used to hold the task's data structure. + * + * @param pxCreatedTask Used to pass back a handle by which the created task + * can be referenced. + * + * @return pdPASS if the task was successfully created and added to a ready + * list, otherwise an error code defined in the file projdefs.h + * + * Example usage: + *
+ * // Create a TaskParameters_t structure that defines the task to be created.
+ * // The StaticTask_t variable is only included in the structure when
+ * // configSUPPORT_STATIC_ALLOCATION is set to 1.  The PRIVILEGED_DATA macro can
+ * // be used to force the variable into the RTOS kernel's privileged data area.
+ * static PRIVILEGED_DATA StaticTask_t xTaskBuffer;
+ * static const TaskParameters_t xCheckTaskParameters =
+ * {
+ *  vATask,     // pvTaskCode - the function that implements the task.
+ *  "ATask",    // pcName - just a text name for the task to assist debugging.
+ *  100,        // usStackDepth - the stack size DEFINED IN WORDS.
+ *  NULL,       // pvParameters - passed into the task function as the function parameters.
+ *  ( 1UL | portPRIVILEGE_BIT ),// uxPriority - task priority, set the portPRIVILEGE_BIT if the task should run in a privileged state.
+ *  cStackBuffer,// puxStackBuffer - the buffer to be used as the task stack.
+ *
+ *  // xRegions - Allocate up to three separate memory regions for access by
+ *  // the task, with appropriate access permissions.  Different processors have
+ *  // different memory alignment requirements - refer to the FreeRTOS documentation
+ *  // for full information.
+ *  {
+ *      // Base address                 Length  Parameters
+ *      { cReadWriteArray,              32,     portMPU_REGION_READ_WRITE },
+ *      { cReadOnlyArray,               32,     portMPU_REGION_READ_ONLY },
+ *      { cPrivilegedOnlyAccessArray,   128,    portMPU_REGION_PRIVILEGED_READ_WRITE }
+ *  }
+ *
+ *  &xTaskBuffer; // Holds the task's data structure.
+ * };
+ *
+ * int main( void )
+ * {
+ * TaskHandle_t xHandle;
+ *
+ *  // Create a task from the const structure defined above.  The task handle
+ *  // is requested (the second parameter is not NULL) but in this case just for
+ *  // demonstration purposes as it's not actually used.
+ *  xTaskCreateRestricted( &xRegTest1Parameters, &xHandle );
+ *
+ *  // Start the scheduler.
+ *  vTaskStartScheduler();
+ *
+ *  // Will only get here if there was insufficient memory to create the idle
+ *  // and/or timer task.
+ *  for( ;; );
+ * }
+ * 
+ * \defgroup xTaskCreateRestrictedStatic xTaskCreateRestrictedStatic + * \ingroup Tasks + */ +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) PRIVILEGED_FUNCTION; +#endif + +/** + * task. h + *
+ * void vTaskAllocateMPURegions( TaskHandle_t xTask, const MemoryRegion_t * const pxRegions );
+ * 
+ * + * Memory regions are assigned to a restricted task when the task is created by + * a call to xTaskCreateRestricted(). These regions can be redefined using + * vTaskAllocateMPURegions(). + * + * @param xTask The handle of the task being updated. + * + * @param xRegions A pointer to a MemoryRegion_t structure that contains the + * new memory region definitions. + * + * Example usage: + *
+ * // Define an array of MemoryRegion_t structures that configures an MPU region
+ * // allowing read/write access for 1024 bytes starting at the beginning of the
+ * // ucOneKByte array.  The other two of the maximum 3 definable regions are
+ * // unused so set to zero.
+ * static const MemoryRegion_t xAltRegions[ portNUM_CONFIGURABLE_REGIONS ] =
+ * {
+ *  // Base address     Length      Parameters
+ *  { ucOneKByte,       1024,       portMPU_REGION_READ_WRITE },
+ *  { 0,                0,          0 },
+ *  { 0,                0,          0 }
+ * };
+ *
+ * void vATask( void *pvParameters )
+ * {
+ *  // This task was created such that it has access to certain regions of
+ *  // memory as defined by the MPU configuration.  At some point it is
+ *  // desired that these MPU regions are replaced with that defined in the
+ *  // xAltRegions const struct above.  Use a call to vTaskAllocateMPURegions()
+ *  // for this purpose.  NULL is used as the task handle to indicate that this
+ *  // function should modify the MPU regions of the calling task.
+ *  vTaskAllocateMPURegions( NULL, xAltRegions );
+ *
+ *  // Now the task can continue its function, but from this point on can only
+ *  // access its stack and the ucOneKByte array (unless any other statically
+ *  // defined or shared regions have been declared elsewhere).
+ * }
+ * 
+ * \defgroup xTaskCreateRestricted xTaskCreateRestricted + * \ingroup Tasks + */ +void vTaskAllocateMPURegions( TaskHandle_t xTask, + const MemoryRegion_t * const pxRegions ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * void vTaskDelete( TaskHandle_t xTaskToDelete );
+ * 
+ * + * INCLUDE_vTaskDelete must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Remove a task from the RTOS real time kernel's management. The task being + * deleted will be removed from all ready, blocked, suspended and event lists. + * + * NOTE: The idle task is responsible for freeing the kernel allocated + * memory from tasks that have been deleted. It is therefore important that + * the idle task is not starved of microcontroller processing time if your + * application makes any calls to vTaskDelete (). Memory allocated by the + * task code is not automatically freed, and should be freed before the task + * is deleted. + * + * See the demo application file death.c for sample code that utilises + * vTaskDelete (). + * + * @param xTaskToDelete The handle of the task to be deleted. Passing NULL will + * cause the calling task to be deleted. + * + * Example usage: + *
+ * void vOtherFunction( void )
+ * {
+ * TaskHandle_t xHandle;
+ *
+ *   // Create the task, storing the handle.
+ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+ *
+ *   // Use the handle to delete the task.
+ *   vTaskDelete( xHandle );
+ * }
+ * 
+ * \defgroup vTaskDelete vTaskDelete + * \ingroup Tasks + */ +void vTaskDelete( TaskHandle_t xTaskToDelete ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- +* TASK CONTROL API +*----------------------------------------------------------*/ + +/** + * task. h + *
+ * void vTaskDelay( const TickType_t xTicksToDelay );
+ * 
+ * + * Delay a task for a given number of ticks. The actual time that the + * task remains blocked depends on the tick rate. The constant + * portTICK_PERIOD_MS can be used to calculate real time from the tick + * rate - with the resolution of one tick period. + * + * INCLUDE_vTaskDelay must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * + * vTaskDelay() specifies a time at which the task wishes to unblock relative to + * the time at which vTaskDelay() is called. For example, specifying a block + * period of 100 ticks will cause the task to unblock 100 ticks after + * vTaskDelay() is called. vTaskDelay() does not therefore provide a good method + * of controlling the frequency of a periodic task as the path taken through the + * code, as well as other task and interrupt activity, will effect the frequency + * at which vTaskDelay() gets called and therefore the time at which the task + * next executes. See xTaskDelayUntil() for an alternative API function designed + * to facilitate fixed frequency execution. It does this by specifying an + * absolute time (rather than a relative time) at which the calling task should + * unblock. + * + * @param xTicksToDelay The amount of time, in tick periods, that + * the calling task should block. + * + * Example usage: + * + * void vTaskFunction( void * pvParameters ) + * { + * // Block for 500ms. + * const TickType_t xDelay = 500 / portTICK_PERIOD_MS; + * + * for( ;; ) + * { + * // Simply toggle the LED every 500ms, blocking between each toggle. + * vToggleLED(); + * vTaskDelay( xDelay ); + * } + * } + * + * \defgroup vTaskDelay vTaskDelay + * \ingroup TaskCtrl + */ +void vTaskDelay( const TickType_t xTicksToDelay ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * BaseType_t xTaskDelayUntil( TickType_t *pxPreviousWakeTime, const TickType_t xTimeIncrement );
+ * 
+ * + * INCLUDE_xTaskDelayUntil must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Delay a task until a specified time. This function can be used by periodic + * tasks to ensure a constant execution frequency. + * + * This function differs from vTaskDelay () in one important aspect: vTaskDelay () will + * cause a task to block for the specified number of ticks from the time vTaskDelay () is + * called. It is therefore difficult to use vTaskDelay () by itself to generate a fixed + * execution frequency as the time between a task starting to execute and that task + * calling vTaskDelay () may not be fixed [the task may take a different path through the + * code between calls, or may get interrupted or preempted a different number of times + * each time it executes]. + * + * Whereas vTaskDelay () specifies a wake time relative to the time at which the function + * is called, xTaskDelayUntil () specifies the absolute (exact) time at which it wishes to + * unblock. + * + * The macro pdMS_TO_TICKS() can be used to calculate the number of ticks from a + * time specified in milliseconds with a resolution of one tick period. + * + * @param pxPreviousWakeTime Pointer to a variable that holds the time at which the + * task was last unblocked. The variable must be initialised with the current time + * prior to its first use (see the example below). Following this the variable is + * automatically updated within xTaskDelayUntil (). + * + * @param xTimeIncrement The cycle time period. The task will be unblocked at + * time *pxPreviousWakeTime + xTimeIncrement. Calling xTaskDelayUntil with the + * same xTimeIncrement parameter value will cause the task to execute with + * a fixed interval period. + * + * @return Value which can be used to check whether the task was actually delayed. + * Will be pdTRUE if the task was delayed and pdFALSE otherwise. 
A task will not + * be delayed if the next expected wake time is in the past. + * + * Example usage: + *
+ * // Perform an action every 10 ticks.
+ * void vTaskFunction( void * pvParameters )
+ * {
+ * TickType_t xLastWakeTime;
+ * const TickType_t xFrequency = 10;
+ * BaseType_t xWasDelayed;
+ *
+ *     // Initialise the xLastWakeTime variable with the current time.
+ *     xLastWakeTime = xTaskGetTickCount ();
+ *     for( ;; )
+ *     {
+ *         // Wait for the next cycle.
+ *         xWasDelayed = xTaskDelayUntil( &xLastWakeTime, xFrequency );
+ *
+ *         // Perform action here. xWasDelayed value can be used to determine
+ *         // whether a deadline was missed if the code here took too long.
+ *     }
+ * }
+ * 
+ * \defgroup xTaskDelayUntil xTaskDelayUntil + * \ingroup TaskCtrl + */ +BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) PRIVILEGED_FUNCTION; + +/* + * vTaskDelayUntil() is the older version of xTaskDelayUntil() and does not + * return a value. + */ +#define vTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ) \ +{ \ + ( void ) xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement ); \ +} + + +/** + * task. h + *
+ * BaseType_t xTaskAbortDelay( TaskHandle_t xTask );
+ * 
+ * + * INCLUDE_xTaskAbortDelay must be defined as 1 in FreeRTOSConfig.h for this + * function to be available. + * + * A task will enter the Blocked state when it is waiting for an event. The + * event it is waiting for can be a temporal event (waiting for a time), such + * as when vTaskDelay() is called, or an event on an object, such as when + * xQueueReceive() or ulTaskNotifyTake() is called. If the handle of a task + * that is in the Blocked state is used in a call to xTaskAbortDelay() then the + * task will leave the Blocked state, and return from whichever function call + * placed the task into the Blocked state. + * + * There is no 'FromISR' version of this function as an interrupt would need to + * know which object a task was blocked on in order to know which actions to + * take. For example, if the task was blocked on a queue the interrupt handler + * would then need to know if the queue was locked. + * + * @param xTask The handle of the task to remove from the Blocked state. + * + * @return If the task referenced by xTask was not in the Blocked state then + * pdFAIL is returned. Otherwise pdPASS is returned. + * + * \defgroup xTaskAbortDelay xTaskAbortDelay + * \ingroup TaskCtrl + */ +BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask );
+ * 
+ * + * INCLUDE_uxTaskPriorityGet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the priority of any task. + * + * @param xTask Handle of the task to be queried. Passing a NULL + * handle results in the priority of the calling task being returned. + * + * @return The priority of xTask. + * + * Example usage: + *
+ * void vAFunction( void )
+ * {
+ * TaskHandle_t xHandle;
+ *
+ *   // Create a task, storing the handle.
+ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+ *
+ *   // ...
+ *
+ *   // Use the handle to obtain the priority of the created task.
+ *   // It was created with tskIDLE_PRIORITY, but may have changed
+ *   // it itself.
+ *   if( uxTaskPriorityGet( xHandle ) != tskIDLE_PRIORITY )
+ *   {
+ *       // The task has changed its priority.
+ *   }
+ *
+ *   // ...
+ *
+ *   // Is our priority higher than the created task?
+ *   if( uxTaskPriorityGet( xHandle ) < uxTaskPriorityGet( NULL ) )
+ *   {
+ *       // Our priority (obtained using NULL handle) is higher.
+ *   }
+ * }
+ * 
+ * \defgroup uxTaskPriorityGet uxTaskPriorityGet + * \ingroup TaskCtrl + */ +UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask );
+ * 
+ * + * A version of uxTaskPriorityGet() that can be used from an ISR. + */ +UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * eTaskState eTaskGetState( TaskHandle_t xTask );
+ * 
+ * + * INCLUDE_eTaskGetState must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Obtain the state of any task. States are encoded by the eTaskState + * enumerated type. + * + * @param xTask Handle of the task to be queried. + * + * @return The state of xTask at the time the function was called. Note the + * state of the task might change between the function being called, and the + * functions return value being tested by the calling task. + */ +eTaskState eTaskGetState( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState );
+ * 
+ * + * configUSE_TRACE_FACILITY must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * Populates a TaskStatus_t structure with information about a task. + * + * @param xTask Handle of the task being queried. If xTask is NULL then + * information will be returned about the calling task. + * + * @param pxTaskStatus A pointer to the TaskStatus_t structure that will be + * filled with information about the task referenced by the handle passed using + * the xTask parameter. + * + * @xGetFreeStackSpace The TaskStatus_t structure contains a member to report + * the stack high water mark of the task being queried. Calculating the stack + * high water mark takes a relatively long time, and can make the system + * temporarily unresponsive - so the xGetFreeStackSpace parameter is provided to + * allow the high water mark checking to be skipped. The high watermark value + * will only be written to the TaskStatus_t structure if xGetFreeStackSpace is + * not set to pdFALSE; + * + * @param eState The TaskStatus_t structure contains a member to report the + * state of the task being queried. Obtaining the task state is not as fast as + * a simple assignment - so the eState parameter is provided to allow the state + * information to be omitted from the TaskStatus_t structure. To obtain state + * information then set eState to eInvalid - otherwise the value passed in + * eState will be reported as the task state in the TaskStatus_t structure. + * + * Example usage: + *
+ * void vAFunction( void )
+ * {
+ * TaskHandle_t xHandle;
+ * TaskStatus_t xTaskDetails;
+ *
+ *  // Obtain the handle of a task from its name.
+ *  xHandle = xTaskGetHandle( "Task_Name" );
+ *
+ *  // Check the handle is not NULL.
+ *  configASSERT( xHandle );
+ *
+ *  // Use the handle to obtain further information about the task.
+ *  vTaskGetInfo( xHandle,
+ *                &xTaskDetails,
+ *                pdTRUE, // Include the high water mark in xTaskDetails.
+ *                eInvalid ); // Include the task state in xTaskDetails.
+ * }
+ * 
+ * \defgroup vTaskGetInfo vTaskGetInfo + * \ingroup TaskCtrl + */ +void vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority );
+ * 
+ * + * INCLUDE_vTaskPrioritySet must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Set the priority of any task. + * + * A context switch will occur before the function returns if the priority + * being set is higher than the currently executing task. + * + * @param xTask Handle to the task for which the priority is being set. + * Passing a NULL handle results in the priority of the calling task being set. + * + * @param uxNewPriority The priority to which the task will be set. + * + * Example usage: + *
+ * void vAFunction( void )
+ * {
+ * TaskHandle_t xHandle;
+ *
+ *   // Create a task, storing the handle.
+ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+ *
+ *   // ...
+ *
+ *   // Use the handle to raise the priority of the created task.
+ *   vTaskPrioritySet( xHandle, tskIDLE_PRIORITY + 1 );
+ *
+ *   // ...
+ *
+ *   // Use a NULL handle to raise our priority to the same value.
+ *   vTaskPrioritySet( NULL, tskIDLE_PRIORITY + 1 );
+ * }
+ * 
+ * \defgroup vTaskPrioritySet vTaskPrioritySet + * \ingroup TaskCtrl + */ +void vTaskPrioritySet( TaskHandle_t xTask, + UBaseType_t uxNewPriority ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * void vTaskSuspend( TaskHandle_t xTaskToSuspend );
+ * 
+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Suspend any task. When suspended a task will never get any microcontroller + * processing time, no matter what its priority. + * + * Calls to vTaskSuspend are not accumulative - + * i.e. calling vTaskSuspend () twice on the same task still only requires one + * call to vTaskResume () to ready the suspended task. + * + * @param xTaskToSuspend Handle to the task being suspended. Passing a NULL + * handle will cause the calling task to be suspended. + * + * Example usage: + *
+ * void vAFunction( void )
+ * {
+ * TaskHandle_t xHandle;
+ *
+ *   // Create a task, storing the handle.
+ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+ *
+ *   // ...
+ *
+ *   // Use the handle to suspend the created task.
+ *   vTaskSuspend( xHandle );
+ *
+ *   // ...
+ *
+ *   // The created task will not run during this period, unless
+ *   // another task calls vTaskResume( xHandle ).
+ *
+ *   //...
+ *
+ *
+ *   // Suspend ourselves.
+ *   vTaskSuspend( NULL );
+ *
+ *   // We cannot get here unless another task calls vTaskResume
+ *   // with our handle as the parameter.
+ * }
+ * 
+ * \defgroup vTaskSuspend vTaskSuspend + * \ingroup TaskCtrl + */ +void vTaskSuspend( TaskHandle_t xTaskToSuspend ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * void vTaskResume( TaskHandle_t xTaskToResume );
+ * 
+ * + * INCLUDE_vTaskSuspend must be defined as 1 for this function to be available. + * See the configuration section for more information. + * + * Resumes a suspended task. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * vTaskResume (). + * + * @param xTaskToResume Handle to the task being readied. + * + * Example usage: + *
+ * void vAFunction( void )
+ * {
+ * TaskHandle_t xHandle;
+ *
+ *   // Create a task, storing the handle.
+ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &xHandle );
+ *
+ *   // ...
+ *
+ *   // Use the handle to suspend the created task.
+ *   vTaskSuspend( xHandle );
+ *
+ *   // ...
+ *
+ *   // The created task will not run during this period, unless
+ *   // another task calls vTaskResume( xHandle ).
+ *
+ *   //...
+ *
+ *
+ *   // Resume the suspended task ourselves.
+ *   vTaskResume( xHandle );
+ *
+ *   // The created task will once again get microcontroller processing
+ *   // time in accordance with its priority within the system.
+ * }
+ * 
+ * \defgroup vTaskResume vTaskResume + * \ingroup TaskCtrl + */ +void vTaskResume( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume );
+ * 
+ * + * INCLUDE_xTaskResumeFromISR must be defined as 1 for this function to be + * available. See the configuration section for more information. + * + * An implementation of vTaskResume() that can be called from within an ISR. + * + * A task that has been suspended by one or more calls to vTaskSuspend () + * will be made available for running again by a single call to + * xTaskResumeFromISR (). + * + * xTaskResumeFromISR() should not be used to synchronise a task with an + * interrupt if there is a chance that the interrupt could arrive prior to the + * task being suspended - as this can lead to interrupts being missed. Use of a + * semaphore as a synchronisation mechanism would avoid this eventuality. + * + * @param xTaskToResume Handle to the task being readied. + * + * @return pdTRUE if resuming the task should result in a context switch, + * otherwise pdFALSE. This is used by the ISR to determine if a context switch + * may be required following the ISR. + * + * \defgroup vTaskResumeFromISR vTaskResumeFromISR + * \ingroup TaskCtrl + */ +BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) PRIVILEGED_FUNCTION; + +#if ( configUSE_CORE_AFFINITY == 1) + /** + * @brief Sets the core affinity mask for a task. + * + * It sets the cores on which a task can run. configUSE_CORE_AFFINITY must + * be defined as 1 for this function to be available. + * + * @param xTask The handle of the task to set the core affinity mask for. + * Passing NULL will set the core affinity mask for the calling task. + * + * @param uxCoreAffinityMask A bitwise value that indicates the cores on + * which the task can run. Cores are numbered from 0 to configNUM_CORES - 1. + * For example, to ensure that a task can run on core 0 and core 1, set + * uxCoreAffinityMask to 0x03. + * + * Example usage: + * + * // The function that creates task. + * void vAFunction( void ) + * { + * TaskHandle_t xHandle; + * UBaseType_t uxCoreAffinityMask; + * + * // Create a task, storing the handle. 
+ * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); + * + * // Define the core affinity mask such that this task can only run + * // on core 0 and core 2. + * uxCoreAffinityMask = ( ( 1 << 0 ) | ( 1 << 2 ) ); + * + * //Set the core affinity mask for the task. + * vTaskCoreAffinitySet( xHandle, uxCoreAffinityMask ); + * } + */ + void vTaskCoreAffinitySet( const TaskHandle_t xTask, UBaseType_t uxCoreAffinityMask ); +#endif + +#if ( configUSE_CORE_AFFINITY == 1) + /** + * @brief Gets the core affinity mask for a task. + * + * configUSE_CORE_AFFINITY must be defined as 1 for this function to be + * available. + * + * @param xTask The handle of the task to get the core affinity mask for. + * Passing NULL will get the core affinity mask for the calling task. + * + * @return The core affinity mask which is a bitwise value that indicates + * the cores on which a task can run. Cores are numbered from 0 to + * configNUM_CORES - 1. For example, if a task can run on core 0 and core 1, + * the core affinity mask is 0x03. + * + * Example usage: + * + * // Task handle of the networking task - it is populated elsewhere. + * TaskHandle_t xNetworkingTaskHandle; + * + * void vAFunction( void ) + * { + * TaskHandle_t xHandle; + * UBaseType_t uxNetworkingCoreAffinityMask; + * + * // Create a task, storing the handle. + * xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, &( xHandle ) ); + * + * //Get the core affinity mask for the networking task. + * uxNetworkingCoreAffinityMask = vTaskCoreAffinityGet( xNetworkingTaskHandle ); + * + * // Here is a hypothetical scenario, just for the example. Assume that we + * // have 2 cores - Core 0 and core 1. We want to pin the application task to + * // the core different than the networking task to ensure that the + * // application task does not interfere with networking. 
+ * if( ( uxNetworkingCoreAffinityMask & ( 1 << 0 ) ) != 0 ) + * { + * // The networking task can run on core 0, pin our task to core 1. + * vTaskCoreAffinitySet( xHandle, ( 1 << 1 ) ); + * } + * else + * { + * // Otherwise, pin our task to core 0. + * vTaskCoreAffinitySet( xHandle, ( 1 << 0 ) ); + * } + * } + */ + UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ); +#endif + +/** + * @brief Disables preemption for a task. + * + * @param xTask The handle of the task to disable preemption. Passing NULL + * disables preemption for the calling task. + * + * Example usage: + * + * void vTaskCode( void *pvParameters ) + * { + * // Silence warnings about unused parameters. + * ( void ) pvParameters; + * + * for( ;; ) + * { + * // ... Perform some function here. + * + * // Disable preemption for this task. + * vTaskPreemptionDisable( NULL ); + * + * // The task will not be preempted when it is executing in this portion ... + * + * // ... until the preemption is enabled again. + * vTaskPreemptionEnable( NULL ); + * + * // The task can be preempted when it is executing in this portion. + * } + * } + */ +void vTaskPreemptionDisable( const TaskHandle_t xTask ); + +/** + * @brief Enables preemption for a task. + * + * @param xTask The handle of the task to enable preemption. Passing NULL + * enables preemption for the calling task. + * + * Example usage: + * + * void vTaskCode( void *pvParameters ) + * { + * // Silence warnings about unused parameters. + * ( void ) pvParameters; + * + * for( ;; ) + * { + * // ... Perform some function here. + * + * // Disable preemption for this task. + * vTaskPreemptionDisable( NULL ); + * + * // The task will not be preempted when it is executing in this portion ... + * + * // ... until the preemption is enabled again. + * vTaskPreemptionEnable( NULL ); + * + * // The task can be preempted when it is executing in this portion. 
+ * } + * } + */ +void vTaskPreemptionEnable( const TaskHandle_t xTask ); + +/*----------------------------------------------------------- +* SCHEDULER CONTROL +*----------------------------------------------------------*/ + +/** + * task. h + *
+ * void vTaskStartScheduler( void );
+ * 
+ * + * Starts the real time kernel tick processing. After calling the kernel + * has control over which tasks are executed and when. + * + * See the demo application file main.c for an example of creating + * tasks and starting the kernel. + * + * Example usage: + *
+ * void vAFunction( void )
+ * {
+ *   // Create at least one task before starting the kernel.
+ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
+ *
+ *   // Start the real time kernel with preemption.
+ *   vTaskStartScheduler ();
+ *
+ *   // Will not get here unless a task calls vTaskEndScheduler ()
+ * }
+ * 
+ * + * \defgroup vTaskStartScheduler vTaskStartScheduler + * \ingroup SchedulerControl + */ +void vTaskStartScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * void vTaskEndScheduler( void );
+ * 
+ * + * NOTE: At the time of writing only the x86 real mode port, which runs on a PC + * in place of DOS, implements this function. + * + * Stops the real time kernel tick. All created tasks will be automatically + * deleted and multitasking (either preemptive or cooperative) will + * stop. Execution then resumes from the point where vTaskStartScheduler () + * was called, as if vTaskStartScheduler () had just returned. + * + * See the demo application file main. c in the demo/PC directory for an + * example that uses vTaskEndScheduler (). + * + * vTaskEndScheduler () requires an exit function to be defined within the + * portable layer (see vPortEndScheduler () in port. c for the PC port). This + * performs hardware specific operations such as stopping the kernel tick. + * + * vTaskEndScheduler () will cause all of the resources allocated by the + * kernel to be freed - but will not free resources allocated by application + * tasks. + * + * Example usage: + *
+ * void vTaskCode( void * pvParameters )
+ * {
+ *   for( ;; )
+ *   {
+ *       // Task code goes here.
+ *
+ *       // At some point we want to end the real time kernel processing
+ *       // so call ...
+ *       vTaskEndScheduler ();
+ *   }
+ * }
+ *
+ * void vAFunction( void )
+ * {
+ *   // Create at least one task before starting the kernel.
+ *   xTaskCreate( vTaskCode, "NAME", STACK_SIZE, NULL, tskIDLE_PRIORITY, NULL );
+ *
+ *   // Start the real time kernel with preemption.
+ *   vTaskStartScheduler ();
+ *
+ *   // Will only get here when the vTaskCode () task has called
+ *   // vTaskEndScheduler ().  When we get here we are back to single task
+ *   // execution.
+ * }
+ * 
+ * + * \defgroup vTaskEndScheduler vTaskEndScheduler + * \ingroup SchedulerControl + */ +void vTaskEndScheduler( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * void vTaskSuspendAll( void );
+ * 
+ * + * Suspends the scheduler without disabling interrupts. Context switches will + * not occur while the scheduler is suspended. + * + * After calling vTaskSuspendAll () the calling task will continue to execute + * without risk of being swapped out until a call to xTaskResumeAll () has been + * made. + * + * API functions that have the potential to cause a context switch (for example, + * xTaskDelayUntil(), xQueueSend(), etc.) must not be called while the scheduler + * is suspended. + * + * Example usage: + *
+ * void vTask1( void * pvParameters )
+ * {
+ *   for( ;; )
+ *   {
+ *       // Task code goes here.
+ *
+ *       // ...
+ *
+ *       // At some point the task wants to perform a long operation during
+ *       // which it does not want to get swapped out.  It cannot use
+ *       // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
+ *       // operation may cause interrupts to be missed - including the
+ *       // ticks.
+ *
+ *       // Prevent the real time kernel swapping out the task.
+ *       vTaskSuspendAll ();
+ *
+ *       // Perform the operation here.  There is no need to use critical
+ *       // sections as we have all the microcontroller processing time.
+ *       // During this time interrupts will still operate and the kernel
+ *       // tick count will be maintained.
+ *
+ *       // ...
+ *
+ *       // The operation is complete.  Restart the kernel.
+ *       xTaskResumeAll ();
+ *   }
+ * }
+ * 
+ * \defgroup vTaskSuspendAll vTaskSuspendAll + * \ingroup SchedulerControl + */ +void vTaskSuspendAll( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
+ * BaseType_t xTaskResumeAll( void );
+ * 
+ * + * Resumes scheduler activity after it was suspended by a call to + * vTaskSuspendAll(). + * + * xTaskResumeAll() only resumes the scheduler. It does not unsuspend tasks + * that were previously suspended by a call to vTaskSuspend(). + * + * @return If resuming the scheduler caused a context switch then pdTRUE is + * returned, otherwise pdFALSE is returned. + * + * Example usage: + *
+ * void vTask1( void * pvParameters )
+ * {
+ *   for( ;; )
+ *   {
+ *       // Task code goes here.
+ *
+ *       // ...
+ *
+ *       // At some point the task wants to perform a long operation during
+ *       // which it does not want to get swapped out.  It cannot use
+ *       // taskENTER_CRITICAL ()/taskEXIT_CRITICAL () as the length of the
+ *       // operation may cause interrupts to be missed - including the
+ *       // ticks.
+ *
+ *       // Prevent the real time kernel swapping out the task.
+ *       vTaskSuspendAll ();
+ *
+ *       // Perform the operation here.  There is no need to use critical
+ *       // sections as we have all the microcontroller processing time.
+ *       // During this time interrupts will still operate and the real
+ *       // time kernel tick count will be maintained.
+ *
+ *       // ...
+ *
+ *       // The operation is complete.  Restart the kernel.  We want to force
+ *       // a context switch - but there is no point if resuming the scheduler
+ *       // caused a context switch already.
+ *       if( !xTaskResumeAll () )
+ *       {
+ *            taskYIELD ();
+ *       }
+ *   }
+ * }
+ * 
+ * \defgroup xTaskResumeAll xTaskResumeAll + * \ingroup SchedulerControl + */ +BaseType_t xTaskResumeAll( void ) PRIVILEGED_FUNCTION; + +/*----------------------------------------------------------- +* TASK UTILITIES +*----------------------------------------------------------*/ + +/** + * task. h + *
TickType_t xTaskGetTickCount( void );
+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * \defgroup xTaskGetTickCount xTaskGetTickCount + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCount( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
TickType_t xTaskGetTickCountFromISR( void );
+ * + * @return The count of ticks since vTaskStartScheduler was called. + * + * This is a version of xTaskGetTickCount() that is safe to be called from an + * ISR - provided that TickType_t is the natural word size of the + * microcontroller being used or interrupt nesting is either not supported or + * not being used. + * + * \defgroup xTaskGetTickCountFromISR xTaskGetTickCountFromISR + * \ingroup TaskUtils + */ +TickType_t xTaskGetTickCountFromISR( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
UBaseType_t uxTaskGetNumberOfTasks( void );
+ * + * @return The number of tasks that the real time kernel is currently managing. + * This includes all ready, blocked and suspended tasks. A task that + * has been deleted but not yet freed by the idle task will also be + * included in the count. + * + * \defgroup uxTaskGetNumberOfTasks uxTaskGetNumberOfTasks + * \ingroup TaskUtils + */ +UBaseType_t uxTaskGetNumberOfTasks( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
char *pcTaskGetName( TaskHandle_t xTaskToQuery );
+ * + * @return The text (human readable) name of the task referenced by the handle + * xTaskToQuery. A task can query its own name by either passing in its own + * handle, or by setting xTaskToQuery to NULL. + * + * \defgroup pcTaskGetName pcTaskGetName + * \ingroup TaskUtils + */ +char * pcTaskGetName( TaskHandle_t xTaskToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *
TaskHandle_t xTaskGetHandle( const char *pcNameToQuery );
+ * + * NOTE: This function takes a relatively long time to complete and should be + * used sparingly. + * + * @return The handle of the task that has the human readable name pcNameToQuery. + * NULL is returned if no matching name is found. INCLUDE_xTaskGetHandle + * must be set to 1 in FreeRTOSConfig.h for pcTaskGetHandle() to be available. + * + * \defgroup pcTaskGetHandle pcTaskGetHandle + * \ingroup TaskUtils + */ +TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task.h + *
UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask );
+ * + * INCLUDE_uxTaskGetStackHighWaterMark must be set to 1 in FreeRTOSConfig.h for + * this function to be available. + * + * Returns the high water mark of the stack associated with xTask. That is, + * the minimum free stack space there has been (in words, so on a 32 bit machine + * a value of 1 means 4 bytes) since the task started. The smaller the returned + * number the closer the task has come to overflowing its stack. + * + * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + * same except for their return type. Using configSTACK_DEPTH_TYPE allows the + * user to determine the return type. It gets around the problem of the value + * overflowing on 8-bit types without breaking backward compatibility for + * applications that expect an 8-bit return type. + * + * @param xTask Handle of the task associated with the stack to be checked. + * Set xTask to NULL to check the stack of the calling task. + * + * @return The smallest amount of free stack space there has been (in words, so + * actual spaces on the stack rather than bytes) since the task referenced by + * xTask was created. + */ +UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task.h + *
configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask );
+ * + * INCLUDE_uxTaskGetStackHighWaterMark2 must be set to 1 in FreeRTOSConfig.h for + * this function to be available. + * + * Returns the high water mark of the stack associated with xTask. That is, + * the minimum free stack space there has been (in words, so on a 32 bit machine + * a value of 1 means 4 bytes) since the task started. The smaller the returned + * number the closer the task has come to overflowing its stack. + * + * uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + * same except for their return type. Using configSTACK_DEPTH_TYPE allows the + * user to determine the return type. It gets around the problem of the value + * overflowing on 8-bit types without breaking backward compatibility for + * applications that expect an 8-bit return type. + * + * @param xTask Handle of the task associated with the stack to be checked. + * Set xTask to NULL to check the stack of the calling task. + * + * @return The smallest amount of free stack space there has been (in words, so + * actual spaces on the stack rather than bytes) since the task referenced by + * xTask was created. + */ +configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/* When using trace macros it is sometimes necessary to include task.h before + * FreeRTOS.h. When this is done TaskHookFunction_t will not yet have been defined, + * so the following two prototypes will cause a compilation error. This can be + * fixed by simply guarding against the inclusion of these two prototypes unless + * they are explicitly required by the configUSE_APPLICATION_TASK_TAG configuration + * constant. */ +#ifdef configUSE_APPLICATION_TASK_TAG + #if configUSE_APPLICATION_TASK_TAG == 1 + +/** + * task.h + *
+ * void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction );
+ * 
+ * + * Sets pxHookFunction to be the task hook function used by the task xTask. + * Passing xTask as NULL has the effect of setting the calling tasks hook + * function. + */ + void vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) PRIVILEGED_FUNCTION; + +/** + * task.h + *
+ * void xTaskGetApplicationTaskTag( TaskHandle_t xTask );
+ * 
+ * + * Returns the pxHookFunction value assigned to the task xTask. Do not + * call from an interrupt service routine - call + * xTaskGetApplicationTaskTagFromISR() instead. + */ + TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/** + * task.h + *
+ * void xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask );
+ * 
+ * + * Returns the pxHookFunction value assigned to the task xTask. Can + * be called from an interrupt service routine. + */ + TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + #endif /* configUSE_APPLICATION_TASK_TAG ==1 */ +#endif /* ifdef configUSE_APPLICATION_TASK_TAG */ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + +/* Each task contains an array of pointers that is dimensioned by the + * configNUM_THREAD_LOCAL_STORAGE_POINTERS setting in FreeRTOSConfig.h. The + * kernel does not use the pointers itself, so the application writer can use + * the pointers for any purpose they wish. The following two functions are + * used to set and query a pointer respectively. */ + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) PRIVILEGED_FUNCTION; + void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) PRIVILEGED_FUNCTION; + +#endif + +#if ( configCHECK_FOR_STACK_OVERFLOW > 0 ) + + /** + * task.h + *
void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName ); 
+ * + * The application stack overflow hook is called when a stack overflow is detected for a task. + * + * Details on stack overflow detection can be found here: https://www.FreeRTOS.org/Stacks-and-stack-overflow-checking.html + * + * @param xTask the task that just exceeded its stack boundaries. + * @param pcTaskName A character string containing the name of the offending task. + */ + void vApplicationStackOverflowHook( TaskHandle_t xTask, + char * pcTaskName ); + /*@ requires TCB_stack_p(xTask, ?ulFreeBytesOnStack) &*& + TCB_criticalNesting_p(xTask, ?uxCriticalNesting); + @*/ + /*@ ensures TCB_stack_p(xTask, ulFreeBytesOnStack) &*& + TCB_criticalNesting_p(xTask, uxCriticalNesting); + @*/ + +#endif + +#if ( configUSE_TICK_HOOK > 0 ) + /** + * task.h + *
void vApplicationTickHook( void ); 
+ * + * This hook function is called in the system tick handler after any OS work is completed. + */ + void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */ + +#endif + +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + /** + * task.h + *
void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer, StackType_t ** ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ) 
+ * + * This function is used to provide a statically allocated block of memory to FreeRTOS to hold the Idle Task TCB. This function is required when + * configSUPPORT_STATIC_ALLOCATION is set. For more information see this URI: https://www.FreeRTOS.org/a00110.html#configSUPPORT_STATIC_ALLOCATION + * + * @param ppxIdleTaskTCBBuffer A handle to a statically allocated TCB buffer + * @param ppxIdleTaskStackBuffer A handle to a statically allocated Stack buffer for the idle task + * @param pulIdleTaskStackSize A pointer to the number of elements that will fit in the allocated stack buffer + */ + void vApplicationGetIdleTaskMemory( StaticTask_t ** ppxIdleTaskTCBBuffer, + StackType_t ** ppxIdleTaskStackBuffer, + uint32_t * pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */ +#endif + +/** + * task.h + *
+ * BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter );
+ * 
+ * + * Calls the hook function associated with xTask. Passing xTask as NULL has + * the effect of calling the Running tasks (the calling task) hook function. + * + * pvParameter is passed to the hook function for the task to interpret as it + * wants. The return value is the value returned by the task hook function + * registered by the user. + */ +BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, + void * pvParameter ) PRIVILEGED_FUNCTION; + +/** + * xTaskGetIdleTaskHandle() is only available if + * INCLUDE_xTaskGetIdleTaskHandle is set to 1 in FreeRTOSConfig.h. + * + * Simply returns a pointer to the array of idle task handles. + * It is not valid to call xTaskGetIdleTaskHandle() before the scheduler has been started. + */ +TaskHandle_t *xTaskGetIdleTaskHandle( void ) PRIVILEGED_FUNCTION; + +/** + * configUSE_TRACE_FACILITY must be defined as 1 in FreeRTOSConfig.h for + * uxTaskGetSystemState() to be available. + * + * uxTaskGetSystemState() populates an TaskStatus_t structure for each task in + * the system. TaskStatus_t structures contain, among other things, members + * for the task handle, task name, task priority, task state, and total amount + * of run time consumed by the task. See the TaskStatus_t structure + * definition in this file for the full member list. + * + * NOTE: This function is intended for debugging use only as its use results in + * the scheduler remaining suspended for an extended period. + * + * @param pxTaskStatusArray A pointer to an array of TaskStatus_t structures. + * The array must contain at least one TaskStatus_t structure for each task + * that is under the control of the RTOS. The number of tasks under the control + * of the RTOS can be determined using the uxTaskGetNumberOfTasks() API function. + * + * @param uxArraySize The size of the array pointed to by the pxTaskStatusArray + * parameter. 
The size is specified as the number of indexes in the array, or + * the number of TaskStatus_t structures contained in the array, not by the + * number of bytes in the array. + * + * @param pulTotalRunTime If configGENERATE_RUN_TIME_STATS is set to 1 in + * FreeRTOSConfig.h then *pulTotalRunTime is set by uxTaskGetSystemState() to the + * total run time (as defined by the run time stats clock, see + * https://www.FreeRTOS.org/rtos-run-time-stats.html) since the target booted. + * pulTotalRunTime can be set to NULL to omit the total run time information. + * + * @return The number of TaskStatus_t structures that were populated by + * uxTaskGetSystemState(). This should equal the number returned by the + * uxTaskGetNumberOfTasks() API function, but will be zero if the value passed + * in the uxArraySize parameter was too small. + * + * Example usage: + *
+ *  // This example demonstrates how a human readable table of run time stats
+ *  // information is generated from raw data provided by uxTaskGetSystemState().
+ *  // The human readable table is written to pcWriteBuffer
+ *  void vTaskGetRunTimeStats( char *pcWriteBuffer )
+ *  {
+ *  TaskStatus_t *pxTaskStatusArray;
+ *  volatile UBaseType_t uxArraySize, x;
+ *  uint32_t ulTotalRunTime, ulStatsAsPercentage;
+ *
+ *      // Make sure the write buffer does not contain a string.
+ * *pcWriteBuffer = 0x00;
+ *
+ *      // Take a snapshot of the number of tasks in case it changes while this
+ *      // function is executing.
+ *      uxArraySize = uxTaskGetNumberOfTasks();
+ *
+ *      // Allocate a TaskStatus_t structure for each task.  An array could be
+ *      // allocated statically at compile time.
+ *      pxTaskStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );
+ *
+ *      if( pxTaskStatusArray != NULL )
+ *      {
+ *          // Generate raw status information about each task.
+ *          uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalRunTime );
+ *
+ *          // For percentage calculations.
+ *          ulTotalRunTime /= 100UL;
+ *
+ *          // Avoid divide by zero errors.
+ *          if( ulTotalRunTime > 0 )
+ *          {
+ *              // For each populated position in the pxTaskStatusArray array,
+ *              // format the raw data as human readable ASCII data
+ *              for( x = 0; x < uxArraySize; x++ )
+ *              {
+ *                  // What percentage of the total run time has the task used?
+ *                  // This will always be rounded down to the nearest integer.
+ *                  // ulTotalRunTimeDiv100 has already been divided by 100.
+ *                  ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalRunTime;
+ *
+ *                  if( ulStatsAsPercentage > 0UL )
+ *                  {
+ *                      sprintf( pcWriteBuffer, "%s\t\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
+ *                  }
+ *                  else
+ *                  {
+ *                      // If the percentage is zero here then the task has
+ *                      // consumed less than 1% of the total run time.
+ *                      sprintf( pcWriteBuffer, "%s\t\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].pcTaskName, pxTaskStatusArray[ x ].ulRunTimeCounter );
+ *                  }
+ *
+ *                  pcWriteBuffer += strlen( ( char * ) pcWriteBuffer );
+ *              }
+ *          }
+ *
+ *          // The array is no longer needed, free the memory it consumes.
+ *          vPortFree( pxTaskStatusArray );
+ *      }
+ *  }
+ *  
+ */ +UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + uint32_t * const pulTotalRunTime ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
void vTaskList( char *pcWriteBuffer );
+ * + * configUSE_TRACE_FACILITY and configUSE_STATS_FORMATTING_FUNCTIONS must + * both be defined as 1 for this function to be available. See the + * configuration section of the FreeRTOS.org website for more information. + * + * NOTE 1: This function will disable interrupts for its duration. It is + * not intended for normal application runtime use but as a debug aid. + * + * Lists all the current tasks, along with their current state and stack + * usage high water mark. + * + * Tasks are reported as blocked ('B'), ready ('R'), deleted ('D') or + * suspended ('S'). + * + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many of the + * demo applications. Do not consider it to be part of the scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that displays task: + * names, states, priority, stack usage and task number. + * Stack usage specified as the number of unused StackType_t words stack can hold + * on top of stack - not the number of bytes. + * + * vTaskList() has a dependency on the sprintf() C library function that might + * bloat the code size, use a lot of stack, and provide different results on + * different platforms. An alternative, tiny, third party, and limited + * functionality implementation of sprintf() is provided in many of the + * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note + * printf-stdarg.c does not provide a full snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly through a + * call to vTaskList(). + * + * @param pcWriteBuffer A buffer into which the above mentioned details + * will be written, in ASCII form. This buffer is assumed to be large + * enough to contain the generated report. Approximately 40 bytes per + * task should be sufficient. 
+ * + * \defgroup vTaskList vTaskList + * \ingroup TaskUtils + */ +void vTaskList( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *
void vTaskGetRunTimeStats( char *pcWriteBuffer );
+ * + * configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS + * must both be defined as 1 for this function to be available. The application + * must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() + * to configure a peripheral timer/counter and return the timers current count + * value respectively. The counter should be at least 10 times the frequency of + * the tick count. + * + * NOTE 1: This function will disable interrupts for its duration. It is + * not intended for normal application runtime use but as a debug aid. + * + * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total + * accumulated execution time being stored for each task. The resolution + * of the accumulated time value depends on the frequency of the timer + * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. + * Calling vTaskGetRunTimeStats() writes the total execution time of each + * task into a buffer, both as an absolute count value and as a percentage + * of the total system execution time. + * + * NOTE 2: + * + * This function is provided for convenience only, and is used by many of the + * demo applications. Do not consider it to be part of the scheduler. + * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that displays the + * amount of time each task has spent in the Running state in both absolute and + * percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library function + * that might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, and + * limited functionality implementation of sprintf() is provided in many of the + * FreeRTOS/Demo sub-directories in a file called printf-stdarg.c (note + * printf-stdarg.c does not provide a full snprintf() implementation!). 
+ * + * It is recommended that production systems call uxTaskGetSystemState() directly + * to get access to raw stats data, rather than indirectly through a call to + * vTaskGetRunTimeStats(). + * + * @param pcWriteBuffer A buffer into which the execution times will be + * written, in ASCII form. This buffer is assumed to be large enough to + * contain the generated report. Approximately 40 bytes per task should + * be sufficient. + * + * \defgroup vTaskGetRunTimeStats vTaskGetRunTimeStats + * \ingroup TaskUtils + */ +void vTaskGetRunTimeStats( char * pcWriteBuffer ) PRIVILEGED_FUNCTION; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + +/** + * task. h + *
uint32_t ulTaskGetIdleRunTimeCounter( void );
+ * + * configGENERATE_RUN_TIME_STATS and configUSE_STATS_FORMATTING_FUNCTIONS + * must both be defined as 1 for this function to be available. The application + * must also then provide definitions for + * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and portGET_RUN_TIME_COUNTER_VALUE() + * to configure a peripheral timer/counter and return the timers current count + * value respectively. The counter should be at least 10 times the frequency of + * the tick count. + * + * Setting configGENERATE_RUN_TIME_STATS to 1 will result in a total + * accumulated execution time being stored for each task. The resolution + * of the accumulated time value depends on the frequency of the timer + * configured by the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() macro. + * While uxTaskGetSystemState() and vTaskGetRunTimeStats() writes the total + * execution time of each task into a buffer, ulTaskGetIdleRunTimeCounter() + * returns the total execution time of just the idle task. + * + * @return The total run time of the idle task. This is the amount of time the + * idle task has actually been executing. The unit of time is dependent on the + * frequency configured using the portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() and + * portGET_RUN_TIME_COUNTER_VALUE() macros. + * + * \defgroup ulTaskGetIdleRunTimeCounter ulTaskGetIdleRunTimeCounter + * \ingroup TaskUtils + */ +uint32_t ulTaskGetIdleRunTimeCounter( void ) PRIVILEGED_FUNCTION; + +/** + * task. h + *
BaseType_t xTaskNotifyIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction );
+ *
BaseType_t xTaskNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction );
+ * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these + * functions to be available. + * + * Sends a direct to task notification to a task, with an optional value and + * action. + * + * Each task has a private array of "notification values" (or 'notifications'), + * each of which is a 32-bit unsigned integer (uint32_t). The constant + * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the + * array, and (for backward compatibility) defaults to 1 if left undefined. + * Prior to FreeRTOS V10.4.0 there was only one notification value per task. + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment one of the task's notification values. In + * that way task notifications can be used to send data to a task, or be used as + * light weight and fast binary or counting semaphores. + * + * A task can use xTaskNotifyWaitIndexed() or ulTaskNotifyTakeIndexed() to + * [optionally] block to wait for a notification to be pending. The task does + * not consume any CPU time while it is in the Blocked state. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWaitIndexed() or ulTaskNotifyTakeIndexed() (or their + * un-indexed equivalents). If the task was already in the Blocked state to + * wait for a notification when the notification arrives then the task will + * automatically be removed from the Blocked state (unblocked) and the + * notification cleared. 
+ * + * **NOTE** Each notification within the array operates independently - a task + * can only block on one notification within the array at a time and will not be + * unblocked by a notification sent to any other array index. + * + * Backward compatibility information: + * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and + * all task notification API functions operated on that value. Replacing the + * single notification value with an array of notification values necessitated a + * new set of API functions that could address specific notifications within the + * array. xTaskNotify() is the original API function, and remains backward + * compatible by always operating on the notification value at index 0 in the + * array. Calling xTaskNotify() is equivalent to calling xTaskNotifyIndexed() + * with the uxIndexToNotify parameter set to 0. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @param uxIndexToNotify The index within the target task's array of + * notification values to which the notification is to be sent. uxIndexToNotify + * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES. xTaskNotify() does + * not have this parameter and always sends notifications to index 0. + * + * @param ulValue Data that can be sent with the notification. How the data is + * used depends on the value of the eAction parameter. + * + * @param eAction Specifies how the notification updates the task's notification + * value, if at all. Valid values for eAction are as follows: + * + * eSetBits - + * The target notification value is bitwise ORed with ulValue. + * xTaskNotifyIndexed() always returns pdPASS in this case. + * + * eIncrement - + * The target notification value is incremented. 
ulValue is not used and + * xTaskNotifyIndexed() always returns pdPASS in this case. + * + * eSetValueWithOverwrite - + * The target notification value is set to the value of ulValue, even if the + * task being notified had not yet processed the previous notification at the + * same array index (the task already had a notification pending at that index). + * xTaskNotifyIndexed() always returns pdPASS in this case. + * + * eSetValueWithoutOverwrite - + * If the task being notified did not already have a notification pending at the + * same array index then the target notification value is set to ulValue and + * xTaskNotifyIndexed() will return pdPASS. If the task being notified already + * had a notification pending at the same array index then no action is + * performed and pdFAIL is returned. + * + * eNoAction - + * The task receives a notification at the specified array index without the + * notification value at that index being updated. ulValue is not used and + * xTaskNotifyIndexed() always returns pdPASS in this case. + * + * pulPreviousNotificationValue - + * Can be used to pass out the subject task's notification value before any + * bits are modified by the notify function. + * + * @return Dependent on the value of eAction. See the description of the + * eAction parameter. + * + * \defgroup xTaskNotifyIndexed xTaskNotifyIndexed + * \ingroup TaskNotifications + */ +BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) PRIVILEGED_FUNCTION; +#define xTaskNotify( xTaskToNotify, ulValue, eAction ) \ + xTaskGenericNotify( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), NULL ) +#define xTaskNotifyIndexed( xTaskToNotify, uxIndexToNotify, ulValue, eAction ) \ + xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), NULL ) + +/** + * task. h + *
BaseType_t xTaskNotifyAndQueryIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotifyValue );
+ *
BaseType_t xTaskNotifyAndQuery( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotifyValue );
+ * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * xTaskNotifyAndQueryIndexed() performs the same operation as + * xTaskNotifyIndexed() with the addition that it also returns the subject + * task's prior notification value (the notification value at the time the + * function is called rather than when the function returns) in the additional + * pulPreviousNotifyValue parameter. + * + * xTaskNotifyAndQuery() performs the same operation as xTaskNotify() with the + * addition that it also returns the subject task's prior notification value + * (the notification value as it was at the time the function is called, rather + * than when the function returns) in the additional pulPreviousNotifyValue + * parameter. + * + * \defgroup xTaskNotifyAndQueryIndexed xTaskNotifyAndQueryIndexed + * \ingroup TaskNotifications + */ +#define xTaskNotifyAndQuery( xTaskToNotify, ulValue, eAction, pulPreviousNotifyValue ) \ + xTaskGenericNotify( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) ) +#define xTaskNotifyAndQueryIndexed( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotifyValue ) \ + xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotifyValue ) ) + +/** + * task. h + *
BaseType_t xTaskNotifyIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
+ *
BaseType_t xTaskNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, BaseType_t *pxHigherPriorityTaskWoken );
+ * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these + * functions to be available. + * + * A version of xTaskNotifyIndexed() that can be used from an interrupt service + * routine (ISR). + * + * Each task has a private array of "notification values" (or 'notifications'), + * each of which is a 32-bit unsigned integer (uint32_t). The constant + * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the + * array, and (for backward compatibility) defaults to 1 if left undefined. + * Prior to FreeRTOS V10.4.0 there was only one notification value per task. + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment one of the task's notification values. In + * that way task notifications can be used to send data to a task, or be used as + * light weight and fast binary or counting semaphores. + * + * A task can use xTaskNotifyWaitIndexed() to [optionally] block to wait for a + * notification to be pending, or ulTaskNotifyTakeIndexed() to [optionally] block + * to wait for a notification value to have a non-zero value. The task does + * not consume any CPU time while it is in the Blocked state. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWaitIndexed() or ulTaskNotifyTakeIndexed() (or their + * un-indexed equivalents). If the task was already in the Blocked state to + * wait for a notification when the notification arrives then the task will + * automatically be removed from the Blocked state (unblocked) and the + * notification cleared. 
+ * + * **NOTE** Each notification within the array operates independently - a task + * can only block on one notification within the array at a time and will not be + * unblocked by a notification sent to any other array index. + * + * Backward compatibility information: + * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and + * all task notification API functions operated on that value. Replacing the + * single notification value with an array of notification values necessitated a + * new set of API functions that could address specific notifications within the + * array. xTaskNotifyFromISR() is the original API function, and remains + * backward compatible by always operating on the notification value at index 0 + * within the array. Calling xTaskNotifyFromISR() is equivalent to calling + * xTaskNotifyIndexedFromISR() with the uxIndexToNotify parameter set to 0. + * + * @param uxIndexToNotify The index within the target task's array of + * notification values to which the notification is to be sent. uxIndexToNotify + * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES. xTaskNotifyFromISR() + * does not have this parameter and always sends notifications to index 0. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @param ulValue Data that can be sent with the notification. How the data is + * used depends on the value of the eAction parameter. + * + * @param eAction Specifies how the notification updates the task's notification + * value, if at all. Valid values for eAction are as follows: + * + * eSetBits - + * The task's notification value is bitwise ORed with ulValue. xTaskNotify() + * always returns pdPASS in this case. + * + * eIncrement - + * The task's notification value is incremented. 
ulValue is not used and + * xTaskNotify() always returns pdPASS in this case. + * + * eSetValueWithOverwrite - + * The task's notification value is set to the value of ulValue, even if the + * task being notified had not yet processed the previous notification (the + * task already had a notification pending). xTaskNotify() always returns + * pdPASS in this case. + * + * eSetValueWithoutOverwrite - + * If the task being notified did not already have a notification pending then + * the task's notification value is set to ulValue and xTaskNotify() will + * return pdPASS. If the task being notified already had a notification + * pending then no action is performed and pdFAIL is returned. + * + * eNoAction - + * The task receives a notification without its notification value being + * updated. ulValue is not used and xTaskNotify() always returns pdPASS in + * this case. + * + * @param pxHigherPriorityTaskWoken xTaskNotifyFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the + * task to which the notification was sent to leave the Blocked state, and the + * unblocked task has a priority higher than the currently running task. If + * xTaskNotifyFromISR() sets this value to pdTRUE then a context switch should + * be requested before the interrupt is exited. How a context switch is + * requested from an ISR is dependent on the port - see the documentation page + * for the port in use. + * + * @return Dependent on the value of eAction. See the description of the + * eAction parameter. 
+ * + * \defgroup xTaskNotifyIndexedFromISR xTaskNotifyIndexedFromISR + * \ingroup TaskNotifications + */ +BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +#define xTaskNotifyFromISR( xTaskToNotify, ulValue, eAction, pxHigherPriorityTaskWoken ) \ + xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) ) +#define xTaskNotifyIndexedFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pxHigherPriorityTaskWoken ) \ + xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), NULL, ( pxHigherPriorityTaskWoken ) ) + +/** + * task. h + *
BaseType_t xTaskNotifyAndQueryIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken );
+ *
BaseType_t xTaskNotifyAndQueryFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken );
+ * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * xTaskNotifyAndQueryIndexedFromISR() performs the same operation as + * xTaskNotifyIndexedFromISR() with the addition that it also returns the + * subject task's prior notification value (the notification value at the time + * the function is called rather than at the time the function returns) in the + * additional pulPreviousNotifyValue parameter. + * + * xTaskNotifyAndQueryFromISR() performs the same operation as + * xTaskNotifyFromISR() with the addition that it also returns the subject + * task's prior notification value (the notification value at the time the + * function is called rather than at the time the function returns) in the + * additional pulPreviousNotifyValue parameter. + * + * \defgroup xTaskNotifyAndQueryIndexedFromISR xTaskNotifyAndQueryIndexedFromISR + * \ingroup TaskNotifications + */ +#define xTaskNotifyAndQueryIndexedFromISR( xTaskToNotify, uxIndexToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) \ + xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) ) +#define xTaskNotifyAndQueryFromISR( xTaskToNotify, ulValue, eAction, pulPreviousNotificationValue, pxHigherPriorityTaskWoken ) \ + xTaskGenericNotifyFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulValue ), ( eAction ), ( pulPreviousNotificationValue ), ( pxHigherPriorityTaskWoken ) ) + +/** + * task. h + *
+ * BaseType_t xTaskNotifyWaitIndexed( UBaseType_t uxIndexToWaitOn, uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
+ *
+ * BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait );
+ * 
+ * + * Waits for a direct to task notification to be pending at a given index within + * an array of direct to task notifications. + * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * Each task has a private array of "notification values" (or 'notifications'), + * each of which is a 32-bit unsigned integer (uint32_t). The constant + * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the + * array, and (for backward compatibility) defaults to 1 if left undefined. + * Prior to FreeRTOS V10.4.0 there was only one notification value per task. + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment one of the task's notification values. In + * that way task notifications can be used to send data to a task, or be used as + * light weight and fast binary or counting semaphores. + * + * A notification sent to a task will remain pending until it is cleared by the + * task calling xTaskNotifyWaitIndexed() or ulTaskNotifyTakeIndexed() (or their + * un-indexed equivalents). If the task was already in the Blocked state to + * wait for a notification when the notification arrives then the task will + * automatically be removed from the Blocked state (unblocked) and the + * notification cleared. + * + * A task can use xTaskNotifyWaitIndexed() to [optionally] block to wait for a + * notification to be pending, or ulTaskNotifyTakeIndexed() to [optionally] block + * to wait for a notification value to have a non-zero value. 
The task does + * not consume any CPU time while it is in the Blocked state. + * + * **NOTE** Each notification within the array operates independently - a task + * can only block on one notification within the array at a time and will not be + * unblocked by a notification sent to any other array index. + * + * Backward compatibility information: + * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and + * all task notification API functions operated on that value. Replacing the + * single notification value with an array of notification values necessitated a + * new set of API functions that could address specific notifications within the + * array. xTaskNotifyWait() is the original API function, and remains backward + * compatible by always operating on the notification value at index 0 in the + * array. Calling xTaskNotifyWait() is equivalent to calling + * xTaskNotifyWaitIndexed() with the uxIndexToWaitOn parameter set to 0. + * + * @param uxIndexToWaitOn The index within the calling task's array of + * notification values on which the calling task will wait for a notification to + * be received. uxIndexToWaitOn must be less than + * configTASK_NOTIFICATION_ARRAY_ENTRIES. xTaskNotifyWait() does + * not have this parameter and always waits for notifications on index 0. + * + * @param ulBitsToClearOnEntry Bits that are set in ulBitsToClearOnEntry value + * will be cleared in the calling task's notification value before the task + * checks to see if any notifications are pending, and optionally blocks if no + * notifications are pending. Setting ulBitsToClearOnEntry to ULONG_MAX (if + * limits.h is included) or 0xffffffffUL (if limits.h is not included) will have + * the effect of resetting the task's notification value to 0. Setting + * ulBitsToClearOnEntry to 0 will leave the task's notification value unchanged. 
+ * + * @param ulBitsToClearOnExit If a notification is pending or received before + * the calling task exits the xTaskNotifyWait() function then the task's + * notification value (see the xTaskNotify() API function) is passed out using + * the pulNotificationValue parameter. Then any bits that are set in + * ulBitsToClearOnExit will be cleared in the task's notification value (note + * *pulNotificationValue is set before any bits are cleared). Setting + * ulBitsToClearOnExit to ULONG_MAX (if limits.h is included) or 0xffffffffUL + * (if limits.h is not included) will have the effect of resetting the task's + * notification value to 0 before the function exits. Setting + * ulBitsToClearOnExit to 0 will leave the task's notification value unchanged + * when the function exits (in which case the value passed out in + * pulNotificationValue will match the task's notification value). + * + * @param pulNotificationValue Used to pass the task's notification value out + * of the function. Note the value passed out will not be effected by the + * clearing of any bits caused by ulBitsToClearOnExit being non-zero. + * + * @param xTicksToWait The maximum amount of time that the task should wait in + * the Blocked state for a notification to be received, should a notification + * not already be pending when xTaskNotifyWait() was called. The task + * will not consume any processing time while it is in the Blocked state. This + * is specified in kernel ticks, the macro pdMS_TO_TICKS( value_in_ms ) can be + * used to convert a time specified in milliseconds to a time specified in + * ticks. + * + * @return If a notification was received (including notifications that were + * already pending when xTaskNotifyWait was called) then pdPASS is + * returned. Otherwise pdFAIL is returned. 
+ * + * \defgroup xTaskNotifyWaitIndexed xTaskNotifyWaitIndexed + * \ingroup TaskNotifications + */ +BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWaitOn, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +#define xTaskNotifyWait( ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ) \ + xTaskGenericNotifyWait( tskDEFAULT_INDEX_TO_NOTIFY, ( ulBitsToClearOnEntry ), ( ulBitsToClearOnExit ), ( pulNotificationValue ), ( xTicksToWait ) ) +#define xTaskNotifyWaitIndexed( uxIndexToWaitOn, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait ) \ + xTaskGenericNotifyWait( ( uxIndexToWaitOn ), ( ulBitsToClearOnEntry ), ( ulBitsToClearOnExit ), ( pulNotificationValue ), ( xTicksToWait ) ) + +/** + * task. h + *
BaseType_t xTaskNotifyGiveIndexed( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify );
+ *
BaseType_t xTaskNotifyGive( TaskHandle_t xTaskToNotify );
+ * + * Sends a direct to task notification to a particular index in the target + * task's notification array in a manner similar to giving a counting semaphore. + * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for more details. + * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these + * macros to be available. + * + * Each task has a private array of "notification values" (or 'notifications'), + * each of which is a 32-bit unsigned integer (uint32_t). The constant + * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the + * array, and (for backward compatibility) defaults to 1 if left undefined. + * Prior to FreeRTOS V10.4.0 there was only one notification value per task. + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment one of the task's notification values. In + * that way task notifications can be used to send data to a task, or be used as + * light weight and fast binary or counting semaphores. + * + * xTaskNotifyGiveIndexed() is a helper macro intended for use when task + * notifications are used as light weight and faster binary or counting + * semaphore equivalents. Actual FreeRTOS semaphores are given using the + * xSemaphoreGive() API function, the equivalent action that instead uses a task + * notification is xTaskNotifyGiveIndexed(). + * + * When task notifications are being used as a binary or counting semaphore + * equivalent then the task being notified should wait for the notification + * using the ulTaskNotificationTakeIndexed() API function rather than the + * xTaskNotifyWaitIndexed() API function. 
+ * + * **NOTE** Each notification within the array operates independently - a task + * can only block on one notification within the array at a time and will not be + * unblocked by a notification sent to any other array index. + * + * Backward compatibility information: + * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and + * all task notification API functions operated on that value. Replacing the + * single notification value with an array of notification values necessitated a + * new set of API functions that could address specific notifications within the + * array. xTaskNotifyGive() is the original API function, and remains backward + * compatible by always operating on the notification value at index 0 in the + * array. Calling xTaskNotifyGive() is equivalent to calling + * xTaskNotifyGiveIndexed() with the uxIndexToNotify parameter set to 0. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @param uxIndexToNotify The index within the target task's array of + * notification values to which the notification is to be sent. uxIndexToNotify + * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES. xTaskNotifyGive() + * does not have this parameter and always sends notifications to index 0. + * + * @return xTaskNotifyGive() is a macro that calls xTaskNotify() with the + * eAction parameter set to eIncrement - so pdPASS is always returned. 
+ * + * \defgroup xTaskNotifyGiveIndexed xTaskNotifyGiveIndexed + * \ingroup TaskNotifications + */ +#define xTaskNotifyGive( xTaskToNotify ) \ + xTaskGenericNotify( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( 0 ), eIncrement, NULL ) +#define xTaskNotifyGiveIndexed( xTaskToNotify, uxIndexToNotify ) \ + xTaskGenericNotify( ( xTaskToNotify ), ( uxIndexToNotify ), ( 0 ), eIncrement, NULL ) + +/** + * task. h + *
 void vTaskNotifyGiveIndexedFromISR( TaskHandle_t xTaskToNotify, UBaseType_t uxIndexToNotify, BaseType_t *pxHigherPriorityTaskWoken );
+ *
 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken );
+ * + * A version of xTaskNotifyGiveIndexed() that can be called from an interrupt + * service routine (ISR). + * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for more details. + * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this macro + * to be available. + * + * Each task has a private array of "notification values" (or 'notifications'), + * each of which is a 32-bit unsigned integer (uint32_t). The constant + * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the + * array, and (for backward compatibility) defaults to 1 if left undefined. + * Prior to FreeRTOS V10.4.0 there was only one notification value per task. + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment one of the task's notification values. In + * that way task notifications can be used to send data to a task, or be used as + * light weight and fast binary or counting semaphores. + * + * vTaskNotifyGiveIndexedFromISR() is intended for use when task notifications + * are used as light weight and faster binary or counting semaphore equivalents. + * Actual FreeRTOS semaphores are given from an ISR using the + * xSemaphoreGiveFromISR() API function, the equivalent action that instead uses + * a task notification is vTaskNotifyGiveIndexedFromISR(). + * + * When task notifications are being used as a binary or counting semaphore + * equivalent then the task being notified should wait for the notification + * using the ulTaskNotificationTakeIndexed() API function rather than the + * xTaskNotifyWaitIndexed() API function. 
+ * + * **NOTE** Each notification within the array operates independently - a task + * can only block on one notification within the array at a time and will not be + * unblocked by a notification sent to any other array index. + * + * Backward compatibility information: + * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and + * all task notification API functions operated on that value. Replacing the + * single notification value with an array of notification values necessitated a + * new set of API functions that could address specific notifications within the + * array. xTaskNotifyFromISR() is the original API function, and remains + * backward compatible by always operating on the notification value at index 0 + * within the array. Calling xTaskNotifyGiveFromISR() is equivalent to calling + * xTaskNotifyGiveIndexedFromISR() with the uxIndexToNotify parameter set to 0. + * + * @param xTaskToNotify The handle of the task being notified. The handle to a + * task can be returned from the xTaskCreate() API function used to create the + * task, and the handle of the currently running task can be obtained by calling + * xTaskGetCurrentTaskHandle(). + * + * @param uxIndexToNotify The index within the target task's array of + * notification values to which the notification is to be sent. uxIndexToNotify + * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES. + * xTaskNotifyGiveFromISR() does not have this parameter and always sends + * notifications to index 0. + * + * @param pxHigherPriorityTaskWoken vTaskNotifyGiveFromISR() will set + * *pxHigherPriorityTaskWoken to pdTRUE if sending the notification caused the + * task to which the notification was sent to leave the Blocked state, and the + * unblocked task has a priority higher than the currently running task. If + * vTaskNotifyGiveFromISR() sets this value to pdTRUE then a context switch + * should be requested before the interrupt is exited. 
How a context switch is + * requested from an ISR is dependent on the port - see the documentation page + * for the port in use. + * + * \defgroup vTaskNotifyGiveIndexedFromISR vTaskNotifyGiveIndexedFromISR + * \ingroup TaskNotifications + */ +void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + BaseType_t * pxHigherPriorityTaskWoken ) PRIVILEGED_FUNCTION; +#define vTaskNotifyGiveFromISR( xTaskToNotify, pxHigherPriorityTaskWoken ) \ + vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( pxHigherPriorityTaskWoken ) ); +#define vTaskNotifyGiveIndexedFromISR( xTaskToNotify, uxIndexToNotify, pxHigherPriorityTaskWoken ) \ + vTaskGenericNotifyGiveFromISR( ( xTaskToNotify ), ( uxIndexToNotify ), ( pxHigherPriorityTaskWoken ) ); + +/** + * task. h + *
+ * uint32_t ulTaskNotifyTakeIndexed( UBaseType_t uxIndexToWaitOn, BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
+ *
+ * uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait );
+ * 
+ * + * Waits for a direct to task notification on a particular index in the calling + * task's notification array in a manner similar to taking a counting semaphore. + * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for this + * function to be available. + * + * Each task has a private array of "notification values" (or 'notifications'), + * each of which is a 32-bit unsigned integer (uint32_t). The constant + * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the + * array, and (for backward compatibility) defaults to 1 if left undefined. + * Prior to FreeRTOS V10.4.0 there was only one notification value per task. + * + * Events can be sent to a task using an intermediary object. Examples of such + * objects are queues, semaphores, mutexes and event groups. Task notifications + * are a method of sending an event directly to a task without the need for such + * an intermediary object. + * + * A notification sent to a task can optionally perform an action, such as + * update, overwrite or increment one of the task's notification values. In + * that way task notifications can be used to send data to a task, or be used as + * light weight and fast binary or counting semaphores. + * + * ulTaskNotifyTakeIndexed() is intended for use when a task notification is + * used as a faster and lighter weight binary or counting semaphore alternative. + * Actual FreeRTOS semaphores are taken using the xSemaphoreTake() API function, + * the equivalent action that instead uses a task notification is + * ulTaskNotifyTakeIndexed(). + * + * When a task is using its notification value as a binary or counting semaphore + * other tasks should send notifications to it using the xTaskNotifyGiveIndexed() + * macro, or xTaskNotifyIndex() function with the eAction parameter set to + * eIncrement. 
+ * + * ulTaskNotifyTakeIndexed() can either clear the task's notification value at + * the array index specified by the uxIndexToWaitOn parameter to zero on exit, + * in which case the notification value acts like a binary semaphore, or + * decrement the notification value on exit, in which case the notification + * value acts like a counting semaphore. + * + * A task can use ulTaskNotifyTakeIndexed() to [optionally] block to wait for + * a notification. The task does not consume any CPU time while it is in the + * Blocked state. + * + * Where as xTaskNotifyWaitIndexed() will return when a notification is pending, + * ulTaskNotifyTakeIndexed() will return when the task's notification value is + * not zero. + * + * **NOTE** Each notification within the array operates independently - a task + * can only block on one notification within the array at a time and will not be + * unblocked by a notification sent to any other array index. + * + * Backward compatibility information: + * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and + * all task notification API functions operated on that value. Replacing the + * single notification value with an array of notification values necessitated a + * new set of API functions that could address specific notifications within the + * array. ulTaskNotifyTake() is the original API function, and remains backward + * compatible by always operating on the notification value at index 0 in the + * array. Calling ulTaskNotifyTake() is equivalent to calling + * ulTaskNotifyTakeIndexed() with the uxIndexToWaitOn parameter set to 0. + * + * @param uxIndexToWaitOn The index within the calling task's array of + * notification values on which the calling task will wait for a notification to + * be non-zero. uxIndexToWaitOn must be less than + * configTASK_NOTIFICATION_ARRAY_ENTRIES. xTaskNotifyTake() does + * not have this parameter and always waits for notifications on index 0. 
+ * + * @param xClearCountOnExit if xClearCountOnExit is pdFALSE then the task's + * notification value is decremented when the function exits. In this way the + * notification value acts like a counting semaphore. If xClearCountOnExit is + * not pdFALSE then the task's notification value is cleared to zero when the + * function exits. In this way the notification value acts like a binary + * semaphore. + * + * @param xTicksToWait The maximum amount of time that the task should wait in + * the Blocked state for the task's notification value to be greater than zero, + * should the count not already be greater than zero when + * ulTaskNotifyTake() was called. The task will not consume any processing + * time while it is in the Blocked state. This is specified in kernel ticks, + * the macro pdMS_TO_TICKS( value_in_ms ) can be used to convert a time + * specified in milliseconds to a time specified in ticks. + * + * @return The task's notification count before it is either cleared to zero or + * decremented (see the xClearCountOnExit parameter). + * + * \defgroup ulTaskNotifyTakeIndexed ulTaskNotifyTakeIndexed + * \ingroup TaskNotifications + */ +uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWaitOn, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +#define ulTaskNotifyTake( xClearCountOnExit, xTicksToWait ) \ + ulTaskGenericNotifyTake( ( tskDEFAULT_INDEX_TO_NOTIFY ), ( xClearCountOnExit ), ( xTicksToWait ) ) +#define ulTaskNotifyTakeIndexed( uxIndexToWaitOn, xClearCountOnExit, xTicksToWait ) \ + ulTaskGenericNotifyTake( ( uxIndexToWaitOn ), ( xClearCountOnExit ), ( xTicksToWait ) ) + +/** + * task. h + *
+ * BaseType_t xTaskNotifyStateClearIndexed( TaskHandle_t xTask, UBaseType_t uxIndexToCLear );
+ *
+ * BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask );
+ * 
+ * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these + * functions to be available. + * + * Each task has a private array of "notification values" (or 'notifications'), + * each of which is a 32-bit unsigned integer (uint32_t). The constant + * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the + * array, and (for backward compatibility) defaults to 1 if left undefined. + * Prior to FreeRTOS V10.4.0 there was only one notification value per task. + * + * If a notification is sent to an index within the array of notifications then + * the notification at that index is said to be 'pending' until it is read or + * explicitly cleared by the receiving task. xTaskNotifyStateClearIndexed() + * is the function that clears a pending notification without reading the + * notification value. The notification value at the same array index is not + * altered. Set xTask to NULL to clear the notification state of the calling + * task. + * + * Backward compatibility information: + * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and + * all task notification API functions operated on that value. Replacing the + * single notification value with an array of notification values necessitated a + * new set of API functions that could address specific notifications within the + * array. xTaskNotifyStateClear() is the original API function, and remains + * backward compatible by always operating on the notification value at index 0 + * within the array. Calling xTaskNotifyStateClear() is equivalent to calling + * xTaskNotifyStateClearIndexed() with the uxIndexToNotify parameter set to 0. + * + * @param xTask The handle of the RTOS task that will have a notification state + * cleared. Set xTask to NULL to clear a notification state in the calling + * task. 
To obtain a task's handle create the task using xTaskCreate() and + * make use of the pxCreatedTask parameter, or create the task using + * xTaskCreateStatic() and store the returned value, or use the task's name in + * a call to xTaskGetHandle(). + * + * @param uxIndexToClear The index within the target task's array of + * notification values to act upon. For example, setting uxIndexToClear to 1 + * will clear the state of the notification at index 1 within the array. + * uxIndexToClear must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES. + * ulTaskNotifyStateClear() does not have this parameter and always acts on the + * notification at index 0. + * + * @return pdTRUE if the task's notification state was set to + * eNotWaitingNotification, otherwise pdFALSE. + * + * \defgroup xTaskNotifyStateClearIndexed xTaskNotifyStateClearIndexed + * \ingroup TaskNotifications + */ +BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) PRIVILEGED_FUNCTION; +#define xTaskNotifyStateClear( xTask ) \ + xTaskGenericNotifyStateClear( ( xTask ), ( tskDEFAULT_INDEX_TO_NOTIFY ) ) +#define xTaskNotifyStateClearIndexed( xTask, uxIndexToClear ) \ + xTaskGenericNotifyStateClear( ( xTask ), ( uxIndexToClear ) ) + +/** + * task. h + *
+ * uint32_t ulTaskNotifyValueClearIndexed( TaskHandle_t xTask, UBaseType_t uxIndexToClear, uint32_t ulBitsToClear );
+ *
+ * uint32_t ulTaskNotifyValueClear( TaskHandle_t xTask, uint32_t ulBitsToClear );
+ * 
+ * + * See https://www.FreeRTOS.org/RTOS-task-notifications.html for details. + * + * configUSE_TASK_NOTIFICATIONS must be undefined or defined as 1 for these + * functions to be available. + * + * Each task has a private array of "notification values" (or 'notifications'), + * each of which is a 32-bit unsigned integer (uint32_t). The constant + * configTASK_NOTIFICATION_ARRAY_ENTRIES sets the number of indexes in the + * array, and (for backward compatibility) defaults to 1 if left undefined. + * Prior to FreeRTOS V10.4.0 there was only one notification value per task. + * + * ulTaskNotifyValueClearIndexed() clears the bits specified by the + * ulBitsToClear bit mask in the notification value at array index uxIndexToClear + * of the task referenced by xTask. + * + * Backward compatibility information: + * Prior to FreeRTOS V10.4.0 each task had a single "notification value", and + * all task notification API functions operated on that value. Replacing the + * single notification value with an array of notification values necessitated a + * new set of API functions that could address specific notifications within the + * array. ulTaskNotifyValueClear() is the original API function, and remains + * backward compatible by always operating on the notification value at index 0 + * within the array. Calling ulTaskNotifyValueClear() is equivalent to calling + * ulTaskNotifyValueClearIndexed() with the uxIndexToClear parameter set to 0. + * + * @param xTask The handle of the RTOS task that will have bits in one of its + * notification values cleared. Set xTask to NULL to clear bits in a + * notification value of the calling task. To obtain a task's handle create the + * task using xTaskCreate() and make use of the pxCreatedTask parameter, or + * create the task using xTaskCreateStatic() and store the returned value, or + * use the task's name in a call to xTaskGetHandle(). 
+ * + * @param uxIndexToClear The index within the target task's array of + * notification values in which to clear the bits. uxIndexToClear + * must be less than configTASK_NOTIFICATION_ARRAY_ENTRIES. + * ulTaskNotifyValueClear() does not have this parameter and always clears bits + * in the notification value at index 0. + * + * @param ulBitsToClear Bit mask of the bits to clear in the notification value of + * xTask. Set a bit to 1 to clear the corresponding bits in the task's notification + * value. Set ulBitsToClear to 0xffffffff (UINT_MAX on 32-bit architectures) to clear + * the notification value to 0. Set ulBitsToClear to 0 to query the task's + * notification value without clearing any bits. + * + * + * @return The value of the target task's notification value before the bits + * specified by ulBitsToClear were cleared. + * \defgroup ulTaskNotifyValueClear ulTaskNotifyValueClear + * \ingroup TaskNotifications + */ +uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) PRIVILEGED_FUNCTION; +#define ulTaskNotifyValueClear( xTask, ulBitsToClear ) \ + ulTaskGenericNotifyValueClear( ( xTask ), ( tskDEFAULT_INDEX_TO_NOTIFY ), ( ulBitsToClear ) ) +#define ulTaskNotifyValueClearIndexed( xTask, uxIndexToClear, ulBitsToClear ) \ + ulTaskGenericNotifyValueClear( ( xTask ), ( uxIndexToClear ), ( ulBitsToClear ) ) + +/** + * task.h + *
+ * void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut );
+ * 
+ * + * Capture the current time for future use with xTaskCheckForTimeOut(). + * + * @param pxTimeOut Pointer to a timeout object into which the current time + * is to be captured. The captured time includes the tick count and the number + * of times the tick count has overflowed since the system first booted. + * \defgroup vTaskSetTimeOutState vTaskSetTimeOutState + * \ingroup TaskCtrl + */ +void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + +/** + * task.h + *
+ * BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait );
+ * 
+ * + * Determines if pxTicksToWait ticks has passed since a time was captured + * using a call to vTaskSetTimeOutState(). The captured time includes the tick + * count and the number of times the tick count has overflowed. + * + * @param pxTimeOut The time status as captured previously using + * vTaskSetTimeOutState. If the timeout has not yet occurred, it is updated + * to reflect the current time status. + * @param pxTicksToWait The number of ticks to check for timeout i.e. if + * pxTicksToWait ticks have passed since pxTimeOut was last updated (either by + * vTaskSetTimeOutState() or xTaskCheckForTimeOut()), the timeout has occurred. + * If the timeout has not occurred, pxTicksToWait is updated to reflect the + * number of remaining ticks. + * + * @return If timeout has occurred, pdTRUE is returned. Otherwise pdFALSE is + * returned and pxTicksToWait is updated to reflect the number of remaining + * ticks. + * + * @see https://www.FreeRTOS.org/xTaskCheckForTimeOut.html + * + * Example Usage: + *
+ *  // Driver library function used to receive uxWantedBytes from an Rx buffer
+ *  // that is filled by a UART interrupt. If there are not enough bytes in the
+ *  // Rx buffer then the task enters the Blocked state until it is notified that
+ *  // more data has been placed into the buffer. If there is still not enough
+ *  // data then the task re-enters the Blocked state, and xTaskCheckForTimeOut()
+ *  // is used to re-calculate the Block time to ensure the total amount of time
+ *  // spent in the Blocked state does not exceed MAX_TIME_TO_WAIT. This
+ *  // continues until either the buffer contains at least uxWantedBytes bytes,
+ *  // or the total amount of time spent in the Blocked state reaches
+ *  // MAX_TIME_TO_WAIT – at which point the task reads however many bytes are
+ *  // available up to a maximum of uxWantedBytes.
+ *
+ *  size_t xUART_Receive( uint8_t *pucBuffer, size_t uxWantedBytes )
+ *  {
+ *  size_t uxReceived = 0;
+ *  TickType_t xTicksToWait = MAX_TIME_TO_WAIT;
+ *  TimeOut_t xTimeOut;
+ *
+ *      // Initialize xTimeOut.  This records the time at which this function
+ *      // was entered.
+ *      vTaskSetTimeOutState( &xTimeOut );
+ *
+ *      // Loop until the buffer contains the wanted number of bytes, or a
+ *      // timeout occurs.
+ *      while( UART_bytes_in_rx_buffer( pxUARTInstance ) < uxWantedBytes )
+ *      {
+ *          // The buffer didn't contain enough data so this task is going to
+ *          // enter the Blocked state. Adjusting xTicksToWait to account for
+ *          // any time that has been spent in the Blocked state within this
+ *          // function so far to ensure the total amount of time spent in the
+ *          // Blocked state does not exceed MAX_TIME_TO_WAIT.
+ *          if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
+ *          {
+ *              //Timed out before the wanted number of bytes were available,
+ *              // exit the loop.
+ *              break;
+ *          }
+ *
+ *          // Wait for a maximum of xTicksToWait ticks to be notified that the
+ *          // receive interrupt has placed more data into the buffer.
+ *          ulTaskNotifyTake( pdTRUE, xTicksToWait );
+ *      }
+ *
+ *      // Attempt to read uxWantedBytes from the receive buffer into pucBuffer.
+ *      // The actual number of bytes read (which might be less than
+ *      // uxWantedBytes) is returned.
+ *      uxReceived = UART_read_from_receive_buffer( pxUARTInstance,
+ *                                                  pucBuffer,
+ *                                                  uxWantedBytes );
+ *
+ *      return uxReceived;
+ *  }
+ * 
+ * \defgroup xTaskCheckForTimeOut xTaskCheckForTimeOut + * \ingroup TaskCtrl + */ +BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) PRIVILEGED_FUNCTION; + +/** + * task.h + *
+ * BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp );
+ * 
+ * + * This function corrects the tick count value after the application code has held + * interrupts disabled for an extended period resulting in tick interrupts having + * been missed. + * + * This function is similar to vTaskStepTick(), however, unlike + * vTaskStepTick(), xTaskCatchUpTicks() may move the tick count forward past a + * time at which a task should be removed from the blocked state. That means + * tasks may have to be removed from the blocked state as the tick count is + * moved. + * + * @param xTicksToCatchUp The number of tick interrupts that have been missed due to + * interrupts being disabled. Its value is not computed automatically, so must be + * computed by the application writer. + * + * @return pdTRUE if moving the tick count forward resulted in a task leaving the + * blocked state and a context switch being performed. Otherwise pdFALSE. + * + * \defgroup xTaskCatchUpTicks xTaskCatchUpTicks + * \ingroup TaskCtrl + */ +BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) PRIVILEGED_FUNCTION; + + +/*----------------------------------------------------------- +* SCHEDULER INTERNALS AVAILABLE FOR PORTING PURPOSES +*----------------------------------------------------------*/ + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY + * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS + * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * Called from the real time kernel tick (either preemptive or cooperative), + * this increments the tick count and checks if any tasks that are blocked + * for a finite period required removing from a blocked list and placing on + * a ready list. If a non-zero value is returned then a context switch is + * required because either: + * + A task was removed from a blocked list because its timeout had expired, + * or + * + Time slicing is in use and there is a task of equal priority to the + * currently running task. 
+ */ +BaseType_t xTaskIncrementTick( void ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * Removes the calling task from the ready list and places it both + * on the list of tasks waiting for a particular event, and the + * list of delayed tasks. The task will be removed from both lists + * and replaced on the ready list should either the event occur (and + * there be no higher priority tasks waiting on the same event) or + * the delay period expires. + * + * The 'unordered' version replaces the event list item value with the + * xItemValue value, and inserts the list item at the end of the list. + * + * The 'ordered' version uses the existing event list item value (which is the + * owning task's priority) to insert the list item into the event list in task + * priority order. + * + * @param pxEventList The list containing tasks that are blocked waiting + * for the event to occur. + * + * @param xItemValue The item value to use for the event list item when the + * event list is not ordered by task priority. + * + * @param xTicksToWait The maximum amount of time that the task should wait + * for the event to occur. This is specified in kernel ticks, the constant + * portTICK_PERIOD_MS can be used to convert kernel ticks into a real time + * period. + */ +void vTaskPlaceOnEventList( List_t * const pxEventList, + const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; +void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, + const TickType_t xItemValue, + const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * This function performs nearly the same function as vTaskPlaceOnEventList(). 
+ * The difference being that this function does not permit tasks to block + * indefinitely, whereas vTaskPlaceOnEventList() does. + * + */ +void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, + TickType_t xTicksToWait, + const BaseType_t xWaitIndefinitely ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS AN + * INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED. + * + * Removes a task from both the specified event list and the list of blocked + * tasks, and places it on a ready queue. + * + * xTaskRemoveFromEventList()/vTaskRemoveFromUnorderedEventList() will be called + * if either an event occurs to unblock a task, or the block timeout period + * expires. + * + * xTaskRemoveFromEventList() is used when the event list is in task priority + * order. It removes the list item from the head of the event list as that will + * have the highest priority owning task of all the tasks on the event list. + * vTaskRemoveFromUnorderedEventList() is used when the event list is not + * ordered and the event list items hold something other than the owning tasks + * priority. In this case the event list item value is updated to the value + * passed in the xItemValue parameter. + * + * @return pdTRUE if the task being removed has a higher priority than the task + * making the call, otherwise pdFALSE. + */ +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) PRIVILEGED_FUNCTION; +void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, + const TickType_t xItemValue ) PRIVILEGED_FUNCTION; + +/* + * THIS FUNCTION MUST NOT BE USED FROM APPLICATION CODE. IT IS ONLY + * INTENDED FOR USE WHEN IMPLEMENTING A PORT OF THE SCHEDULER AND IS + * AN INTERFACE WHICH IS FOR THE EXCLUSIVE USE OF THE SCHEDULER. + * + * Sets the pointer to the current TCB to the TCB of the highest priority task + * that is ready to run. 
+ */ +portDONT_DISCARD void vTaskSwitchContext( BaseType_t xCoreID ) PRIVILEGED_FUNCTION; + +/* + * THESE FUNCTIONS MUST NOT BE USED FROM APPLICATION CODE. THEY ARE USED BY + * THE EVENT BITS MODULE. + */ +TickType_t uxTaskResetEventItemValue( void ) PRIVILEGED_FUNCTION; + +/* + * Return the handle of the calling task. + */ +TaskHandle_t xTaskGetCurrentTaskHandle( void ) PRIVILEGED_FUNCTION; + +/* + * Return the handle of the task running on specified core. + */ +TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) PRIVILEGED_FUNCTION; + +/* + * Shortcut used by the queue implementation to prevent unnecessary call to + * taskYIELD(); + */ +void vTaskMissedYield( void ) PRIVILEGED_FUNCTION; + +/* + * Returns the scheduler state as taskSCHEDULER_RUNNING, + * taskSCHEDULER_NOT_STARTED or taskSCHEDULER_SUSPENDED. + */ +BaseType_t xTaskGetSchedulerState( void ) PRIVILEGED_FUNCTION; + +/* + * Raises the priority of the mutex holder to that of the calling task should + * the mutex holder have a priority less than the calling task. + */ +BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; + +/* + * Set the priority of a task back to its proper priority in the case that it + * inherited a higher priority while it was holding a semaphore. + */ +BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) PRIVILEGED_FUNCTION; + +/* + * If a higher priority task attempting to obtain a mutex caused a lower + * priority task to inherit the higher priority task's priority - but the higher + * priority task then timed out without obtaining the mutex, then the lower + * priority task will disinherit the priority again - but only down as far as + * the highest priority task that is still waiting for the mutex (if there were + * more than one task waiting for the mutex). 
+ */ +void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, + UBaseType_t uxHighestPriorityWaitingTask ) PRIVILEGED_FUNCTION; + +/* + * Get the uxTCBNumber assigned to the task referenced by the xTask parameter. + */ +UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +/* + * Set the uxTaskNumber of the task referenced by the xTask parameter to + * uxHandle. + */ +void vTaskSetTaskNumber( TaskHandle_t xTask, + const UBaseType_t uxHandle ) PRIVILEGED_FUNCTION; + +/* + * Only available when configUSE_TICKLESS_IDLE is set to 1. + * If tickless mode is being used, or a low power mode is implemented, then + * the tick interrupt will not execute during idle periods. When this is the + * case, the tick count value maintained by the scheduler needs to be kept up + * to date with the actual execution time by being skipped forward by a time + * equal to the idle period. + */ +void vTaskStepTick( const TickType_t xTicksToJump ) PRIVILEGED_FUNCTION; + +/* + * Only available when configUSE_TICKLESS_IDLE is set to 1. + * Provided for use within portSUPPRESS_TICKS_AND_SLEEP() to allow the port + * specific sleep function to determine if it is ok to proceed with the sleep, + * and if it is ok to proceed, if it is ok to sleep indefinitely. + * + * This function is necessary because portSUPPRESS_TICKS_AND_SLEEP() is only + * called with the scheduler suspended, not from within a critical section. It + * is therefore possible for an interrupt to request a context switch between + * portSUPPRESS_TICKS_AND_SLEEP() and the low power mode actually being + * entered. eTaskConfirmSleepModeStatus() should be called from a short + * critical section between the timer being stopped and the sleep mode being + * entered to ensure it is ok to proceed into the sleep mode. + */ +eSleepModeStatus eTaskConfirmSleepModeStatus( void ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. 
Increment the mutex held count when a mutex is + * taken and return the handle of the task that has taken the mutex. + */ +TaskHandle_t pvTaskIncrementMutexHeldCount( void ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Same as vTaskSetTimeOutState(), but without a critical + * section. + */ +void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) PRIVILEGED_FUNCTION; + +/* + * For internal use only. Same as portYIELD_WITHIN_API() in single core FreeRTOS. + * For SMP this is not defined by the port. + */ +void vTaskYieldWithinAPI( void ); + +/* *INDENT-OFF* */ +#ifdef __cplusplus + } +#endif +/* *INDENT-ON* */ +#endif /* INC_TASK_H */ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/paths.sh b/Test/VeriFast/tasks/vTaskSwitchContext/paths.sh new file mode 100755 index 00000000000..d9e8f2cc825 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/paths.sh @@ -0,0 +1,218 @@ +# Returns the absolute path to the directory containing the VeriFast proofs +# concerning `vTaskSwitchContext` in `tasks.c`. +# +# Expected arguments: +# $1 : Absolute path to the repository's base directory. +function vf_proof_base_dir() { + REPO_BASE_DIR="$1" + echo "$REPO_BASE_DIR/Test/VeriFast/tasks/vTaskSwitchContext" +} + +# Returns the absolute path to the directory containing modified versions of +# FreeRTOS source files. The VeriFast proofs use these modified verions instead +# of the original files. +# +# Expected arguments: +# $1 : Absolute path to the repository's base +function vf_proof_mod_src_dir() { + REPO_BASE_DIR="$1" + VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR` + + echo "$VF_PROOF_DIR/src" +} + +# Returns the absolute path to the directory containing modified versions of +# FreeRTOS header files. The VeriFast proofs use these modified verions instead +# of the original files. 
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function vf_proof_mod_header_dir() {
+    REPO_BASE_DIR="$1"
+    VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR`
+
+    echo "$VF_PROOF_DIR/include"
+}
+
+# Returns the absolute path to the directory containing everything related to
+# the setup of the VeriFast proofs.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function vf_proof_setup_dir() {
+    REPO_BASE_DIR="$1"
+    VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR`
+
+    echo "$VF_PROOF_DIR/proof_setup"
+}
+
+# Returns the absolute path to the directory containing all lemmas and
+# definitions written for the VeriFast proofs.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function vf_proof_dir() {
+    REPO_BASE_DIR="$1"
+    VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR`
+
+    echo "$VF_PROOF_DIR/proof"
+}
+
+# Returns the absolute path to the version of `tasks.c` containing the VeriFast
+# proof annotations.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function vf_annotated_tasks_c() {
+    REPO_BASE_DIR="$1"
+    VF_MOD_SRC_DIR=`vf_proof_mod_src_dir $REPO_BASE_DIR`
+
+    echo "$VF_MOD_SRC_DIR/tasks.c"
+}
+
+# Returns the absolute path to the directory containing the unmodified FreeRTOS
+# headers.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function prod_header_dir() {
+    REPO_BASE_DIR="$1"
+
+    echo "$REPO_BASE_DIR/include"
+}
+
+# Returns the absolute path to the directory containing the unmodified FreeRTOS
+# source files.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function prod_src_dir() {
+    REPO_BASE_DIR="$1"
+
+    echo "$REPO_BASE_DIR"
+}
+
+# Returns the absolute path to the unmodified version of `tasks.c`. 
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function prod_tasks_c() {
+    REPO_BASE_DIR="$1"
+    PROD_SRC_DIR=`prod_src_dir $REPO_BASE_DIR`
+
+    echo "$PROD_SRC_DIR/tasks.c"
+}
+
+
+# Returns the absolute path to the directory containing the preprocessing scripts.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function pp_script_dir() {
+    REPO_BASE_DIR="$1"
+    VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR`
+
+    echo "$VF_PROOF_DIR/preprocessing_scripts"
+}
+
+# Returns the absolute path to the preprocessor's output directory.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function pp_out_dir() {
+    REPO_BASE_DIR="$1"
+    VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR`
+
+    echo "$VF_PROOF_DIR/preprocessed_files"
+}
+
+# Returns the absolute path to the preprocessor's log directory.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function pp_log_dir() {
+    REPO_BASE_DIR="$1"
+    VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR`
+
+    echo "$VF_PROOF_DIR/pp_log"
+}
+
+# Returns the absolute path to the preprocessed version of `tasks.c` containing
+# the VeriFast proof annotations. This is the file that is processed by
+# VeriFast.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function pp_vf_tasks_c() {
+    REPO_BASE_DIR="$1"
+    PP_OUT_DIR=`pp_out_dir $REPO_BASE_DIR`
+
+    echo "$PP_OUT_DIR/tasks_vf_pp.c"
+}
+
+# Returns the absolute path to the preprocessed unmodified version of `tasks.c`.
+#
+# Expected arguments:
+# $1 : Absolute path to the repository's base
+function pp_prod_tasks_c() {
+    REPO_BASE_DIR="$1"
+    PP_OUT_DIR=`pp_out_dir $REPO_BASE_DIR`
+
+    echo "$PP_OUT_DIR/tasks_prod_pp.c"
+}
+
+# Returns the absolute path to the pico sdk. 
+# +# Expected arguments: +# $1 : Absolute path to the repository's base +function pico_sdk_dir() { + REPO_BASE_DIR="$1" + VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR` + + echo "$VF_PROOF_DIR/sdks/pico-sdk" +} + +# Returns the absolute path to the smp_demo_dir. +# +# Expected arguments: +# $1 : Absolute path to the repository's base +function smp_demo_dir() { + REPO_BASE_DIR="$1" + VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR` + + echo "$VF_PROOF_DIR/demos/FreeRTOS-SMP-Demos" +} + + +# Returns the absolute path to directory where the statistic reports are stored. +# +# Expected arguments: +# $1 : Absolute path to the repository's base +function stats_dir() { + REPO_BASE_DIR="$1" + VF_PROOF_DIR=`vf_proof_base_dir $REPO_BASE_DIR` + + echo "$VF_PROOF_DIR/stats" +} + + +# Ensures that all potentially relevant output direcories exist. +# +# Expected arguments: +# $1 : Absolute path to the repository's base +function ensure_output_dirs_exist() { + REPO_BASE_DIR="$1" + + PP_OUT_DIR=`pp_out_dir $REPO_BASE_DIR` + STATS_DIR=`stats_dir $REPO_BASE_DIR` + PP_LOG_DIR=`pp_log_dir $REPO_BASE_DIR` + + if [ ! -d "$PP_OUT_DIR" ]; then + mkdir "$PP_OUT_DIR" + fi + if [ ! -d "$STATS_DIR" ]; then + mkdir "$STATS_DIR" + fi + if [ ! -d "$PP_LOG_DIR" ]; then + mkdir "$PP_LOG_DIR" + fi +} diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/pp_flags.sh b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/pp_flags.sh new file mode 100755 index 00000000000..40b9015165f --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/pp_flags.sh @@ -0,0 +1,134 @@ +#!/bin/bash + +# This script defines common command line arguments for the preprocessor. + +# This script expects the following arguments: +# $1 : Absolute path to the base directory of this repository. +# $2 : Absolute path to the VeriFast proof directory. +# $3 : Absolute path to the VeriFast installation directory. 
+ + +REPO_BASE_DIR="$1" +VF_PROOF_BASE_DIR="$2" +VF_DIR="$3" + + +# Load functions used to compute paths. +. "$VF_PROOF_BASE_DIR/paths.sh" + +PICO_SDK_DIR=`pico_sdk_dir $REPO_BASE_DIR` +SMP_DEMO_DIR=`smp_demo_dir $REPO_BASE_DIR` +VF_PROOF_MOD_HEADER_DIR=`vf_proof_mod_header_dir $REPO_BASE_DIR` +VF_PROOF_MOD_SRC_DIR=`vf_proof_mod_src_dir $REPO_BASE_DIR` +PROOF_SETUP_DIR=`vf_proof_setup_dir $REPO_BASE_DIR` +PROOF_FILES_DIR=`vf_proof_dir $REPO_BASE_DIR` + + + +declare -a BUILD_FLAGS +BUILD_FLAGS=( + -DFREE_RTOS_KERNEL_SMP=1 + -DLIB_FREERTOS_KERNEL=1 + -DLIB_PICO_BIT_OPS=1 + -DLIB_PICO_BIT_OPS_PICO=1 + -DLIB_PICO_DIVIDER=1 + -DLIB_PICO_DIVIDER_HARDWARE=1 + -DLIB_PICO_DOUBLE=1 + -DLIB_PICO_DOUBLE_PICO=1 + -DLIB_PICO_FLOAT=1 + -DLIB_PICO_FLOAT_PICO=1 + -DLIB_PICO_INT64_OPS=1 + -DLIB_PICO_INT64_OPS_PICO=1 + -DLIB_PICO_MALLOC=1 + -DLIB_PICO_MEM_OPS=1 + -DLIB_PICO_MEM_OPS_PICO=1 + -DLIB_PICO_MULTICORE=1 + -DLIB_PICO_PLATFORM=1 + -DLIB_PICO_PRINTF=1 + -DLIB_PICO_PRINTF_PICO=1 + -DLIB_PICO_RUNTIME=1 + -DLIB_PICO_STANDARD_LINK=1 + -DLIB_PICO_STDIO=1 + -DLIB_PICO_STDIO_UART=1 + -DLIB_PICO_STDLIB=1 + -DLIB_PICO_SYNC=1 + -DLIB_PICO_SYNC_CORE=1 + -DLIB_PICO_SYNC_CRITICAL_SECTION=1 + -DLIB_PICO_SYNC_MUTEX=1 + -DLIB_PICO_SYNC_SEM=1 + -DLIB_PICO_TIME=1 + -DLIB_PICO_UTIL=1 + -DPICO_BOARD=\"pico\" + -DPICO_BUILD=1 + -DPICO_CMAKE_BUILD_TYPE=\"Release\" + -DPICO_COPY_TO_RAM=0 + -DPICO_CXX_ENABLE_EXCEPTIONS=0 + -DPICO_NO_FLASH=0 + -DPICO_NO_HARDWARE=0 + -DPICO_ON_DEVICE=1 + -DPICO_STACK_SIZE=0x1000 + -DPICO_TARGET_NAME=\"on_core_one\" + -DPICO_USE_BLOCKED_RAM=0 + -DmainRUN_FREE_RTOS_ON_CORE=1 +) + +declare -a PICO_INCLUDE_FLAGS +PICO_INCLUDE_FLAGS=( + -I"$PICO_SDK_DIR/src/boards/include" + -I"$PICO_SDK_DIR/src/common/pico_base/include" + -I"$PICO_SDK_DIR/src/common/pico_binary_info/include" + -I"$PICO_SDK_DIR/src/common/pico_bit_ops/include" + -I"$PICO_SDK_DIR/src/common/pico_divider/include" + -I"$PICO_SDK_DIR/src/common/pico_stdlib/include" + 
-I"$PICO_SDK_DIR/src/common/pico_sync/include" + -I"$PICO_SDK_DIR/src/common/pico_time/include" + -I"$PICO_SDK_DIR/src/common/pico_util/include" + -I"$PICO_SDK_DIR/src/rp2040/hardware_regs/include" + -I"$PICO_SDK_DIR/src/rp2040/hardware_structs/include" + -I"$PICO_SDK_DIR/src/rp2_common/boot_stage2/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_base/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_claim/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_clocks/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_divider/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_exception/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_gpio/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_irq/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_pll/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_resets/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_sync/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_timer/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_uart/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_vreg/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_watchdog/include" + -I"$PICO_SDK_DIR/src/rp2_common/hardware_xosc/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_bootrom/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_double/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_float/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_int64_ops/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_malloc/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_multicore/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_platform/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_printf/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_runtime/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_stdio/include" + -I"$PICO_SDK_DIR/src/rp2_common/pico_stdio_uart/include" +) + +declare -a RP2040_INCLUDE_FLAGS +RP2040_INLCUDE_FLAGS=( + -I"$SMP_DEMO_DIR/FreeRTOS/Demo/CORTEX_M0+_RP2040/OnEitherCore" + 
-I"$SMP_DEMO_DIR/FreeRTOS/Demo/CORTEX_M0+_RP2040/build/generated/pico_base" + -I"$REPO_BASE_DIR/portable/ThirdParty/GCC/RP2040/include" + -I"$REPO_BASE_DIR/portable/ThirdParty/GCC/RP2040" +) + +declare -a VERIFAST_FLAGS +VERIFAST_FLAGS=( + -DVERIFAST + -DVERIFAST_SKIP_BITVECTOR_PROOF__STACK_ALIGNMENT + -I"$VF_DIR/bin" + -I"$VF_PROOF_MOD_HEADER_DIR" + -I"$VF_PROOF_MOD_SRC_DIR" + -I"$PROOF_SETUP_DIR" + -I"$PROOF_FILES_DIR" +) \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/prepare_file_for_VeriFast.sh b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/prepare_file_for_VeriFast.sh new file mode 100755 index 00000000000..8cb4394ff0b --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/prepare_file_for_VeriFast.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# This script expects the following command line arguments: +# $1 : Absolute path to the source file that should be prepared for VeriFast. +# $2 : Absolute path to which the result shall be written. +# $3 : Absolute path under which preprocessor error shall be logged. +# $4 : Absolute path to the root dir of this repository +# $5 : Absolute path to the root of the directory containing the VeriFast proofs +# $6 : Absolute path to the VeriFast directory + +SRC_FILE="$1" +OUT_FILE="$2" +FILE_PP_ERR_LOG="$3" +REPO_BASE_DIR="$4" +VF_PROOF_BASE_DIR="$5" +VF_DIR="$6" + + +# Load functions used to compute paths. +. "$VF_PROOF_BASE_DIR/paths.sh" + + +PP_SCRIPT_DIR=`pp_script_dir $REPO_BASE_DIR` +PP_LOG_DIR=`pp_log_dir $REPO_BASE_DIR` +FILE_PP_LOG="$PP_LOG_DIR/pp.c" +FILE_RW_LOG="$PP_LOG_DIR/rw.c" + + +# Ensure that log directory exists +if [ ! -d "$PP_LOG_DIR" ]; then + mkdir "$PP_LOG_DIR" +fi + + +# Preprocessing the source file +# Output is written to '$FILE_PP_LOG' and error report is written to +# '$FILE_PP_ERR_LOG'. 
+"$PP_SCRIPT_DIR/preprocess_file_for_verification.sh" $SRC_FILE \ + $FILE_PP_LOG $FILE_PP_ERR_LOG \ + $REPO_BASE_DIR $VF_PROOF_BASE_DIR $VF_DIR + +cp "$FILE_PP_LOG" "$FILE_RW_LOG" +"$PP_SCRIPT_DIR/vf_rewrite.sh" "$FILE_RW_LOG" +cp "$FILE_RW_LOG" "$OUT_FILE" diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/preprocess_file_for_diff.sh b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/preprocess_file_for_diff.sh new file mode 100755 index 00000000000..4a04b087f27 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/preprocess_file_for_diff.sh @@ -0,0 +1,57 @@ +#!/bin/bash + + +# This script preprocesses a given source file. Include paths are configured to +# fit 'tasks.c', but it might also be useful for other source files. +# The preprocessor is configured such that `diff`-ing results produced by this +# script (from different versions of the same file) yields useful results. +# +# This script expects the following arguments: +# $1 : Absolute path to the source file to be preprocessed. +# $2 : Absolute path of the preprocessor's output file. +# $3 : Absolute path to which the error report will be written. +# $4 : Absolute path to the base directory of this repository. +# $5 : Absolute path to the VeriFast proof directory. + + +SRC_FILE="$1" +OUT_FILE="$2" +ERR_FILE="$3" +REPO_BASE_DIR="$4" +VF_PROOF_BASE_DIR="$5" + + +# Load functions used to compute paths. +. "$VF_PROOF_BASE_DIR/paths.sh" + +# Load variables storing preprocessor flags. +. "`pp_script_dir $REPO_BASE_DIR`/pp_flags.sh" "$REPO_BASE_DIR" "$VF_PROOF_BASE_DIR" + +PROD_HEADER_DIR=`prod_header_dir $REPO_BASE_DIR` + + +# Relevant clang flags: +# -E : Run preprocessor +# -C : Include comments in output +# -P : Surpresses line/file pragmas +# -D NDEBUG : Deactivate assertions. + +# Note: +# The implementation of the `assert` macro is platform dependent and is defined +# in the system header `assert.h`. 
A preprocessed assertion might contain +# a reference to the location of the assertion in the source code (e.g. on OS X). +# This causes false positives when `diff`-ing preprocessed files. Hence, we +# deactivate assertions. + +echo Preprocessing file: +echo \"$SRC_FILE\" +echo Output will be written to: +echo \"$OUT_FILE\" +echo Errors will be reported in: +echo \"$ERR_FILE\" +echo +clang -E -P -D NDEBUG \ +${BUILD_FLAGS[@]} ${RP2040_INLCUDE_FLAGS[@]} ${PICO_INCLUDE_FLAGS[@]} \ +-I"$PROD_HEADER_DIR" \ +-c "$SRC_FILE" \ +1>"$OUT_FILE" 2>"$ERR_FILE" \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/preprocess_file_for_verification.sh b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/preprocess_file_for_verification.sh new file mode 100755 index 00000000000..a056731f565 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/preprocess_file_for_verification.sh @@ -0,0 +1,50 @@ +#!/bin/bash + + +# This script preprocesses a given source file annotated with VeriFast proof +# steps. Include paths are configured to fit 'tasks.c', but it might also be +# useful for other source files. The preprocessor is configured to include the +# proper proof files from VeriFast's standard library and to also include +# source code guarded by 'VERIFAST' defines. +# +# This script expects the following arguments: +# $1 : Absolute path to the source file to be preprocessed. +# $2 : Absolute path of the preprocessor's output file. +# $3 : Absolute path to which the error report will be written. +# $4 : Absolute path to the base directory of this repository. +# $5 : Absolute path to the VeriFast proof directory. +# $6 : Absolute path to the VeriFast installation directory. + + +SRC_FILE="$1" +OUT_FILE="$2" +ERR_FILE="$3" +REPO_BASE_DIR="$4" +VF_PROOF_BASE_DIR="$5" +VF_DIR="$6" + + + +# Load functions used to compute paths. +. 
"$VF_PROOF_BASE_DIR/paths.sh" + +# Load variables storing preprocessor flags. +. "`pp_script_dir $REPO_BASE_DIR`/pp_flags.sh" "$REPO_BASE_DIR" "$VF_PROOF_BASE_DIR" "$VF_DIR" + + +# Relevant clang flags: +# -E : Run preprocessor +# -C : Include comments in output +# -P : Surpresses line/file pragmas + +echo start preprocessor +clang -E -C \ +\ +${BUILD_FLAGS[@]} \ +${VERIFAST_FLAGS[@]} \ +${RP2040_INLCUDE_FLAGS[@]} \ +${PICO_INCLUDE_FLAGS[@]} \ +-I`prod_header_dir $REPO_BASE_DIR` \ +\ +-c "$SRC_FILE" \ +1>"$OUT_FILE" 2>"$ERR_FILE" diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/vf_rewrite.sh b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/vf_rewrite.sh new file mode 100755 index 00000000000..2898a831227 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/preprocessing_scripts/vf_rewrite.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# This script rewrites a given source in-pace such that the result can be +# processed by VeriFast. Each rewrite below concerns a specific construct +# VeriFast cannot handle. When VeriFast will be extended to handle a +# problematic construct we encountered, the corresponding rewirte below can be +# deleted. +# +# This scirpt expects the following arguments: +# $1 : The absolute path to the source file to be rewritten in place. +# +# Note: Callers are responsible to back up the rewritten source file beforehand. 
+ + +SOURCE_FILE="$1" + + +# IMPORTANT: +# None of the provided regexes must contain the unescaped character '|' +# +# $1 : sed 'find' regex +# $2 : sed 'replace' regex +rewrite() +{ + FIND_REGEX=$1 + REPLACE_REGEX=$2 + echo "Rewrite pattern: \"$FIND_REGEX\" -> \"$REPLACE_REGEX\"" + sed -i "" "s|$FIND_REGEX|$REPLACE_REGEX|g" $SOURCE_FILE + echo +} + + +echo "Commenting out line/file pragmas" +rewrite "^#" "// &" + +echo "Fixing order of 'long', 'unsigned'" +echo "Reported issue 338:" +echo "https://github.com/verifast/verifast/issues/338" +rewrite "long unsigned int" "unsigned long int" + +echo "Delete fixed-sized array typedefs" +echo "Reported issue 339:" +echo "https://github.com/verifast/verifast/issues/339" +rewrite "typedef .*\[[0-9]*\];" "" + +echo "Delete attributes" +echo "Reported issue 340:" +echo "https://github.com/verifast/verifast/issues/340" +rewrite "__attribute__(([_a-z]*))" "" +# Note: `\s` or `:space:` not work on MacOs. +rewrite "__attribute__( ( [_a-z]* ) )" "" + +echo "Delete void casts (used to suppress compiler warnings)" +echo "Reported issue 335" +echo "https://github.com/verifast/verifast/issues/335" +rewrite "( void ) memset" "memset" + +echo "Removing const qualifiers from pointers" +echo "Reported issue 333:" +echo "https://github.com/verifast/verifast/issues/333" +rewrite "[*] const" "*" +rewrite "const [*]" "*" + +echo "Uncomment special includes to allow VeriFast proofs to refer to config macros" +rewrite "//VF_include #include" "#include" +rewrite "//VF_macro #" "#" diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/README.md b/Test/VeriFast/tasks/vTaskSwitchContext/proof/README.md new file mode 100644 index 00000000000..f01ef2d94f4 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/README.md @@ -0,0 +1,55 @@ +This directory contains the bulk of VeriFast formalizations and proofs. 
+ + +# Directory Structure +``` +├── lock_predicates.h +│ Contains the formalization of the lock invariants, i.e., the invariants +│ associated with: Masking interrupts, the task lock and the ISR lock. +│ This file also contains the lemmas to prove that the task state updates +│ in `prvSelectHighestPriorityTask` preserve the lock invariants. +│ +├── port_locking_contracts.h +│ Contains VeriFast function contracts for macros with port-specific +│ definitions used to invoke synchronization mechanisms, e.g., masking +│ interrupts and acquiring locks. These port-specific definitions often +│ contain inline assembly VeriFast cannot reason about. The contracts allow us +│ to abstract the semantics of the assembly. +│ +├── ready_list_predicates.h +│ Contains the predicates describing the ready lists as well as lemmas to +│ reason about ready lists. +│ +├── stack_predicates.h +│ Contains the formalization of the stack layout used in the RP2040 port. +│ +├── task_predicates.h +│ Contains predicates describing task control blocks. +│ +├── task_running_states.h +│ `tasks.c` defines macros that are used to denote task run states. +│ The proof headers in this directory cannot refer to these macros. +│ This header contains auxiliary definitions used to expose the run state +│ macros to the proof headers. +│ +├── verifast_lists_extended.h +│ Contains list axioms and lemmas that would naturally fit into VeriFast's +│ standard list library `listex.gh`. +│ +├── README.md +│ +├── single_core_proofs +│ Contains the old list formalization and proofs written by +│ Aalok Thakkar and Nathan Chong in 2020 for the single-core +│ setup. +│ │ +│ ├── scp_common.h +│ │ Contains auxiliary definitions and lemmas. +│ │ +│ └── scp_list_predicates.h +│ Contains the formalizaton of doubly linked lists and list items. +│ +└── single_core_proofs_extended + Contains new proofs extending the single-core list + formalization. 
+``` diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/lock_predicates.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/lock_predicates.h new file mode 100644 index 00000000000..868d0ef90ec --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/lock_predicates.h @@ -0,0 +1,599 @@ +#ifndef LOCK_PREDICATES_H +#define LOCK_PREDICATES_H + +#include "task_running_states.h" + + +#include "verifast_lists_extended.h" + + +/* ---------------------------------------------------------------------- + * Locking discipline explained: + * FreeRTOS uses the following synchronization mechanisms: + * - Deactivating interrupts: + * Some data is only meant to be accessed on a specific core C. Such data + * may only be accessed after interrupts on core C have been deactivated. + * For instance the global array `pxCurrentTCBs` in `tasks.c` has an entry for + * every core. `pxCurrentTCBs[C]` stores a pointer to the TCB of the task + * running on core C. Core C is always allowed to read `pxCurrentTCBs[C]`. + * However, writing requires the interrupts on core C to be deactivated. + * + * The resources protected by disabling interrupts are represented by the + * predicate `coreLocalInterruptInv_p` defined below. + * + * - task lock: + * The task lock is used to protect ciritical sections and resources from + * being accessed by multiple tasks simultaneously. The resources protected + * by the task lock are represented by the abstract predicate `taskLockInv_p` + * defined below. This proof does not deal with resources or code segments + * only protected by the task lock. Hence, we leave the predicate abstract. + * + * - ISR lock: + * The ISR/ interrupt lock is used to protect critical sections and resources + * from being accessed by multiple interrupts simultaneously. The resources + * protected by the ISR lock are represented by the abstract predicate + * `isrLockInv_p` defined below. 
This proof does not deal with resources or + * code segments only protected by the ISR lock. Hence, we leave the predicate + * abstract. + * + * - task lock + ISR lock: + * Access to certain resources and ciritical sections are protected by both + * the task lock and the ISR lock. For these, it is crucial that we first + * acquire the task lock and then the ISR lock. Likewise, we must release them + * in opposite order. Failure to comply with this order may lead to deadlocks. + * The resources protected by both locks are the main resources this proof + * deals with. These include the ready lists and the certain access rights + * to the tasks' run states. The access rights protected by both locks are + * represented by the predicate `taskISRLockInv_p` defined below. + * Once both locks have been acquired in the right order, this lock invariant + * can be produced by calling the lemma `produce_taskISRLockInv`. Before the + * locks can be released, the invariant must be consumed by calling + * `consume_taskISRLockInv`. Both lemmas are defined below. +*/ + + +/* ---------------------------------------------------------------------- + * Core local data and access restrictions. + * Some data in FreeRTOS such as the pointer to TCB of the task running + * on core `C` may only be accessed from core `C`. Such core-local data is + * protected by deactivating interrupts. + */ + +/*@ +// Represents the state of interrupts (i.e. activated or deactivated) on a +// specific core. The state corresponds to the value of the special register +// used for interrupt masking. +predicate interruptState_p(uint32_t coreID, uint32_t state); + +// Given an interrupt state (i.e. the value of the special register used to +// control interrupt masking), this function returns whether the state expresses +// that interrupts are deactivated. 
+fixpoint bool interruptsDisabled_f(uint32_t);
+
+
+// This predicate expresses that the core we are currently reasoning about
+// (expressed by constant `coreID_f`) is allowed to access the core-local data
+// protected by interrupt masking.
+predicate coreLocalInterruptInv_p() =
+    // Read permission to the entry of `pxCurrentTCBs` that stores a pointer
+    // to the task currently running on this core
+    [0.5]pointer(&pxCurrentTCBs[coreID_f], ?currentTCB)
+    &*&
+    // Write permission to the entry of `xYieldPendings` for the current core
+    integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _)
+    &*&
+    // Write permission to the "critical nesting" field of the task
+    // currently scheduled on this core. The field allows us to check whether
+    // the task is currently in a critical section. Necessary to check whether
+    // we are allowed to context switch.
+    TCB_criticalNesting_p(currentTCB, ?gCriticalNesting);
+@*/
+
+
+/* ----------------------------------------------------------------------
+ * Predicates relevant for all locks
+ */
+
+/*@
+// This predicate is used to remember which locks we're currently holding. Each
+// element consists of a pair `(f,id)`. `f` is the fraction of the lock we held
+// before acquiring. Remembering the fraction is important to ensure that we
+// reproduce the right fraction of the lock predicate when we release the lock.
+// Otherwise, we can run into inconsistencies.
+// `id` is the ID of the acquired lock, i.e., either `taskLockID_f` or
+// `isrLockID_f`.
+predicate locked_p(list< pair<real, int> > lockHistory);
+@*/
+
+
+
+/* ----------------------------------------------------------------------
+ * Task lock
+ */
+
+/*@
+fixpoint int taskLockID_f();
+
+// Represents an unacquired task lock.
+predicate taskLock_p();
+
+// Represents the invariant associated with the task lock, i.e.,
+// access permissions to the resources and code regions protected by the lock.
+// These are not relevant to the context-switch proof.
 Therefore, we leave the
+// predicate abstract.
+predicate taskLockInv_p();
+@*/
+
+/* ----------------------------------------------------------------------
+ * ISR lock
+ */
+
+/*@
+fixpoint int isrLockID_f();
+
+// Represents an unacquired ISR lock.
+predicate isrLock_p();
+
+// Represents the invariant associated with the ISR lock, i.e.,
+// access permissions to the resources and code regions protected by the lock.
+// These are not relevant to the context-switch proof. Therefore, we leave the
+// predicate abstract.
+predicate isrLockInv_p();
+@*/
+
+
+/* ----------------------------------------------------------------------
+ * Resources protected by both locks.
+ * Note that the task lock may never be acquired after the ISR lock.
+ */
+
+/*@
+fixpoint int taskISRLockID_f();
+
+// Represents the access rights protected by both the task and the ISR lock.
+// Note that FreeRTOS' locking discipline demands that the task lock must be
+// acquired before the ISR lock. Once both locks have been acquired in the
+// right order, this invariant can be produced by invoking the lemma
+// `produce_taskISRLockInv` and it can be consumed by invoking
+// `consume_taskISRLockInv`. The lemmas ensure that we follow the locking
+// discipline.
+//
+// This invariant expresses fine-grained access rights to the following:
+// - some global variables:
+//   + Read permission to the entry of `pxCurrentTCBs` that stores a pointer
+//     to the task currently running on the core `coreID_f` our proof currently
+//     considers. Together with the read permission from
+//     `coreLocalInterruptInv_p` we get write access to this entry once
+//     interrupts have been deactivated and both locks have been acquired.
+//   + Write permission to `uxSchedulerSuspended`.
+//   + Write permission to `xSchedulerRunning`.
+//   + Write permission to `uxTopReadyPriority`. This variable stores the top
+//     priority for which there is a task that is ready to be scheduled.
+// - Write access to the ready lists.
+// - Fine-grained access permissions for task run states: +// + (RP-All) Read permission for every task. +// + (RP-Current) Read permission for task currently scheduled on this core. +// Together, (RP-All) and (RP-Current) give us a write permission for +// task scheduled on this core. +// + (RP-Unsched) Read permissions for unscheduled tasks. +// Together, (RP-All) and (RP-Unsched) give us write permissions for all +// unscheduled tasks. +// Note that these permissions do not allow us to change the run state of any +// task that is currently scheduled on another core. +predicate taskISRLockInv_p() = + _taskISRLockInv_p(_); + + +// Auxiliary predicate. Equal to the lock invariant above but exposes +// some details. +predicate _taskISRLockInv_p(UBaseType_t gTopReadyPriority) = + // Access to global variables + [0.5]pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*& + integer_((void*) &uxSchedulerSuspended, sizeof(UBaseType_t), false, _) &*& + integer_(&xSchedulerRunning, sizeof(BaseType_t), true, _) + &*& + // top ready priority must be in range + integer_((void*) &uxTopReadyPriority, sizeof(UBaseType_t), false, gTopReadyPriority) &*& + 0 <= gTopReadyPriority &*& gTopReadyPriority < configMAX_PRIORITIES + &*& + // tasks / TCBs + exists_in_taskISRLockInv_p(?gTasks, ?gStates) + &*& + // (RP-All) Read permissions for every task + // and recording of task states in state list + // (∀t ∈ gTasks. + // [1/2]TCB_runState_p(t, _)) + // ∧ + // ∀i. ∀t. 
gTasks[i] == t -> gStates[i] == t->xTaskRunState + foreach(gTasks, readOnly_TCB_runState_p(gTasks, gStates)) + &*& + // (RP-Current) Read permission for task currently scheduled on this core + // (RP-All) + (RP-Current) => Write permission for scheduled task + [1/2]TCB_runState_p(gCurrentTCB, ?gCurrentTCB_state) &*& + (gCurrentTCB_state == coreID_f() || gCurrentTCB_state == taskTASK_YIELDING) &*& + nth(index_of(gCurrentTCB, gTasks), gStates) == gCurrentTCB_state + &*& + // (RP-Unsched) Read permissions for unscheduled tasks + // (RP-All) + (RP-Unsched) => Write permissions for unscheduled tasks + // ∀t ∈ tasks. t->xTaskState == taskTASK_NOT_RUNNING + // -> [1/2]shared_TCB_p(t, taskTASK_NOT_RUNNING) + foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates)) + &*& + readyLists_p(?gCellLists, ?gOwnerLists) + &*& + // gTasks contains all relevant tasks + mem(gCurrentTCB, gTasks) == true + &*& + // ∀l ∈ gOwnerLists. l ⊆ gTasks + forall(gOwnerLists, (superset)(gTasks)) == true; + + +lemma void produce_taskISRLockInv(); +requires locked_p(?heldLocks) &*& + heldLocks == cons(?i, cons(?t, nil)) &*& + i == pair(?f_isr, isrLockID_f()) &*& + t == pair(?f_task, taskLockID_f()); +ensures locked_p( cons( pair(_, taskISRLockID_f()), heldLocks) ) &*& + taskISRLockInv_p(); + + +lemma void consume_taskISRLockInv(); +requires locked_p( cons( pair(_, taskISRLockID_f()), ?otherLocks) ) &*& + taskISRLockInv_p(); +ensures locked_p(otherLocks); + + + +// Auxiliary predicate to assing names to existentially quantified variables. +// Having multiple `exists` chunks on the heap makes matching against their +// arguments ambiguous in most cases. +predicate exists_in_taskISRLockInv_p(list gTasks, + list gStates) = + exists(gTasks) &*& + exists(gStates) &*& + length(gTasks) == length(gStates) &*& + distinct(gTasks) == true; + +// Auxiliary function that allows us to partially apply the list argument. 
+// +// Notes: +// - Partial application of fixpoint functions in VeriFast is not documented. +// The syntax for partially application is `()()` +// - VeriFast only supports partially applying the first argument, e.g., +// `(mem)(0)` is allowed but `(mem)(_)(nil)` is not. +fixpoint bool mem_list_elem(list xs, t x) { + return mem(x, xs); +} + +// Auxiliary predicate to allow foreach-quantification about fraction +// and reflection of `t->xTaskRunState` in state list. +predicate_ctor readOnly_TCB_runState_p + (list tasks, list states) + (TCB_t* t;) = + mem(t, tasks) == true &*& + [1/2]TCB_runState_p(t, nth(index_of(t, tasks), states)); + +predicate_ctor readOnly_TCB_runState_IF_not_running_p + (list tasks, list states) + (TCB_t* t;) = + mem(t, tasks) == true &*& + nth(index_of(t, tasks), states) == taskTASK_NOT_RUNNING + ? [1/2]TCB_runState_p(t, taskTASK_NOT_RUNNING) + : true; +@*/ + + + + + +// ----------------------------------------------------------------------- +// The following lemmas are necessary to prove that state updates preserve +// the lock invariant. 
+ +/*@ +lemma void update_readOnly_TCB_runState(TCB_t* t, + list tasks, + list states, + int updatedIndex, + TaskRunning_t s) +requires readOnly_TCB_runState_p(tasks, states)(t) &*& + updatedIndex != index_of(t, tasks) &*& + mem(t, tasks) == true &*& + length(tasks) == length(states); +ensures readOnly_TCB_runState_p(tasks, update(updatedIndex, s, states))(t); +{ + list states2 = update(updatedIndex, s, states); + int t_index = index_of(t, tasks); + + if( updatedIndex < 0 || updatedIndex >= length(states) ) { + update_out_of_bounds(updatedIndex, s, states); + } else { + open readOnly_TCB_runState_p(tasks, states)(t); + open [1/2]TCB_runState_p(t, nth(t_index, states)); + + mem_index_of(t, tasks); + nth_update(t_index, updatedIndex, s, states); + assert( nth(t_index, states) == nth(t_index, states2) ); + + close [1/2]TCB_runState_p(t, nth(t_index, states2)); + close readOnly_TCB_runState_p(tasks, states2)(t); + } +} + + +lemma void update_foreach_readOnly_TCB_runState(TCB_t* updatedTask, + list tasks, + list subTasks, + list states, + list states2, + TaskRunning_t s) +requires + mem(updatedTask, tasks) == true &*& + length(tasks) == length(states) &*& + foreach(subTasks, readOnly_TCB_runState_p(tasks, states)) &*& + states2 == update(index_of(updatedTask, tasks), s, states) &*& + distinct(tasks) == true &*& + mem(updatedTask, subTasks) == false &*& + subset(subTasks, tasks) == true; +ensures + foreach(subTasks, readOnly_TCB_runState_p(tasks, states2)); +{ + switch(subTasks) { + case nil: + open foreach(nil, readOnly_TCB_runState_p(tasks, states)); + close foreach(nil, readOnly_TCB_runState_p(tasks, states2)); + case cons(h, rest): + int index = index_of(updatedTask, tasks); + open foreach(subTasks, readOnly_TCB_runState_p(tasks, states)); + assert( updatedTask != h ); + index_of_different(updatedTask, h, tasks); + assert( index != index_of(h, tasks) ); + update_readOnly_TCB_runState(h, tasks, states, index, s); + assert( mem(updatedTask, rest) == false ); + 
update_foreach_readOnly_TCB_runState(updatedTask, tasks, rest, + states, states2, s); + close foreach(subTasks, readOnly_TCB_runState_p(tasks, states2)); + } +} + + +lemma void close_updated_foreach_readOnly_TCB_runState(TCB_t* updatedTask, + list tasks, + list states, + list states2, + TaskRunning_t s) +requires + mem(updatedTask, tasks) == true &*& + length(states) == length(tasks) &*& + distinct(tasks) == true &*& + foreach(remove(updatedTask, tasks), readOnly_TCB_runState_p(tasks, states)) &*& + states2 == update(index_of(updatedTask, tasks), s, states) &*& + [1/2]TCB_runState_p(updatedTask, s); +ensures + foreach(tasks, readOnly_TCB_runState_p(tasks, states2)); +{ + distinct_mem_remove(updatedTask, tasks); + remove_result_subset(updatedTask, tasks); + + close readOnly_TCB_runState_p(tasks, states2)(updatedTask); + update_foreach_readOnly_TCB_runState(updatedTask, tasks, + remove(updatedTask, tasks), + states, states2, s); + foreach_unremove(updatedTask, tasks); +} + + +lemma void stopUpdate_foreach_readOnly_TCB_runState_IF_not_running + (TCB_t* stoppedTask, list tasks, list subTasks, + list states, list states2) +requires + distinct(tasks) == true &*& + distinct(subTasks) == true &*& + length(tasks) == length(states) &*& + foreach(subTasks, readOnly_TCB_runState_IF_not_running_p(tasks, states)) &*& + states2 == update(index_of(stoppedTask, tasks), taskTASK_NOT_RUNNING, states) &*& + nth(index_of(stoppedTask, tasks), states) != taskTASK_NOT_RUNNING &*& + subset(subTasks, tasks) == true &*& + mem(stoppedTask, tasks) == true &*& + mem(stoppedTask, subTasks) + ? 
[1/2]TCB_runState_p(stoppedTask, taskTASK_NOT_RUNNING) + : true; +ensures + foreach(subTasks, readOnly_TCB_runState_IF_not_running_p(tasks, states2)); +{ + switch(subTasks) { + case nil: + open foreach(nil, readOnly_TCB_runState_IF_not_running_p(tasks, states)); + close foreach(nil, readOnly_TCB_runState_IF_not_running_p(tasks, states2)); + case cons(h, t): + if( h == stoppedTask ) { + assert( remove(stoppedTask, subTasks) == t ); + distinct_mem_remove(stoppedTask, subTasks); + assert( mem(stoppedTask, t) == false ); + mem_index_of(stoppedTask, tasks); + } else { + mem_index_of(stoppedTask, tasks); + nth_update(index_of(h, tasks), index_of(stoppedTask, tasks), taskTASK_NOT_RUNNING, states); + index_of_different(h, stoppedTask, tasks); + assert( index_of(h, tasks) != index_of(stoppedTask, tasks) ); + assert( nth(index_of(h, tasks), states) == nth(index_of(h, tasks), states2) ); + } + + nth_update(index_of(stoppedTask, tasks), index_of(stoppedTask, tasks), taskTASK_NOT_RUNNING, states); + open foreach(subTasks, readOnly_TCB_runState_IF_not_running_p(tasks, states)); + open readOnly_TCB_runState_IF_not_running_p(tasks, states)(h); + assert( nth(index_of(stoppedTask, tasks), states2) == taskTASK_NOT_RUNNING ); + close readOnly_TCB_runState_IF_not_running_p(tasks, states2)(h); + stopUpdate_foreach_readOnly_TCB_runState_IF_not_running + (stoppedTask, tasks, t, states, states2); + close foreach(subTasks, readOnly_TCB_runState_IF_not_running_p(tasks, states2)); + } +} + +lemma void updateUnaffectedStates_in_foreach_readOnly_TCB_runState_IF_not_running + (TCB_t* updatedTask, list tasks, list subTasks, + list states, list updatedStates, + TaskRunning_t s) +requires + distinct(tasks) == true &*& + distinct(subTasks) == true &*& + length(tasks) == length(states) &*& + mem(updatedTask, tasks) == true &*& + mem(updatedTask, subTasks) == false &*& + subset(subTasks, tasks) == true &*& + foreach(subTasks, readOnly_TCB_runState_IF_not_running_p(tasks, states)) &*& + updatedStates 
== update(index_of(updatedTask, tasks), s, states); +ensures + foreach(subTasks, readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)); +{ + switch(subTasks) { + case nil: + open foreach(nil, readOnly_TCB_runState_IF_not_running_p(tasks, states)); + close foreach(nil, readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)); + case cons(h, t): + open foreach(subTasks, readOnly_TCB_runState_IF_not_running_p(tasks, states)); + + // Prove that update preserves state of `h`. + index_of_different(h, updatedTask, tasks); + nth_update(index_of(h, tasks), index_of(updatedTask, tasks), s, states); + assert( nth(index_of(h, tasks), states) == nth(index_of(h, tasks), updatedStates) ); + + open readOnly_TCB_runState_IF_not_running_p(tasks, states)(h); + close readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)(h); + updateUnaffectedStates_in_foreach_readOnly_TCB_runState_IF_not_running + (updatedTask, tasks, t, states, updatedStates, s); + close foreach(subTasks, readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)); + } +} + +lemma void startUpdate_foreach_readOnly_TCB_runState_IF_not_running + (TCB_t* startedTask, list tasks, + list states, list updatedStates, + int coreID) +requires + distinct(tasks) == true &*& + length(tasks) == length(states) &*& + mem(startedTask, tasks) == true &*& + foreach(remove(startedTask, tasks), readOnly_TCB_runState_IF_not_running_p(tasks, states)) &*& + updatedStates == update(index_of(startedTask, tasks), coreID, states) &*& + 0 <= coreID &*& coreID < configNUM_CORES; +ensures + foreach(tasks, readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)); +{ + distinct_remove(startedTask, tasks); + distinct_mem_remove(startedTask, tasks); + remove_result_subset(startedTask, tasks); + updateUnaffectedStates_in_foreach_readOnly_TCB_runState_IF_not_running + (startedTask, tasks, remove(startedTask, tasks), states, updatedStates, + coreID); + + assert( foreach(remove(startedTask, tasks), 
readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)) ); + close readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)(startedTask); + foreach_unremove(startedTask, tasks); +} + +lemma void scheduleRunning_in_foreach_readOnly_TCB_runState_IF_not_running + (TCB_t* runningTask, list tasks, + list states, list updatedStates, + int coreID) +requires + distinct(tasks) == true &*& + length(tasks) == length(states) &*& + mem(runningTask, tasks) == true &*& + (nth(index_of(runningTask, tasks), states) == coreID + || nth(index_of(runningTask, tasks), states) == taskTASK_YIELDING) + &*& + foreach(tasks, readOnly_TCB_runState_IF_not_running_p(tasks, states)) &*& + updatedStates == update(index_of(runningTask, tasks), coreID, states) &*& + 0 <= coreID &*& coreID < configNUM_CORES; +ensures + foreach(tasks, readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)) &*& + nth(index_of(runningTask, tasks), updatedStates) == coreID; +{ + switch(tasks) { + case nil: + open foreach(nil, readOnly_TCB_runState_IF_not_running_p(tasks, states)); + close foreach(nil, readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)); + case cons(h, t): + foreach_remove(runningTask, tasks); + + distinct_remove(runningTask, tasks); + distinct_mem_remove(runningTask, tasks); + remove_result_subset(runningTask, tasks); + updateUnaffectedStates_in_foreach_readOnly_TCB_runState_IF_not_running + (runningTask, tasks, remove(runningTask, tasks), + states, updatedStates, coreID); + + open readOnly_TCB_runState_IF_not_running_p(tasks, states)(runningTask); + close readOnly_TCB_runState_IF_not_running_p(tasks, updatedStates)(runningTask); + + foreach_unremove(runningTask, tasks); + } +} +@*/ + + +/*@ +lemma list def_state1(list tasks, + list states, + TCB_t* currentTask, + TCB_t* readyTask) +requires + distinct(tasks) == true &*& + length(tasks) == length(states) &*& + currentTask != readyTask &*& + mem(currentTask, tasks) == true &*& + mem(readyTask, tasks) == true &*& + 
nth(index_of(readyTask, tasks), states) == taskTASK_NOT_RUNNING; +ensures + result == update(index_of(currentTask, tasks), taskTASK_NOT_RUNNING, states) &*& + nth(index_of(readyTask, tasks), result) == taskTASK_NOT_RUNNING &*& + nth(index_of(currentTask, tasks), result) == taskTASK_NOT_RUNNING; +{ + list states1 = + update(index_of(currentTask, tasks), taskTASK_NOT_RUNNING, states); + + mem_index_of(currentTask, tasks); + mem_index_of(readyTask, tasks); + nth_update(index_of(readyTask, tasks), index_of(currentTask, tasks), taskTASK_NOT_RUNNING, states); + + return states1; +} + +lemma list def_state2(list tasks, + list states, + TCB_t* currentTask, + TCB_t* readyTask, + int coreID) +requires + distinct(tasks) == true &*& + length(tasks) == length(states) &*& + currentTask != readyTask &*& + mem(currentTask, tasks) == true &*& + mem(readyTask, tasks) == true &*& + nth(index_of(readyTask, tasks), states) == taskTASK_NOT_RUNNING &*& + nth(index_of(currentTask, tasks), states) != taskTASK_NOT_RUNNING &*& + 0 <= coreID &*& coreID < configNUM_CORES; +ensures + result == + update(index_of(readyTask, tasks), coreID, + update(index_of(currentTask, tasks), taskTASK_NOT_RUNNING, states)) + &*& + nth(index_of(readyTask, tasks), result) == coreID &*& + nth(index_of(currentTask, tasks), result) == taskTASK_NOT_RUNNING; +{ + list states1 = def_state1(tasks, states, currentTask, readyTask); + + list states2 = + update(index_of(readyTask, tasks), coreID, states1); + + return states2; +} +@*/ + + + + + + +#endif /* LOCK_PREDICATES_H */ \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/port_locking_contracts.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/port_locking_contracts.h new file mode 100644 index 00000000000..a7dc6fe7987 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/port_locking_contracts.h @@ -0,0 +1,159 @@ +#ifndef PORT_CONTRACTS_H +#define PORT_CONTRACTS_H + +/* This file defines function contracts for the macros 
used to invoke + * synchronization mechanisms, e.g., masking interrupts and acquiring locks. + * The definitions of these macros are port-specific and involve inline + * assembly. VeriFast cannot reason about assembly. Hence, we have to + * abstract the assembly's semantics with these contracts. + * + * Note that we cannot verify that the contracts' correctness. We have to treat + * their correctness as a proof assumption. + * + * Moreover, together with the invariants defined in the proof header + * `lock_predicates.h`, the below contracts define the locking discipline that + * our proof relies on. The file `lock_predicates.h` contains a more detailed + * explanation of the locking discipline. + * + * In short: + * - Data that is only meant to be accessed by the a specific core is protected + * by deactivating interrupts on this core. Access permissions are expressed + * by `coreLocalInterruptInv_p`. + * - The task lock and the ISR lock (i.e. interrupt lock) themselves protect + * data and code regions irrelevant to the switch-context proof. Hence, + * the respective invariants are left abstract, cf. `taskLockInv_p` and + * `isrLockInv_p`. + * - FreeRTOS' locking discipline demands that the task lock is acquired before + * and released after the ISR lock. The contracts defined below ensure that + * we follow this locking discipline. + * - The ready lists and the task run states (i.e. the data most important to + * the context-switch proof) is protected by a combination of the task lock + * and the ISR lock. That is, this data must only be accessed when both + * locks have been acquired in the right order. The invariant + * `taskISRLockInv_p` expresses these access rights. `lock_predicates.h` + * defines lemmas to produce and consume this invariant. The lemmas ensure + * that we only produce the invariant when both locks have been acquired in + * the right order. + */ + +// We want our proofs to hold for an arbitrary number of cores. 
+#undef portGET_CORE_ID +#define portGET_CORE_ID() VF__get_core_num() + +/* FreeRTOS core id is always zero based.*/ +static uint VF__get_core_num(void); +//@ requires true; +/*@ ensures 0 <= result &*& result < configNUM_CORES &*& + result == coreID_f(); +@*/ + +/*@ +// This contant allows proofs to talk about the ID of the core that the +// function we verify is running on. The verified function's contract must +// ensure that this constant holds the value of the current core. +fixpoint uint coreID_f(); + +lemma void coreID_f_range(); +requires true; +ensures 0 <= coreID_f() &*& coreID_f() < configNUM_CORES; +@*/ + + + + +/* In FreeRTOS interrupts are masked to protect core-local data. + * The invariant `coreLocalInterruptInv_p` expresses what data the masking + * of interrupts protects on a specific core, cf., `lock_predicates.h`. + * + * Deactivating the interrupts on the current core produces the invariant + * `coreLocalInterruptInv_p()` and thereby gives us the permission to access + * the protected data. + */ +#undef portDISABLE_INTERRUPTS +#define portDISABLE_INTERRUPTS VF__portDISABLE_INTERRUPTS +uint32_t VF__portDISABLE_INTERRUPTS(); +//@ requires interruptState_p(?coreID, ?state); +/*@ ensures result == state &*& + interruptState_p(coreID, ?newState) &*& + interruptsDisabled_f(newState) == true &*& + interruptsDisabled_f(state) == true + ? newState == state + : coreLocalInterruptInv_p(); +@*/ + + +/* This macro is used to restore the interrupt state (activated or deactivated) + * to a specific value. When an invokation sets the state from deactivated to + * activated, the invariant `coreLocalInterruptInv_p()` is consumed. + * Thereby, we lose the permission to access the core-local data protected + * by the deactivation of interrupts on this core. 
+ */ +#undef portRESTORE_INTERRUPTS +#define portRESTORE_INTERRUPTS(ulState) VF__portRESTORE_INTERRUPTS(ulState) +void VF__portRESTORE_INTERRUPTS(uint32_t ulState); +/*@ requires interruptState_p(?coreID, ?tmpState) &*& + (interruptsDisabled_f(tmpState) == true && interruptsDisabled_f(ulState) == false) + ? coreLocalInterruptInv_p() + : true; + @*/ +/*@ ensures interruptState_p(coreID, ulState); +@*/ + + +/* This macro is used to acquire the task lock. The task lock on its own + * protects data and core regions that are not relevant to the context-switch + * proof. Hence, an invocation produces an abstract invariant `taskLockInv_p()` + * and updates the locking history `locked_p(...)` to log that the task log + * has been acquired. + * + * FreeRTOS' locking discipline requires that the task lock must be acquired + * before the ISR lock. The precondition `locked_p(nil)` only allows + * invocations of this macro when no lock has been acquired, yet. + */ +#undef portGET_TASK_LOCK +#define portGET_TASK_LOCK VF__portGET_TASK_LOCK +void VF__portGET_TASK_LOCK(); +//@ requires [?f]taskLock_p() &*& locked_p(nil); +//@ ensures taskLockInv_p() &*& locked_p( cons( pair(f, taskLockID_f()), nil) ); + + +/* This macro is used to release the task lock. An invocation consumes the + * task lock invariant `taskLockInv_p` and updates the locking history + * `locked_p(...)` to reflect the release. + * + * FreeRTOS' locking discipline demands that the task lock must be acquired + * before and released after the ISR lock. The precondition + * `locked_p( cons( pair(?f, taskLockID_f()), nil) )` only allows calls to this + * macro when we can prove that we only hold the task lock. 
+ * */ +#undef portRELEASE_TASK_LOCK +#define portRELEASE_TASK_LOCK VF__portRELEASE_TASK_LOCK +void VF__portRELEASE_TASK_LOCK(); +//@ requires taskLockInv_p() &*& locked_p( cons( pair(?f, taskLockID_f()), nil) ); +//@ ensures [f]taskLock_p() &*& locked_p(nil); + + +/* This macro is used to acquire the ISR lock (i.e. interrupt lock). An + * invocation produces the abstract ISR lock invariant `isrLock_p` and + * updates the locking history `locked_p(...)` to reflect that the lock has + * been acquired. + */ +#undef portGET_ISR_LOCK +#define portGET_ISR_LOCK VF__portGET_ISR_LOCK +void VF__portGET_ISR_LOCK(); +//@ requires [?f]isrLock_p() &*& locked_p(?heldLocks); +//@ ensures isrLockInv_p() &*& locked_p( cons( pair(f, isrLockID_f()), heldLocks) ); + + +/* This macro is used to release the ISR lock (i.e. interrupt lock). A call + * consumes the ISR lock invariant and updates the locking history + * `locked_p(...)` to reflect the release. + */ +#undef portRELEASE_ISR_LOCK +#define portRELEASE_ISR_LOCK VF__portRELEASE_ISR_LOCK +void VF__portRELEASE_ISR_LOCK(); +//@ requires isrLockInv_p() &*& locked_p( cons( pair(?f, isrLockID_f()), ?heldLocks) ); +//@ ensures [f]isrLock_p() &*& locked_p(heldLocks); + + +#endif /* PORT_CONTRACTS_H */ \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/ready_list_predicates.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/ready_list_predicates.h new file mode 100644 index 00000000000..3e74d976fa4 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/ready_list_predicates.h @@ -0,0 +1,389 @@ +#ifndef READY_LIST_PREDICATES_H +#define READY_LIST_PREDICATES_H + +#include "single_core_proofs/scp_list_predicates.h" + + +#include "verifast_lists_extended.h" + + +/*@ +// This predicate represents the global ready lists, i.e., the global array +// `pxReadyTasksLists` in `tasks.c`. +// Each index `p` stores a cyclic doubly linked list containing all tasks +// of priority `p` that are ready to run. 
+predicate readyLists_p(list > gCellLists, + list > gOwnerLists) = + configMAX_PRIORITIES == length(gCellLists) &*& + List_array_p(&pxReadyTasksLists, configMAX_PRIORITIES, + gCellLists, gOwnerLists) &*& + length(gCellLists) == length(gOwnerLists); + + +predicate List_array_p(List_t* array, int size, + list > cellLists, + list > ownerLists) = + size >= 0 &*& + length(cellLists) == size &*& + length(ownerLists) == length(cellLists) &*& + size > 0 + ? ( + cellLists == cons(?gCells, ?gTailCellLists) &*& + ownerLists == cons(?gOwners, ?gTailOwnerLists) &*& + pointer_within_limits(array) == true &*& + xLIST(array, ?gLen, ?gIndex, ?gListEnd, gCells, ?gVals, + gOwners) + &*& + gLen < INT_MAX &*& + List_array_p(array + 1, size - 1, gTailCellLists, gTailOwnerLists) + ) + : ( + cellLists == nil &*& + ownerLists == nil + ); + +lemma void List_array_size_positive(List_t* pxArray) +requires List_array_p(pxArray, ?gSize, ?gCellLists, ?gOwnerLists); +ensures + List_array_p(pxArray, gSize, gCellLists, gOwnerLists) &*& + gSize >= 0 &*& + gSize == length(gCellLists) &*& + length(gCellLists) == length(gOwnerLists); +{ + open List_array_p(pxArray, gSize, gCellLists, gOwnerLists); + close List_array_p(pxArray, gSize, gCellLists, gOwnerLists); +} + +lemma void List_array_split(List_t* array, int index) +requires + List_array_p(array, ?gSize, ?gCellLists, ?gOwnerLists) &*& + 0 <= index &*& index < gSize; +ensures + List_array_p(array, index, ?gPrefCellLists, ?gPrefOwnerLists) &*& + gPrefCellLists == take(index, gCellLists) &*& + gPrefOwnerLists == take(index, gOwnerLists) &*& + pointer_within_limits(array) == true &*& + xLIST(array + index, ?gLen, _, _, ?gCells, ?gVals, ?gOwners) &*& + gLen < INT_MAX &*& + gCells == nth(index, gCellLists) &*& + gOwners == nth(index, gOwnerLists) &*& + mem(gOwners, gOwnerLists) == true &*& + List_array_p(array + index + 1, gSize-index-1, ?gSufCellLists, ?gSufOwnerLists) &*& + gSufCellLists == drop(index+1, gCellLists) &*& + gSufOwnerLists == drop(index+1, 
gOwnerLists); +{ + open List_array_p(array, gSize, gCellLists, gOwnerLists); + + if( index > 0 ) { + List_array_split(array + 1, index - 1); + } + + close List_array_p(array, index, take(index, gCellLists), take(index, gOwnerLists)); +} + +lemma void List_array_join(List_t* array) +requires + List_array_p(array, ?gPrefSize, ?gPrefCellLists, ?gPrefOwnerLists) &*& + xLIST(array + gPrefSize, ?gLen, _, _, ?gCells, _, ?gOwners) &*& + gLen < INT_MAX &*& + pointer_within_limits(array + gPrefSize) == true &*& + List_array_p(array + gPrefSize + 1, ?gSufSize, ?gSufCellLists, ?gSufOwnerLists); +ensures + List_array_p(array, ?gSize, ?gCellLists, ?gOwnerLists) &*& + gSize == length(gCellLists) &*& + length(gCellLists) == length(gOwnerLists) &*& + gSize == gPrefSize + 1 + gSufSize &*& + gCellLists == append(gPrefCellLists, cons(gCells, gSufCellLists)) &*& + gOwnerLists == append(gPrefOwnerLists, cons(gOwners, gSufOwnerLists)); +{ + open List_array_p(array, gPrefSize, gPrefCellLists, gPrefOwnerLists); + List_array_size_positive(array + gPrefSize + 1); + + if( gPrefSize > 0 ) { + List_array_join(array + 1); + } + + close List_array_p(array, gPrefSize + 1 + gSufSize, + append(gPrefCellLists, cons(gCells, gSufCellLists)), + append(gPrefOwnerLists, cons(gOwners, gSufOwnerLists))); +} +@*/ + + + + + +/*@ +lemma void List_array_p_index_within_limits(List_t* array, int index) +requires List_array_p(array, ?gSize, ?gCellLists, ?gOwnerLists) &*& + 0 <= index &*& index < gSize; +ensures List_array_p(array, gSize, gCellLists, gOwnerLists) &*& + pointer_within_limits(&array[index]) == true; +{ + open List_array_p(array, gSize, gCellLists, gOwnerLists); + if( index > 0) { + List_array_p_index_within_limits(&array[1], index-1); + } + close List_array_p(array, gSize, gCellLists, gOwnerLists); +} +@*/ + + + +// ------------------------------------------------------------------------- +// Lemmas to close the ready list predicate in different scenarios. 
+/*@ +lemma void closeUnchanged_readyLists(list > cellLists, + list > ownerLists) +requires + configMAX_PRIORITIES == length(cellLists) &*& + configMAX_PRIORITIES == length(ownerLists) &*& + List_array_p(&pxReadyTasksLists, ?gIndex, ?gPrefCellLists, ?gPrefOwnerLists) &*& + gIndex < length(cellLists) &*& + xLIST(&pxReadyTasksLists + gIndex, ?gLen, _, _, ?gCells, ?gVals, ?gOwners) &*& + gLen < INT_MAX &*& + gCells == nth(gIndex, cellLists) &*& + gOwners == nth(gIndex, ownerLists) &*& + pointer_within_limits(&pxReadyTasksLists + gIndex) == true &*& + List_array_p(&pxReadyTasksLists + gIndex + 1, configMAX_PRIORITIES - gIndex - 1, + ?gSufCellLists, ?gSufOwnerLists) &*& + gPrefCellLists == take(gIndex, cellLists) &*& + gSufCellLists == drop(gIndex+1, cellLists) &*& + gPrefOwnerLists == take(gIndex, ownerLists) &*& + gSufOwnerLists == drop(gIndex+1, ownerLists); +ensures + readyLists_p(cellLists, ownerLists); +{ + // Prove `0 <= gIndex`: + open List_array_p(&pxReadyTasksLists, gIndex, gPrefCellLists, gPrefOwnerLists); + close List_array_p(&pxReadyTasksLists, gIndex, gPrefCellLists, gPrefOwnerLists); + assert( 0 <= gIndex ); + + List_array_join(&pxReadyTasksLists); + assert( List_array_p(&pxReadyTasksLists, ?gSize, ?gCellLists2, ?gOwnerLists2) ); + + append_take_nth_drop(gIndex, cellLists); + append_take_nth_drop(gIndex, ownerLists); + assert( gSize == configMAX_PRIORITIES ); + assert( gCellLists2 == cellLists ); + assert( gOwnerLists2 == ownerLists ); + + close readyLists_p(cellLists, ownerLists); +} + +lemma void closeReordered_readyLists(list > cellLists, + list > ownerLists, + list reorderedCells, + list reorderedOwners, + list tasks) +requires + configMAX_PRIORITIES == length(cellLists) &*& + configMAX_PRIORITIES == length(ownerLists) &*& + List_array_p(&pxReadyTasksLists, ?gIndex, ?gPrefCellLists, ?gPrefOwnerLists) &*& + gIndex < length(cellLists) &*& + xLIST(&pxReadyTasksLists + gIndex, ?gLen, _, _, reorderedCells, _, reorderedOwners) &*& + gLen < INT_MAX &*& + 
length(reorderedCells) == length(nth(gIndex, cellLists)) &*& + length(reorderedOwners) == length(nth(gIndex, ownerLists)) &*& + pointer_within_limits(&pxReadyTasksLists + gIndex) == true &*& + List_array_p(&pxReadyTasksLists + gIndex + 1, configMAX_PRIORITIES - gIndex - 1, + ?gSufCellLists, ?gSufOwnerLists) &*& + gPrefCellLists == take(gIndex, cellLists) &*& + gSufCellLists == drop(gIndex+1, cellLists) &*& + gPrefOwnerLists == take(gIndex, ownerLists) &*& + gSufOwnerLists == drop(gIndex+1, ownerLists) &*& + forall(ownerLists, (superset)(tasks)) == true &*& + forall(reorderedOwners, (mem_list_elem)(tasks)) == true; +ensures + readyLists_p(?gReorderedCellLists, ?gReorderedOwnerLists) &*& + forall(gReorderedOwnerLists, (superset)(tasks)) == true; +{ + // Prove that `gIndex != 0 -> gIndex > 0` + if(gIndex != 0) { + open List_array_p(&pxReadyTasksLists, gIndex, gPrefCellLists, gPrefOwnerLists); + close List_array_p(&pxReadyTasksLists, gIndex, gPrefCellLists, gPrefOwnerLists); + assert( gIndex > 0 ); + } + + List_array_join(&pxReadyTasksLists); + assert( List_array_p(&pxReadyTasksLists, configMAX_PRIORITIES, + ?gReorderedCellLists, ?gReorderedOwnerLists) ); + + if(gIndex == 0) { + assert( nth(0, gReorderedCellLists) == reorderedCells ); + } else { + nth_take(0, gIndex, cellLists); + assert( nth(0, gReorderedCellLists) == nth(0, gPrefCellLists) ); + assert( nth(0, gPrefCellLists) == nth(0, cellLists) ); + } + assert( length(nth(0, gReorderedCellLists)) == length(nth(0, cellLists)) ); + + close readyLists_p(gReorderedCellLists, gReorderedOwnerLists); + + + // Below we prove `forall(gReorderedOwnerLists, (superset)(tasks)) == true` + forall_take(ownerLists, (superset)(tasks), gIndex); + forall_drop(ownerLists, (superset)(tasks), gIndex+1); + assert( forall(gPrefOwnerLists, (superset)(tasks)) == true ); + assert( forall(gSufOwnerLists, (superset)(tasks)) == true ); + forall_mem_implies_superset(tasks, reorderedOwners); + assert( superset(tasks, reorderedOwners) == true ); + 
assert( forall(singleton(reorderedOwners), (superset)(tasks)) == true ); + assert( forall(cons(reorderedOwners, gSufOwnerLists), (superset)(tasks)) == true ); + + forall_append(gPrefOwnerLists, cons(reorderedOwners, gSufOwnerLists), + (superset)(tasks)); +} +@*/ + + +/*@ +predicate VF_reordeReadyList__ghost_args(list tasks, + list > cellLists, + list > ownerLists, + int offset) + = true; +@*/ + +void VF_reordeReadyList(List_t* pxReadyList, ListItem_t * pxTaskItem) +/*@ requires + // ghost arguments + VF_reordeReadyList__ghost_args(?gTasks, ?gCellLists, ?gOwnerLists, ?gOffset) + &*& + length(gCellLists) == configMAX_PRIORITIES &*& + length(gOwnerLists) == configMAX_PRIORITIES &*& + 0 <= gOffset &*& gOffset < length(gCellLists) + &*& + // current ready list + xLIST(pxReadyList, ?gSize, ?gIndex, ?gEnd, ?gCells, ?gVals, ?gOwners) &*& + pxReadyList == &pxReadyTasksLists + gOffset &*& + pointer_within_limits(pxReadyList) == true &*& + gSize < INT_MAX &*& + gEnd != pxTaskItem &*& + mem(pxTaskItem, gCells) == true &*& + gCells == nth(gOffset, gCellLists) &*& + gOwners == nth(gOffset, gOwnerLists) + &*& + // prefix and suffix of ready lists array + List_array_p(&pxReadyTasksLists, gOffset, ?gPrefCellLists, ?gPrefOwnerLists) &*& + List_array_p(&pxReadyTasksLists + gOffset + 1, configMAX_PRIORITIES - gOffset - 1, + ?gSufCellLists, ?gSufOwnerLists) + &*& + gPrefCellLists == take(gOffset, gCellLists) &*& + gSufCellLists == drop(gOffset+1, gCellLists) &*& + gPrefOwnerLists == take(gOffset, gOwnerLists) &*& + gSufOwnerLists == drop(gOffset+1, gOwnerLists) &*& + forall(gOwnerLists, (superset)(gTasks)) == true &*& + subset(gOwners, gTasks) == true; +@*/ +/*@ ensures + readyLists_p(?gReorderedCellLists, ?gReorderedOwnerLists) &*& + length(gReorderedCellLists) == length(gCellLists) &*& + length(gReorderedOwnerLists) == length(gOwnerLists) &*& + length(gReorderedCellLists) == length(gReorderedOwnerLists) &*& + forall(gReorderedOwnerLists, (superset)(gTasks)) == true; + @*/ +{ + //@ 
open VF_reordeReadyList__ghost_args(_, _, _, _); + + // Proving `∀o ∈ gOwners. o ∈ gTasks` + //@ forall_mem(gOwners, gOwnerLists, (superset)(gTasks)); + //@ assert( superset(gTasks, gOwners) == true ); + //@ subset_implies_forall_mem(gOwners, gTasks); + //@ assert( forall(gOwners, (mem_list_elem)(gTasks)) == true ); + + // Proving `length(gCells) == length(gOwners) == gSize + 1`: + //@ open xLIST(pxReadyList, gSize, gIndex, gEnd, gCells, gVals, gOwners); + //@ close xLIST(pxReadyList, gSize, gIndex, gEnd, gCells, gVals, gOwners); + //@ assert( length(gCells) == length(gOwners) ); + //@ assert( length(gCells) == gSize +1 ); + + //@ close exists(pxReadyList); + uxListRemove( pxTaskItem ); + //@ assert( xLIST(pxReadyList, gSize-1, ?gIndex2, gEnd, ?gCells2, ?gVals2, ?gOwners2) ); + //@ assert( xLIST_ITEM(pxTaskItem, _, _, _, ?gTaskItem_owner, _) ); + + // Proving `length(gCell2) == length(gOwners2) == gSize` and `gIndex2 ∈ gCells2`: + //@ open xLIST(pxReadyList, gSize-1, gIndex2, gEnd, gCells2, gVals2, gOwners2); + //@ close xLIST(pxReadyList, gSize-1, gIndex2, gEnd, gCells2, gVals2, gOwners2); + //@ assert( length(gCells2) == gSize ); + //@ assert( length(gOwners2) == gSize ); + //@ assert( mem(gIndex2, gCells2) == true ); + + // Proving `gTaskItem_owner ∈ gOwners`: + //@ assert( gTaskItem_owner == nth(index_of(pxTaskItem, gCells), gOwners) ); + //@ mem_index_of(pxTaskItem, gCells); + //@ nth_implies_mem(index_of(pxTaskItem, gCells), gOwners); + //@ assert( mem(gTaskItem_owner, gOwners) == true ); + + // Proving `gTaskItem_owner ∈ gTasks`: + //@ forall_mem(gTaskItem_owner, gOwners, (mem_list_elem)(gTasks)); + //@ assert( mem(gTaskItem_owner, gTasks) == true ); + + // Proving `gOwners2 ⊆ gTasks` + //@ assert( forall(gOwners, (mem_list_elem)(gTasks)) == true ); + //@ forall_remove_nth(index_of(pxTaskItem, gCells), gOwners, (mem_list_elem)(gTasks)); + //@ assert( forall(gOwners2, (mem_list_elem)(gTasks)) == true ); + //@ forall_mem_implies_superset(gTasks, gOwners2); + 
//@ assert( subset(gOwners2, gTasks) == true ); + + vListInsertEnd( pxReadyList, pxTaskItem ); + //@ assert( xLIST(pxReadyList, gSize, ?gIndex3, gEnd, ?gCells3, ?gVals3, ?gOwners3) ); + + // Proving `gOwners3 ⊆ gTasks` and `length(gOwners3) == length(gOwners)`: + // We must handle the case split introduced by postcondition of `vListInsertEnd`. + /*@ + if( gIndex2 == gEnd ) { + assert( gCells3 == append(gCells2, singleton(pxTaskItem)) ); + assert( gOwners3 == append(gOwners2, singleton(gTaskItem_owner)) ); + + assert( subset(singleton(gTaskItem_owner), gTasks) == true ); + subset_append(gOwners2, singleton(gTaskItem_owner), gTasks); + } else { + int i = index_of(gIndex2, gCells2); + assert( gCells3 == append(take(i, gCells2), + append(singleton(pxTaskItem), + drop(i, gCells2))) ); + list ot = append(singleton(gTaskItem_owner), drop(i, gOwners2)); + assert( gOwners3 == append(take(i, gOwners2), ot) ); + + + // Proving `take(i, gOwners2) ⊆ gTasks`: + subset_take(i, gOwners2); + assert( subset(take(i, gOwners2), gOwners2) == true ); + assert( subset(gOwners2, gTasks) == true ); + subset_trans(take(i, gOwners2), gOwners2, gTasks); + assert( subset(take(i, gOwners2), gTasks) == true ); + + // Proving `drop(i, gOwners2) ⊆ gTasks`: + subset_drop(i, gOwners2); + subset_trans(drop(i, gOwners2), gOwners2, gTasks); + assert( subset(drop(i, gOwners2), gTasks) == true ); + + // Proving `gOwners3 ⊆ gTasks`: + subset_append(singleton(gTaskItem_owner), drop(i, gOwners2), gTasks); + subset_append(take(i, gOwners2), ot, gTasks); + assert( subset(gOwners3, gTasks) == true ); + + // Proving `length(gOwners3) == length(gOwners)`: + mem_index_of(gIndex2, gCells2); + append_take_nth_drop(i, gOwners2); + assert( length(gOwners3) == gSize+1 ); + } + @*/ + //@ assert( subset(gOwners3, gTasks) == true ); + //@ assert( length(gOwners3) == length(gOwners) ); + + //@ subset_implies_forall_mem(gOwners3, gTasks); + //@ assert( forall(gOwners3, (mem_list_elem)(gTasks)) == true ); + + //@ 
closeReordered_readyLists(gCellLists, gOwnerLists, gCells3, gOwners3, gTasks); + + // Proving that reordering preserves the length of cell lists and owner lists: + //@ open readyLists_p(?gReorderedCellLists, ?gReorderedOwnerLists); + //@ close readyLists_p(gReorderedCellLists, gReorderedOwnerLists); + //@ assert( length(gReorderedCellLists) == length(gCellLists) ); + //@ assert( length(gReorderedOwnerLists) == length(gOwnerLists) ); + //@ assert( length(gReorderedCellLists) == length(gReorderedOwnerLists) ); +} +#endif /* READY_LIST_PREDICATES_H */ \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/README.md b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/README.md new file mode 100644 index 00000000000..b749058621a --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/README.md @@ -0,0 +1,3 @@ +This directory contains proof artifacts written by Aalok Thakkar and Nathan Chong. +See the following pull request: +https://github.com/FreeRTOS/FreeRTOS/pull/836 diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/scp_common.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/scp_common.h new file mode 100644 index 00000000000..5923b6d89da --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/scp_common.h @@ -0,0 +1,630 @@ +/* + * The code below has been taken from: + * pull request: + * https://github.com/FreeRTOS/FreeRTOS/pull/836 + * file: + * FreeRTOS/Test/VeriFast/include/proof/list.h + * + * The file has been converted from a ghost header + * into a regular header. + * It has also been renamed from `common.h` into + * `scp_common.h`. + * The include guards have been updated accordingly. + * + * All changes to the proofs, predicates, etc. + * are guarded by a check that `VERIFAST_SINGLE_CORE` is + * NOT defined. 
+ */ + + + +/* + * FreeRTOS V202112.00 + * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifndef SCP_COMMON_H +#define SCP_COMMON_H + +/*@ +#include + +fixpoint list rotate_left(int n, list xs) { + return append(drop(n, xs), take(n, xs)); +} + +fixpoint list singleton(t x) { + return cons(x, nil); +} + +lemma void note(bool b) + requires b; + ensures b; +{} + +lemma_auto void rotate_length(int n, list xs) + requires 0 <= n && n <= length(xs); + ensures length(rotate_left(n, xs)) == length(xs); +{} + +lemma void take_length_eq(int k, list xs, list ys) + requires 0 <= k && k <= length(xs) && take(k, xs) == ys; + ensures length(ys) == k; +{} + +lemma void leq_bound(int x, int b) + requires b <= x && x <= b; + ensures x == b; +{} + +lemma void mul_mono_l_strict(int x, int y, int n) + requires 0 < n &*& x < y; + ensures x * n < y * n; +{ + for (int i = 1; i < n; i++) + invariant i <= n &*& x * i < y * i; + decreases n - i; + {} +} + +lemma void div_leq(int x, int y, int n) + requires 0 < n && x * n <= y * n; + ensures x <= y; +{ + assert x * n <= y * n; + if (x <= y) { + mul_mono_l(x,y,n); + } else { + mul_mono_l_strict(y,x,n); //< contradiction + } +} + +lemma void div_lt(int x, int y, int n) + requires 0 < n && x * n < y * n; + ensures x < y; +{ + assert x * n <= y * n; + if (x == y) { + } else if (x <= y) { + mul_mono_l(x,y,n); + } else { + assert y < x; + mul_mono_l(y,x,n); //< contradiction + } +} + +lemma_auto void mod_same(int n) + requires 0 < n; + ensures n % n == 0; +{ + div_rem_nonneg(n, n); + if (n / n < 1) {} else if (n / n > 1) { + mul_mono_l(2, n/n, n); + } else {} +} + +lemma void mod_lt(int x, int n) + requires 0 <= x && x < n; + ensures x % n == x; +{ + div_rem_nonneg(x, n); + if (x / n > 0) { + mul_mono_l(1, x / n, n); + } else { + } +} + +lemma void mod_plus_one(int x, int y, int n) + requires 0 <= y && 0 < n && x == (y % n); + ensures ((x+1) % n) == ((y+1) % n); +{ + div_rem_nonneg(y, n); + div_rem_nonneg(y+1, n); + div_rem_nonneg(y%n+1, n); + if (y%n+1 
< n) { + mod_lt(y%n+1, n); + assert y%n == y - y/n*n; + assert (y+1)%n == y + 1 - (y + 1)/n*n; + if ((y+1)/n > y/n) { + mul_mono_l(y/n + 1, (y+1)/n, n); + } else if ((y+1)/n < y/n) { + mul_mono_l((y+1)/n + 1, y/n, n); + } + assert y - (y+1)/n*n == y - y/n*n; + assert y+1 - (y+1)/n*n == y - y/n*n + 1; + assert (y+1)%n == y%n + 1; + } else { + assert y%n+1 == n; + assert (y%n+1)%n == 0; + if (y/n + 1 < (y+1)/n) { + mul_mono_l(y/n + 2, (y+1)/n, n); + } else if (y/n + 1 > (y+1)/n) { + mul_mono_l((y+1)/n, y/n, n); + } + assert (y+1)/n == y/n + 1; + note((y+1)/n*n == (y/n + 1)*n); + assert (y+1)%n == 0; + } + assert (y%n+1)%n == (y+1)%n; +} + +lemma void mod_mul(int x, int n, int y) + requires 0 < n && 0 <= x && 0 <= y; + ensures (x*n + y)%n == y%n; +{ + mul_mono_l(0, x, n); + div_rem_nonneg(x*n+y, n); + div_rem_nonneg(y, n); + + if ((x*n+y)/n > x + y/n) { + mul_mono_l(x + y/n + 1, (x*n+y)/n, n); + } else if ((x*n+y)/n < x + y/n) { + mul_mono_l((x*n+y)/n + 1, x + y/n, n); + } + note((x*n + y)/n == x + y/n); + note((x*n + y)/n*n == (x + y/n)*n); +} + +lemma void mod_plus_distr(int x, int y, int n) + requires 0 < n && 0 <= x && 0 <= y; + ensures ((x % n) + y) % n == (x + y) % n; +{ + div_rem_nonneg(x, n); + div_rem_nonneg(x%n+y, n); + div_rem_nonneg(x+y, n); + + assert x == x/n*n + x%n; + mod_mul(x/n, n, x%n + y); +} + +lemma_auto void mod_mod(int x, int n) + requires 0 < n && 0 <= x; + ensures (x % n) % n == (x % n); +{ + mod_plus_distr(x, 0, n); +} + +lemma void mod_plus(int x, int y, int n); + requires 0 < n && 0 <= x && 0 <= y; + ensures (x + y) % n == ((x % n) + (y % n)) % n; + +lemma_auto void mod_range(int x, int n) + requires 0 <= x && 0 < n; + ensures 0 <= (x % n) && (x % n) < n; +{ + div_rem_nonneg(x, n); +} + +lemma void head_append(list xs, list ys) + requires 0 < length(xs); + ensures head(append(xs, ys)) == head(xs); +{ + switch(xs) + { + case cons(c, cs): + case nil: + } +} + +lemma void drop_take_singleton(int i, list xs) + requires 0 < i && i < length(xs); 
+ ensures drop(i-1, take(i, xs)) == singleton(nth(i-1, xs)); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (i == 1) { + } else { + drop_take_singleton(i-1, xs0); + } + } +} + +lemma void take_singleton(int i, list xs) + requires 0 <= i && i < length(xs); + ensures append(take(i, xs), singleton(nth(i, xs))) == take(i+1, xs); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (i == 0) { + } else { + take_singleton(i-1, xs0); + } + } +} + +lemma void drop_update_le(int i, int j, t x, list xs) + requires 0 <= i && i <= j && j < length(xs); + ensures drop(i, update(j, x, xs)) == update(j - i, x, drop(i, xs)); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (i == 0) { + } else { + drop_update_le(i - 1, j - 1, x, xs0); + } + } +} + +lemma void drop_update_ge(int i, int j, t x, list xs) + requires 0 <= j && j < i && i < length(xs); + ensures drop(i, update(j, x, xs)) == drop(i, xs); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (j == 0) { + } else { + drop_update_ge(i - 1, j - 1, x, xs0); + } + } +} + +lemma void take_update_le(int i, int j, t x, list xs) + requires 0 <= i && i <= j; + ensures take(i, update(j, x, xs)) == take(i, xs); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (i == 0) { + } else { + take_update_le(i - 1, j - 1, x, xs0); + } + } +} + +lemma void take_update_ge(int i, int j, t x, list xs) + requires 0 <= j && j < i && i <= length(xs); + ensures take(i, update(j, x, xs)) == update(j, x, take(i, xs)); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (j == 0) { + } else { + take_update_ge(i - 1, j - 1, x, xs0); + } + } +} + +lemma void update_eq_append(int i, t x, list xs) + requires 0 <= i && i < length(xs); + ensures update(i, x, xs) == append(take(i, xs), cons(x, drop(i + 1, xs))); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (i == 0) { + } else { + update_eq_append(i - 1, x, xs0); + } + } +} + +lemma void take_append_ge(int n, list xs, list ys) + requires length(xs) <= n; + 
ensures take(n, append(xs, ys)) == append(xs, take(n - length(xs), ys)); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + take_append_ge(n - 1, xs0, ys); + } +} + +lemma void drop_drop(int m, int n, list xs) + requires 0 <= m && 0 <= n; + ensures drop(m, drop(n, xs)) == drop(m+n, xs); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (n == 0) {} else { + drop_drop(m, n-1, xs0); + } + } +} + +lemma void take_take(int m, int n, list xs) + requires 0 <= m && m <= n && n <= length(xs); + ensures take(m, take(n, xs)) == take(m, xs); +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (m == 0) {} else { + take_take(m - 1, n - 1, xs0); + } + } +} + +lemma_auto void take_head(list xs) + requires 0 < length(xs); + ensures take(1, xs) == singleton(head(xs)); +{ + switch(xs) { + case nil: + case cons(x0, xs0): + } +} + +// Following lemma from `verifast/bin/rt/_list.java` +lemma void remove_remove_nth(list xs, t x) + requires mem(x, xs) == true; + ensures remove(x, xs) == remove_nth(index_of(x, xs), xs); +{ + switch (xs) { + case nil: + case cons(h, tl): + if (x == h) { + assert index_of(x, xs) == 0; + } else { + remove_remove_nth(tl, x); + } + } +} + +lemma void mem_take_false(t x, int n, list xs) + requires mem(x, xs) == false; + ensures mem(x, take(n, xs)) == false; +{ + switch (xs) { + case nil: + case cons(x0, xs0): + if (x0 != x && n != 0) + mem_take_false(x, n - 1, xs0); + } +} + +// Following lemma from `verifast/bin/rt/_list.java`. Renamed to +// avoid clash with listex.c's nth_drop lemma. 
+lemma void nth_drop2(list vs, int i) +requires 0 <= i && i < length(vs); +ensures nth(i, vs) == head(drop(i, vs)); +{ + switch (vs) { + case nil: + case cons(v, vs0): + if (i == 0) { + } else { + nth_drop2(vs0, i - 1); + } + } +} + +lemma void enq_lemma(int k, int i, list xs, list ys, t z) + requires 0 <= k && 0 <= i && 0 < length(xs) && k < length(xs) && i < length(xs) && take(k, rotate_left(i, xs)) == ys; + ensures take(k+1, rotate_left(i, update((i+k)%length(xs), z, xs))) == append(ys, cons(z, nil)); +{ + int j = (i+k)%length(xs); + assert take(k, append(drop(i, xs), take(i, xs))) == ys; + if (i + k < length(xs)) { + mod_lt(i + k, length(xs)); + assert j == i + k; + drop_update_le(i, i + k, z, xs); + assert drop(i, update(i + k, z, xs)) == update(k, z, drop(i, xs)); + take_update_le(i, i + k, z, xs); + assert take(i, update(i + k, z, xs)) == take(i, xs); + take_append(k+1, update(k, z, drop(i, xs)), take(i, xs)); + assert take(k+1, append(update(k, z, drop(i, xs)), take(i, xs))) == take(k+1, update(k, z, drop(i, xs))); + update_eq_append(k, z, drop(i, xs)); + assert update(k, z, drop(i, xs)) == append(take(k, drop(i, xs)), cons(z, drop(k + 1, drop(i, xs)))); + take_append_ge(k+1, take(k, drop(i, xs)), cons(z, drop(k + 1, drop(i, xs)))); + assert take(k+1, append(take(k, drop(i, xs)), cons(z, drop(k + 1, drop(i, xs))))) == + append(take(k, drop(i, xs)), {z}); + take_append(k, drop(i, xs), take(i, xs)); + assert take(k+1, append(take(k, drop(i, xs)), cons(z, drop(k + 1, drop(i, xs))))) == + append(take(k, append(drop(i, xs), take(i, xs))), {z}); + assert take(k+1, update(k, z, drop(i, xs))) == + append(take(k, append(drop(i, xs), take(i, xs))), {z}); + assert take(k+1, append(update(k, z, drop(i, xs)), take(i, xs))) == + append(take(k, append(drop(i, xs), take(i, xs))), {z}); + assert take(k+1, append(drop(i, update(i + k, z, xs)), take(i, update(i + k, z, xs)))) == + append(take(k, append(drop(i, xs), take(i, xs))), {z}); + + } else { + assert i + k < 2 * 
length(xs); + div_rem_nonneg(i + k, length(xs)); + if ((i + k) / length(xs) > 1) { + mul_mono_l(2, (i + k) / length(xs), length(xs)); + } else if ((i + k) / length(xs) < 1) { + mul_mono_l((i + k) / length(xs), 0, length(xs)); + } + assert j == i + k - length(xs); + assert j < i; + drop_update_ge(i, j, z, xs); + assert drop(i, update(j, z, xs)) == drop(i, xs); + take_update_ge(i, j, z, xs); + assert update(j, z, take(i, xs)) == take(i, update(j, z, xs)); + take_append_ge(k+1, drop(i, xs), take(i, update(j, z, xs))); + assert take(k+1, append(drop(i, update(j, z, xs)), take(i, update(j, z, xs)))) == + append(drop(i, xs), take(j+1, update(j, z, take(i, xs)))); + update_eq_append(j, z, take(i, xs)); + assert update(j, z, take(i, xs)) == append(take(j, take(i, xs)), cons(z, drop(j + 1, take(i, xs)))); + take_take(j, i, xs); + assert update(j, z, take(i, xs)) == append(take(j, xs), cons(z, drop(j+1, take(i, xs)))); + take_append_ge(j+1, take(j, xs), cons(z, drop(j+1, take(i, xs)))); + assert append(drop(i, xs), take(j+1, update(j, z, take(i, xs)))) == + append(drop(i, xs), append(take(j, xs), {z})); + take_append_ge(k, drop(i, xs), take(i, xs)); + append_assoc(drop(i, xs), take(j, xs), {z}); + assert append(drop(i, xs), append(take(j, xs), {z})) == + append(take(k, append(drop(i, xs), take(i, xs))), {z}); + assert append(drop(i, xs), take(j+1, update(j, z, take(i, xs)))) == + append(take(k, append(drop(i, xs), take(i, xs))), {z}); + } + assert take(k+1, append(drop(i, update(j, z, xs)), take(i, update(j, z, xs)))) == + append(take(k, append(drop(i, xs), take(i, xs))), {z}); + assert take(k+1, append(drop(i, update(j, z, xs)), take(i, update(j, z, xs)))) == append(ys, {z}); +} + +lemma void front_enq_lemma(int k, int i, list xs, list ys, t z) + requires 0 < length(xs) && 0 <= k && k < length(xs) && 0 <= i && i < length(xs) && take(k, rotate_left((i+1)%length(xs), xs)) == ys; + ensures take(k+1, rotate_left(i, update(i, z, xs))) == cons(z, ys); +{ + int n = length(xs); + 
if (i+1 < n) { + mod_lt(i+1, n); + assert i < n; + assert take(k+1, rotate_left(i, update(i, z, xs))) == + take(k+1, append(drop(i, update(i, z, xs)), take(i, update(i, z, xs)))); + drop_update_le(i, i, z, xs); + take_update_le(i, i, z, xs); + assert take(k+1, append(drop(i, update(i, z, xs)), take(i, update(i, z, xs)))) == + take(k+1, append(update(0, z, drop(i, xs)), take(i, xs))); + update_eq_append(0, z, drop(i, xs)); + assert update(0, z, drop(i, xs)) == cons(z, drop(1, drop(i, xs))); + drop_drop(1, i, xs); + assert take(k+1, append(update(0, z, drop(i, xs)), take(i, xs))) == + take(k+1, append(cons(z, drop(i+1, xs)), take(i, xs))); + assert take(k+1, append(cons(z, drop(i+1, xs)), take(i, xs))) == + cons(z, take(k, append(drop(i+1, xs), take(i, xs)))); + + assert ys == take(k, rotate_left(i+1, xs)); + assert ys == take(k, append(drop(i+1, xs), take(i+1, xs))); + if (k <= length(drop(i+1, xs))) { + take_append(k, drop(i+1, xs), take(i+1, xs)); + take_append(k, drop(i+1, xs), take(i, xs)); + } else { + take_append_ge(k, drop(i+1, xs), take(i+1, xs)); + take_append_ge(k, drop(i+1, xs), take(i, xs)); + + assert (i+1) + k < 2 * n; + div_rem_nonneg((i+1) + k, n); + if (((i+1) + k) / n > 1) { + mul_mono_l(2, ((i+1) + k) / n, n); + } else if (((i+1) + k) / n < 1) { + mul_mono_l(((i+1) + k) / n, 0, n); + } + int j = ((i+1)+k)%n; + assert j <= i; + int l = length(drop(i+1, xs)); + assert l == n - i - 1; + take_take(k - l, i + 1, xs); + take_take(k - l, i, xs); + } + } else { + assert i == (n-1); + assert (i + 1) % n == 0; + drop_update_le(i, i, z, xs); + update_eq_append(0, z, xs); + assert take(k+1, rotate_left(i, update(i, z, xs))) == + take(k+1, append(drop(i, update(i, z, xs)), take(i, update(i, z, xs)))); + drop_update_le(i, i, z, xs); + assert take(k+1, rotate_left(i, update(i, z, xs))) == + take(k+1, append(update(0, z, drop(i, xs)), take(i, update(i, z, xs)))); + update_eq_append(0, z, drop(i, xs)); + assert take(k+1, rotate_left(i, update(i, z, xs))) == + 
take(k+1, append(cons(z, drop(1, drop(i, xs))), take(i, update(i, z, xs)))); + drop_drop(1, i, xs); + assert take(k+1, rotate_left(i, update(i, z, xs))) == + take(k+1, append(cons(z, nil), take(i, update(i, z, xs)))); + take_update_le(i, i, z, xs); + assert take(k+1, rotate_left(i, update(i, z, xs))) == + cons(z, take(k, take(i, xs))); + take_take(k, i, xs); + assert take(k+1, rotate_left(i, update(i, z, xs))) == cons(z, ys); + } +} + +lemma void deq_lemma(int k, int i, list xs, list ys, t z) + requires 0 < k && k <= length(xs) && 0 <= i && i < length(xs) && take(k, rotate_left(i, xs)) == ys && z == head(ys); + ensures take(k-1, rotate_left((i+1)%length(xs), xs)) == tail(ys); +{ + int j = (i+1)%length(xs); + drop_n_plus_one(i, xs); + assert tail(take(k, append(drop(i, xs), take(i, xs)))) == take(k-1, append(drop(i+1, xs), take(i, xs))); + if (i+1 < length(xs)) { + mod_lt(i+1, length(xs)); + assert j == i+1; + if (k-1 <= length(xs)-j) { + take_append(k-1, drop(j, xs), take(j, xs)); + take_append(k-1, drop(j, xs), take(i, xs)); + } else { + assert k+i > length(xs); + take_append_ge(k-1, drop(j, xs), take(j, xs)); + take_append_ge(k-1, drop(j, xs), take(i, xs)); + assert k-1-(length(xs)-j) == k+i-length(xs); + assert k+i-length(xs) <= i; + take_take(k+i-length(xs), j, xs); + take_take(k+i-length(xs), i, xs); + assert take(k+i-length(xs), take(j, xs)) == take(k+i-length(xs), take(i, xs)); + } + } else { + assert i+1 == length(xs); + assert (i+1)%length(xs) == 0; + assert j == 0; + assert append(drop(j, xs), take(j, xs)) == xs; + assert append(drop(i+1, xs), take(i, xs)) == take(i, xs); + take_append_ge(k-1, drop(i+1, xs), take(i, xs)); + take_take(k-1, i, xs); + } + assert take(k-1, append(drop(j, xs), take(j, xs))) == take(k-1, append(drop(i+1, xs), take(i, xs))); + assert take(k-1, append(drop(j, xs), take(j, xs))) == tail(take(k, append(drop(i, xs), take(i, xs)))); +} + +lemma void deq_value_lemma(int k, int i, list xs, list ys) + requires 0 < k && k <= length(ys) 
&& 0 <= i && i < length(xs) && take(k, rotate_left(i, xs)) == ys; + ensures nth(i, xs) == head(ys); +{ + drop_n_plus_one(i, xs); + assert nth(i, xs) == head(take(k, append(drop(i, xs), take(i, xs)))); +} + +lemma void combine_list_no_change(listprefix, t x, listsuffix, int i, list xs) + requires 0 <= i && i < length(xs) && prefix == take(i, xs) && x == nth(i, xs) && suffix == drop(i+1, xs); + ensures xs == append(prefix, cons(x, suffix)); +{ + drop_n_plus_one(i, xs); +} + +// Following lemma from `verifast/examples/vstte2010/problem4.java`. +lemma void update_rewrite(list vs, t v, int pos) + requires 0 <= pos && pos < length(vs); + ensures update(pos, v, vs) == append(take(pos, vs), cons(v, (drop(pos+1, vs)))); +{ + switch(vs) { + case nil: + case cons(h, t): + if (pos == 0) { + } else { + update_rewrite(t, v, pos - 1); + } + } +} + +lemma void combine_list_update(listprefix, t x, listsuffix, int i, list xs) + requires 0 <= i && i < length(xs) && prefix == take(i, xs) && suffix == drop(i+1, xs); + ensures update(i, x, xs) == append(prefix, cons(x, suffix)); +{ + update_rewrite(xs, x, i); +} + +@*/ + + +#endif /* SCP_COMMON_H */ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/scp_list_predicates.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/scp_list_predicates.h new file mode 100644 index 00000000000..43bb5142f97 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs/scp_list_predicates.h @@ -0,0 +1,740 @@ +/* + * The code below has been taken from: + * pull request: + * https://github.com/FreeRTOS/FreeRTOS/pull/836 + * file: + * FreeRTOS/Test/VeriFast/include/proof/list.h + * + * The file has been renamed from `list.h` into + * `scp_list_predicates.h` to avoid naming conflicts. + * The include guards have been updated accordingly. + * + * All changes to the proofs, predicates, etc. + * are guarded by a check that `VERIFAST_SINGLE_CORE` is + * NOT defined. 
+ */ + + + + +/* + * FreeRTOS V202112.00 + * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +/* *INDENT-OFF* */ + +#ifndef SCP_LIST_PREDICATES_H +#define SCP_LIST_PREDICATES_H + +#ifndef VERIFAST_SINGLE_CORE + /* Reasons for rewrite: + * - "common.gh" was converted into regular header "scp_common.h" + * - Using existing proof setup instead of definitions below. 
+ */ + #include "scp_common.h" +#else + #define VERIFAST + #include + #include + //@#include "common.gh" + + typedef size_t TickType_t; + typedef size_t UBaseType_t; + typedef ssize_t BaseType_t; + + #define pdTRUE 1 + #define pdFALSE 0 + + /* Empty/no-op macros */ + #define mtCOVERAGE_TEST_MARKER() + #define mtCOVERAGE_TEST_DELAY() + #define listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ) + #define listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ) + #define listTEST_LIST_INTEGRITY( pxList ) + #define listTEST_LIST_ITEM_INTEGRITY( pxListItem ) + #define listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxListItem ) + #define listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxListItem ) +#endif /* VERIFAST_SINGLE_CORE */ + +/* Max value stored in sentinel xListEnd element */ +#ifndef VERIFAST_SINGLE_CORE + /* Reason for rewrite: Match RP2040 port. */ + //VF_macro #define portMAX_DELAY 0xffffffffUL + + /* Verify that the preprocessor and our VeriFast proofs evaluate + * `portMAX_DELAY` to the same values. + */ + void validate_portMAX_DELAY_value() + //@ requires true; + //@ ensures true; + { + //@ TickType_t gVal = portMAX_DELAY; + TickType_t val = portMAX_DELAY; + //@ assert(val == gVal); + } +#else + #define portMAX_DELAY UINT_MAX +#endif /* VERIFAST_SINGLE_CORE */ + + +#ifdef VERIFAST_SINGLE_CORE + /* Reason for deletion: + * structs already defined in FreeRTOS header "list.h" + */ + + struct xLIST; + + struct xLIST_ITEM { + TickType_t xItemValue; + struct xLIST_ITEM * pxNext; + struct xLIST_ITEM * pxPrevious; + void * pvOwner; + struct xLIST *pxContainer; + }; + typedef struct xLIST_ITEM ListItem_t; + + typedef struct xLIST { + UBaseType_t uxNumberOfItems; + struct xLIST_ITEM *pxIndex; + #ifdef VERIFAST /*< ***change MiniList_t to ListItem_t*** */ + struct xLIST_ITEM xListEnd; + #else + MiniListItem_t xListEnd; + #endif + } List_t; +#endif /* VERIFAST_SINGLE_CORE */ + +#ifndef VERIFAST_SINGLE_CORE + /* Reasons for rewrite: + * - Breaking change in VeriFast. 
VeriFast now ensures that no uninitialised + * values are read. `x |-> _` is interpreted as "uninitialised", + * `x |-> ?v` is interpreted as "initialised". + * - In order to verify the scheduler, we have to reason about each node's + * owner. Hence, the predicate has to expose it. + */ + /*@ + predicate xLIST_ITEM( + struct xLIST_ITEM *n, + TickType_t xItemValue, + struct xLIST_ITEM *pxNext, + struct xLIST_ITEM *pxPrevious, + void* pxOwner, + struct xLIST *pxContainer;) = + n->xItemValue |-> xItemValue &*& + n->pxNext |-> pxNext &*& + n->pxPrevious |-> pxPrevious &*& + n->pvOwner |-> pxOwner &*& + n->pxContainer |-> pxContainer; + @*/ +#else + /*@ + predicate xLIST_ITEM( + struct xLIST_ITEM *n, + TickType_t xItemValue, + struct xLIST_ITEM *pxNext, + struct xLIST_ITEM *pxPrevious, + struct xLIST *pxContainer;) = + n->xItemValue |-> xItemValue &*& + n->pxNext |-> pxNext &*& + n->pxPrevious |-> pxPrevious &*& + n->pvOwner |-> _ &*& + n->pxContainer |-> pxContainer; + @*/ +#endif /* VERIFAST_SINGLE_CORE */ + + + +#ifndef VERIFAST_SINGLE_CORE + /* Reason for rewrite: + * In order to verify the scheduler, we have to reason about each node's + * owner. Hence, the predicate has to expose it. + */ + + /* Ferreira et al. (STTT'14) doubly-linked list segment (DLS). */ + /*@ + predicate DLS( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells, + list vals, + list owners, + struct xLIST *pxContainer) = + n == m + ? cells == cons(n, nil) &*& + vals == cons(?v, nil) &*& + owners == cons(?ow, nil) &*& + xLIST_ITEM(n, v, mnext, nprev, ow, pxContainer) + : cells == cons(n, ?cells0) &*& + vals == cons(?v, ?vals0) &*& + owners == cons(?ow, ?owners0) &*& + xLIST_ITEM(n, v, ?o, nprev, ow, pxContainer) &*& DLS(o, n, mnext, m, cells0, vals0, owners0, pxContainer); + @*/ +#else + /* Ferreira et al. (STTT'14) doubly-linked list segment (DLS). 
*/ + /*@ + predicate DLS( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells, + list vals, + struct xLIST *pxContainer) = + n == m + ? cells == cons(n, nil) &*& + vals == cons(?v, nil) &*& + xLIST_ITEM(n, v, mnext, nprev, pxContainer) + : cells == cons(n, ?cells0) &*& + vals == cons(?v, ?vals0) &*& + xLIST_ITEM(n, v, ?o, nprev, pxContainer) &*& DLS(o, n, mnext, m, cells0, vals0, pxContainer); + @*/ +#endif /* VERIFAST_SINGLE_CORE */ + + +#ifndef VERIFAST_SINGLE_CORE + /* Reason for rewrite: + * Predicates `xLIST_ITEM` and `DLS` have been extended to expose node + * owners. Proofs using these predicates must be adapted as well. + */ + + /*@ + lemma void dls_star_item( + struct xLIST_ITEM *n, + struct xLIST_ITEM *m, + struct xLIST_ITEM *o) + requires DLS(n, ?nprev, ?mnext, m, ?cells, ?vals, ?owners, ?l) &*& xLIST_ITEM(o, ?v, ?onext, ?oprev, ?ow, ?l2); + ensures DLS(n, nprev, mnext, m, cells, vals, owners, l) &*& xLIST_ITEM(o, v, onext, oprev, ow, l2) &*& mem(o, cells) == false; + { + open DLS(n, nprev, mnext, m, cells, vals, owners, l); + if (n == m) { + assert xLIST_ITEM(n, _, _, _, _, _); + open xLIST_ITEM(n, _, _, _, _, _); + open xLIST_ITEM(o, _, _, _, _, _); + assert n != o; + close xLIST_ITEM(o, _, _, _, _, _); + close xLIST_ITEM(n, _, _, _, _, _); + close DLS(n, nprev, mnext, m, cells, vals, owners, l); + } + else { + assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), tail(owners), l); + dls_star_item(nnext, m, o); + open xLIST_ITEM(n, _, _, _, _, _); + open xLIST_ITEM(o, _, _, _, _, _); + assert n != o; + close xLIST_ITEM(o, _, _, _, _, _); + close xLIST_ITEM(n, _, _, _, _, _); + close DLS(n, nprev, mnext, m, cells, vals, owners, l); + } + } + + + lemma void dls_distinct( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells) + requires DLS(n, nprev, mnext, m, cells, ?vals, ?owners, ?l); + ensures DLS(n, nprev, mnext, m, 
cells, vals, owners, l) &*& distinct(cells) == true; + { + if (n == m) { + open DLS(n, nprev, mnext, m, cells, vals, owners, l); + close DLS(n, nprev, mnext, m, cells, vals, owners, l); + } else { + open DLS(n, nprev, mnext, m, cells, vals, owners, l); + assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), tail(owners), l); + dls_distinct(nnext, n, mnext, m, tail(cells)); + dls_star_item(nnext, m, n); + close DLS(n, nprev, mnext, m, cells, vals, owners, l); + } + } + @*/ +#else + /*@ + lemma void dls_star_item( + struct xLIST_ITEM *n, + struct xLIST_ITEM *m, + struct xLIST_ITEM *o) + requires DLS(n, ?nprev, ?mnext, m, ?cells, ?vals, ?l) &*& xLIST_ITEM(o, ?v, ?onext, ?oprev, ?l2); + ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& xLIST_ITEM(o, v, onext, oprev, l2) &*& mem(o, cells) == false; + { + open DLS(n, nprev, mnext, m, cells, vals, l); + if (n == m) { + assert xLIST_ITEM(n, _, _, _, _); + open xLIST_ITEM(n, _, _, _, _); + open xLIST_ITEM(o, _, _, _, _); + assert n != o; + close xLIST_ITEM(o, _, _, _, _); + close xLIST_ITEM(n, _, _, _, _); + close DLS(n, nprev, mnext, m, cells, vals, l); + } + else { + assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l); + dls_star_item(nnext, m, o); + open xLIST_ITEM(n, _, _, _, _); + open xLIST_ITEM(o, _, _, _, _); + assert n != o; + close xLIST_ITEM(o, _, _, _, _); + close xLIST_ITEM(n, _, _, _, _); + close DLS(n, nprev, mnext, m, cells, vals, l); + } + } + + + lemma void dls_distinct( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells) + requires DLS(n, nprev, mnext, m, cells, ?vals, ?l); + ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& distinct(cells) == true; + { + if (n == m) { + open DLS(n, nprev, mnext, m, cells, vals, l); + close DLS(n, nprev, mnext, m, cells, vals, l); + } else { + open DLS(n, nprev, mnext, m, cells, vals, l); + assert DLS(?nnext, n, mnext, m, tail(cells), tail(vals), l); + dls_distinct(nnext, n, mnext, m, 
tail(cells)); + dls_star_item(nnext, m, n); + close DLS(n, nprev, mnext, m, cells, vals, l); + } + } + @*/ +#endif /* VERIFAST_SINGLE_CORE */ + +#ifndef VERIFAST_SINGLE_CORE + /* Reason for rewrite: + * In order to verify the scheduler, we have to reason about each node's + * owner. Hence, the predicate has to expose it. + */ + + /*@ + predicate xLIST( + struct xLIST *l, + int uxNumberOfItems, + struct xLIST_ITEM *pxIndex, + struct xLIST_ITEM *xListEnd, + listcells, + listvals, + list owners) = + l->uxNumberOfItems |-> uxNumberOfItems &*& + l->pxIndex |-> pxIndex &*& + mem(pxIndex, cells) == true &*& + xListEnd == &(l->xListEnd) &*& + xListEnd == head(cells) &*& + portMAX_DELAY == head(vals) &*& + struct_xLIST_ITEM_padding(&l->xListEnd) &*& + length(cells) == length(vals) &*& + length(owners) == length(cells) &*& + uxNumberOfItems + 1 == length(cells) &*& + DLS(xListEnd, ?endprev, xListEnd, endprev, cells, vals, owners, l); + @*/ +#else + /*@ + predicate xLIST( + struct xLIST *l, + int uxNumberOfItems, + struct xLIST_ITEM *pxIndex, + struct xLIST_ITEM *xListEnd, + listcells, + listvals) = + l->uxNumberOfItems |-> uxNumberOfItems &*& + l->pxIndex |-> pxIndex &*& + mem(pxIndex, cells) == true &*& + xListEnd == &(l->xListEnd) &*& + xListEnd == head(cells) &*& + portMAX_DELAY == head(vals) &*& + struct_xLIST_ITEM_padding(&l->xListEnd) &*& + length(cells) == length(vals) &*& + uxNumberOfItems + 1 == length(cells) &*& + DLS(xListEnd, ?endprev, xListEnd, endprev, cells, vals, l); + @*/ +#endif /* VERIFAST_SINGLE_CORE */ + + + +#ifndef VERIFAST_SINGLE_CORE + /* Reason for rewrite: + * Predicates `xLIST_ITEM`, `DLS` and `xLIST` have been extended to expose + * node owners. Proofs using these predicates must be adapted as well. 
+ */ + + + /*@ + lemma void xLIST_distinct_cells(struct xLIST *l) + requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals, ?owners); + ensures xLIST(l, n, idx, end, cells, vals, owners) &*& distinct(cells) == true; + { + open xLIST(l, n, idx, end, cells, vals, owners); + assert DLS(end, ?endprev, end, _, cells, vals, owners, l); + dls_distinct(end, endprev, end, endprev, cells); + close xLIST(l, n, idx, end, cells, vals, owners); + } + + lemma void xLIST_star_item(struct xLIST *l, struct xLIST_ITEM *x) + requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals, ?owners) &*& xLIST_ITEM(x, ?v, ?xnext, ?xprev, ?ow, ?l2); + ensures xLIST(l, n, idx, end, cells, vals, owners) &*& xLIST_ITEM(x, v, xnext, xprev, ow, l2) &*& mem(x, cells) == false; + { + open xLIST(l, n, idx, end, cells, vals, owners); + assert DLS(end, ?endprev, end, _, cells, vals, owners, l); + dls_distinct(end, endprev, end, endprev, cells); + dls_star_item(end, endprev, x); + close xLIST(l, n, idx, end, cells, vals, owners); + } + + lemma void dls_first_mem( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells) + requires DLS(n, nprev, mnext, m, cells, ?vals, ?owners, ?l); + ensures DLS(n, nprev, mnext, m, cells, vals, owners, l) &*& mem(n, cells) == true &*& index_of(n, cells) == 0; + { + open DLS(n, nprev, mnext, m, cells, vals, owners, l); + if (n == m) { + assert cells == cons(n, nil); + close DLS(n, nprev, mnext, m, cells, vals, owners, l); + } else { + assert cells == cons(n, ?tail); + close DLS(n, nprev, mnext, m, cells, vals, owners, l); + } + } + + lemma void dls_not_empty( + struct xLIST_ITEM *n, + struct xLIST_ITEM *m, + list cells, + struct xLIST_ITEM *x) + requires DLS(n, m, n, m, cells, ?vals, ?owners, ?l) &*& mem(x, cells) == true &*& x != n; + ensures DLS(n, m, n, m, cells, vals, owners, l) &*& n != m; + { + open DLS(n, m, n, m, cells, vals, owners, l); + close DLS(n, m, n, m, cells, vals, owners, l); + } + + lemma void dls_last_mem( + 
struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells) + requires DLS(n, nprev, mnext, m, cells, ?vals, ?owners, ?l); + ensures DLS(n, nprev, mnext, m, cells, vals, owners, l) &*& mem(m, cells) == true &*& index_of(m, cells) == length(cells) - 1; + { + open DLS(n, nprev, mnext, m, cells, vals, owners, l); + if (n == m) { + // trivial + } else { + open xLIST_ITEM(n, _, ?nnext, _, _, l); + assert DLS(?o, n, mnext, m, tail(cells), tail(vals), tail(owners), l); + dls_last_mem(o, n, mnext, m, tail(cells)); + close xLIST_ITEM(n, _, nnext, _, _, l); + } + close DLS(n, nprev, mnext, m, cells, vals, owners, l); + } + + + lemma void split( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells, + list vals, + struct xLIST_ITEM *x, + int i) + requires DLS(n, nprev, mnext, m, cells, vals, ?owners, ?l) &*& x != n &*& mem(x, cells) == true &*& index_of(x,cells) == i; + ensures DLS(n, nprev, x, ?xprev, take(i, cells), take(i, vals), take(i, owners), l) &*& DLS(x, xprev, mnext, m, drop(i, cells), drop(i, vals), drop(i, owners), l) &*& xprev == nth(i-1, cells); + { + open DLS(n, nprev, mnext, m, cells, vals, owners, l); + assert n != m; + assert xLIST_ITEM(n, ?v, ?nnext, _, ?ow, _); + assert DLS(nnext, n, mnext, m, tail(cells), tail(vals), tail(owners), l); + if (nnext == x) { + close DLS(n, nprev, x, n, singleton(n), singleton(v), singleton(ow), l); + open DLS(x, n, mnext, m, tail(cells), tail(vals), tail(owners), l); + open xLIST_ITEM(x, _, ?xnext, ?xprev, ?xow, l); + close xLIST_ITEM(x, _, xnext, xprev, xow, l); + close DLS(x, n, mnext, m, tail(cells), tail(vals), tail(owners), l); + } else { + assert nnext != x; + split(nnext, n, mnext, m, tail(cells), tail(vals), x, i - 1); + assert DLS(nnext, n, x, ?xprev, take(i-1, tail(cells)), take(i-1, tail(vals)), take(i-1, tail(owners)), l); + dls_distinct(nnext, n, x, xprev, take(i-1, tail(cells))); + 
dls_star_item(nnext, xprev, n); + dls_last_mem(nnext, n, x, xprev, take(i-1, tail(cells))); + assert n != xprev; + close DLS(n, nprev, x, xprev, take(i, cells), take(i, vals), take(i, owners), l); + } + } + + lemma void join( + struct xLIST_ITEM *n1, + struct xLIST_ITEM *nprev1, + struct xLIST_ITEM *mnext1, + struct xLIST_ITEM *m1, + list cells1, + list vals1, + struct xLIST_ITEM *n2, + struct xLIST_ITEM *nprev2, + struct xLIST_ITEM *mnext2, + struct xLIST_ITEM *m2, + list cells2, + list vals2) + requires + DLS(n1, nprev1, mnext1, m1, cells1, vals1, ?owners1, ?l) &*& + DLS(n2, nprev2, mnext2, m2, cells2, vals2, ?owners2, l) &*& + mnext1 == n2 &*& m1 == nprev2; + ensures DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), append(owners1, owners2), l); + { + if (n1 == m1) { + dls_first_mem(n1, nprev1, mnext1, m1, cells1); + dls_last_mem(n2, nprev2, mnext2, m2, cells2); + open DLS(n1, nprev1, mnext1, m1, cells1, vals1, owners1, l); + dls_star_item(n2, m2, n1); + close DLS(n1, nprev1, mnext2, m2, append(singleton(n1), cells2), append(vals1, vals2), append(owners1, owners2) ,l); + } else { + open DLS(n1, nprev1, mnext1, m1, cells1, vals1, owners1, l); + assert DLS(?o, n1, mnext1, m1, ?cells1_tail, ?vals1_tail, ?owners1_tail, l); + join(o, n1, mnext1, m1, cells1_tail, vals1_tail, + n2, nprev2, mnext2, m2, cells2, vals2); + assert DLS(o, n1, mnext2, m2, append(cells1_tail, cells2), append(vals1_tail, vals2), append(owners1_tail, owners2), l); + dls_last_mem(o, n1, mnext2, m2, append(cells1_tail, cells2)); + dls_star_item(o, m2, n1); + close DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), append(owners1, owners2), l); + } + } + @*/ +#else + /*@ + lemma void xLIST_distinct_cells(struct xLIST *l) + requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals); + ensures xLIST(l, n, idx, end, cells, vals) &*& distinct(cells) == true; + { + open xLIST(l, n, idx, end, cells, vals); + assert DLS(end, ?endprev, end, _, cells, vals, l); + 
dls_distinct(end, endprev, end, endprev, cells); + close xLIST(l, n, idx, end, cells, vals); + } + + lemma void xLIST_star_item(struct xLIST *l, struct xLIST_ITEM *x) + requires xLIST(l, ?n, ?idx, ?end, ?cells, ?vals) &*& xLIST_ITEM(x, ?v, ?xnext, ?xprev, ?l2); + ensures xLIST(l, n, idx, end, cells, vals) &*& xLIST_ITEM(x, v, xnext, xprev, l2) &*& mem(x, cells) == false; + { + open xLIST(l, n, idx, end, cells, vals); + assert DLS(end, ?endprev, end, _, cells, vals, l); + dls_distinct(end, endprev, end, endprev, cells); + dls_star_item(end, endprev, x); + close xLIST(l, n, idx, end, cells, vals); + } + + lemma void dls_first_mem( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells) + requires DLS(n, nprev, mnext, m, cells, ?vals, ?l); + ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(n, cells) == true &*& index_of(n, cells) == 0; + { + open DLS(n, nprev, mnext, m, cells, vals, l); + if (n == m) { + assert cells == cons(n, nil); + close DLS(n, nprev, mnext, m, cells, vals, l); + } else { + assert cells == cons(n, ?tail); + close DLS(n, nprev, mnext, m, cells, vals, l); + } + } + + lemma void dls_not_empty( + struct xLIST_ITEM *n, + struct xLIST_ITEM *m, + list cells, + struct xLIST_ITEM *x) + requires DLS(n, m, n, m, cells, ?vals, ?l) &*& mem(x, cells) == true &*& x != n; + ensures DLS(n, m, n, m, cells, vals, l) &*& n != m; + { + open DLS(n, m, n, m, cells, vals, l); + close DLS(n, m, n, m, cells, vals, l); + } + + lemma void dls_last_mem( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells) + requires DLS(n, nprev, mnext, m, cells, ?vals, ?l); + ensures DLS(n, nprev, mnext, m, cells, vals, l) &*& mem(m, cells) == true &*& index_of(m, cells) == length(cells) - 1; + { + open DLS(n, nprev, mnext, m, cells, vals, l); + if (n == m) { + // trivial + } else { + open xLIST_ITEM(n, _, ?nnext, _, l); + assert DLS(?o, n, mnext, m, 
tail(cells), tail(vals), l); + dls_last_mem(o, n, mnext, m, tail(cells)); + close xLIST_ITEM(n, _, nnext, _, l); + } + close DLS(n, nprev, mnext, m, cells, vals, l); + } + + + lemma void split( + struct xLIST_ITEM *n, + struct xLIST_ITEM *nprev, + struct xLIST_ITEM *mnext, + struct xLIST_ITEM *m, + list cells, + list vals, + struct xLIST_ITEM *x, + int i) + requires DLS(n, nprev, mnext, m, cells, vals, ?l) &*& x != n &*& mem(x, cells) == true &*& index_of(x,cells) == i; + ensures DLS(n, nprev, x, ?xprev, take(i, cells), take(i, vals), l) &*& DLS(x, xprev, mnext, m, drop(i, cells), drop(i, vals), l) &*& xprev == nth(i-1, cells); + { + open DLS(n, nprev, mnext, m, cells, vals, l); + assert n != m; + assert xLIST_ITEM(n, ?v, ?nnext, _, _); + assert DLS(nnext, n, mnext, m, tail(cells), tail(vals), l); + if (nnext == x) { + close DLS(n, nprev, x, n, singleton(n), singleton(v), l); + open DLS(x, n, mnext, m, tail(cells), tail(vals), l); + open xLIST_ITEM(x, _, ?xnext, ?xprev, l); + close xLIST_ITEM(x, _, xnext, xprev, l); + close DLS(x, n, mnext, m, tail(cells), tail(vals), l); + } else { + assert nnext != x; + split(nnext, n, mnext, m, tail(cells), tail(vals), x, i - 1); + assert DLS(nnext, n, x, ?xprev, take(i-1, tail(cells)), take(i-1, tail(vals)), l); + dls_distinct(nnext, n, x, xprev, take(i-1, tail(cells))); + dls_star_item(nnext, xprev, n); + dls_last_mem(nnext, n, x, xprev, take(i-1, tail(cells))); + assert n != xprev; + close DLS(n, nprev, x, xprev, take(i, cells), take(i, vals), l); + } + } + + lemma void join( + struct xLIST_ITEM *n1, + struct xLIST_ITEM *nprev1, + struct xLIST_ITEM *mnext1, + struct xLIST_ITEM *m1, + list cells1, + list vals1, + struct xLIST_ITEM *n2, + struct xLIST_ITEM *nprev2, + struct xLIST_ITEM *mnext2, + struct xLIST_ITEM *m2, + list cells2, + list vals2) + requires + DLS(n1, nprev1, mnext1, m1, cells1, vals1, ?l) &*& + DLS(n2, nprev2, mnext2, m2, cells2, vals2, l) &*& + mnext1 == n2 &*& m1 == nprev2; + ensures DLS(n1, nprev1, mnext2, 
m2, append(cells1, cells2), append(vals1, vals2), l); + { + if (n1 == m1) { + dls_first_mem(n1, nprev1, mnext1, m1, cells1); + dls_last_mem(n2, nprev2, mnext2, m2, cells2); + open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l); + dls_star_item(n2, m2, n1); + close DLS(n1, nprev1, mnext2, m2, append(singleton(n1), cells2), append(vals1, vals2), l); + } else { + open DLS(n1, nprev1, mnext1, m1, cells1, vals1, l); + assert DLS(?o, n1, mnext1, m1, ?cells1_tail, ?vals1_tail, l); + join(o, n1, mnext1, m1, cells1_tail, vals1_tail, + n2, nprev2, mnext2, m2, cells2, vals2); + assert DLS(o, n1, mnext2, m2, append(cells1_tail, cells2), append(vals1_tail, vals2), l); + dls_last_mem(o, n1, mnext2, m2, append(cells1_tail, cells2)); + dls_star_item(o, m2, n1); + close DLS(n1, nprev1, mnext2, m2, append(cells1, cells2), append(vals1, vals2), l); + } + } + @*/ +#endif /* VERIFAST_SINGLE_CORE */ + +/*@ +lemma void idx_remains_in_list( + list cells, + t idx, + t x, + int ix) +requires + idx != x &*& + mem(idx, cells) == true &*& + mem(x, cells) == true &*& + index_of(x, cells) == ix; +ensures mem(idx, remove_nth(ix, cells)) == true; +{ + neq_mem_remove(idx, x, cells); + remove_remove_nth(cells, x); +} +@*/ + +// Following lemma from `verifast/examples/shared_boxes/concurrentqueue.c`. +// Used in the uxListRemove proof to show that the item to remove `x` must +// have value `nth(i, vals)` where `i == index_of(x, cells)`. 
+/*@ +lemma void drop_nth_index_of(list vs, int i) +requires + 0 <= i && i < length(vs); +ensures + head(drop(i , vs)) == nth(i, vs); +{ + switch(vs) { + case nil: + case cons(h, t): + if (i == 0) { + // trivial + } else { + drop_nth_index_of(t, i - 1); + } + } +} +@*/ + +/*@ +lemma void remove_append(t x, list l1, list l2) + requires mem(x, l1) == false; + ensures remove(x, append(l1, l2)) == append(l1, remove(x, l2)); +{ + switch(l1) { + case nil: + case cons(h1, t1): + remove_append(x, t1, l2); + } +} +@*/ + + +#endif /* SCP_LIST_PREDICATES_H */ + +/* *INDENT-ON* */ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs_extended/README.md b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs_extended/README.md new file mode 100644 index 00000000000..05888276b2c --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs_extended/README.md @@ -0,0 +1,4 @@ +This directory contains proofs that concern the predicates and proofs written +by Aalok Thakkar and Nathan Chong, see directory `single_core_proofs`. +For now, we want to have a clear separation between the reused proofs in +`single_core_proofs` and any new proofs. \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs_extended/scp_list_predicates_extended.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs_extended/scp_list_predicates_extended.h new file mode 100644 index 00000000000..950c5475a3e --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/single_core_proofs_extended/scp_list_predicates_extended.h @@ -0,0 +1,861 @@ +#ifndef SCP_LIST_PREDICATES_EXTENDED_H +#define SCP_LIST_PREDICATES_EXTENDED_H + +#include "single_core_proofs/scp_list_predicates.h" + +/* ============================================================================= + * The lemmas below assist in opening and closing DLS predicates in a way that + * allows accesses to `pxItem->pxNext`. 
+*/ + +/* @ +lemma void DLS_end_next_open(struct xLIST* pxList, struct xLIST_ITEM* pxItem) +requires + DLS(?gEnd, ?gEndPrev, gEnd, gEndPrev, ?gCells, ?gVals, pxList) &*& + mem(pxItem, gCells) == true &*& + gEnd == head(gCells) &*& + length(gCells) == length(gVals) &*& + length(gCells) > 1 + &*& + pxItem == gEnd; +ensures + xLIST_ITEM(gEnd, head(gVals), ?gItem_next, gEndPrev, pxList) &*& + DLS(gItem_next, gEnd, gEnd, gEndPrev, drop(1, gCells), drop(1, gVals), pxList ) &*& + mem(gItem_next, gCells) == true; +{ + open DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, pxList); + // open DLS and xLIST_ITEM predicates to justify + // accessing `pxItem->pxNext` + assert( xLIST_ITEM(gEnd, ?gItemVal, ?gItem_next, gEndPrev, pxList) ); + open xLIST_ITEM(gEnd, gItemVal, gItem_next, gEndPrev, pxList); + assert( DLS(gItem_next, gEnd, gEnd, gEndPrev, + drop(1, gCells), drop(1, gVals), pxList ) ); + open DLS(gItem_next, gEnd, gEnd, gEndPrev, + drop(1, gCells), drop(1, gVals), pxList ); + + // open DLS and xLIST_ITEM predicates to prove + // `mem( pxItem->pxNext, gCells) == true )` + // which requires accessing `pxItem->pxNext` + assert( xLIST_ITEM(gItem_next, ?gItem_nextVal, ?gItem_nextNext, gEnd, pxList) ); + open xLIST_ITEM(gItem_next, gItem_nextVal, gItem_nextNext, gEnd, pxList); + assert( mem(pxItem->pxNext, gCells) == true ); + close xLIST_ITEM(gItem_next, gItem_nextVal, gItem_nextNext, gEnd, pxList); + + // closing what we opened above + close DLS(gItem_next, gEnd, gEnd, gEndPrev, + drop(1, gCells), drop(1, gVals), pxList ); + close xLIST_ITEM(gEnd, gItemVal, gItem_next, gEndPrev, pxList); +} + + +lemma void DLS_end_next_close(struct xLIST* pxList, struct xLIST_ITEM* pxItem) +requires + xLIST_ITEM(?gEnd, ?gItemVal, ?gItem_next, ?gEndPrev, pxList) &*& + DLS(gItem_next, gEnd, gEnd, gEndPrev, ?gCells, ?gVals, pxList) &*& +// mem(gItem_next, gCells) == true &*& + length(gCells) == length(gVals) &*& + length(gCells) > 0 &*& + pxItem == gEnd; +ensures + DLS(gEnd, gEndPrev, gEnd, 
gEndPrev, + cons(gEnd, gCells), cons(gItemVal, gVals), pxList); +{ + open DLS(gItem_next, gEnd, gEnd, gEndPrev, gCells, gVals, pxList); + close DLS(gItem_next, gEnd, gEnd, gEndPrev, gCells, gVals, pxList); + dls_star_item(gItem_next, gEndPrev, gEnd); + dls_distinct(gItem_next, gEnd, gEnd, gEndPrev, gCells); + dls_last_mem(gItem_next, gEnd, gEnd, gEndPrev, gCells); + close DLS(gEnd, gEndPrev, gEnd, gEndPrev, + cons(gEnd, gCells), cons(gItemVal, gVals), pxList); +} + + +lemma void DLS_nonEndItem_next_open(struct xLIST* pxList, struct xLIST_ITEM* pxItem) +requires + DLS(?gEnd, ?gEndPrev, gEnd, gEndPrev, ?gCells, ?gVals, pxList) &*& + mem(pxItem, gCells) == true &*& + gEnd == head(gCells) &*& + length(gCells) == length(gVals) &*& + length(gCells) > 1 + &*& + pxItem != gEnd; +ensures + // DLS prefix + DLS(gEnd, gEndPrev, pxItem, ?pxItem_prev, + take(index_of(pxItem, gCells), gCells), + take(index_of(pxItem, gCells), gVals), + pxList) + &*& + // item of interest + xLIST_ITEM(pxItem, ?gItemVal, ?pxItem_next, pxItem_prev, pxList) &*& + gItemVal == nth(index_of(pxItem, gCells), gVals) + &*& + // DLS suffix + (pxItem != gEndPrev + ? 
DLS(pxItem_next, pxItem, gEnd, gEndPrev, + drop(1, drop(index_of(pxItem, gCells), gCells)), + drop(1, drop(index_of(pxItem, gCells), gVals)), + pxList) + : (pxItem_next == gEnd &*& + index_of(pxItem, gCells) == length(gCells) - 1 + ) + ) + &*& + mem(pxItem_next, gCells) == true; +{ + int pxItemIndex_0 = index_of(pxItem, gCells); + + + // open DLS and xLIST_ITEM predicates to justify + // accessing `pxItem->pxNext` + split(gEnd, gEndPrev, gEnd, gEndPrev, + gCells, gVals, pxItem, pxItemIndex_0); + // DLS prefix + assert( DLS(gEnd, gEndPrev, pxItem, ?pxItem_prev, + take(pxItemIndex_0, gCells), take(pxItemIndex_0, gVals), + pxList) ); + // DLS suffix + assert( DLS(pxItem, pxItem_prev, gEnd, gEndPrev, + drop(pxItemIndex_0, gCells), drop(pxItemIndex_0, gVals), + pxList) ); + open DLS(pxItem, pxItem_prev, gEnd, gEndPrev, + drop(pxItemIndex_0, gCells), drop(pxItemIndex_0, gVals), + pxList); + assert( xLIST_ITEM(pxItem, ?gItemVal, ?pxItem_next, pxItem_prev, pxList) ); + assert( gItemVal == head(drop(pxItemIndex_0, gVals)) ); + head_drop_n_equals_nths(gVals, pxItemIndex_0); + assert( gItemVal == nth(index_of(pxItem, gCells), gVals) ); + + + // open DLS and xLIST_ITEM predicates to prove + // `mem( pxItem->pxNext, gCells) == true` + // which requires accessing `pxItem->pxNext` + if(pxItem == gEndPrev) { + assert( drop(pxItemIndex_0, gCells) == cons(pxItem, nil) ); + drop_index_equals_singleton_implies_last_element(gCells, pxItem); + assert( pxItemIndex_0 == length(gCells) - 1 ); + + // `pxItem` is last element in DLS suffix + // -> `pxItem_next` is head of DLS prefix + // open DLS prefix + open xLIST_ITEM(pxItem, gItemVal, pxItem_next, pxItem_prev, pxList); + assert( gCells == cons(_, _) ); + assert( mem(pxItem->pxNext, gCells) == true ); + + // close item of interest + close xLIST_ITEM(pxItem, gItemVal, pxItem_next, pxItem_prev, pxList); + } else { + // `pxItem` is not end of DLS suffix + // -> `pxItem_next` is also in DLS suffix + // open DLS suffix one step further + + 
// rest of DLS suffix + assert( DLS(pxItem_next, pxItem, gEnd, gEndPrev, + drop(1, drop(pxItemIndex_0, gCells)), + drop(1, drop(pxItemIndex_0, gVals)), + pxList) ); + open DLS(pxItem_next, pxItem, gEnd, gEndPrev, + drop(1, drop(pxItemIndex_0, gCells)), + drop(1, drop(pxItemIndex_0, gVals)), + pxList); + assert( xLIST_ITEM(pxItem_next, ?gItem_nextVal, ?pxItem_next_next, pxItem, pxList) ); + open xLIST_ITEM(pxItem, gItemVal, pxItem_next, pxItem_prev, pxList); + mem_suffix_implies_mem(pxItem_next, gCells, pxItemIndex_0); + assert( mem(pxItem->pxNext, gCells) == true ); + + // close rest of DLS suffix + close xLIST_ITEM(pxItem_next, gItem_nextVal, pxItem_next_next, pxItem, pxList); + close DLS(pxItem_next, pxItem, gEnd, gEndPrev, + drop(1, drop(pxItemIndex_0, gCells)), + drop(1, drop(pxItemIndex_0, gVals)), + pxList); + + // close item of interest + close xLIST_ITEM(pxItem, gItemVal, pxItem_next, pxItem_prev, pxList); + } +} + + +lemma void DLS_nonEndItem_next_close(struct xLIST* pxList, struct xLIST_ITEM* pxItem, + list gCells, + list gVals) +requires + length(gCells) == length(gVals) &*& + length(gCells) > 1 + &*& + // DLS prefix + DLS(?gEnd, ?gEndPrev, pxItem, ?gItem_prev, ?gCellsPrefix, ?gValsPrefix, + pxList) + &*& + mem(pxItem, gCells) == true &*& + gCellsPrefix == take(index_of(pxItem, gCells), gCells) &*& + gValsPrefix == take(index_of(pxItem, gCells), gVals) + &*& + // item of interest + pxItem != gEnd &*& + xLIST_ITEM(pxItem, ?gItemVal, ?gItem_next, gItem_prev, pxList) &*& + mem(gItemVal, gVals) == true &*& + gItemVal == nth(index_of(pxItem, gCells), gVals) + &*& + // DLS suffix + (pxItem != gEndPrev + ? 
DLS(gItem_next, pxItem, gEnd, gEndPrev, + drop(1, drop(index_of(pxItem, gCells), gCells)), + drop(1, drop(index_of(pxItem, gCells), gVals)), + pxList) + : (gItem_next == gEnd &*& + index_of(pxItem, gCells) == length(gCells) - 1 + ) + ) + &*& + mem(gItem_next, gCells) == true; +ensures + DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, pxList); +{ + int gItemIndex = index_of(pxItem, gCells); + head_drop_n_equals_nths(gCells, gItemIndex); + head_drop_n_equals_nths(gVals, gItemIndex); + + if( pxItem != gEndPrev ) { + assert( drop(gItemIndex, gVals) == cons(_, _) ); + assert( xLIST_ITEM(pxItem, ?gV, _, gItem_prev, pxList) ); + nth_index(gCells, pxItem); + close DLS(pxItem, gItem_prev, gEnd, gEndPrev, + drop(gItemIndex, gCells), drop(gItemIndex, gVals), + pxList); + join(gEnd, gEndPrev, pxItem, gItem_prev, gCellsPrefix, gValsPrefix, + pxItem, gItem_prev, gEnd, gEndPrev, drop(gItemIndex, gCells), drop(gItemIndex, gVals)); + } else { + assert( xLIST_ITEM(pxItem, ?gV, ?gNext, gItem_prev, pxList) ); + assert( xLIST_ITEM(pxItem, gV, gEnd, gItem_prev, pxList) ); + close DLS(pxItem, gItem_prev, gEnd, gEndPrev, cons(pxItem, nil), cons(gItemVal, nil), pxList); + join(gEnd, gEndPrev, pxItem, gItem_prev, gCellsPrefix, gValsPrefix, + pxItem, gItem_prev, gEnd, gEndPrev, cons(pxItem, nil), cons(gItemVal, nil)); + assert( DLS(gEnd, gEndPrev, gEnd, gEndPrev, ?gCellsRes, ?gValsRes, pxList)); + + assert( gCellsPrefix == take(index_of(pxItem, gCells), gCells) ); + assert( gValsPrefix == take(index_of(pxItem, gCells), gVals) ); + assert( gCellsRes == append(gCellsPrefix, cons(pxItem, nil)) ); + assert( gValsRes == append(gValsPrefix, cons(gItemVal, nil)) ); + + + drop_n_plus_one(gCells, index_of(pxItem, gCells)); + drop_n_plus_one(gVals, index_of(pxItem, gCells)); + nth_index(gCells, pxItem); + + assert( gCellsRes == gCells ); + assert( gValsRes == gVals ); + } +} + + +lemma void DLS_next_open(struct xLIST* pxList, struct xLIST_ITEM* pxItem) +requires + DLS(?gEnd, ?gEndPrev, gEnd, 
gEndPrev, ?gCells, ?gVals, pxList) &*& + mem(pxItem, gCells) == true &*& + gEnd == head(gCells) &*& + length(gCells) == length(gVals) &*& + length(gCells) > 1; +ensures + pxItem == gEnd + ? ( + xLIST_ITEM(gEnd, head(gVals), ?gItem_next, gEndPrev, pxList) &*& + DLS(gItem_next, gEnd, gEnd, gEndPrev, drop(1, gCells), drop(1, gVals), pxList ) &*& + mem(gItem_next, gCells) == true + ) + : ( + // DLS prefix + DLS(gEnd, gEndPrev, pxItem, ?gItem_prev, + take(index_of(pxItem, gCells), gCells), + take(index_of(pxItem, gCells), gVals), + pxList) + &*& + // item of interest + xLIST_ITEM(pxItem, ?gItemVal, ?pxItem_next, gItem_prev, pxList) &*& + gItemVal == nth(index_of(pxItem, gCells), gVals) + &*& + // DLS suffix + (pxItem != gEndPrev + ? DLS(pxItem_next, pxItem, gEnd, gEndPrev, + drop(1, drop(index_of(pxItem, gCells), gCells)), + drop(1, drop(index_of(pxItem, gCells), gVals)), + pxList) + : (pxItem_next == gEnd &*& + index_of(pxItem, gCells) == length(gCells) - 1 + ) + ) + &*& + mem(pxItem_next, gCells) == true + ); +{ + if( pxItem == gEnd ) { + DLS_end_next_open(pxList, pxItem); + } else { + DLS_nonEndItem_next_open(pxList, pxItem); + } +} + + +lemma void DLS_next_close(struct xLIST* pxList, struct xLIST_ITEM* pxItem, + list gCells, + list gVals, + struct xLIST_ITEM* gEnd, + struct xLIST_ITEM* gEndPrev) +requires + head(gCells) == gEnd &*& + length(gCells) > 1 &*& + length(gCells) == length(gVals) &*& + pxItem == gEnd + ? 
( + xLIST_ITEM(gEnd, ?gItemVal, ?gItem_next, gEndPrev, pxList) &*& + DLS(gItem_next, gEnd, gEnd, gEndPrev, drop(1, gCells), drop(1, gVals), pxList) &*& + length(gCells) == length(gVals) &*& + length(gCells) > 0 &*& + head(gVals) == gItemVal + ) + : ( + length(gCells) == length(gVals) &*& + length(gCells) > 1 + &*& + // DLS prefix + DLS(gEnd, gEndPrev, pxItem, ?gItem_prev, ?gCellsPrefix, ?gValsPrefix, + pxList) + &*& + mem(pxItem, gCells) == true &*& + gCellsPrefix == take(index_of(pxItem, gCells), gCells) &*& + gValsPrefix == take(index_of(pxItem, gCells), gVals) + &*& + // item of interest + pxItem != gEnd &*& + xLIST_ITEM(pxItem, ?gItemVal, ?gItem_next, gItem_prev, pxList) &*& + mem(gItemVal, gVals) == true &*& + gItemVal == nth(index_of(pxItem, gCells), gVals) + &*& + // DLS suffix + (pxItem != gEndPrev + ? DLS(gItem_next, pxItem, gEnd, gEndPrev, + drop(1, drop(index_of(pxItem, gCells), gCells)), + drop(1, drop(index_of(pxItem, gCells), gVals)), + pxList) + : (gItem_next == gEnd &*& + index_of(pxItem, gCells) == length(gCells) - 1 + ) + ) + &*& + mem(gItem_next, gCells) == true + ); +ensures + DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, pxList); +{ + if( pxItem == gEnd ) { + DLS_end_next_close(pxList, pxItem); + + // why is this necessary? + assert( gCells == cons( _, _) ); + assert( gVals == cons(_, _) ); + } else { + DLS_nonEndItem_next_close(pxList, pxItem, gCells, gVals); + } +} +@*/ + +#ifdef IGNORE_DEPRECATED +/* By verifying the following function, we can validate that the above lemmas + * apply to the use cases they are meant for. 
+ */ +void lemma_validation__DLS_item_next(struct xLIST_ITEM* pxTaskItem) +/* @ requires + DLS(?gEnd, ?gEndPrev, gEnd, gEndPrev, ?gCells, ?gVals, ?gList) &*& + mem(pxTaskItem, gCells) == true &*& + gEnd == head(gCells) &*& + length(gCells) == length(gVals) &*& + length(gCells) > 1; +@*/ +/* @ ensures + DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gList) &*& + mem(pxTaskItem, gCells) == true; +@*/ +{ + //@ struct xLIST_ITEM* gTaskItem_0 = pxTaskItem; + + /* @ + if( gTaskItem_0 == gEnd ) { + DLS_end_next_open(gList, gTaskItem_0); + } else { + DLS_nonEndItem_next_open(gList, gTaskItem_0); + } + @*/ + + //@ DLS_next_open(gList, gTaskItem_0); + + pxTaskItem = pxTaskItem->pxNext; + //@ struct xLIST_ITEM* pxItem_1 = pxTaskItem; + + + //@ close xLIST_ITEM(gTaskItem_0, ?gTaskItemVal, _, _, gList); + + //@ DLS_next_close(gList, gTaskItem_0, gCells, gVals, gEnd, gEndPrev); + + /* @ + if( gTaskItem_0 == gEnd ) { + DLS_end_next_close(gList, gTaskItem_0); + assert( DLS(gEnd, gEndPrev, gEnd, gEndPrev, ?gCells2, ?gVals2, gList) ); + + // why is this necessary? + assert( gCells == cons( _, _) ); + assert( gVals == cons(_, _) ); + } else { + DLS_nonEndItem_next_close(gList, gTaskItem_0, gCells, gVals); + } + @*/ + + //@ assert( mem(pxItem_1, gCells) == true ); +} +#endif /* IGNORE_DEPRECATED */ + + + +/* ---------------------------------------- + * The following lemmas aim to simplify the lemmas above and reduce + * the number of case distinctions that are introduced by applying them. + */ + + +/*@ +// Splitting a full DLS of the form +// DLS(end, endPrev, end, endPrev, cells, vals, list) +// at item `I` should result in a prefix, the item of interest and a suffix. 
+// Both prefix and suffix can be empty, which the standard DLS predicate does +// not allow +predicate DLS_prefix( + // prefix args + list prefCells, + list prefVals, + list prefOwners, + struct xLIST_ITEM* item, + struct xLIST_ITEM* itemPrev, + // unsplit DLS args + struct xLIST_ITEM *end, + struct xLIST_ITEM *endPrev, + struct xLIST *pxContainer) = + length(prefCells) == length(prefVals) &*& + length(prefOwners) == length(prefCells) &*& + switch(prefCells) { + case nil: return + prefVals == nil &*& + prefOwners == nil &*& + item == end &*& + itemPrev == endPrev; + case cons(headItem, tailCells): return + item != end &*& + // itemPrev != endPrev &*& // do we need to know this? + headItem == end &*& + DLS(end, endPrev, item, itemPrev, prefCells, prefVals, prefOwners, + pxContainer); + }; + +predicate DLS_suffix( + // suffix args + list sufCells, + list sufVals, + list sufOwners, + struct xLIST_ITEM* item, + struct xLIST_ITEM* itemNext, + // unsplit DLS args + struct xLIST_ITEM *end, + struct xLIST_ITEM *endPrev, + struct xLIST *pxContainer) = + length(sufCells) == length(sufVals) &*& + length(sufOwners) == length(sufCells) &*& + switch(sufCells) { + case nil: return + sufVals == nil &*& + sufOwners == nil &*& + item == endPrev &*& + itemNext == end; + case cons(headItem, tailCells): return + item != endPrev &*& + mem(endPrev, sufCells) == true &*& + index_of(endPrev, sufCells) == length(sufCells)-1 &*& + DLS(itemNext, item, end, endPrev, sufCells, sufVals, sufOwners, + pxContainer); + }; + + +lemma void DLS_open_2(struct xLIST_ITEM* pxItem) +requires + DLS(?gEnd, ?gEndPrev, gEnd, gEndPrev, ?gCells, ?gVals, ?gOwners, ?gList) &*& + mem(pxItem, gCells) == true &*& + gEnd == head(gCells) &*& + length(gCells) == length(gVals) &*& + length(gOwners) == length(gCells) &*& + length(gCells) > 1; +ensures + DLS_prefix(?gPrefCells, ?gPrefVals, ?gPrefOwners, pxItem, ?gItemPrev, + gEnd, gEndPrev, gList) + &*& + xLIST_ITEM(pxItem, ?gItemVal, ?gItemNext, gItemPrev, ?gOw, gList) + 
&*& + DLS_suffix(?gSufCells, ?gSufVals, ?gSufOwners, pxItem, gItemNext, + gEnd, gEndPrev, gList) + &*& + // lists have form "prefix + element + suffix" + gCells == append(gPrefCells, append(singleton(pxItem), gSufCells)) &*& + gVals == append(gPrefVals, append(singleton(gItemVal), gSufVals)) &*& + gOwners == append(gPrefOwners, append(singleton(gOw), gSufOwners)) + &*& + // next in cells + mem(gItemNext, gCells) == true &*& + // prev in cells + mem(gItemPrev, gCells) == true + ; +{ + if(pxItem == gEnd) { + // pxItem is first/ left-most item in the list + // -> empty prefix + + open DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gOwners, gList); + assert( xLIST_ITEM(pxItem, ?gItemVal, ?gItemNext, ?gItemPrev, ?gOw, gList) ); + assert( DLS(gItemNext, pxItem, gEnd, gEndPrev, + ?gSufCells, ?gSufVals, ?gSufOwners, gList) ); + close DLS_prefix(nil, nil, nil, pxItem, gItemPrev, + gEnd, gEndPrev, gList); + + // Prove: `mem(gItemNext, gCells) == true` + open DLS(gItemNext, pxItem, gEnd, gEndPrev, + gSufCells, gSufVals, gSufOwners, gList); + assert( mem(gItemNext, gCells) == true ); + close DLS(gItemNext, pxItem, gEnd, gEndPrev, + gSufCells, gSufVals, gSufOwners, gList); + + // Prove: `mem(gItemPrev, gCells) == true ` + assert( gItemPrev == gEndPrev ); + dls_last_mem(gItemNext, pxItem, gEnd, gEndPrev, gSufCells); + assert( mem(gItemPrev, gCells) == true ); + + close DLS_suffix(gSufCells, gSufVals, gSufOwners, pxItem, gItemNext, + gEnd, gEndPrev, gList); + } else { + // pxItem is not the first/ left-most item in the list + // -> non-empty prefix + // (potentially empty suffix) + + int gItemIndex = index_of(pxItem, gCells); + split(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, pxItem, gItemIndex); + + assert( DLS(gEnd, gEndPrev, pxItem, ?gItemPrev, + ?gPrefCells, ?gPrefVals, ?gPrefOwners, gList) ); + // -> Will be wrapped inside the prefix constructed at the end of this + // lemma. 
+ + assert( DLS(pxItem, gItemPrev, gEnd, gEndPrev, + ?gPartCells, ?gPartVals, ?gPartOwners, gList) ); + // -> The tail of this DLS will make up the suffix constructed at the + // end of this lemma. + + // Notes on cell and val lists: + assert( length(gPartCells) == length(gPartVals) ); + assert( gPartCells == drop(gItemIndex, gCells) ); + assert( gPartVals == drop(gItemIndex, gVals) ); + + // Prove: `head(gPrefCells) == gEnd` + // Necessary to construct prefix later. + // Implies `mem(gItemPrev, gCells) == true`. + open DLS(gEnd, gEndPrev, pxItem, gItemPrev, + gPrefCells, gPrefVals, gPrefOwners, gList); + assert( head(gPrefCells) == gEnd ); + close DLS(gEnd, gEndPrev, pxItem, gItemPrev, + gPrefCells, gPrefVals, gPrefOwners, gList); + assert( mem(gItemPrev, gCells) == true ); + + open DLS(pxItem, gItemPrev, gEnd, gEndPrev, + gPartCells, gPartVals, gPartOwners, gList); + assert( xLIST_ITEM(pxItem, ?gItemVal, ?gItemNext, gItemPrev, ?gOw, gList) ); + + if( pxItem == gEndPrev ) { + // pxItem is the last/ right-most item in the list. + // -> empty suffix + assert( gItemNext == gEnd ); + + // prove: `mem(gItemNext, gCells) == true` + dls_first_mem(gEnd, gEndPrev, pxItem, gItemPrev, gPrefCells); + assert( mem(gItemNext, gPrefCells) == true ); + assert( gPrefCells == take(gItemIndex, gCells) ); + mem_prefix_implies_mem(gItemNext, gCells, gItemIndex); + assert( mem(gItemNext, gCells) == true ); + + + // prove: mem(gItemNext, gCells) == true + open xLIST_ITEM(pxItem, gItemVal, gItemNext, gItemPrev, gOw, + gList); + assert( gItemNext == gEnd ); + assert( mem(gItemNext, gCells) == true ); + close xLIST_ITEM(pxItem, gItemVal, gItemNext, gItemPrev, gOw, + gList); + + close DLS_prefix(gPrefCells, gPrefVals, gPrefOwners, pxItem, + gItemPrev, gEnd, gEndPrev, gList); + close DLS_suffix(nil, nil, nil, pxItem, gItemNext, + gEnd, gEndPrev, gList); + } else { + // pxItem is not the last/ right-most item in the list. 
+ // -> non-empty suffix + + assert( DLS(gItemNext, pxItem, gEnd, gEndPrev, + ?gSufCells, ?gSufVals, ?gSufOwners, gList) ); + assert( gSufCells == drop(1, gPartCells) ); + + // Prove: - `drop(gItemIndex+1, gCells) == gSufCells` + // - `drop(gItemIndex+1, gVals) == gSufVals` + // - `drop(gItemIndex+1, gOwners) == gSufOwners` + // -> Required to prove `mem(gItemNext, gCells) == true` and also to + // prove relationship between gCells/gVals and their segmentation. + assert( drop(1, drop(gItemIndex, gCells)) == gSufCells ); + assert( drop(1, drop(gItemIndex, gVals)) == gSufVals ); + assert( drop(1, drop(gItemIndex, gOwners)) == gSufOwners ); + drop_n_plus_m(gCells, 1, gItemIndex); + drop_n_plus_m(gVals, 1, gItemIndex); + drop_n_plus_m(gOwners, 1, gItemIndex); + assert( drop(gItemIndex+1, gCells) == gSufCells ); + assert( drop(gItemIndex+1, gVals) == gSufVals ); + assert( drop(gItemIndex+1, gOwners) == gSufOwners ); + + // Prove: `mem(gItemNext, gCells) == true` + open DLS(gItemNext, pxItem, gEnd, gEndPrev, + gSufCells, gSufVals, gSufOwners, gList); + assert( mem(gItemNext, gSufCells) == true ); + mem_suffix_implies_mem(gItemNext, gCells, gItemIndex+1); + assert( mem(gItemNext, gCells) == true ); + close DLS(gItemNext, pxItem, gEnd, gEndPrev, + gSufCells, gSufVals, gSufOwners, gList); + + close DLS_prefix(gPrefCells, gPrefVals, gPrefOwners, + pxItem, gItemPrev, gEnd, gEndPrev, gList); + dls_last_mem(gItemNext, pxItem, gEnd, gEndPrev, gSufCells); + close DLS_suffix(gSufCells, gSufVals, gSufOwners, pxItem, gItemNext, + gEnd, gEndPrev, gList); + } + } +} + +lemma void DLS_close_2(struct xLIST_ITEM* pxItem, + list gCells, + list gVals, + list gOwners) +requires + length(gCells) == length(gVals) &*& + DLS_prefix(?gPrefCells, ?gPrefVals, ?gPrefOwners, pxItem, ?gItemPrev, + ?gEnd, ?gEndPrev, ?gList) + &*& + gEnd == head(gCells) + &*& + xLIST_ITEM(pxItem, ?gItemVal, ?gItemNext, gItemPrev, ?gOw, gList) + &*& + DLS_suffix(?gSufCells, ?gSufVals, ?gSufOwners, pxItem, gItemNext, + 
gEnd, gEndPrev, gList) + &*& + // lists have form "prefix + element + suffix" + gCells == append(gPrefCells, append(singleton(pxItem), gSufCells)) &*& + gVals == append(gPrefVals, append(singleton(gItemVal), gSufVals)) &*& + gOwners == append(gPrefOwners, append(singleton(gOw), gSufOwners)) + &*& + // next in cells + mem(gItemNext, gCells) == true &*& + // prev in cells + mem(gItemPrev, gCells) == true + ; +ensures + DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gOwners, gList) &*& + mem(pxItem, gCells) == true &*& + mem(gItemNext, gCells) == true &*& + mem(gItemPrev, gCells) == true &*& + // length(gCells) == length(gVals) &*& + // length(gCells) > 1; + true; +{ + if( gPrefCells == nil ) { + // pxItem is first/ left-most item in the list + // -> empty prefix + + open DLS_prefix(gPrefCells, gPrefVals, gPrefOwners, pxItem, gItemPrev, + gEnd, gEndPrev, gList); + assert( pxItem == gEnd ); + assert( gPrefVals == nil ); + + if( gSufCells == nil ) { + // pxItem is last/ right-most item in the list + + open DLS_suffix(gSufCells, gSufVals, gSufOwners, pxItem, gItemNext, + gEnd, gEndPrev, gList); + assert( pxItem == gEndPrev ); + assert( gSufVals == nil ); + + close DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gOwners, + gList); + } else { + // pxItem is not last/ right-most item in the list + + open DLS_suffix(gSufCells, gSufVals, gSufOwners, pxItem, gItemNext, + gEnd, gEndPrev, gList); + close DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gOwners, + gList); + } + } else { + // pxItem is not the first/ left-most item in the list + // -> non-empty prefix + // (potentially empty suffix) + + open DLS_prefix(gPrefCells, gPrefVals, gPrefOwners, pxItem, gItemPrev, + gEnd, gEndPrev, gList); + + if( gSufCells == nil ) { + // pxItem is the last/ right-most item in the list + // -> empty suffix + + open DLS_suffix(gSufCells, gSufVals, gSufOwners, pxItem, gItemNext, + gEnd, gEndPrev, gList); + assert( pxItem == gEndPrev ); + close DLS(pxItem, gItemPrev, gEnd, 
gEndPrev, + singleton(pxItem), singleton(gItemVal), singleton(gOw), + gList); + join(gEnd, gEndPrev, pxItem, gItemPrev, gPrefCells, gPrefVals, + pxItem, gItemPrev, gEnd, gEndPrev, + singleton(pxItem), singleton(gItemVal)); + } else { + // pxItem is not the last/ right-most item in the list + // -> non-empty suffix + + open DLS_suffix(gSufCells, gSufVals, gSufOwners, pxItem, gItemNext, + gEnd, gEndPrev, gList); + close DLS(pxItem, gItemPrev, gEnd, gEndPrev, + cons(pxItem, gSufCells), cons(gItemVal, gSufVals), + cons(gOw, gSufOwners), + gList); + join(gEnd, gEndPrev, pxItem, gItemPrev, gPrefCells, gPrefVals, + pxItem, gItemPrev, gEnd, gEndPrev, + cons(pxItem, gSufCells), cons(gItemVal, gSufVals)); + } + } +} +@*/ + +struct xLIST_ITEM* lemma_validation__DLS_item_next_2(struct xLIST_ITEM* pxTaskItem) +/*@ requires + DLS(?gEnd, ?gEndPrev, gEnd, gEndPrev, ?gCells, ?gVals, ?gOwners, ?gList) &*& + mem(pxTaskItem, gCells) == true &*& + gEnd == head(gCells) &*& + length(gCells) == length(gVals) &*& + length(gOwners) == length(gCells) &*& + length(gCells) > 1; +@*/ +/*@ ensures + DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gOwners, gList) &*& + mem(pxTaskItem, gCells) == true &*& + mem(result, gCells) == true; +@*/ +{ + //@ struct xLIST_ITEM* gTaskItem_0 = pxTaskItem; + + // first iteration step + + //@ DLS_open_2(gTaskItem_0); + /*@ assert( xLIST_ITEM(gTaskItem_0, ?gTaskItem_0_val, + ?gTaskItem_0_next, ?gTaskItem_0_prev, ?gTaskItem_0_owner, + gList) ); + @*/ + pxTaskItem = pxTaskItem->pxNext; + //@ struct xLIST_ITEM* gTaskItem_1 = pxTaskItem; + + /*@ close xLIST_ITEM(gTaskItem_0, gTaskItem_0_val, + gTaskItem_0_next, gTaskItem_0_prev, gTaskItem_0_owner, + gList); + @*/ + //@ DLS_close_2(gTaskItem_0, gCells, gVals, gOwners); + + + // second iteration step + + //@ DLS_open_2(gTaskItem_1); + /*@ assert( xLIST_ITEM(gTaskItem_1, ?gTaskItem_1_val, + ?gTaskItem_1_next, ?gTaskItem_1_prev, ?gTaskItem_1_owner, + gList) ); + @*/ + pxTaskItem = pxTaskItem->pxNext; + //@ struct 
xLIST_ITEM* gTaskItem_2 = pxTaskItem; + + /*@ close xLIST_ITEM(gTaskItem_1, gTaskItem_1_val, + gTaskItem_1_next, gTaskItem_1_prev, gTaskItem_1_owner, + gList); + @*/ + //@ DLS_close_2(gTaskItem_1, gCells, gVals, gOwners); + + + //@ assert( mem(gTaskItem_2, gCells) == true ); + return pxTaskItem; +} + + +struct xLIST_ITEM* lemma_validation__DLS_item_prev_2(struct xLIST_ITEM* pxTaskItem) +/*@ requires + DLS(?gEnd, ?gEndPrev, gEnd, gEndPrev, ?gCells, ?gVals, ?gOwners, ?gList) &*& + mem(pxTaskItem, gCells) == true &*& + gEnd == head(gCells) &*& + length(gCells) == length(gVals) &*& + length(gOwners) == length(gCells) &*& + length(gCells) > 1; +@*/ +/*@ ensures + DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gOwners, gList) &*& + mem(pxTaskItem, gCells) == true &*& + mem(result, gCells) == true; +@*/ +{ + //@ struct xLIST_ITEM* gTaskItem_0 = pxTaskItem; + + // first iteration step + + //@ DLS_open_2(gTaskItem_0); + /*@ assert( xLIST_ITEM(gTaskItem_0, ?gTaskItem_0_val, + ?gTaskItem_0_next, ?gTaskItem_0_prev, ?gTaskItem_0_owner, + gList) ); + @*/ + pxTaskItem = pxTaskItem->pxPrevious; + //@ struct xLIST_ITEM* gTaskItem_1 = pxTaskItem; + + /*@ close xLIST_ITEM(gTaskItem_0, gTaskItem_0_val, + gTaskItem_0_next, gTaskItem_0_prev, gTaskItem_0_owner, + gList); + @*/ + //@ DLS_close_2(gTaskItem_0, gCells, gVals, gOwners); + + + // second iteration step + + //@ DLS_open_2(gTaskItem_1); + /*@ assert( xLIST_ITEM(gTaskItem_1, ?gTaskItem_1_val, + ?gTaskItem_1_next, ?gTaskItem_1_prev, ?gTaskItem_1_owner, + gList) ); + @*/ + pxTaskItem = pxTaskItem->pxPrevious; + //@ struct xLIST_ITEM* gTaskItem_2 = pxTaskItem; + + /*@ close xLIST_ITEM(gTaskItem_1, gTaskItem_1_val, + gTaskItem_1_next, gTaskItem_1_prev,gTaskItem_1_owner, + gList); + @*/ + //@ DLS_close_2(gTaskItem_1, gCells, gVals, gOwners); + + + //@ assert( mem(gTaskItem_2, gCells) == true ); + return pxTaskItem; +} + + + + + + + + + +#endif /* SCP_LIST_PREDICATES_EXTENDED_H */ \ No newline at end of file diff --git 
a/Test/VeriFast/tasks/vTaskSwitchContext/proof/stack_predicates.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/stack_predicates.h new file mode 100644 index 00000000000..3941beec858 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/stack_predicates.h @@ -0,0 +1,39 @@ +#ifndef STACK_PREDICATES +#define STACK_PREDICATES + + +/*@ +// Represents a stack that grows down (cf. RP2040 stack) +predicate stack_p(StackType_t * pxStack, + uint32_t ulStackDepth, + StackType_t * pxTopOfStack, + uint32_t ulFreeBytes, + uint32_t ulUsedCells, + uint32_t ulUnalignedBytes) = + malloc_block_chars((char*) pxStack, ulStackDepth * sizeof(StackType_t)) &*& + // Free stack cells. The size of this memory block is not necessarily a + // multiple of sizeof(StackType_t), due to bitvector arithmetic. + // At least, we cannot prove it. + chars((char*) pxStack, ulFreeBytes, _) &*& + //integer_(pxTopOfStack + sizeof(StackType_t), sizeof(StackType_t), false, _) &*&; + + // If there is any free memory left in this stack, + // pxTopOfStack points to the last sizeof(StackType_t) number of bytes. 
+ (char*) pxStack + ulFreeBytes == (char*) pxTopOfStack + sizeof(StackType_t) &*& + // Used stack cells + integers_(pxTopOfStack + 1, sizeof(StackType_t), false, ulUsedCells, _) &*& + // Unaligned rest + unalignedRestOfStack_p((char*) pxStack + ulFreeBytes + sizeof(StackType_t) * ulUsedCells, + ulUnalignedBytes) &*& + // `taskCHECK_FOR_STACK_OVERFLOW` macro on RP2040 port expects minimal stack size + ulFreeBytes >= 0 &*& + ulUsedCells >= 0 &*& + ulFreeBytes + ulUsedCells * sizeof(StackType_t) >= 4 * sizeof(StackType_t); + +predicate unalignedRestOfStack_p(char* p, uint32_t ulUnalignedBytes) = + chars(p, ulUnalignedBytes, _); +@*/ + + + +#endif /* STACK_PREDICATES */ \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/task_predicates.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/task_predicates.h new file mode 100644 index 00000000000..7d63cbffb1f --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/task_predicates.h @@ -0,0 +1,70 @@ +#ifndef TASKS_GH + +#define TASKS_GH + +#include "single_core_proofs/scp_list_predicates.h" + + +/*@ +// This predicate represents the memory corresponding to an +// initialised instance of type `TCB_t` aka `tskTaskControlBlock`. +// The predicate itself is not used during the verification of +// `vTaskSwitchContext`. However, we keep it around to allow proof authors to +// validate that the predicates below indeed capture specific segments of a TCB. 
+predicate TCB_p(TCB_t * tcb, uint32_t ulFreeBytesOnStack) = + malloc_block_tskTaskControlBlock(tcb) &*& + tcb->pxStack |-> ?stackPtr &*& + tcb->pxTopOfStack |-> ?topPtr &*& + stack_p(stackPtr, ?ulStackDepth, topPtr, + ulFreeBytesOnStack, ?ulUsedCells, ?ulUnalignedBytes) &*& + + xLIST_ITEM(&tcb->xStateListItem, _, _, _, _, _) &*& + struct_xLIST_ITEM_padding(&tcb->xStateListItem) &*& + xLIST_ITEM(&tcb->xEventListItem, _, _, _, _, _) &*& + struct_xLIST_ITEM_padding(&tcb->xEventListItem) &*& + + tcb->uxPriority |-> _ &*& + + tcb->xTaskRunState |-> ?gTaskRunState &*& + tcb->xIsIdle |-> _ &*& + + // Assumes macro `configMAX_TASK_NAME_LEN` evaluates to 16. + chars_(tcb->pcTaskName, 16, _) &*& + + tcb->uxCriticalNesting |-> ?uxCriticalNesting &*& + tcb->uxTCBNumber |-> _ &*& + tcb->uxTaskNumber |-> _ &*& + tcb->uxBasePriority |-> _ &*& + tcb->uxMutexesHeld |-> _ &*& + + // void * pvThreadLocalStoragePointers[ 5 ]; + pointers(tcb->pvThreadLocalStoragePointers, 5, _) &*& + + // We assume that the macro `configTASK_NOTIFICATION_ARRAY_ENTRIES` + // evaluates to 1. + integers_(tcb->ulNotifiedValue, 4, false, 1, _) &*& + uchars((unsigned char*) tcb->ucNotifyState, 1, _) &*& + + tcb->ucDelayAborted |-> _; +@*/ + +/*@ +// This predicate represents write access to a TCB's stack. +predicate TCB_stack_p(TCB_t* tcb, uint32_t ulFreeBytesOnStack) = + tcb->pxStack |-> ?stackPtr &*& + tcb->pxTopOfStack |-> ?topPtr &*& + stack_p(stackPtr, ?ulStackDepth, topPtr, + ulFreeBytesOnStack, ?ulUsedCells, ?ulUnalignedBytes); + +// This predicate represents write access to the run state of a TCB. +predicate TCB_runState_p(TCB_t* tcb, TaskRunning_t state;) = + tcb->xTaskRunState |-> state; + +// This predicate represents write access to the nesting level of a TCB. +// Entering a critical section increases the nesting level. Leaving it, +// decreases it. 
+predicate TCB_criticalNesting_p(TCB_t* tcb, UBaseType_t uxCriticalNesting) = + tcb->uxCriticalNesting |-> uxCriticalNesting; +@*/ + +#endif /* TASKS_GH */ \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/task_running_states.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/task_running_states.h new file mode 100644 index 00000000000..80d8c7a5775 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/task_running_states.h @@ -0,0 +1,46 @@ +#ifndef TASK_RUNNING_STATES_H +#define TASK_RUNNING_STATES_H + +/* The source file `tasks.c` defines macros to denote the running states of + * tasks: + * - `taskTASK_NOT_RUNNING` == -1 + * - `taskTASK_YIELDING` == -2 + * - state >= 0 => task is running on core with ID `state` + * We cannot import these definitions into our proof headers. Hence, we define + * our own macros and prove in `tasks.c` that they match. + */ + +#include "portmacro.h" // defines `BaseType_t` + +/* Indicates that the task is not actively running on any core. */ +//VF_macro #define taskTASK_NOT_RUNNING ( BaseType_t ) ( -1 ) + +/* Indicates that the task is actively running but scheduled to yield. */ +//VF_macro #define taskTASK_YIELDING ( BaseType_t ) ( -2 ) + + +/* Verify that the preprocessor and our VeriFast proofs evaluate + * `taskTASK_NOT_RUNNING` to the same values. + */ +void validate_taskTASK_NOT_RUNNING_value() +//@ requires true; +//@ ensures true; +{ + //@ TaskRunning_t gVal = taskTASK_NOT_RUNNING; + TaskRunning_t val = taskTASK_NOT_RUNNING; + //@ assert( gVal == val ); +} + +/* Verify that the preprocessor and our VeriFast proofs evaluate + * `taskTASK_YIELDING` to the same values. 
+ */ +void validate_taskTASK_YIELDING_value() +//@ requires true; +//@ ensures true; +{ + //@ TaskRunning_t gVal = taskTASK_YIELDING; + TaskRunning_t val = taskTASK_YIELDING; + //@ assert( gVal == val ); +} + +#endif /* TASK_RUNNING_STATES_H */ \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof/verifast_lists_extended.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof/verifast_lists_extended.h new file mode 100644 index 00000000000..448a020c376 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof/verifast_lists_extended.h @@ -0,0 +1,116 @@ +#ifndef VERIFAST_LISTS_EXTENDED_H +#define VERIFAST_LISTS_EXTENDED_H + +/* This file contains lemmas that would fit `list.gh` which is part + * of VeriFast's standard library. + */ + +// Most of the following lemmas are axioms. + + +/*@ +lemma void head_drop_n_equals_nths(list xs, int n); +requires n >= 0; +ensures head(drop(n, xs)) == nth(n, xs); + +lemma void drop_index_equals_singleton_implies_last_element(list xs, t x); +requires drop(index_of(x, xs), xs) == cons(x, nil); +ensures index_of(x, xs) == length(xs) - 1; + +lemma void nth_index(list xs, t x); +requires mem(x, xs) == true; +ensures nth(index_of(x, xs), xs) == x; + +lemma void mem_prefix_implies_mem(t x, list xs, int n); +requires mem(x, take(n, xs)) == true; +ensures mem(x, xs) == true; + +lemma void mem_suffix_implies_mem(t x, list xs, int n); +requires mem(x, drop(n, xs)) == true; +ensures mem(x, xs) == true; + +lemma void drop_n_plus_m(list xs, int n, int m); +requires true; +ensures drop(n, drop(m, xs)) == drop(n + m, xs); + + +fixpoint bool superset(list super, list sub) { + return subset(sub, super); +} + + +lemma void update_out_of_bounds(int index, t x, list xs) +requires (index < 0 || index >= length(xs)); +ensures update(index, x, xs) == xs; +{ + switch(xs) { + case nil: // nothing to do + case cons(h, rest): { + update_out_of_bounds(index-1, x, rest); + } + } +} + +lemma void index_of_different(t x1, t x2, 
list xs) +requires x1 != x2 &*& mem(x1, xs) == true &*& mem(x2, xs) == true; +ensures index_of(x1, xs) != index_of(x2, xs); +{ + switch(xs) { + case nil: + case cons(h, rest): + if(h != x1 && h != x2) { + index_of_different(x1, x2, rest); + } + } +} + +lemma void remove_result_subset(t x, list xs); +requires true; +ensures subset(remove(x, xs), xs) == true; + +lemma void append_take_nth_drop(int n, list xs); +requires 0 <= n &*& n < length(xs); +ensures xs == append( take(n, xs), cons(nth(n, xs), drop(n+1, xs)) ); + +// Note: `listex.gh` contains lemma `forall_drop` but no corresponding +// `forall_take`. +lemma void forall_take(list xs, fixpoint(t, bool) p, int i); + requires forall(xs, p) == true; + ensures forall(take(i, xs), p) == true; + +lemma void forall_mem_implies_superset(list super, list sub); +requires forall(sub, (mem_list_elem)(super)) == true; +ensures superset(super, sub) == true; + +lemma void subset_implies_forall_mem(list sub, list super); +requires subset(sub, super) == true; +ensures forall(sub, (mem_list_elem)(super)) == true; + +lemma void forall_remove(t x, list xs, fixpoint(t, bool) p); +requires forall(xs, p) == true; +ensures forall(remove(x, xs), p) == true; + +lemma void forall_remove_nth(int n, list xs, fixpoint(t, bool) p); +requires forall(xs, p) == true; +ensures forall(remove_nth(n, xs), p) == true; + +lemma void nth_implies_mem(int n, list xs); +requires 0 <= n &*& n < length(xs); +ensures mem(nth(n, xs), xs) == true; + +lemma void subset_append(list sub1, list sub2, list super); +requires subset(sub1, super) == true &*& subset(sub2, super) == true; +ensures subset(append(sub1, sub2), super) == true; + +lemma void subset_take(int i, list xs); +requires true; +ensures subset(take(i, xs), xs) == true; + +lemma void subset_drop(int i, list xs); +requires true; +ensures subset(drop(i, xs), xs) == true; +@*/ + + + +#endif /* VERIFAST_LISTS_EXTENDED_H */ \ No newline at end of file diff --git 
a/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/FreeRTOSConfig.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/FreeRTOSConfig.h new file mode 100644 index 00000000000..1d4e280209c --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/FreeRTOSConfig.h @@ -0,0 +1,143 @@ +/* This is a stub used for the VeriFast proof. */ + +/* + * FreeRTOS V202107.00 + * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * http://www.FreeRTOS.org + * http://aws.amazon.com/freertos + * + * 1 tab == 4 spaces! + */ + +#ifndef FREERTOS_CONFIG_H +#define FREERTOS_CONFIG_H + +/*----------------------------------------------------------- + * Application specific definitions. + * + * These definitions should be adjusted for your particular hardware and + * application requirements. 
+ * + * THESE PARAMETERS ARE DESCRIBED WITHIN THE 'CONFIGURATION' SECTION OF THE + * FreeRTOS API DOCUMENTATION AVAILABLE ON THE FreeRTOS.org WEB SITE. + * + * See http://www.freertos.org/a00110.html + *----------------------------------------------------------*/ + +/* Scheduler Related */ +#define configUSE_PREEMPTION 1 +#define configUSE_TICKLESS_IDLE 0 +#define configUSE_IDLE_HOOK 0 +#define configUSE_TICK_HOOK 1 +#define configTICK_RATE_HZ ( ( TickType_t ) 1000 ) +#define configMAX_PRIORITIES 32 +#define configMINIMAL_STACK_SIZE ( configSTACK_DEPTH_TYPE ) 256 +#define configUSE_16_BIT_TICKS 0 + +#define configIDLE_SHOULD_YIELD 1 + +/* Synchronization Related */ +#define configUSE_MUTEXES 1 +#define configUSE_RECURSIVE_MUTEXES 1 +#define configUSE_APPLICATION_TASK_TAG 0 +#define configUSE_COUNTING_SEMAPHORES 1 +#define configQUEUE_REGISTRY_SIZE 8 +#define configUSE_QUEUE_SETS 1 +#define configUSE_TIME_SLICING 1 +#define configUSE_NEWLIB_REENTRANT 0 +#define configENABLE_BACKWARD_COMPATIBILITY 0 +#define configNUM_THREAD_LOCAL_STORAGE_POINTERS 5 + +/* System */ +#define configSTACK_DEPTH_TYPE uint32_t +#define configMESSAGE_BUFFER_LENGTH_TYPE size_t + +/* Memory allocation related definitions. */ +#define configSUPPORT_STATIC_ALLOCATION 0 +#define configSUPPORT_DYNAMIC_ALLOCATION 1 +#define configTOTAL_HEAP_SIZE (128*1024) +#define configAPPLICATION_ALLOCATED_HEAP 0 + +/* Hook function related definitions. */ +#define configCHECK_FOR_STACK_OVERFLOW 2 +#define configUSE_MALLOC_FAILED_HOOK 1 +#define configUSE_DAEMON_TASK_STARTUP_HOOK 0 + +/* Run time and task stats gathering related definitions. */ +#define configGENERATE_RUN_TIME_STATS 0 +#define configUSE_TRACE_FACILITY 1 +#define configUSE_STATS_FORMATTING_FUNCTIONS 0 + +/* Co-routine related definitions. */ +#define configUSE_CO_ROUTINES 0 +#define configMAX_CO_ROUTINE_PRIORITIES 1 + +/* Software timer related definitions. 
*/ +#define configUSE_TIMERS 1 +#define configTIMER_TASK_PRIORITY ( configMAX_PRIORITIES - 1 ) +#define configTIMER_QUEUE_LENGTH 10 +#define configTIMER_TASK_STACK_DEPTH 1024 + +/* Interrupt nesting behaviour configuration. */ +/* +#define configKERNEL_INTERRUPT_PRIORITY [dependent of processor] +#define configMAX_SYSCALL_INTERRUPT_PRIORITY [dependent on processor and application] +#define configMAX_API_CALL_INTERRUPT_PRIORITY [dependent on processor and application] +*/ + +/* SMP port only */ +#define configNUM_CORES 100 +#define configTICK_CORE 1 +#define configRUN_MULTIPLE_PRIORITIES 1 + +/* RP2040 specific */ +#define configSUPPORT_PICO_SYNC_INTEROP 1 +#define configSUPPORT_PICO_TIME_INTEROP 1 + +#ifndef VERIFAST + /* Reason for rewrite: VeriFast does not accept duplicate fct prototypes. */ + #include +#endif /* VERIFAST */ +/* Define to trap errors during development. */ +#define configASSERT(x) assert(x) + +/* Set the following definitions to 1 to include the API function, or zero +to exclude the API function. */ +#define INCLUDE_vTaskPrioritySet 1 +#define INCLUDE_uxTaskPriorityGet 1 +#define INCLUDE_vTaskDelete 1 +#define INCLUDE_vTaskSuspend 1 +#define INCLUDE_vTaskDelayUntil 1 +#define INCLUDE_vTaskDelay 1 +#define INCLUDE_xTaskGetSchedulerState 1 +#define INCLUDE_xTaskGetCurrentTaskHandle 1 +#define INCLUDE_uxTaskGetStackHighWaterMark 1 +#define INCLUDE_xTaskGetIdleTaskHandle 1 +#define INCLUDE_eTaskGetState 1 +#define INCLUDE_xTimerPendFunctionCall 1 +#define INCLUDE_xTaskAbortDelay 1 +#define INCLUDE_xTaskGetHandle 1 +#define INCLUDE_xTaskResumeFromISR 1 +#define INCLUDE_xQueueGetMutexHolder 1 + +/* A header file that defines trace macro can be included here. 
*/ + +#endif /* FREERTOS_CONFIG_H */ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/asm.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/asm.h new file mode 100644 index 00000000000..83da579c183 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/asm.h @@ -0,0 +1,35 @@ +#ifndef ASM_H +#define ASM_H + +/* VeriFast does not support inline assembler. + * The following definitions replace macros that would normally evaluate to + * inline assember by failing assertions. + */ + +/* VeriFast treats `assert` as keyword and does not support calling it + * in many contexts where function calls are permitted. */ +bool assert_fct(bool b, const char*) +{ + assert(b); + return b; +} + +// Port macros were originally defined in `portmacro.h`. + +#undef portCHECK_IF_IN_ISR +#define portCHECK_IF_IN_ISR() assert_fct(false, "portCHECK_IF_IN_ISR") + +/* Additional reason for rewrite: + * VeriFast does not support embedding block statements that consist of + * multiple elemts in expression contexts, e.g., `({e1; e2})`. + */ +#undef portSET_INTERRUPT_MASK_FROM_ISR +#define portSET_INTERRUPT_MASK_FROM_ISR() assert_fct(false, "portSET_INTERRUPT_MASK_FROM_ISR") + +#undef portRESTORE_INTERRUPTS +#define portRESTORE_INTERRUPTS(ulState) assert_fct(false, "portRESTORE_INTERRUPTS") + +//#undef portDISABLE_INTERRUPTS +//#define portDISABLE_INTERRUPTS() assert_fct(false, "portDISABLE_INTERRUPTS") + +#endif /* ASM_H */ \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/README.md b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/README.md new file mode 100644 index 00000000000..74d8029f2a4 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/README.md @@ -0,0 +1 @@ +This directory contains files that would normally be generated during the build. 
diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/README.md b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/README.md new file mode 100644 index 00000000000..f17fa89d613 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/README.md @@ -0,0 +1,3 @@ +This directory contains files that would normally be generated during the build +and placed into +`$FREERTOS_SMP_DEMO_DIR/FreeRTOS/Demo/CORTEX_M0+_RP2040/build/generated/pico_base/pico` diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/config_autogen.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/config_autogen.h new file mode 100644 index 00000000000..013a80dfc35 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/config_autogen.h @@ -0,0 +1,19 @@ +/* This is a stub used for the VeriFast proof. */ + +// AUTOGENERATED FROM PICO_CONFIG_HEADER_FILES and then PICO__CONFIG_HEADER_FILES +// DO NOT EDIT! + + +// based on PICO_CONFIG_HEADER_FILES: + +#ifdef VERIFAST + /* Reason for rewrite: VeriFast cannot handle absolute include paths. */ + #include "freertos_sdk_config.h" + #include "boards/pico.h" + + // based on PICO_RP2040_CONFIG_HEADER_FILES: + + #include "cmsis/rename_exceptions.h" +#else + // Generated include directives with absolute paths. +#endif diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/version.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/version.h new file mode 100644 index 00000000000..1bb2f11061f --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/generated/pico_base/pico/version.h @@ -0,0 +1,21 @@ +/* This is a stub used for the VeriFast proof. */ + +/* + * Copyright (c) 2020 Raspberry Pi (Trading) Ltd. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +// --------------------------------------- +// THIS FILE IS AUTOGENERATED; DO NOT EDIT +// --------------------------------------- + +#ifndef _PICO_VERSION_H +#define _PICO_VERSION_H + +#define PICO_SDK_VERSION_MAJOR 1 +#define PICO_SDK_VERSION_MINOR 4 +#define PICO_SDK_VERSION_REVISION 0 +#define PICO_SDK_VERSION_STRING "1.4.0" + +#endif diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/proof_defs.h b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/proof_defs.h new file mode 100644 index 00000000000..25fefd3bb63 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/proof_setup/proof_defs.h @@ -0,0 +1,27 @@ +/* + * This file contains defines to configure the VeriFast proof setup. + * + */ + + +#ifndef PROOF_DEFS_H + // Delete keywords VeriFast canot parse (in some contexts) + #define inline + #define __always_inline + + /* `projdefs.h` defines `pdFALSE` and `pdTRUE` as 0 and 1 of type + * `BaseType_t`. Both are assigned to variables smaller or + * unsigned types. While that's safe in practice, it is not + * type safe. 
Hence we define explicitly typed variants below.
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/* Copyright 1995 NeXT Computer, Inc. All rights reserved. */ +/* + * Copyright (c) 1991, 1993 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Berkeley Software Design, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cdefs.h 8.8 (Berkeley) 1/9/95 + */ + +#ifndef _CDEFS_H_ +#define _CDEFS_H_ + +/* Verifast proof setup */ +#ifdef VERIFAST + /* + * The proof setup header is already included at the top of the proof target, + * e.g., `tasks.c`. But it seems like the contained defines are not propagated + * to this file. 
+ */ + #include "proof_defs.h" +#endif + +#if defined(__cplusplus) +#define __BEGIN_DECLS extern "C" { +#define __END_DECLS } +#else +#define __BEGIN_DECLS +#define __END_DECLS +#endif + +/* This SDK is designed to work with clang and specific versions of + * gcc >= 4.0 with Apple's patch sets */ +#if !defined(__GNUC__) || __GNUC__ < 4 +#warning "Unsupported compiler detected" +#endif + +/* + * Compatibility with compilers and environments that don't support compiler + * feature checking function-like macros. + */ +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif +#ifndef __has_include +#define __has_include(x) 0 +#endif +#ifndef __has_feature +#define __has_feature(x) 0 +#endif +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + +/* + * The __CONCAT macro is used to concatenate parts of symbol names, e.g. + * with "#define OLD(foo) __CONCAT(old,foo)", OLD(foo) produces oldfoo. + * The __CONCAT macro is a bit tricky -- make sure you don't put spaces + * in between its arguments. __CONCAT can also concatenate double-quoted + * strings produced by the __STRING macro, but this only works with ANSI C. 
+ */ +#if defined(__STDC__) || defined(__cplusplus) +#define __P(protos) protos /* full-blown ANSI C */ +#define __CONCAT(x, y) x ## y +#define __STRING(x) #x + +#define __const const /* define reserved names to standard */ +#define __signed signed +#define __volatile volatile +#if defined(__cplusplus) +#define __inline inline /* convert to C++ keyword */ +#else +#ifndef __GNUC__ +#define __inline /* delete GCC keyword */ +#endif /* !__GNUC__ */ +#endif /* !__cplusplus */ + +#else /* !(__STDC__ || __cplusplus) */ +#define __P(protos) () /* traditional C preprocessor */ +#define __CONCAT(x, y) x /**/ y +#define __STRING(x) "x" + +#ifndef __GNUC__ +#define __const /* delete pseudo-ANSI C keywords */ +#define __inline +#define __signed +#define __volatile +#endif /* !__GNUC__ */ + +/* + * In non-ANSI C environments, new programs will want ANSI-only C keywords + * deleted from the program and old programs will want them left alone. + * When using a compiler other than gcc, programs using the ANSI C keywords + * const, inline etc. as normal identifiers should define -DNO_ANSI_KEYWORDS. + * When using "gcc -traditional", we assume that this is the intent; if + * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone. + */ +#ifndef NO_ANSI_KEYWORDS +#define const __const /* convert ANSI C keywords */ +#define inline __inline +#define signed __signed +#define volatile __volatile +#endif /* !NO_ANSI_KEYWORDS */ +#endif /* !(__STDC__ || __cplusplus) */ + +/* + * __pure2 can be used for functions that are only a function of their scalar + * arguments (meaning they can't dereference pointers). + * + * __stateful_pure can be used for functions that have no side effects, + * but depend on the state of the memory. 
+ */ +#define __dead2 __attribute__((__noreturn__)) +#define __pure2 __attribute__((__const__)) +#define __stateful_pure __attribute__((__pure__)) + +/* __unused denotes variables and functions that may not be used, preventing + * the compiler from warning about it if not used. + */ +#define __unused __attribute__((__unused__)) + +/* __used forces variables and functions to be included even if it appears + * to the compiler that they are not used (and would thust be discarded). + */ +#define __used __attribute__((__used__)) + +/* __cold marks code used for debugging or that is rarely taken + * and tells the compiler to optimize for size and outline code. + */ +#if __has_attribute(cold) +#define __cold __attribute__((__cold__)) +#else +#define __cold +#endif + +/* __exported denotes symbols that should be exported even when symbols + * are hidden by default. + * __exported_push/_exported_pop are pragmas used to delimit a range of + * symbols that should be exported even when symbols are hidden by default. + */ +#define __exported __attribute__((__visibility__("default"))) +#define __exported_push _Pragma("GCC visibility push(default)") +#define __exported_pop _Pragma("GCC visibility pop") + +/* __deprecated causes the compiler to produce a warning when encountering + * code using the deprecated functionality. + * __deprecated_msg() does the same, and compilers that support it will print + * a message along with the deprecation warning. + * This may require turning on such warning with the -Wdeprecated flag. + * __deprecated_enum_msg() should be used on enums, and compilers that support + * it will print the deprecation warning. + * __kpi_deprecated() specifically indicates deprecation of kernel programming + * interfaces in Kernel.framework used by KEXTs. 
+ */ +#define __deprecated __attribute__((__deprecated__)) + +#if __has_extension(attribute_deprecated_with_message) || \ + (defined(__GNUC__) && ((__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)))) + #define __deprecated_msg(_msg) __attribute__((__deprecated__(_msg))) +#else + #define __deprecated_msg(_msg) __attribute__((__deprecated__)) +#endif + +#if __has_extension(enumerator_attributes) + #define __deprecated_enum_msg(_msg) __deprecated_msg(_msg) +#else + #define __deprecated_enum_msg(_msg) +#endif + +#define __kpi_deprecated(_msg) __deprecated_msg(_msg) + +/* __unavailable causes the compiler to error out when encountering + * code using the tagged function + */ +#if __has_attribute(unavailable) +#define __unavailable __attribute__((__unavailable__)) +#else +#define __unavailable +#endif + +#define __kpi_unavailable __unavailable + +#if defined(__arm64__) +#define __kpi_deprecated_arm64_macos_unavailable __unavailable +#else +#define __kpi_deprecated_arm64_macos_unavailable __deprecated +#endif /* XNU_KERNEL_PRIVATE */ + +/* Delete pseudo-keywords wherever they are not available or needed. */ +#ifndef __dead +#define __dead +#define __pure +#endif + +/* + * We use `__restrict' as a way to define the `restrict' type qualifier + * without disturbing older software that is unaware of C99 keywords. + */ +#if __STDC_VERSION__ < 199901 +#define __restrict +#else +#define __restrict restrict +#endif + +/* Compatibility with compilers and environments that don't support the + * nullability feature. 
+ */ + +#if !__has_feature(nullability) +#ifndef __nullable +#define __nullable +#endif +#ifndef __nonnull +#define __nonnull +#endif +#ifndef __null_unspecified +#define __null_unspecified +#endif +#ifndef _Nullable +#define _Nullable +#endif +#ifndef _Nonnull +#define _Nonnull +#endif +#ifndef _Null_unspecified +#define _Null_unspecified +#endif +#endif + +/* + * __disable_tail_calls causes the compiler to not perform tail call + * optimization inside the marked function. + */ +#if __has_attribute(disable_tail_calls) +#define __disable_tail_calls __attribute__((__disable_tail_calls__)) +#else +#define __disable_tail_calls +#endif + +/* + * __not_tail_called causes the compiler to prevent tail call optimization + * on statically bound calls to the function. It has no effect on indirect + * calls. Virtual functions, objective-c methods, and functions marked as + * "always_inline" cannot be marked as __not_tail_called. + */ +#if __has_attribute(not_tail_called) +#define __not_tail_called __attribute__((__not_tail_called__)) +#else +#define __not_tail_called +#endif + +/* + * __result_use_check warns callers of a function that not using the function + * return value is a bug, i.e. dismissing malloc() return value results in a + * memory leak. + */ +#if __has_attribute(warn_unused_result) +#define __result_use_check __attribute__((__warn_unused_result__)) +#else +#define __result_use_check +#endif + +/* + * __swift_unavailable causes the compiler to mark a symbol as specifically + * unavailable in Swift, regardless of any other availability in C. + */ +#if __has_feature(attribute_availability_swift) +#define __swift_unavailable(_msg) __attribute__((__availability__(swift, unavailable, message=_msg))) +#else +#define __swift_unavailable(_msg) +#endif + +/* + * __abortlike is the attribute to put on functions like abort() that are + * typically used to mark assertions. These optimize the codegen + * for outlining while still maintaining debugability. 
+ */ +#ifndef __abortlike +#define __abortlike __dead2 __cold __not_tail_called +#endif + +/* Declaring inline functions within headers is error-prone due to differences + * across various versions of the C language and extensions. __header_inline + * can be used to declare inline functions within system headers. In cases + * where you want to force inlining instead of letting the compiler make + * the decision, you can use __header_always_inline. + * + * Be aware that using inline for functions which compilers may also provide + * builtins can behave differently under various compilers. If you intend to + * provide an inline version of such a function, you may want to use a macro + * instead. + * + * The check for !__GNUC__ || __clang__ is because gcc doesn't correctly + * support c99 inline in some cases: + * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=55965 + */ + +#if defined(__cplusplus) || \ + (__STDC_VERSION__ >= 199901L && \ + !defined(__GNUC_GNU_INLINE__) && \ + (!defined(__GNUC__) || defined(__clang__))) +# define __header_inline inline +#elif defined(__GNUC__) && defined(__GNUC_STDC_INLINE__) +# define __header_inline extern __inline __attribute__((__gnu_inline__)) +#elif defined(__GNUC__) +# define __header_inline extern __inline +#else +/* If we land here, we've encountered an unsupported compiler, + * so hopefully it understands static __inline as a fallback. + */ +# define __header_inline static __inline +#endif + +#ifdef __GNUC__ +# define __header_always_inline __header_inline __attribute__ ((__always_inline__)) +#else +/* Unfortunately, we're using a compiler that we don't know how to force to + * inline. Oh well. + */ +# define __header_always_inline __header_inline +#endif + +/* + * Compiler-dependent macros that bracket portions of code where the + * "-Wunreachable-code" warning should be ignored. Please use sparingly. 
+ */ +#if defined(__clang__) +# define __unreachable_ok_push \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wunreachable-code\"") +# define __unreachable_ok_pop \ + _Pragma("clang diagnostic pop") +#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +# define __unreachable_ok_push \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wunreachable-code\"") +# define __unreachable_ok_pop \ + _Pragma("GCC diagnostic pop") +#else +# define __unreachable_ok_push +# define __unreachable_ok_pop +#endif + +/* + * Compiler-dependent macros to declare that functions take printf-like + * or scanf-like arguments. They are null except for versions of gcc + * that are known to support the features properly. Functions declared + * with these attributes will cause compilation warnings if there is a + * mismatch between the format string and subsequent function parameter + * types. + */ +#define __printflike(fmtarg, firstvararg) \ + __attribute__((__format__ (__printf__, fmtarg, firstvararg))) +#define __printf0like(fmtarg, firstvararg) \ + __attribute__((__format__ (__printf0__, fmtarg, firstvararg))) +#define __scanflike(fmtarg, firstvararg) \ + __attribute__((__format__ (__scanf__, fmtarg, firstvararg))) +#define __osloglike(fmtarg, firstvararg) \ + __attribute__((__format__ (__os_log__, fmtarg, firstvararg))) + +#define __IDSTRING(name, string) static const char name[] __used = string + +#ifndef __COPYRIGHT +#define __COPYRIGHT(s) __IDSTRING(copyright,s) +#endif + +#ifndef __RCSID +#define __RCSID(s) __IDSTRING(rcsid,s) +#endif + +#ifndef __SCCSID +#define __SCCSID(s) __IDSTRING(sccsid,s) +#endif + +#ifndef __PROJECT_VERSION +#define __PROJECT_VERSION(s) __IDSTRING(project_version,s) +#endif + +/* Source compatibility only, ID string not emitted in object file */ +#ifndef __FBSDID +#define __FBSDID(s) +#endif + +#ifndef __DECONST +#define __DECONST(type, var) __CAST_AWAY_QUALIFIER(var, const, 
type) +#endif + +#ifndef __DEVOLATILE +#define __DEVOLATILE(type, var) __CAST_AWAY_QUALIFIER(var, volatile, type) +#endif + +#ifndef __DEQUALIFY +#define __DEQUALIFY(type, var) __CAST_AWAY_QUALIFIER(var, const volatile, type) +#endif + +/* + * __alloc_size can be used to label function arguments that represent the + * size of memory that the function allocates and returns. The one-argument + * form labels a single argument that gives the allocation size (where the + * arguments are numbered from 1): + * + * void *malloc(size_t __size) __alloc_size(1); + * + * The two-argument form handles the case where the size is calculated as the + * product of two arguments: + * + * void *calloc(size_t __count, size_t __size) __alloc_size(1,2); + */ +#ifndef __alloc_size +#if __has_attribute(alloc_size) +#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) +#else +#define __alloc_size(...) +#endif +#endif // __alloc_size + +/* + * COMPILATION ENVIRONMENTS -- see compat(5) for additional detail + * + * DEFAULT By default newly complied code will get POSIX APIs plus + * Apple API extensions in scope. + * + * Most users will use this compilation environment to avoid + * behavioral differences between 32 and 64 bit code. + * + * LEGACY Defining _NONSTD_SOURCE will get pre-POSIX APIs plus Apple + * API extensions in scope. + * + * This is generally equivalent to the Tiger release compilation + * environment, except that it cannot be applied to 64 bit code; + * its use is discouraged. + * + * We expect this environment to be deprecated in the future. + * + * STRICT Defining _POSIX_C_SOURCE or _XOPEN_SOURCE restricts the + * available APIs to exactly the set of APIs defined by the + * corresponding standard, based on the value defined. + * + * A correct, portable definition for _POSIX_C_SOURCE is 200112L. + * A correct, portable definition for _XOPEN_SOURCE is 600L. 
+ * + * Apple API extensions are not visible in this environment, + * which can cause Apple specific code to fail to compile, + * or behave incorrectly if prototypes are not in scope or + * warnings about missing prototypes are not enabled or ignored. + * + * In any compilation environment, for correct symbol resolution to occur, + * function prototypes must be in scope. It is recommended that all Apple + * tools users add either the "-Wall" or "-Wimplicit-function-declaration" + * compiler flags to their projects to be warned when a function is being + * used without a prototype in scope. + */ + +/* These settings are particular to each product. */ +#define __DARWIN_ONLY_64_BIT_INO_T 0 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 0 +#define __DARWIN_ONLY_VERS_1050 0 +#if defined(__x86_64__) +#define __DARWIN_SUF_DARWIN14 "_darwin14" +#define __DARWIN14_ALIAS(sym) __asm("_" __STRING(sym) __DARWIN_SUF_DARWIN14) +#else +#define __DARWIN14_ALIAS(sym) +#endif + +/* + * The __DARWIN_ALIAS macros are used to do symbol renaming; they allow + * legacy code to use the old symbol, thus maintaining binary compatibility + * while new code can use a standards compliant version of the same function. + * + * __DARWIN_ALIAS is used by itself if the function signature has not + * changed, it is used along with a #ifdef check for __DARWIN_UNIX03 + * if the signature has changed. Because the __LP64__ environment + * only supports UNIX03 semantics it causes __DARWIN_UNIX03 to be + * defined, but causes __DARWIN_ALIAS to do no symbol mangling. + * + * As a special case, when XCode is used to target a specific version of the + * OS, the manifest constant __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ + * will be defined by the compiler, with the digits representing major version + * time 100 + minor version times 10 (e.g. 10.5 := 1050). If we are targeting + * pre-10.5, and it is the default compilation environment, revert the + * compilation environment to pre-__DARWIN_UNIX03. 
+ */ +#if !defined(__DARWIN_UNIX03) +# define __DARWIN_UNIX03 0 +#endif /* !__DARWIN_UNIX03 */ + +#if !defined(__DARWIN_64_BIT_INO_T) +# define __DARWIN_64_BIT_INO_T 0 +#endif /* !__DARWIN_64_BIT_INO_T */ + +#if !defined(__DARWIN_VERS_1050) +# define __DARWIN_VERS_1050 0 +#endif /* !__DARWIN_VERS_1050 */ + +#if !defined(__DARWIN_NON_CANCELABLE) +# define __DARWIN_NON_CANCELABLE 0 +#endif /* !__DARWIN_NON_CANCELABLE */ + +/* + * symbol suffixes used for symbol versioning + */ +#if __DARWIN_UNIX03 +# if __DARWIN_ONLY_UNIX_CONFORMANCE +# define __DARWIN_SUF_UNIX03 /* nothing */ +# else /* !__DARWIN_ONLY_UNIX_CONFORMANCE */ +# define __DARWIN_SUF_UNIX03 "$UNIX2003" +# endif /* __DARWIN_ONLY_UNIX_CONFORMANCE */ + +# if __DARWIN_64_BIT_INO_T +# if __DARWIN_ONLY_64_BIT_INO_T +# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ +# else /* !__DARWIN_ONLY_64_BIT_INO_T */ +# define __DARWIN_SUF_64_BIT_INO_T "$INODE64" +# endif /* __DARWIN_ONLY_64_BIT_INO_T */ +# else /* !__DARWIN_64_BIT_INO_T */ +# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ +# endif /* __DARWIN_64_BIT_INO_T */ + +# if __DARWIN_VERS_1050 +# if __DARWIN_ONLY_VERS_1050 +# define __DARWIN_SUF_1050 /* nothing */ +# else /* !__DARWIN_ONLY_VERS_1050 */ +# define __DARWIN_SUF_1050 "$1050" +# endif /* __DARWIN_ONLY_VERS_1050 */ +# else /* !__DARWIN_VERS_1050 */ +# define __DARWIN_SUF_1050 /* nothing */ +# endif /* __DARWIN_VERS_1050 */ + +# if __DARWIN_NON_CANCELABLE +# define __DARWIN_SUF_NON_CANCELABLE "$NOCANCEL" +# else /* !__DARWIN_NON_CANCELABLE */ +# define __DARWIN_SUF_NON_CANCELABLE /* nothing */ +# endif /* __DARWIN_NON_CANCELABLE */ + +#else /* !__DARWIN_UNIX03 */ +# define __DARWIN_SUF_UNIX03 /* nothing */ +# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ +# define __DARWIN_SUF_NON_CANCELABLE /* nothing */ +# define __DARWIN_SUF_1050 /* nothing */ +#endif /* __DARWIN_UNIX03 */ + +#define __DARWIN_SUF_EXTSN "$DARWIN_EXTSN" + +/* + * symbol versioning macros + */ +#define __DARWIN_ALIAS(sym) __asm("_" 
__STRING(sym) __DARWIN_SUF_UNIX03) +#define __DARWIN_ALIAS_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_NON_CANCELABLE __DARWIN_SUF_UNIX03) +#define __DARWIN_ALIAS_I(sym) __asm("_" __STRING(sym) __DARWIN_SUF_64_BIT_INO_T __DARWIN_SUF_UNIX03) +#define __DARWIN_NOCANCEL(sym) __asm("_" __STRING(sym) __DARWIN_SUF_NON_CANCELABLE) +#define __DARWIN_INODE64(sym) __asm("_" __STRING(sym) __DARWIN_SUF_64_BIT_INO_T) + +#define __DARWIN_1050(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050) +#define __DARWIN_1050ALIAS(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_UNIX03) +#define __DARWIN_1050ALIAS_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_NON_CANCELABLE __DARWIN_SUF_UNIX03) +#define __DARWIN_1050ALIAS_I(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_64_BIT_INO_T __DARWIN_SUF_UNIX03) +#define __DARWIN_1050INODE64(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_64_BIT_INO_T) + +#define __DARWIN_EXTSN(sym) __asm("_" __STRING(sym) __DARWIN_SUF_EXTSN) +#define __DARWIN_EXTSN_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_EXTSN __DARWIN_SUF_NON_CANCELABLE) + +/* + * symbol release macros + */ +#define __DARWIN_ALIAS_STARTING(_mac, _iphone, x) + + +/* + * POSIX.1 requires that the macros we test be defined before any standard + * header file is included. This permits us to convert values for feature + * testing, as necessary, using only _POSIX_C_SOURCE. 
+ * + * Here's a quick run-down of the versions: + * defined(_POSIX_SOURCE) 1003.1-1988 + * _POSIX_C_SOURCE == 1L 1003.1-1990 + * _POSIX_C_SOURCE == 2L 1003.2-1992 C Language Binding Option + * _POSIX_C_SOURCE == 199309L 1003.1b-1993 + * _POSIX_C_SOURCE == 199506L 1003.1c-1995, 1003.1i-1995, + * and the omnibus ISO/IEC 9945-1: 1996 + * _POSIX_C_SOURCE == 200112L 1003.1-2001 + * _POSIX_C_SOURCE == 200809L 1003.1-2008 + * + * In addition, the X/Open Portability Guide, which is now the Single UNIX + * Specification, defines a feature-test macro which indicates the version of + * that specification, and which subsumes _POSIX_C_SOURCE. + */ + +/* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1L. */ +#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1L +#undef _POSIX_C_SOURCE +#define _POSIX_C_SOURCE 199009L +#endif + +/* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2L. */ +#if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2L +#undef _POSIX_C_SOURCE +#define _POSIX_C_SOURCE 199209L +#endif + +/* Deal with various X/Open Portability Guides and Single UNIX Spec. */ +#ifdef _XOPEN_SOURCE +#if _XOPEN_SOURCE - 0L >= 700L && (!defined(_POSIX_C_SOURCE) || _POSIX_C_SOURCE - 0L < 200809L) +#undef _POSIX_C_SOURCE +#define _POSIX_C_SOURCE 200809L +#elif _XOPEN_SOURCE - 0L >= 600L && (!defined(_POSIX_C_SOURCE) || _POSIX_C_SOURCE - 0L < 200112L) +#undef _POSIX_C_SOURCE +#define _POSIX_C_SOURCE 200112L +#elif _XOPEN_SOURCE - 0L >= 500L && (!defined(_POSIX_C_SOURCE) || _POSIX_C_SOURCE - 0L < 199506L) +#undef _POSIX_C_SOURCE +#define _POSIX_C_SOURCE 199506L +#endif +#endif + +/* + * Deal with all versions of POSIX. The ordering relative to the tests above is + * important. 
+ */ +#if defined(_POSIX_SOURCE) && !defined(_POSIX_C_SOURCE) +#define _POSIX_C_SOURCE 198808L +#endif + +/* POSIX C deprecation macros */ +#define __POSIX_C_DEPRECATED(ver) + +/* + * Set a single macro which will always be defined and can be used to determine + * the appropriate namespace. For POSIX, these values will correspond to + * _POSIX_C_SOURCE value. Currently there are two additional levels corresponding + * to ANSI (_ANSI_SOURCE) and Darwin extensions (_DARWIN_C_SOURCE) + */ +#define __DARWIN_C_ANSI 010000L +#define __DARWIN_C_FULL 900000L + +#if defined(_ANSI_SOURCE) +#define __DARWIN_C_LEVEL __DARWIN_C_ANSI +#elif defined(_POSIX_C_SOURCE) && !defined(_DARWIN_C_SOURCE) && !defined(_NONSTD_SOURCE) +#define __DARWIN_C_LEVEL _POSIX_C_SOURCE +#else +#define __DARWIN_C_LEVEL __DARWIN_C_FULL +#endif + +/* If the developer has neither requested a strict language mode nor a version + * of POSIX, turn on functionality provided by __STDC_WANT_LIB_EXT1__ as part + * of __DARWIN_C_FULL. + */ +#if !defined(__STDC_WANT_LIB_EXT1__) && !defined(__STRICT_ANSI__) && __DARWIN_C_LEVEL >= __DARWIN_C_FULL +#define __STDC_WANT_LIB_EXT1__ 1 +#endif + +/* + * long long is not supported in c89 (__STRICT_ANSI__), but g++ -ansi and + * c99 still want long longs. While not perfect, we allow long longs for + * g++. + */ +#if (defined(__STRICT_ANSI__) && (__STDC_VERSION__ - 0 < 199901L) && !defined(__GNUG__)) +#define __DARWIN_NO_LONG_LONG 1 +#else +#define __DARWIN_NO_LONG_LONG 0 +#endif + +/***************************************** +* Public darwin-specific feature macros +*****************************************/ + +/* + * _DARWIN_FEATURE_64_BIT_INODE indicates that the ino_t type is 64-bit, and + * structures modified for 64-bit inodes (like struct stat) will be used. 
+ */ +#if __DARWIN_64_BIT_INO_T +#define _DARWIN_FEATURE_64_BIT_INODE 1 +#endif + +/* + * _DARWIN_FEATURE_64_ONLY_BIT_INODE indicates that the ino_t type may only + * be 64-bit; there is no support for 32-bit ino_t when this macro is defined + * (and non-zero). There is no struct stat64 either, as the regular + * struct stat will already be the 64-bit version. + */ +#if __DARWIN_ONLY_64_BIT_INO_T +#define _DARWIN_FEATURE_ONLY_64_BIT_INODE 1 +#endif + +/* + * _DARWIN_FEATURE_ONLY_VERS_1050 indicates that only those APIs updated + * in 10.5 exists; no pre-10.5 variants are available. + */ +#if __DARWIN_ONLY_VERS_1050 +#define _DARWIN_FEATURE_ONLY_VERS_1050 1 +#endif + +/* + * _DARWIN_FEATURE_ONLY_UNIX_CONFORMANCE indicates only UNIX conforming API + * are available (the legacy BSD APIs are not available) + */ +#if __DARWIN_ONLY_UNIX_CONFORMANCE +#define _DARWIN_FEATURE_ONLY_UNIX_CONFORMANCE 1 +#endif + +/* + * _DARWIN_FEATURE_UNIX_CONFORMANCE indicates whether UNIX conformance is on, + * and specifies the conformance level (3 is SUSv3) + */ +#if __DARWIN_UNIX03 +#define _DARWIN_FEATURE_UNIX_CONFORMANCE 3 +#endif + + +/* + * This macro casts away the qualifier from the variable + * + * Note: use at your own risk, removing qualifiers can result in + * catastrophic run-time failures. + */ +#ifndef __CAST_AWAY_QUALIFIER +#define __CAST_AWAY_QUALIFIER(variable, qualifier, type) (type) (long)(variable) +#endif + +/* + * __XNU_PRIVATE_EXTERN is a linkage decoration indicating that a symbol can be + * used from other compilation units, but not other libraries or executables. + */ +#ifndef __XNU_PRIVATE_EXTERN +#define __XNU_PRIVATE_EXTERN __attribute__((visibility("hidden"))) +#endif + +#if __has_include() +#include +#else +/* + * We intentionally define to nothing pointer attributes which do not have an + * impact on the ABI. __indexable and __bidi_indexable are not defined because + * of the ABI incompatibility that makes the diagnostic preferable. 
+ */ +#define __has_ptrcheck 0 +#define __single +#define __unsafe_indexable +#define __counted_by(N) +#define __sized_by(N) +#define __ended_by(E) + +/* + * Similarly, we intentionally define to nothing the + * __ptrcheck_abi_assume_single and __ptrcheck_abi_assume_unsafe_indexable + * macros because they do not lead to an ABI incompatibility. However, we do not + * define the indexable and unsafe_indexable ones because the diagnostic is + * better than the silent ABI break. + */ +#define __ptrcheck_abi_assume_single() +#define __ptrcheck_abi_assume_unsafe_indexable() + +/* __unsafe_forge intrinsics are defined as regular C casts. */ +#define __unsafe_forge_bidi_indexable(T, P, S) ((T)(P)) +#define __unsafe_forge_single(T, P) ((T)(P)) + +/* decay operates normally; attribute is meaningless without pointer checks. */ +#define __array_decay_dicards_count_in_parameters +#endif /* !__has_include() */ + +#define __ASSUME_PTR_ABI_SINGLE_BEGIN __ptrcheck_abi_assume_single() +#define __ASSUME_PTR_ABI_SINGLE_END __ptrcheck_abi_assume_unsafe_indexable() + +#if __has_ptrcheck +#define __header_indexable __indexable +#define __header_bidi_indexable __bidi_indexable +#else +#define __header_indexable +#define __header_bidi_indexable +#endif + +/* + * Architecture validation for current SDK + */ +#if !defined(__sys_cdefs_arch_unknown__) && defined(__i386__) +#elif !defined(__sys_cdefs_arch_unknown__) && defined(__x86_64__) +#elif !defined(__sys_cdefs_arch_unknown__) && defined(__arm__) +#elif !defined(__sys_cdefs_arch_unknown__) && defined(__arm64__) +#else +#error Unsupported architecture +#endif + + +/* + * Check if __probable and __improbable have already been defined elsewhere. + * These macros inform the compiler (and humans) about which branches are likely + * to be taken. 
+ */ +#if !defined(__probable) && !defined(__improbable) +#define __probable(x) __builtin_expect(!!(x), 1) +#define __improbable(x) __builtin_expect(!!(x), 0) +#endif /* !defined(__probable) && !defined(__improbable) */ + +#define __container_of(ptr, type, field) __extension__({ \ + const __typeof__(((type *)NULL)->field) *__ptr = (ptr); \ + (type *)((uintptr_t)__ptr - offsetof(type, field)); \ + }) + + +#define __compiler_barrier() __asm__ __volatile__("" ::: "memory") + +#if __has_attribute(enum_extensibility) +#define __enum_open __attribute__((__enum_extensibility__(open))) +#define __enum_closed __attribute__((__enum_extensibility__(closed))) +#else +#define __enum_open +#define __enum_closed +#endif // __has_attribute(enum_extensibility) + +#if __has_attribute(flag_enum) +#define __enum_options __attribute__((__flag_enum__)) +#else +#define __enum_options +#endif + +/* + * Similar to OS_ENUM/OS_CLOSED_ENUM/OS_OPTIONS/OS_CLOSED_OPTIONS + * + * This provides more advanced type checking on compilers supporting + * the proper extensions, even in C. + */ +#if __has_feature(objc_fixed_enum) || __has_extension(cxx_fixed_enum) || \ + __has_extension(cxx_strong_enums) +#define __enum_decl(_name, _type, ...) \ + typedef enum : _type __VA_ARGS__ __enum_open _name +#define __enum_closed_decl(_name, _type, ...) \ + typedef enum : _type __VA_ARGS__ __enum_closed _name +#define __options_decl(_name, _type, ...) \ + typedef enum : _type __VA_ARGS__ __enum_open __enum_options _name +#define __options_closed_decl(_name, _type, ...) \ + typedef enum : _type __VA_ARGS__ __enum_closed __enum_options _name +#else +#define __enum_decl(_name, _type, ...) \ + typedef _type _name; enum __VA_ARGS__ __enum_open +#define __enum_closed_decl(_name, _type, ...) \ + typedef _type _name; enum __VA_ARGS__ __enum_closed +#define __options_decl(_name, _type, ...) \ + typedef _type _name; enum __VA_ARGS__ __enum_open __enum_options +#define __options_closed_decl(_name, _type, ...) 
\ + typedef _type _name; enum __VA_ARGS__ __enum_closed __enum_options +#endif + + +#if defined(KERNEL) && __has_attribute(xnu_usage_semantics) +/* + * These macros can be used to annotate type definitions or scalar structure + * fields to inform the compiler about which semantic they have with regards + * to the content of the underlying memory represented by such type or field. + * + * This information is used in the analysis of the types performed by the + * signature based type segregation implemented in kalloc. + */ +#define __kernel_ptr_semantics __attribute__((xnu_usage_semantics("pointer"))) +#define __kernel_data_semantics __attribute__((xnu_usage_semantics("data"))) +#define __kernel_dual_semantics __attribute__((xnu_usage_semantics("pointer", "data"))) + +#else /* defined(KERNEL) && __has_attribute(xnu_usage_semantics) */ + +#define __kernel_ptr_semantics +#define __kernel_data_semantics +#define __kernel_dual_semantics + +#endif /* defined(KERNEL) && __has_attribute(xnu_usage_semantics) */ + +#endif /* !_CDEFS_H_ */ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/run-verifast.sh b/Test/VeriFast/tasks/vTaskSwitchContext/run-verifast.sh new file mode 100755 index 00000000000..b24fd91ed22 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/run-verifast.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# This script runs the preprocesses the annotated 'tasks.c' file +# and checks the resulting proof file with VeriFast. +# +# This script expects the following arguments: +# $1 : Absolute path to the base directory of this repository. +# $2 : Absolute path to the VeriFast installation directory. + + +# Checking validity of command line arguments. +HELP="false" +if [ $1 == "-h" ] || [ $1 == "--help" ]; then + HELP="true" +else + if [ $# != 2 ] ; then + echo Wrong number of arguments. Found $#, expected 2. + HELP="true" + fi + + if [ ! -d "$1" ]; then + echo "Directory (\$1) '$1' does not exist." + HELP="true" + fi + + if [ ! 
-d "$2" ]; then + echo "Directory (\$2) '$2' does not exist." + HELP="true" + fi +fi + +if [ "$HELP" != "false" ]; then + echo Expected call of the form + echo "run-verifast.sh " + echo "where" + echo " is the absolute path to the base directory of this repository and" + echo " is the absolute path to the VeriFast installation directory." + exit +fi + + + +# Relative or absolute path to the directory this script and `paths.sh` reside in. +PREFIX=`dirname $0` +# Absolute path to the base of this repository. +REPO_BASE_DIR="$1" +# Absolute path the VeriFast installation directory +VF_DIR="$2" + +# Load functions used to compute paths. +. "$PREFIX/paths.sh" + + +VF_PROOF_BASE_DIR=`vf_proof_base_dir $REPO_BASE_DIR` + + +PP_SCRIPT_DIR=`pp_script_dir $REPO_BASE_DIR` +PREP="$PP_SCRIPT_DIR/prepare_file_for_VeriFast.sh" +TASK_C=`vf_annotated_tasks_c $REPO_BASE_DIR` +PP_TASK_C=`pp_vf_tasks_c $REPO_BASE_DIR` + +PROOF_SETUP_DIR=`vf_proof_setup_dir $REPO_BASE_DIR` +PROOF_FILES_DIR=`vf_proof_dir $REPO_BASE_DIR` + +PP_ERR_LOG="`pp_log_dir $REPO_BASE_DIR`/preprocessing_errors.txt" + + +ensure_output_dirs_exist $REPO_BASE_DIR + +"$PREP" "$TASK_C" "$PP_TASK_C" "$PP_ERR_LOG" \ + "$REPO_BASE_DIR" "$VF_PROOF_BASE_DIR" "$VF_DIR" + +# Remarks: +# - Recently, provenance checks have been added to VF that break old proofs +# involving pointer comparisons. The flag `-assume_no_provenance` turns them +# off. + +"$VF_DIR/bin/verifast" \ + -I $PROOF_SETUP_DIR \ + -I $PROOF_FILES_DIR \ + -assume_no_provenance \ + -disable_overflow_check \ + -allow_dead_code \ + -c \ + "$PP_TASK_C" \ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/run-vfide.sh b/Test/VeriFast/tasks/vTaskSwitchContext/run-vfide.sh new file mode 100755 index 00000000000..cd57e7be673 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/run-vfide.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +# This script runs the preprocesses the annotated 'tasks.c' file +# and loads the result into the VeriFast IDE. 
+# +# This script expects the following arguments: +# $1 : Absolute path to the base directory of this repository. +# $2 : Absolute path to the VeriFast installation directory. +# $3 (Optional) : Font size + + + +# Checking validity of command line arguments. +HELP="false" +if [ $1 == "-h" ] || [ $1 == "--help" ]; then + HELP="true" +else + if [[ $# < 2 || $# > 3 ]] ; then + echo Wrong number of arguments. Found $#, expected 2 or 3. + HELP="true" + fi + + if [ ! -d "$1" ]; then + echo "Directory (\$1) '$1' does not exist." + HELP="true" + fi + + if [ ! -d "$2" ]; then + echo "Directory (\$2) '$2' does not exist." + HELP="true" + fi + + if ! [[ "$3" =~ ^[1-9]*$ ]] ; then + echo "Argument (\$3) '$3' is not a number." + HELP="true" + fi +fi + +if [ "$HELP" != "false" ]; then + echo Expected call of the form + echo "run-vfide.sh []" + echo "where:" + echo "- is the absolute path to the base directory of this repository" + echo "- is the absolute path to the VeriFast installation directory" + echo "- is an optional argument specifying the font size" + exit +fi + + +# Relative or absolute path to the directory this script and `paths.sh` reside in. +PREFIX=`dirname $0` +# Absolute path to the base of this repository. +REPO_BASE_DIR="$1" +# Absolute path the VeriFast installation directory +VF_DIR="$2" + +FONT_SIZE=17 +if [ "$3" != "" ] +then + FONT_SIZE="$3" +fi + +# Load functions used to compute paths. +. 
"$PREFIX/paths.sh" + + +VF_PROOF_BASE_DIR=`vf_proof_base_dir $REPO_BASE_DIR` + + +PP_SCRIPT_DIR=`pp_script_dir $REPO_BASE_DIR` +PREP="$PP_SCRIPT_DIR/prepare_file_for_VeriFast.sh" +TASK_C=`vf_annotated_tasks_c $REPO_BASE_DIR` +PP_TASK_C=`pp_vf_tasks_c $REPO_BASE_DIR` + +PROOF_SETUP_DIR=`vf_proof_setup_dir $REPO_BASE_DIR` +PROOF_FILES_DIR=`vf_proof_dir $REPO_BASE_DIR` + +PP_ERR_LOG="`pp_log_dir $REPO_BASE_DIR`/preprocessing_errors.txt" + + + + +ensure_output_dirs_exist $REPO_BASE_DIR + +"$PREP" "$TASK_C" "$PP_TASK_C" "$PP_ERR_LOG" \ + "$REPO_BASE_DIR" "$VF_PROOF_BASE_DIR" "$VF_DIR" + +# Remarks: +# - Recently, provenance checks have been added to VF that break old proofs +# involving pointer comparisons. The flag `-assume_no_provenance` turns them +# off. + +"$VF_DIR/bin/vfide" "$PP_TASK_C" \ + -I $PROOF_SETUP_DIR \ + -I $PROOF_FILES_DIR \ + -assume_no_provenance \ + -disable_overflow_check \ + "$PP_TASK_C" \ + -codeFont "$FONT_SIZE" -traceFont "$FONT_SIZE" \ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/sdks/pico-sdk b/Test/VeriFast/tasks/vTaskSwitchContext/sdks/pico-sdk new file mode 160000 index 00000000000..780173e22f1 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/sdks/pico-sdk @@ -0,0 +1 @@ +Subproject commit 780173e22f197ec7481001f45a5daa1ae5d1788a diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/src/list.c b/Test/VeriFast/tasks/vTaskSwitchContext/src/list.c new file mode 100644 index 00000000000..5532d8e59e6 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/src/list.c @@ -0,0 +1,1081 @@ +/* + * FreeRTOS SMP Kernel V202110.00 + * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + + +#include + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. */ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +#include "FreeRTOS.h" +#include "list.h" + +/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified + * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be + * defined for the header files above, but not in this file, in order to + * generate the correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. 
*/ + +/*----------------------------------------------------------- +* PUBLIC LIST API documented in list.h +*----------------------------------------------------------*/ + +void vListInitialise( List_t * const pxList ) +{ + /* The list structure contains a list item which is used to mark the + * end of the list. To initialise the list the list end is inserted + * as the only list entry. */ + pxList->pxIndex = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + /* The list end value is the highest possible value in the list to + * ensure it remains at the end of the list. */ + pxList->xListEnd.xItemValue = portMAX_DELAY; + + /* The list end next and previous pointers point to itself so we know + * when the list is empty. */ + pxList->xListEnd.pxNext = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + pxList->xListEnd.pxPrevious = ( ListItem_t * ) &( pxList->xListEnd ); /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. */ + + pxList->uxNumberOfItems = ( UBaseType_t ) 0U; + + /* Write known values into the list if + * configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. */ + listSET_LIST_INTEGRITY_CHECK_1_VALUE( pxList ); + listSET_LIST_INTEGRITY_CHECK_2_VALUE( pxList ); +} +/*-----------------------------------------------------------*/ + +void vListInitialiseItem( ListItem_t * const pxItem ) +//@ requires pxItem->pxContainer |-> _; +//@ ensures pxItem->pxContainer |-> 0; +{ + /* Make sure the list item is not recorded as being on a list. */ + pxItem->pxContainer = NULL; + + /* Write known values into the list item if + * configUSE_LIST_DATA_INTEGRITY_CHECK_BYTES is set to 1. 
*/ + listSET_FIRST_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); + listSET_SECOND_LIST_ITEM_INTEGRITY_CHECK_VALUE( pxItem ); +} +/*-----------------------------------------------------------*/ + +void vListInsertEnd( List_t * const pxList, + ListItem_t * const pxNewListItem ) +#ifndef VERIFAST_SINGLE_CORE + /* Reason for rewrite: + * Predicates `xLIST_ITEM`, `DLS` and `xLIST` have been extended to expose + * node owners. Proofs using these predicates must be adapted as well. + */ + + // TODO: Adapt contract and proof to new version of predicates. + + /*@requires xLIST(pxList, ?len, ?idx, ?end, ?cells, ?vals, ?owners) &*& + xLIST_ITEM(pxNewListItem, ?val, _, _, ?ow, _) &*& + len < INT_MAX;@*/ + /*@ensures xLIST(pxList, len+1, idx, end, ?new_cells, ?new_vals, ?new_owners) &*& + idx == end + ? (new_cells == append(cells, singleton(pxNewListItem)) &*& + new_vals == append(vals, singleton(val)) &*& + new_owners == append(owners, singleton(ow))) + : (new_cells == append(take(index_of(idx, cells), cells), append(singleton(pxNewListItem), drop(index_of(idx, cells), cells))) &*& + new_vals == append(take(index_of(idx, cells), vals), append(singleton(val), drop(index_of(idx, cells), vals))) &*& + new_owners == append(take(index_of(idx, cells), owners), append(singleton(ow), drop(index_of(idx, cells), owners))));@*/ + { + /*@xLIST_star_item(pxList, pxNewListItem);@*/ + /*@assert mem(pxNewListItem, cells) == false;@*/ + /*@open xLIST(pxList, len, idx, end, cells, vals, owners);@*/ + #ifdef VERIFAST /*< const pointer declaration */ + ListItem_t * pxIndex = pxList->pxIndex; + #else + ListItem_t * const pxIndex = pxList->pxIndex; + + /* Only effective when configASSERT() is also defined, these tests may catch + * the list data structures being overwritten in memory. They will not catch + * data errors caused by incorrect configuration or use of FreeRTOS. 
*/ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + #endif + + /*@open xLIST_ITEM(pxNewListItem, _, _, _, _, _);@*/ + /*@assert DLS(end, ?endprev, end, _, cells, vals, owners, pxList);@*/ + /*@dls_first_mem(end, endprev, end, endprev, cells);@*/ + /*@dls_last_mem(end, endprev, end, endprev, cells);@*/ + /*@ + if (end == idx) + { + open DLS(end, endprev, end, endprev, cells, vals, owners, pxList); + open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, head(owners), pxList); + if (end == endprev) + { + // Case A (singleton): idx==end==endprev + } + else + { + assert DLS(endnext, end, end, endprev, tail(cells), tail(vals), tail(owners), pxList); + if (endnext == endprev) + { + // Case B (two): idx==end and endnext==endprev + open DLS(endnext, end, end, endnext, _, _, _, _); + open xLIST_ITEM(endnext, _, _, _, _, _); + } + else + { + // Case C: idx==end and DLS:endnext...endprev + split(endnext, end, end, endprev, tail(cells), tail(vals), endprev, index_of(endprev, tail(cells))); + open DLS(endprev, _, _, _, _, _, _, _); + open xLIST_ITEM(endprev, _, _, _, _, _); + } + } + } + else + { + int i = index_of(idx, cells); + split(end, endprev, end, endprev, cells, vals, idx, i); + assert DLS(end, endprev, idx, ?idxprev, take(i, cells), take(i, vals), take(i, owners), pxList); + assert DLS(idx, idxprev, end, endprev, drop(i, cells), drop(i, vals), drop(i, owners), pxList); + open DLS(idx, idxprev, end, endprev, _, _, _, _); + open xLIST_ITEM(idx, _, _, _, _, _); + if (end == idxprev) + { + // Case D: end==idxprev and DLS:idx...endprev + take_take(1, i, vals); + take_head(vals); + open DLS(end, endprev, idx, idxprev, take(i, cells), take(i, vals), take(i, owners), pxList); + open xLIST_ITEM(end, portMAX_DELAY, _, _, _, _); + assert length(take(i, cells)) == 1; + } + else + { + // Case E: DLS:end...idxprev and DLS:idx...endprev + dls_last_mem(end, endprev, idx, idxprev, take(i, cells)); + split(end, endprev, idx, idxprev, take(i, 
cells), take(i, vals), idxprev, index_of(idxprev, take(i, cells))); + open DLS(idxprev, _, _, idxprev, _, _, _, _); + length_take(i, cells); + drop_take_singleton(i, vals); + drop_take_singleton(i, owners); + open xLIST_ITEM(idxprev, nth(i-1, vals), _, _, _, _); + } + } + @*/ + + /* Insert a new list item into pxList, but rather than sort the list, + * makes the new list item the last item to be removed by a call to + * listGET_OWNER_OF_NEXT_ENTRY(). */ + pxNewListItem->pxNext = pxIndex; + pxNewListItem->pxPrevious = pxIndex->pxPrevious; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + pxIndex->pxPrevious->pxNext = pxNewListItem; + pxIndex->pxPrevious = pxNewListItem; + + /* Remember which list the item is in. */ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; + + /*@ + if (end == idx) + { + close xLIST_ITEM(pxNewListItem, val, end, endprev, ow, pxList); + close DLS(pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val), singleton(ow), pxList); + close xLIST_ITEM(end, portMAX_DELAY, ?endnext, pxNewListItem, head(owners), pxList); + if (end == endprev) + { + // Case A (singleton): idx==end==endprev + close DLS(end, pxNewListItem, endnext, end, cells, vals, owners, pxList); + join(end, pxNewListItem, endnext, end, cells, vals, + pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val)); + close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)), append(owners, singleton(ow))); + } + else + { + close xLIST_ITEM(endprev, ?endprevval, pxNewListItem, ?endprevprev, ?endprevowner, _); + if (endnext == endprev) + { + // Case B (two): idx==end and endnext==endprev + close DLS(endprev, end, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), singleton(endprevowner), pxList); + close DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, owners, pxList); + join(end, pxNewListItem, 
pxNewListItem, endprev, cells, vals, + pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val)); + close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)), append(owners, singleton(ow))); + } + else + { + // Case C: idx==end and DLS:endnext...endprev + close DLS(endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), singleton(endprevowner), pxList); + assert DLS(endnext, end, endprev, endprevprev, ?cells_endnext_to_endprevprev, ?vals_endnext_to_endprevprev, _, pxList); + join(endnext, end, endprev, endprevprev, cells_endnext_to_endprevprev, vals_endnext_to_endprevprev, + endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval)); + close DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, owners, pxList); + join(end, pxNewListItem, pxNewListItem, endprev, cells, vals, + pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val)); + close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val)), append(owners, singleton(ow))); + } + } + } + else + { + // Case D: end==idxprev and DLS:idx...endprev + // Case E: DLS:end...idxprev and DLS:idx...endprev + int i = index_of(idx, cells); + close xLIST_ITEM(pxNewListItem, val, idx, ?idxprev, ow, pxList); + close xLIST_ITEM(idx, ?idxval, ?idxnext, pxNewListItem, ?idxowner, pxList); + nth_drop2(vals, i); + assert idxval == nth(i, vals); + nth_drop2(owners, i); + assert idxowner == nth(i, owners); + close xLIST_ITEM(idxprev, ?idxprevval, pxNewListItem, ?idxprevprev, ?idxprevowner, pxList); + + if (end == idxprev) + { + close DLS(end, endprev, pxNewListItem, end, singleton(end), singleton(portMAX_DELAY), singleton(head(owners)), pxList); + } + else + { + length_take(i, cells); + take_take(i-1, i, vals); + take_singleton(i-1, vals); + take_singleton(i, vals); + take_take(i-1, i, owners); + take_singleton(i-1, 
owners); + take_singleton(i, owners); + assert DLS(end, endprev, idxprev, idxprevprev, ?cells_end_to_idxprevprev, take(i-1, vals), take(i-1, owners), pxList); + close DLS(idxprev, idxprevprev, pxNewListItem, idxprev, singleton(idxprev), singleton(idxprevval), singleton(idxprevowner), pxList); + join(end, endprev, idxprev, idxprevprev, cells_end_to_idxprevprev, take(i-1, vals), + idxprev, idxprevprev, pxNewListItem, idxprev, singleton(idxprev), singleton(idxprevval)); + } + + if (idx == endprev) + { + close DLS(idx, pxNewListItem, end, idx, singleton(idx), singleton(idxval), singleton(idxowner), pxList); + } + else + { + assert DLS(end, endprev, pxNewListItem, idxprev, ?cells_end_to_idxprev, ?vals_end_to_idxprev, _, pxList); + close DLS(idx, pxNewListItem, end, endprev, drop(i, cells), drop(i, vals), drop(i, owners), pxList); + } + + assert DLS(end, endprev, pxNewListItem, idxprev, take(i, cells), take(i, vals), take(i, owners), pxList); + assert DLS(idx, pxNewListItem, end, endprev, drop(i, cells), drop(i, vals), drop(i, owners), pxList); + assert xLIST_ITEM(pxNewListItem, val, idx, idxprev, ow, pxList); + dls_star_item(idx, endprev, pxNewListItem); + close DLS(pxNewListItem, idxprev, end, endprev, cons(pxNewListItem, drop(i, cells)), cons(val, drop(i, vals)), cons(ow, drop(i, owners)), pxList); + join(end, endprev, pxNewListItem, idxprev, take(i, cells), take(i, vals), + pxNewListItem, idxprev, end, endprev, cons(pxNewListItem, drop(i, cells)), cons(val, drop(i, vals))); + assert DLS(end, endprev, end, endprev, ?cells_new, ?vals_new, ?owners_new, pxList); + assert cells_new == append(take(i, cells), append(singleton(pxNewListItem), drop(i, cells))); + assert vals_new == append(take(i, vals) , append(singleton(val), drop(i, vals))); + assert owners_new == append(take(i, owners) , append(singleton(ow), drop(i, owners))); + head_append(take(i, cells), append(singleton(pxNewListItem), drop(i, cells))); + take_take(1, i, cells); + head_append(take(i, vals), 
append(singleton(val), drop(i, vals))); + take_take(1, i, vals); + close xLIST(pxList, len+1, idx, end, cells_new, vals_new, owners_new); + } + @*/ + } +#else + /* The contract and proof below have been wirtten by Aalok Thakkar and Nathan + * Chong in 2020 for the single-core setup. + */ + /*@requires xLIST(pxList, ?len, ?idx, ?end, ?cells, ?vals) &*& + xLIST_ITEM(pxNewListItem, ?val, _, _, _);@*/ + /*@ensures xLIST(pxList, len+1, idx, end, ?new_cells, ?new_vals) &*& + idx == end + ? (new_cells == append(cells, singleton(pxNewListItem)) &*& + new_vals == append(vals, singleton(val))) + : (new_cells == append(take(index_of(idx, cells), cells), append(singleton(pxNewListItem), drop(index_of(idx, cells), cells))) &*& + new_vals == append(take(index_of(idx, cells), vals), append(singleton(val), drop(index_of(idx, cells), vals))));@*/ + { + /*@xLIST_star_item(pxList, pxNewListItem);@*/ + /*@assert mem(pxNewListItem, cells) == false;@*/ + /*@open xLIST(pxList, len, idx, end, cells, vals);@*/ + #ifdef VERIFAST /*< const pointer declaration */ + ListItem_t * pxIndex = pxList->pxIndex; + #else + ListItem_t * const pxIndex = pxList->pxIndex; + + /* Only effective when configASSERT() is also defined, these tests may catch + * the list data structures being overwritten in memory. They will not catch + * data errors caused by incorrect configuration or use of FreeRTOS. 
*/ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + #endif + + /*@open xLIST_ITEM(pxNewListItem, _, _, _, _);@*/ + /*@assert DLS(end, ?endprev, end, _, cells, vals, pxList);@*/ + /*@dls_first_mem(end, endprev, end, endprev, cells);@*/ + /*@dls_last_mem(end, endprev, end, endprev, cells);@*/ + /*@ + if (end == idx) + { + open DLS(end, endprev, end, endprev, cells, vals, pxList); + open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, pxList); + if (end == endprev) + { + // Case A (singleton): idx==end==endprev + } + else + { + assert DLS(endnext, end, end, endprev, tail(cells), tail(vals), pxList); + if (endnext == endprev) + { + // Case B (two): idx==end and endnext==endprev + open DLS(endnext, end, end, endnext, _, _, _); + open xLIST_ITEM(endnext, _, _, _, _); + } + else + { + // Case C: idx==end and DLS:endnext...endprev + split(endnext, end, end, endprev, tail(cells), tail(vals), endprev, index_of(endprev, tail(cells))); + open DLS(endprev, _, _, _, _, _, _); + open xLIST_ITEM(endprev, _, _, _, _); + } + } + } + else + { + int i = index_of(idx, cells); + split(end, endprev, end, endprev, cells, vals, idx, i); + assert DLS(end, endprev, idx, ?idxprev, take(i, cells), take(i, vals), pxList); + assert DLS(idx, idxprev, end, endprev, drop(i, cells), drop(i, vals), pxList); + open DLS(idx, idxprev, end, endprev, _, _, _); + open xLIST_ITEM(idx, _, _, _, _); + if (end == idxprev) + { + // Case D: end==idxprev and DLS:idx...endprev + take_take(1, i, vals); + take_head(vals); + open DLS(end, endprev, idx, idxprev, take(i, cells), take(i, vals), pxList); + open xLIST_ITEM(end, portMAX_DELAY, _, _, _); + assert length(take(i, cells)) == 1; + } + else + { + // Case E: DLS:end...idxprev and DLS:idx...endprev + dls_last_mem(end, endprev, idx, idxprev, take(i, cells)); + split(end, endprev, idx, idxprev, take(i, cells), take(i, vals), idxprev, index_of(idxprev, take(i, cells))); + open DLS(idxprev, _, _, idxprev, _, _, _); + 
length_take(i, cells); + drop_take_singleton(i, vals); + open xLIST_ITEM(idxprev, nth(i-1, vals), _, _, _); + } + } + @*/ + + /* Insert a new list item into pxList, but rather than sort the list, + * makes the new list item the last item to be removed by a call to + * listGET_OWNER_OF_NEXT_ENTRY(). */ + pxNewListItem->pxNext = pxIndex; + pxNewListItem->pxPrevious = pxIndex->pxPrevious; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + pxIndex->pxPrevious->pxNext = pxNewListItem; + pxIndex->pxPrevious = pxNewListItem; + + /* Remember which list the item is in. */ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; + + /*@ + if (end == idx) + { + close xLIST_ITEM(pxNewListItem, val, end, endprev, pxList); + close DLS(pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val), pxList); + close xLIST_ITEM(end, portMAX_DELAY, ?endnext, pxNewListItem, pxList); + if (end == endprev) + { + // Case A (singleton): idx==end==endprev + close DLS(end, pxNewListItem, endnext, end, cells, vals, pxList); + join(end, pxNewListItem, endnext, end, cells, vals, + pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val)); + close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val))); + } + else + { + close xLIST_ITEM(endprev, ?endprevval, pxNewListItem, ?endprevprev, _); + if (endnext == endprev) + { + // Case B (two): idx==end and endnext==endprev + close DLS(endprev, end, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), pxList); + close DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, pxList); + join(end, pxNewListItem, pxNewListItem, endprev, cells, vals, + pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val)); + close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val))); + } + else + { + // Case C: idx==end and 
DLS:endnext...endprev + close DLS(endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval), pxList); + assert DLS(endnext, end, endprev, endprevprev, ?cells_endnext_to_endprevprev, ?vals_endnext_to_endprevprev, pxList); + join(endnext, end, endprev, endprevprev, cells_endnext_to_endprevprev, vals_endnext_to_endprevprev, + endprev, endprevprev, pxNewListItem, endprev, singleton(endprev), singleton(endprevval)); + close DLS(end, pxNewListItem, pxNewListItem, endprev, cells, vals, pxList); + join(end, pxNewListItem, pxNewListItem, endprev, cells, vals, + pxNewListItem, endprev, end, pxNewListItem, singleton(pxNewListItem), singleton(val)); + close xLIST(pxList, len+1, idx, end, append(cells, singleton(pxNewListItem)), append(vals, singleton(val))); + } + } + } + else + { + // Case D: end==idxprev and DLS:idx...endprev + // Case E: DLS:end...idxprev and DLS:idx...endprev + int i = index_of(idx, cells); + close xLIST_ITEM(pxNewListItem, val, idx, ?idxprev, pxList); + close xLIST_ITEM(idx, ?idxval, ?idxnext, pxNewListItem, pxList); + nth_drop2(vals, i); + assert idxval == nth(i, vals); + close xLIST_ITEM(idxprev, ?idxprevval, pxNewListItem, ?idxprevprev, pxList); + + if (end == idxprev) + { + close DLS(end, endprev, pxNewListItem, end, singleton(end), singleton(portMAX_DELAY), pxList); + } + else + { + length_take(i, cells); + take_take(i-1, i, vals); + take_singleton(i-1, vals); + take_singleton(i, vals); + assert DLS(end, endprev, idxprev, idxprevprev, ?cells_end_to_idxprevprev, take(i-1, vals), pxList); + close DLS(idxprev, idxprevprev, pxNewListItem, idxprev, singleton(idxprev), singleton(idxprevval), pxList); + join(end, endprev, idxprev, idxprevprev, cells_end_to_idxprevprev, take(i-1, vals), + idxprev, idxprevprev, pxNewListItem, idxprev, singleton(idxprev), singleton(idxprevval)); + } + + if (idx == endprev) + { + close DLS(idx, pxNewListItem, end, idx, singleton(idx), singleton(idxval), pxList); + } + else + { + assert DLS(end, 
endprev, pxNewListItem, idxprev, ?cells_end_to_idxprev, ?vals_end_to_idxprev, pxList); + close DLS(idx, pxNewListItem, end, endprev, drop(i, cells), drop(i, vals), pxList); + } + + assert DLS(end, endprev, pxNewListItem, idxprev, take(i, cells), take(i, vals), pxList); + assert DLS(idx, pxNewListItem, end, endprev, drop(i, cells), drop(i, vals), pxList); + assert xLIST_ITEM(pxNewListItem, val, idx, idxprev, pxList); + dls_star_item(idx, endprev, pxNewListItem); + close DLS(pxNewListItem, idxprev, end, endprev, cons(pxNewListItem, drop(i, cells)), cons(val, drop(i, vals)), pxList); + join(end, endprev, pxNewListItem, idxprev, take(i, cells), take(i, vals), + pxNewListItem, idxprev, end, endprev, cons(pxNewListItem, drop(i, cells)), cons(val, drop(i, vals))); + assert DLS(end, endprev, end, endprev, ?cells_new, ?vals_new, pxList); + assert cells_new == append(take(i, cells), append(singleton(pxNewListItem), drop(i, cells))); + assert vals_new == append(take(i, vals) , append(singleton(val), drop(i, vals))); + head_append(take(i, cells), append(singleton(pxNewListItem), drop(i, cells))); + take_take(1, i, cells); + head_append(take(i, vals), append(singleton(val), drop(i, vals))); + take_take(1, i, vals); + close xLIST(pxList, len+1, idx, end, cells_new, vals_new); + } + @*/ + } +#endif /* VERIFAST_SINGLE_CORE */ +/*-----------------------------------------------------------*/ + +void vListInsert( List_t * const pxList, + ListItem_t * const pxNewListItem ) +{ + ListItem_t * pxIterator; + const TickType_t xValueOfInsertion = pxNewListItem->xItemValue; + + /* Only effective when configASSERT() is also defined, these tests may catch + * the list data structures being overwritten in memory. They will not catch + * data errors caused by incorrect configuration or use of FreeRTOS. */ + listTEST_LIST_INTEGRITY( pxList ); + listTEST_LIST_ITEM_INTEGRITY( pxNewListItem ); + + /* Insert the new list item into the list, sorted in xItemValue order. 
+ * + * If the list already contains a list item with the same item value then the + * new list item should be placed after it. This ensures that TCBs which are + * stored in ready lists (all of which have the same xItemValue value) get a + * share of the CPU. However, if the xItemValue is the same as the back marker + * the iteration loop below will not end. Therefore the value is checked + * first, and the algorithm slightly modified if necessary. */ + if( xValueOfInsertion == portMAX_DELAY ) + { + pxIterator = pxList->xListEnd.pxPrevious; + } + else + { + /* *** NOTE *********************************************************** + * If you find your application is crashing here then likely causes are + * listed below. In addition see https://www.FreeRTOS.org/FAQHelp.html for + * more tips, and ensure configASSERT() is defined! + * https://www.FreeRTOS.org/a00110.html#configASSERT + * + * 1) Stack overflow - + * see https://www.FreeRTOS.org/Stacks-and-stack-overflow-checking.html + * 2) Incorrect interrupt priority assignment, especially on Cortex-M + * parts where numerically high priority values denote low actual + * interrupt priorities, which can seem counter intuitive. See + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html and the definition + * of configMAX_SYSCALL_INTERRUPT_PRIORITY on + * https://www.FreeRTOS.org/a00110.html + * 3) Calling an API function from within a critical section or when + * the scheduler is suspended, or calling an API function that does + * not end in "FromISR" from an interrupt. + * 4) Using a queue or semaphore before it has been initialised or + * before the scheduler has been started (are interrupts firing + * before vTaskStartScheduler() has been called?). + * 5) If the FreeRTOS port supports interrupt nesting then ensure that + * the priority of the tick interrupt is at or below + * configMAX_SYSCALL_INTERRUPT_PRIORITY. 
+ **********************************************************************/ + + for( pxIterator = ( ListItem_t * ) &( pxList->xListEnd ); pxIterator->pxNext->xItemValue <= xValueOfInsertion; pxIterator = pxIterator->pxNext ) /*lint !e826 !e740 !e9087 The mini list structure is used as the list end to save RAM. This is checked and valid. *//*lint !e440 The iterator moves to a different value, not xValueOfInsertion. */ + { + /* There is nothing to do here, just iterating to the wanted + * insertion position. */ + } + } + + pxNewListItem->pxNext = pxIterator->pxNext; + pxNewListItem->pxNext->pxPrevious = pxNewListItem; + pxNewListItem->pxPrevious = pxIterator; + pxIterator->pxNext = pxNewListItem; + + /* Remember which list the item is in. This allows fast removal of the + * item later. */ + pxNewListItem->pxContainer = pxList; + + ( pxList->uxNumberOfItems )++; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxListRemove( ListItem_t * const pxItemToRemove ) +#ifndef VERIFAST_SINGLE_CORE + /* Reason for rewrite: + * Predicates `xLIST_ITEM`, `DLS` and `xLIST` have been extended to expose + * node owners. Proofs using these predicates must be adapted as well. + */ + + /*@requires + exists(?l) &*& + xLIST(l, ?len, ?idx, ?end, ?cells, ?vals, ?owners) &*& + end != pxItemToRemove &*& + mem(pxItemToRemove, cells) == true;@*/ + /*@ensures + result == len-1 &*& + xLIST_ITEM(pxItemToRemove, nth(index_of(pxItemToRemove, cells), vals), _, ?pxItemToRemovePrevious, nth(index_of(pxItemToRemove, cells), owners), NULL) &*& + pxItemToRemovePrevious == nth(index_of(pxItemToRemove, cells)-1, cells) &*& + xLIST(l, result, idx == pxItemToRemove ? 
pxItemToRemovePrevious : idx, end, remove(pxItemToRemove, cells), remove_nth(index_of(pxItemToRemove, cells), vals), remove_nth(index_of(pxItemToRemove, cells), owners)); + @*/ + { + /* For brevity we alias x to pxItemToRemove */ + /*@struct xLIST_ITEM *x = pxItemToRemove;@*/ + + /* Start by establishing that the list must be non-empty since x != end */ + /*@open xLIST(l, len, idx, end, cells, vals, owners);@*/ + /*@assert DLS(end, ?endprev, end, _, cells, vals, owners, l);@*/ + /*@assert vals == cons(portMAX_DELAY, _);@*/ + /*@dls_not_empty(end, endprev, cells, x);@*/ + + /* We know the xLIST is a DLS: end...endprev + Split this into DLS1:end...xprev and DLS2:x...endprev */ + /*@int i = index_of(x, cells);@*/ + /*@split(end, endprev, end, endprev, cells, vals, x, i);@*/ + /*@list ys = take(i, cells);@*/ + /*@list zs = drop(i, cells);@*/ + /*@list vs = take(i, vals);@*/ + /*@list ws = drop(i, vals);@*/ + /*@list ts = take(i, owners);@*/ + /*@list us = drop(i, owners);@*/ + /*@assert length(ys) == length(vs);@*/ + /*@assert length(zs) == length(ws);@*/ + /*@assert length(ts) == length(vs);@*/ + /*@assert length(us) == length(ws);@*/ + /*@assert DLS(end, endprev, x, ?xprev, ys, vs, ts, l);@*/ /*< DLS1 (ys, vs) */ + /*@assert DLS(x, xprev, end, endprev, zs, ws, us, l);@*/ /*< DLS2 (zs, ws) */ + + /* Now case split to open DLS1 and DLS2 appropriately */ + /*@ + if (end == xprev) + { + if (x == endprev) + { + //Case A + //DLS1: extract end=prev=next + open DLS(end, endprev, x, xprev, ys, vs, ts, l); + assert owners == cons(_, _); + open xLIST_ITEM(end, portMAX_DELAY, x, endprev, head(owners), l); + //DLS2: extract x + open DLS(x, xprev, end, endprev, zs, ws, us, l); + //Lengths + assert length(ys) == 1; + assert length(zs) == 1; + assert length(us) == 1; + } + else + { + //Case B + //DLS1: extract end=prev + open DLS(end, endprev, x, xprev, ys, vs, ts, l); + open xLIST_ITEM(end, portMAX_DELAY, x, endprev, head(owners), l); + //DLS2: extract next and x + open DLS(x, end, 
end, endprev, zs, ws, us, l); + assert DLS(?xnext, x, end, endprev, tail(zs), tail(ws), tail(us), l); + open DLS(xnext, x, end, endprev, tail(zs), tail(ws), tail(us), l); + open xLIST_ITEM(xnext, _, _, x, _, l); + //Lengths + assert length(ys) == 1; + } + } + else + { + if (x == endprev) + { + //Case C + //DLS1: extract end=next and prev + dls_last_mem(end, endprev, x, xprev, ys); + assert mem(xprev, ys) == true; + open DLS(end, endprev, x, xprev, ys, vs, ts, l); + open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, head(ts), l); + if (endnext == xprev) + { + open DLS(endnext, end, x, xprev, tail(ys), tail(vs), tail(ts), l); + open xLIST_ITEM(xprev, _, x, _, _, l); + } + else + { + assert DLS(endnext, end, x, xprev, tail(ys), tail(vs), tail(ts), l); + int k = index_of(xprev, tail(ys)); + dls_last_mem(endnext, end, x, xprev, tail(ys)); + split(endnext, end, x, xprev, tail(ys), tail(vs), xprev, k); + open DLS(xprev, _, x, xprev, _, _, _, l); + open xLIST_ITEM(xprev, _, x, _, _, l); + } + //DLS2: extract x + open DLS(x, xprev, end, endprev, zs, ws, us, l); + //Lengths + assert length(zs) == 1; + } + else + { + //Case D + //DLS1: extract prev + dls_last_mem(end, endprev, x, xprev, ys); + int j = index_of(xprev, ys); + open DLS(end, endprev, x, xprev, ys, vs, ts, l); + open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, head(ts), l); + if (endnext == xprev) + { + open DLS(endnext, end, x, xprev, tail(ys), tail(vs), tail(ts), l); + assert tail(ys) == singleton(xprev); + open xLIST_ITEM(xprev, _, x, _, _, l); + } + else + { + assert DLS(endnext, end, x, xprev, tail(ys), tail(vs), tail(ts), l); + int k = index_of(xprev, tail(ys)); + dls_last_mem(endnext, end, x, xprev, tail(ys)); + split(endnext, end, x, xprev, tail(ys), tail(vs), xprev, k); + open DLS(xprev, _, x, xprev, _, _, _, l); + open xLIST_ITEM(xprev, _, x, _, _, l); + } + //DLS2: extract next and x + open DLS(x, xprev, end, endprev, zs, ws, us, l); + assert xLIST_ITEM(x, _, ?xnext, _, _, l); + open 
DLS(xnext, x, end, endprev, tail(zs), tail(ws), tail(us), l); + open xLIST_ITEM(xnext, _, _, x, _, l); + } + } + @*/ + /*@drop_nth_index_of(vals, i);@*/ + /*@drop_nth_index_of(owners, i);@*/ + /*@open xLIST_ITEM(x, nth(i, vals), ?xnext, xprev, nth(i, owners), l);@*/ + +/* The list item knows which list it is in. Obtain the list from the list + * item. */ +#ifdef VERIFAST /*< const pointer declaration */ + List_t * pxList = pxItemToRemove->pxContainer; +#else + List_t * const pxList = pxItemToRemove->pxContainer; +#endif + + pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious; + pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + /* Make sure the index is left pointing to a valid item. */ + if( pxList->pxIndex == pxItemToRemove ) + { + pxList->pxIndex = pxItemToRemove->pxPrevious; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxItemToRemove->pxContainer = NULL; + ( pxList->uxNumberOfItems )--; + + return pxList->uxNumberOfItems; + + /*@ + // Reassemble DLS1 and a modified DLS2, which no longer includes x + if (end == xprev) + { + if (x == endprev) + { + //Case A + close xLIST_ITEM(end, portMAX_DELAY, _, _, _, _); + close DLS(end, end, end, end, singleton(end), singleton(portMAX_DELAY), singleton(head(owners)), l); + } + else + { + //Case B + close xLIST_ITEM(xprev, _, xnext, endprev, head(owners), l); + close DLS(end, endprev, xnext, xprev, singleton(end), singleton(portMAX_DELAY), singleton(head(owners)), l); + close xLIST_ITEM(xnext, _, _, xprev, _, l); + close DLS(xnext, xprev, end, endprev, tail(zs), tail(ws), tail(us), l); + join(end, endprev, xnext, xprev, singleton(end), singleton(portMAX_DELAY), + xnext, xprev, end, endprev, tail(zs), tail(ws)); + } + } + else + { + if (x == endprev) + { + //Case C + close xLIST_ITEM(end, _, ?endnext, xprev, head(ts), l); + close xLIST_ITEM(xprev, ?xprev_val, end, _, ?xprev_owner, l); + if (endnext == xprev) + { + 
close DLS(xprev, end, end, xprev, singleton(xprev), singleton(xprev_val), singleton(xprev_owner), l); + close DLS(end, xprev, end, xprev, cons(end, singleton(xprev)), cons(portMAX_DELAY, singleton(xprev_val)), cons(head(ts), singleton(xprev_owner)), l); + } + else + { + close DLS(xprev, ?xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val), singleton(xprev_owner), l); + assert DLS(endnext, end, xprev, xprevprev, ?cells_endnext_to_xprevprev, ?vals_endnext_to_xprevprev, _, l); + join(endnext, end, xprev, xprevprev, cells_endnext_to_xprevprev, vals_endnext_to_xprevprev, + xprev, xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val)); + close DLS(end, xprev, end, xprev, ys, vs, ts, l); + } + } + else + { + //Case D + close xLIST_ITEM(xnext, _, ?xnextnext, xprev, ?xnext_owner, l); + close DLS(xnext, xprev, end, endprev, tail(zs), tail(ws), tail(us), l); + close xLIST_ITEM(end, _, ?endnext, endprev, head(ts), l); + close xLIST_ITEM(xprev, ?xprev_val, xnext, _, ?xprev_owner, l); + if (endnext == xprev) + { + close DLS(xprev, _, xnext, xprev, singleton(xprev), singleton(xprev_val), singleton(xprev_owner), l); + close DLS(end, endprev, xnext, xprev, ys, vs, ts, l); + join(end, endprev, xnext, xprev, ys, vs, + xnext, xprev, end, endprev, tail(zs), tail(ws)); + } + else + { + close DLS(xprev, ?xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val), singleton(xprev_owner), l); + assert DLS(endnext, end, xprev, xprevprev, ?cells_endnext_to_xprevprev, ?vals_endnext_to_xprevprev, _, l); + join(endnext, end, xprev, xprevprev, cells_endnext_to_xprevprev, vals_endnext_to_xprevprev, + xprev, xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val)); + close DLS(end, endprev, xnext, xprev, ys, vs, ts, l); + join(end, endprev, xnext, xprev, ys, vs, + xnext, xprev, end, endprev, tail(zs), tail(ws)); + } + } + } + @*/ + /*@remove_remove_nth(cells, x);@*/ + /*@ + if (idx == x) + { + close xLIST(l, len-1, xprev, end, append(ys, tail(zs)), append(vs, 
tail(ws)), append(ts, tail(us))); + } + else + { + idx_remains_in_list(cells, idx, x, i); + close xLIST(l, len-1, idx, end, append(ys, tail(zs)), append(vs, tail(ws)), append(ts, tail(us))); + } + @*/ + /*@close xLIST_ITEM(x, nth(i, vals), xnext, xprev, nth(i, owners), NULL);@*/ +} +#else + // Contract and proof written by Aalok Thakkar and Nathan Chong for the + // single-core setup in 2020. + + /*@requires + exists(?l) &*& + xLIST(l, ?len, ?idx, ?end, ?cells, ?vals) &*& + end != pxItemToRemove &*& + mem(pxItemToRemove, cells) == true;@*/ + /*@ensures + result == len-1 &*& + xLIST_ITEM(pxItemToRemove, nth(index_of(pxItemToRemove, cells), vals), _, ?pxItemToRemovePrevious, NULL) &*& + pxItemToRemovePrevious == nth(index_of(pxItemToRemove, cells)-1, cells) &*& + xLIST(l, result, idx == pxItemToRemove ? pxItemToRemovePrevious : idx, end, remove(pxItemToRemove, cells), remove_nth(index_of(pxItemToRemove, cells), vals));@*/ + { + /* For brevity we alias x to pxItemToRemove */ + /*@struct xLIST_ITEM *x = pxItemToRemove;@*/ + + /* Start by establishing that the list must be non-empty since x != end */ + /*@open xLIST(l, len, idx, end, cells, vals);@*/ + /*@assert DLS(end, ?endprev, end, _, cells, vals, l);@*/ + /*@assert vals == cons(portMAX_DELAY, _);@*/ + /*@dls_not_empty(end, endprev, cells, x);@*/ + + /* We know the xLIST is a DLS: end...endprev + Split this into DLS1:end...xprev and DLS2:x...endprev */ + /*@int i = index_of(x, cells);@*/ + /*@split(end, endprev, end, endprev, cells, vals, x, i);@*/ + /*@list ys = take(i, cells);@*/ + /*@list zs = drop(i, cells);@*/ + /*@list vs = take(i, vals);@*/ + /*@list ws = drop(i, vals);@*/ + /*@assert length(ys) == length(vs);@*/ + /*@assert length(zs) == length(ws);@*/ + /*@assert DLS(end, endprev, x, ?xprev, ys, vs, l);@*/ /*< DLS1 (ys, vs) */ + /*@assert DLS(x, xprev, end, endprev, zs, ws, l);@*/ /*< DLS2 (zs, ws) */ + + /* Now case split to open DLS1 and DLS2 appropriately */ + /*@ + if (end == xprev) + { + if (x == 
endprev) + { + //Case A + //DLS1: extract end=prev=next + open DLS(end, endprev, x, xprev, ys, vs, l); + open xLIST_ITEM(end, portMAX_DELAY, x, endprev, l); + //DLS2: extract x + open DLS(x, xprev, end, endprev, zs, ws, l); + //Lengths + assert length(ys) == 1; + assert length(zs) == 1; + } + else + { + //Case B + //DLS1: extract end=prev + open DLS(end, endprev, x, xprev, ys, vs, l); + open xLIST_ITEM(end, portMAX_DELAY, x, endprev, l); + //DLS2: extract next and x + open DLS(x, end, end, endprev, zs, ws, l); + assert DLS(?xnext, x, end, endprev, tail(zs), tail(ws), l); + open DLS(xnext, x, end, endprev, tail(zs), tail(ws), l); + open xLIST_ITEM(xnext, _, _, x, l); + //Lengths + assert length(ys) == 1; + } + } + else + { + if (x == endprev) + { + //Case C + //DLS1: extract end=next and prev + dls_last_mem(end, endprev, x, xprev, ys); + assert mem(xprev, ys) == true; + open DLS(end, endprev, x, xprev, ys, vs, l); + open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, l); + if (endnext == xprev) + { + open DLS(endnext, end, x, xprev, tail(ys), tail(vs), l); + open xLIST_ITEM(xprev, _, x, _, l); + } + else + { + assert DLS(endnext, end, x, xprev, tail(ys), tail(vs), l); + int k = index_of(xprev, tail(ys)); + dls_last_mem(endnext, end, x, xprev, tail(ys)); + split(endnext, end, x, xprev, tail(ys), tail(vs), xprev, k); + open DLS(xprev, _, x, xprev, _, _, l); + open xLIST_ITEM(xprev, _, x, _, l); + } + //DLS2: extract x + open DLS(x, xprev, end, endprev, zs, ws, l); + //Lengths + assert length(zs) == 1; + } + else + { + //Case D + //DLS1: extract prev + dls_last_mem(end, endprev, x, xprev, ys); + int j = index_of(xprev, ys); + open DLS(end, endprev, x, xprev, ys, vs, l); + open xLIST_ITEM(end, portMAX_DELAY, ?endnext, endprev, l); + if (endnext == xprev) + { + open DLS(endnext, end, x, xprev, tail(ys), tail(vs), l); + assert tail(ys) == singleton(xprev); + open xLIST_ITEM(xprev, _, x, _, l); + } + else + { + assert DLS(endnext, end, x, xprev, tail(ys), tail(vs), l); 
+ int k = index_of(xprev, tail(ys)); + dls_last_mem(endnext, end, x, xprev, tail(ys)); + split(endnext, end, x, xprev, tail(ys), tail(vs), xprev, k); + open DLS(xprev, _, x, xprev, _, _, l); + open xLIST_ITEM(xprev, _, x, _, l); + } + //DLS2: extract next and x + open DLS(x, xprev, end, endprev, zs, ws, l); + assert xLIST_ITEM(x, _, ?xnext, _, l); + open DLS(xnext, x, end, endprev, tail(zs), tail(ws), l); + open xLIST_ITEM(xnext, _, _, x, l); + } + } + @*/ + /*@drop_nth_index_of(vals, i);@*/ + /*@open xLIST_ITEM(x, nth(i, vals), ?xnext, xprev, l);@*/ + + /* The list item knows which list it is in. Obtain the list from the list + * item. */ + #ifdef VERIFAST /*< const pointer declaration */ + List_t * pxList = pxItemToRemove->pxContainer; + #else + List_t * const pxList = pxItemToRemove->pxContainer; + #endif + + pxItemToRemove->pxNext->pxPrevious = pxItemToRemove->pxPrevious; + pxItemToRemove->pxPrevious->pxNext = pxItemToRemove->pxNext; + + /* Only used during decision coverage testing. */ + mtCOVERAGE_TEST_DELAY(); + + /* Make sure the index is left pointing to a valid item. 
*/ + if( pxList->pxIndex == pxItemToRemove ) + { + pxList->pxIndex = pxItemToRemove->pxPrevious; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxItemToRemove->pxContainer = NULL; + ( pxList->uxNumberOfItems )--; + + return pxList->uxNumberOfItems; + + /*@ + // Reassemble DLS1 and a modified DLS2, which no longer includes x + if (end == xprev) + { + if (x == endprev) + { + //Case A + close xLIST_ITEM(end, portMAX_DELAY, _, _, _); + close DLS(end, end, end, end, singleton(end), singleton(portMAX_DELAY), l); + } + else + { + //Case B + close xLIST_ITEM(xprev, _, xnext, endprev, l); + close DLS(end, endprev, xnext, xprev, singleton(end), singleton(portMAX_DELAY), l); + close xLIST_ITEM(xnext, _, _, xprev, l); + close DLS(xnext, xprev, end, endprev, tail(zs), tail(ws), l); + join(end, endprev, xnext, xprev, singleton(end), singleton(portMAX_DELAY), + xnext, xprev, end, endprev, tail(zs), tail(ws)); + } + } + else + { + if (x == endprev) + { + //Case C + close xLIST_ITEM(end, _, ?endnext, xprev, l); + close xLIST_ITEM(xprev, ?xprev_val, end, _, l); + if (endnext == xprev) + { + close DLS(xprev, end, end, xprev, singleton(xprev), singleton(xprev_val), l); + close DLS(end, xprev, end, xprev, cons(end, singleton(xprev)), cons(portMAX_DELAY, singleton(xprev_val)), l); + } + else + { + close DLS(xprev, ?xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val), l); + assert DLS(endnext, end, xprev, xprevprev, ?cells_endnext_to_xprevprev, ?vals_endnext_to_xprevprev, l); + join(endnext, end, xprev, xprevprev, cells_endnext_to_xprevprev, vals_endnext_to_xprevprev, + xprev, xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val)); + close DLS(end, xprev, end, xprev, ys, vs, l); + } + } + else + { + //Case D + close xLIST_ITEM(xnext, _, ?xnextnext, xprev, l); + close DLS(xnext, xprev, end, endprev, tail(zs), tail(ws), l); + close xLIST_ITEM(end, _, ?endnext, endprev, l); + close xLIST_ITEM(xprev, ?xprev_val, xnext, _, l); + if (endnext == xprev) + { + close 
DLS(xprev, _, xnext, xprev, singleton(xprev), singleton(xprev_val), l); + close DLS(end, endprev, xnext, xprev, ys, vs, l); + join(end, endprev, xnext, xprev, ys, vs, + xnext, xprev, end, endprev, tail(zs), tail(ws)); + } + else + { + close DLS(xprev, ?xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val), l); + assert DLS(endnext, end, xprev, xprevprev, ?cells_endnext_to_xprevprev, ?vals_endnext_to_xprevprev, l); + join(endnext, end, xprev, xprevprev, cells_endnext_to_xprevprev, vals_endnext_to_xprevprev, + xprev, xprevprev, xnext, xprev, singleton(xprev), singleton(xprev_val)); + close DLS(end, endprev, xnext, xprev, ys, vs, l); + join(end, endprev, xnext, xprev, ys, vs, + xnext, xprev, end, endprev, tail(zs), tail(ws)); + } + } + } + @*/ + /*@remove_remove_nth(cells, x);@*/ + /*@ + if (idx == x) + { + close xLIST(l, len-1, xprev, end, append(ys, tail(zs)), append(vs, tail(ws))); + } + else + { + idx_remains_in_list(cells, idx, x, i); + close xLIST(l, len-1, idx, end, append(ys, tail(zs)), append(vs, tail(ws))); + } + @*/ + /*@close xLIST_ITEM(x, nth(i, vals), xnext, xprev, NULL);@*/ + } + + + +#endif /* VERIFAST_SINGLE_CORE */ + +/*-----------------------------------------------------------*/ diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/src/tasks.c b/Test/VeriFast/tasks/vTaskSwitchContext/src/tasks.c new file mode 100644 index 00000000000..1506b11cea3 --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/src/tasks.c @@ -0,0 +1,6999 @@ +/* + * FreeRTOS SMP Kernel V202110.00 + * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
 + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of + * this software and associated documentation files (the "Software"), to deal in + * the Software without restriction, including without limitation the rights to + * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + * the Software, and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * https://www.FreeRTOS.org + * https://github.com/FreeRTOS + * + */ + +#ifdef VERIFAST + /* Ghost header include must occur before any non-ghost includes or other + * non-ghost code. Otherwise VeriFast will report an unspecific parse error. + */ + + //@ #include + //@ #include "list.gh" + //@ #include + + /* The following includes will be visible to VeriFast in the preprocessed + * code. VeriFast requires includes to occur before definitions. Hence, + * all includes visible to VeriFast must occur before the preprocessed + * ones. + */ + //VF_macro #include "FreeRTOSConfig.h" + + //VF_macro #define NULL 0 +#endif /* VERIFAST */ + + +/* Standard includes. */ +#include <stdlib.h> +#include <string.h> + +/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining + * all the API functions to use the MPU wrappers. That should only be done when + * task.h is included from an application file. 
*/ +#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE + +/* FreeRTOS includes. */ +#include "FreeRTOS.h" +#include "task.h" +#include "timers.h" + +#ifndef VERIFAST + /* Reason for rewrite: + * The stack macros rely on macros defined later in this file, e.g., + * `pxCurrentTCB`. We need to delay this inclusion until the task macros + * have been defined. Otherwise, VeriFast will report unknown symbols when + * checking the stack macro proofs. + */ + #include "stack_macros.h" +#endif /* VERIFAST */ + +/* Verifast proof setup + * + * Note that redefinitions of macros must be included after + * original ones have been included. + */ +#ifdef VERIFAST + #include "proof_defs.h" + #include "stack_predicates.h" + #include "task_predicates.h" + #include "ready_list_predicates.h" + #include "asm.h" + #include "port_locking_contracts.h" + #include "lock_predicates.h" + #include "verifast_lists_extended.h" + #include "single_core_proofs/scp_list_predicates.h" + #include "single_core_proofs_extended/scp_list_predicates_extended.h" + + #include "list.c" +#endif + +/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified + * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined + * for the header files above, but not in this file, in order to generate the + * correct privileged Vs unprivileged linkage and placement. */ +#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */ + +/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting + * functions but without including stdio.h here. */ +#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) + +/* At the bottom of this file are two optional functions that can be used + * to generate human readable text from the raw data generated by the + * uxTaskGetSystemState() function. Note the formatting functions are provided + * for convenience only, and are NOT considered part of the kernel. 
*/ + #include +#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */ + +#if ( configUSE_PREEMPTION == 0 ) + +/* If the cooperative scheduler is being used then a yield should not be + * performed just because a higher priority task has been woken. */ + #define taskYIELD_IF_USING_PREEMPTION() +#else + #define taskYIELD_IF_USING_PREEMPTION() vTaskYieldWithinAPI() +#endif + +/* Values that can be assigned to the ucNotifyState member of the TCB. */ +#define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */ +#define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 ) +#define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 ) + +/* + * The value used to fill the stack of a task when the task is created. This + * is used purely for checking the high water mark for tasks. + */ +#define tskSTACK_FILL_BYTE ( 0xa5U ) + +/* Bits used to record how a task's stack and TCB were allocated. */ +#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 ) +#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 ) +#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 ) + +/* If any of the following are set then task stacks are filled with a known + * value so the high water mark can be determined. If none of the following are + * set then don't fill the stack so there is no unnecessary dependency on memset. */ +#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1 +#else + #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0 +#endif + +/* + * Macros used by vListTask to indicate which state a task is in. 
+ */ +#define tskRUNNING_CHAR ( 'X' ) +#define tskBLOCKED_CHAR ( 'B' ) +#define tskREADY_CHAR ( 'R' ) +#define tskDELETED_CHAR ( 'D' ) +#define tskSUSPENDED_CHAR ( 'S' ) + +/* + * Some kernel aware debuggers require the data the debugger needs access to to + * be global, rather than file scope. + */ +#ifdef portREMOVE_STATIC_QUALIFIER + #define static +#endif + +/* The name allocated to the Idle task. This can be overridden by defining + * configIDLE_TASK_NAME in FreeRTOSConfig.h. */ +#ifndef configIDLE_TASK_NAME + #define configIDLE_TASK_NAME "IDLE" +#endif + +#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + +/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is + * performed in a generic way that is not optimised to any particular + * microcontroller architecture. */ + +/* uxTopReadyPriority holds the priority of the highest priority ready + * state task. */ + #define taskRECORD_READY_PRIORITY( uxPriority ) \ + { \ + if( ( uxPriority ) > uxTopReadyPriority ) \ + { \ + uxTopReadyPriority = ( uxPriority ); \ + } \ + } /* taskRECORD_READY_PRIORITY */ + + /*-----------------------------------------------------------*/ + +/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as + * they are only required when a port optimised method of task selection is + * being used. */ + #define taskRESET_READY_PRIORITY( uxPriority ) + #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + +#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + + #error configUSE_PORT_OPTIMISED_TASK_SELECTION not yet supported in SMP + +/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is + * performed in a way that is tailored to the particular microcontroller + * architecture being used. */ + +/* A port optimised version is provided. Call the port defined macros. 
*/ + #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority ) + + /*-----------------------------------------------------------*/ + +/* A port optimised version is provided, call it only if the TCB being reset + * is being referenced from a ready list. If it is referenced from a delayed + * or suspended list then it won't be in a ready list. */ + #define taskRESET_READY_PRIORITY( uxPriority ) \ + { \ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \ + { \ + portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \ + } \ + } + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + +/*-----------------------------------------------------------*/ + +/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick + * count overflows. */ +#define taskSWITCH_DELAYED_LISTS() \ + { \ + List_t * pxTemp; \ + \ + /* The delayed tasks list should be empty when the lists are switched. */ \ + configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \ + \ + pxTemp = pxDelayedTaskList; \ + pxDelayedTaskList = pxOverflowDelayedTaskList; \ + pxOverflowDelayedTaskList = pxTemp; \ + xNumOfOverflows++; \ + prvResetNextTaskUnblockTime(); \ + } + +/*-----------------------------------------------------------*/ + +/* + * Place the task represented by pxTCB into the appropriate ready list for + * the task. It is inserted at the end of the list. 
+ */ +#define prvAddTaskToReadyList( pxTCB ) \ + traceMOVED_TASK_TO_READY_STATE( pxTCB ); \ + taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \ + vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \ + tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB ) +/*-----------------------------------------------------------*/ + +/* + * Several functions take a TaskHandle_t parameter that can optionally be NULL, + * where NULL is used to indicate that the handle of the currently executing + * task should be used in place of the parameter. This macro simply checks to + * see if the parameter is NULL and returns a pointer to the appropriate TCB. + */ +#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) ) + +/* The item value of the event list item is normally used to hold the priority + * of the task to which it belongs (coded to allow it to be held in reverse + * priority order). However, it is occasionally borrowed for other purposes. It + * is important its value is not updated due to a task priority change while it is + * being used for another purpose. The following bit definition is used to inform + * the scheduler that the value should not be changed - in which case it is the + * responsibility of whichever module is using the value to ensure it gets set back + * to its original value when it is released. */ +#if ( configUSE_16_BIT_TICKS == 1 ) + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U +#else + #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL +#endif + +/* Indicates that the task is not actively running on any core. */ +#define taskTASK_NOT_RUNNING ( TaskRunning_t ) ( -1 ) + +/* Indicates that the task is actively running but scheduled to yield. */ +#define taskTASK_YIELDING ( TaskRunning_t ) ( -2 ) + +/* Returns pdTRUE if the task is actively running and not scheduled to yield. 
*/ +#define taskTASK_IS_RUNNING( xTaskRunState ) ( ( 0 <= xTaskRunState ) && ( xTaskRunState < configNUM_CORES ) ) + +typedef BaseType_t TaskRunning_t; + +/* + * Task control block. A task control block (TCB) is allocated for each task, + * and stores task state information, including a pointer to the task's context + * (the task's run time environment, including register values) + */ +typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */ +{ + volatile StackType_t * pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */ + #endif + + #if ( configUSE_CORE_AFFINITY == 1 && configNUM_CORES > 1 ) + UBaseType_t uxCoreAffinityMask; /*< Used to link the task to certain cores. UBaseType_t must have >= the same number of bits as SMP confNUM_CORES */ + #endif + + ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */ + ListItem_t xEventListItem; /*< Used to reference a task from an event list. */ + UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */ + StackType_t * pxStack; /*< Points to the start of the stack. */ + volatile TaskRunning_t xTaskRunState; /*< Used to identify the core the task is running on, if any. */ + BaseType_t xIsIdle; /*< Used to identify the idle tasks. */ + char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + BaseType_t xPreemptionDisable; /*< Used to prevent the task from being preempted */ + #endif + + #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) ) + StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */ + #endif + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */ + #endif + + #if ( configUSE_TRACE_FACILITY == 1 ) + UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */ + UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */ + #endif + + #if ( configUSE_MUTEXES == 1 ) + UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */ + UBaseType_t uxMutexesHeld; + #endif + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + TaskHookFunction_t pxTaskTag; + #endif + + #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) + void * pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; + #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */ + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + /* Allocate a Newlib reent structure that is specific to this task. + * Note Newlib support has been included by popular demand, but is not + * used by the FreeRTOS maintainers themselves. FreeRTOS is not + * responsible for resulting newlib operation. User must be familiar with + * newlib and must provide system-wide implementations of the necessary + * stubs. Be warned that (at the time of writing) the current newlib design + * implements a system-wide malloc() that must be provided with locks. 
+ * + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. */ + struct _reent xNewLib_reent; + #endif + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ]; + volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ]; + #endif + + /* See the comments in FreeRTOS.h with the definition of + * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */ + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */ + #endif + + #if ( INCLUDE_xTaskAbortDelay == 1 ) + uint8_t ucDelayAborted; + #endif + + #if ( configUSE_POSIX_ERRNO == 1 ) + int iTaskErrno; + #endif +} tskTCB; + +/* The old tskTCB name is maintained above then typedefed to the new TCB_t name + * below to enable the use of older kernel aware debuggers. */ +typedef tskTCB TCB_t; + +/*lint -save -e956 A manual analysis and inspection has been used to determine + * which static variables must be declared volatile. */ +PRIVILEGED_DATA TCB_t * volatile pxCurrentTCBs[ configNUM_CORES ] = { NULL }; +#define pxCurrentTCB xTaskGetCurrentTaskHandle() + +/* Lists for ready and blocked tasks. -------------------- + * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but + * doing so breaks some kernel aware debuggers and debuggers that rely on removing + * the static qualifier. */ +PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */ +PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. 
*/ +PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */ +PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */ +PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */ + + +#ifdef VERIFAST + /* Reason for rewrite: + * The stack macros rely on some of the macros defined above, e.g., + * `pxCurrentTCB`. We need to delay this inclusion until the relevant task + * macros have been defined. Otherwise, VeriFast will report unknown symbols + * when checking the stack macro proofs. + */ + #include "stack_macros.h" +#endif /* VERIFAST */ + + +#if ( INCLUDE_vTaskDelete == 1 ) + + PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */ + PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U; + +#endif + +#if ( INCLUDE_vTaskSuspend == 1 ) + + PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */ + +#endif + +/* Global POSIX errno. Its value is changed upon context switching to match + * the errno of the currently running task. */ +#if ( configUSE_POSIX_ERRNO == 1 ) + int FreeRTOS_errno = 0; +#endif + +/* Other file private variables. 
--------------------------------*/ +PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; +PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY; +PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE; +PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U; +PRIVILEGED_DATA static volatile BaseType_t xYieldPendings[ configNUM_CORES ] = { pdFALSE }; +PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0; +PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U; +PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */ +PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[ configNUM_CORES ] = { NULL }; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */ + +#define xYieldPending prvGetCurrentYieldPending() + +/* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists. + * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority + * to determine the number of priority lists to read back from the remote target. */ +const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U; + +/* Context switches are held pending while the scheduler is suspended. Also, + * interrupts must not manipulate the xStateListItem of a TCB, or any of the + * lists the xStateListItem can be referenced from, if the scheduler is suspended. + * If an interrupt needs to unblock a task while the scheduler is suspended then it + * moves the task's event list item into the xPendingReadyList, ready for the + * kernel to move the task from the pending ready list into the real ready list + * when the scheduler is unsuspended. 
The pending ready list itself can only be + * accessed from a critical section. + * + * Updates to uxSchedulerSuspended must be protected by both the task and ISR locks and + * must not be done by an ISR. Reads must be protected by either lock and may be done by + * either an ISR or a task. */ +PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE; + +#if ( configGENERATE_RUN_TIME_STATS == 1 ) + +/* Do not move these variables to function scope as doing so prevents the + * code working with debuggers that need to remove the static qualifier. */ + PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */ + PRIVILEGED_DATA static volatile uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */ + +#endif + +/*lint -restore */ + +/*-----------------------------------------------------------*/ + +/* File private functions. --------------------------------*/ + +/* + * Creates the idle tasks during scheduler start + */ +static BaseType_t prvCreateIdleTasks( void ); + +/* + * Returns the yield pending count for the calling core. + */ +static BaseType_t prvGetCurrentYieldPending( void ); + +/* + * Checks to see if another task moved the current task out of the ready + * list while it was waiting to enter a critical section and yields if so. + */ +static void prvCheckForRunStateChange( void ); + +/* + * Yields the given core. + */ +static void prvYieldCore( BaseType_t xCoreID ); + +/* + * Yields a core, or cores if multiple priorities are not allowed to run + * simultaneously, to allow the task pxTCB to run. 
+ */ +static void prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority ); + +/* + * Selects the highest priority available task + */ +static BaseType_t prvSelectHighestPriorityTask( const BaseType_t xCoreID ); + +/** + * Utility task that simply returns pdTRUE if the task referenced by xTask is + * currently in the Suspended state, or pdFALSE if the task referenced by xTask + * is in any other state. + */ +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION; + +#endif /* INCLUDE_vTaskSuspend */ + +/* + * Utility to ready all the lists used by the scheduler. This is called + * automatically upon the creation of the first task. + */ +static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION; + +/* + * The idle task, which as all tasks is implemented as a never ending loop. + * The idle task is automatically created and added to the ready lists upon + * creation of the first user task. + * + */ +static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +#if ( configNUM_CORES > 1 ) + static portTASK_FUNCTION_PROTO( prvMinimalIdleTask, pvParameters ) PRIVILEGED_FUNCTION; +#endif + +/* + * Utility to free all memory allocated by the scheduler to hold a TCB, + * including the stack pointed to by the TCB. + * + * This does not free memory allocated by the task itself (i.e. memory + * allocated by calls to pvPortMalloc from within the tasks application code). + */ +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t * pxTCB ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Used only by the idle task. This checks to see if anything has been placed + * in the list of tasks waiting to be deleted. If so the task is cleaned up + * and its TCB deleted. + */ +static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION; + +/* + * The currently executing task is entering the Blocked state. 
Add the task to + * either the current or the overflow delayed task list. + */ +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, + const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION; + +/* + * Fills an TaskStatus_t structure with information on each task that is + * referenced from the pxList list (which may be a ready list, a delayed list, + * a suspended list, etc.). + * + * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM + * NORMAL APPLICATION CODE. + */ +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray, + List_t * pxList, + eTaskState eState ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Searches pxList for a task with name pcNameToQuery - returning a handle to + * the task if it is found, or NULL if the task is not found. + */ +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, + const char pcNameToQuery[] ) PRIVILEGED_FUNCTION; + +#endif + +/* + * When a task is created, the stack of the task is filled with a known value. + * This function determines the 'high water mark' of the task stack by + * determining how much of the stack remains at the original preset value. + */ +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + + static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Return the amount of time, in ticks, that will pass before the kernel will + * next move a task from the Blocked state to the Running state. + * + * This conditional compilation should use inequality to 0, not equality to 1. + * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user + * defined low power mode implementations require configUSE_TICKLESS_IDLE to be + * set to a value other than 1. 
+ */ +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Set xNextTaskUnblockTime to the time at which the next Blocked state task + * will exit the Blocked state. + */ +static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION; + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + +/* + * Helper function used to pad task names with spaces when printing out + * human readable tables of task information. + */ + static char * prvWriteNameToBuffer( char * pcBuffer, + const char * pcTaskName ) PRIVILEGED_FUNCTION; + +#endif + +/* + * Called after a Task_t structure has been allocated either statically or + * dynamically to fill in the structure's members. + */ +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t * pxNewTCB, + const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION; + +/* + * Called after a new task has been created and initialised to place the task + * under the control of the scheduler. + */ +static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION; + +/* + * freertos_tasks_c_additions_init() should only be called if the user definable + * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro + * called by the function. + */ +#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + + static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION; + +#endif + +/*-----------------------------------------------------------*/ + +#ifndef VERIFAST + /* Reason for rewrite: + * VeriFast cannot handle inline assembler and both `portDISABLE_INTERRUPTS` + * and `portRESTORE_INTERRUPTS` expand to inline assembler instructions. 
+ */ + static BaseType_t prvGetCurrentYieldPending( void ) + { + BaseType_t xReturn; + UBaseType_t ulState; + + ulState = portDISABLE_INTERRUPTS(); + xReturn = xYieldPendings[ portGET_CORE_ID() ]; + portRESTORE_INTERRUPTS( ulState ); + + return xReturn; + } +#endif /* VERIFAST */ + +/*-----------------------------------------------------------*/ + +#ifndef VERIFAST + /* Reason for rewrite: + * VeriFast cannot handle inline assembler and `portCHECK_IF_IN_ISR` + * expands to inline assembler. + */ +static void prvCheckForRunStateChange( void ) +{ + UBaseType_t uxPrevCriticalNesting; + UBaseType_t uxPrevSchedulerSuspended; + TCB_t * pxThisTCB; + + /* This should be skipped when entering a critical section within + * an ISR. If the task on the current core is no longer running, then + * vTaskSwitchContext() probably should be run before returning, but + * we don't have a way to force that to happen from here. */ + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + /* This function is always called with interrupts disabled + * so this is safe. */ + pxThisTCB = pxCurrentTCBs[ portGET_CORE_ID() ]; + + while( pxThisTCB->xTaskRunState == taskTASK_YIELDING ) + { + /* We are only here if we just entered a critical section + * or if we just suspended the scheduler, and another task + * has requested that we yield. + * + * This is slightly complicated since we need to save and restore + * the suspension and critical nesting counts, as well as release + * and reacquire the correct locks. And then do it all over again + * if our state changed again during the reacquisition. */ + + uxPrevCriticalNesting = pxThisTCB->uxCriticalNesting; + uxPrevSchedulerSuspended = uxSchedulerSuspended; + + /* this must only be called the first time we enter into a critical + * section, otherwise it could context switch in the middle of a + * critical section. 
*/ + configASSERT( uxPrevCriticalNesting + uxPrevSchedulerSuspended == 1U ); + + uxSchedulerSuspended = 0U; + + if( uxPrevCriticalNesting > 0U ) + { + pxThisTCB->uxCriticalNesting = 0U; + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); + } + else + { + /* uxPrevSchedulerSuspended must be 1 */ + portRELEASE_TASK_LOCK(); + } + + portMEMORY_BARRIER(); + configASSERT( pxThisTCB->xTaskRunState == taskTASK_YIELDING ); + + portENABLE_INTERRUPTS(); + + /* Enabling interrupts should cause this core to immediately + * service the pending interrupt and yield. If the run state is still + * yielding here then that is a problem. */ + configASSERT( pxThisTCB->xTaskRunState != taskTASK_YIELDING ); + + portDISABLE_INTERRUPTS(); + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + pxCurrentTCB->uxCriticalNesting = uxPrevCriticalNesting; + uxSchedulerSuspended = uxPrevSchedulerSuspended; + + if( uxPrevCriticalNesting == 0U ) + { + /* uxPrevSchedulerSuspended must be 1 */ + configASSERT( uxPrevSchedulerSuspended != ( UBaseType_t ) pdFALSE ); + portRELEASE_ISR_LOCK(); + } + } + } +} +#endif /* VERIFAST */ + +/*-----------------------------------------------------------*/ + +static void prvYieldCore( BaseType_t xCoreID ) +{ + /* This must be called from a critical section and + * xCoreID must be valid. 
*/ + + if( portCHECK_IF_IN_ISR() && ( xCoreID == portGET_CORE_ID() ) ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + else if( pxCurrentTCBs[ xCoreID ]->xTaskRunState != taskTASK_YIELDING ) + { + if( xCoreID == portGET_CORE_ID() ) + { + xYieldPendings[ xCoreID ] = pdTRUE; + } + #if ( configNUM_CORES > 1 ) + else + { + portYIELD_CORE( xCoreID ); + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_YIELDING; + } + #endif + } +} + +/*-----------------------------------------------------------*/ + +static void prvYieldForTask( TCB_t * pxTCB, + const BaseType_t xPreemptEqualPriority ) +{ + BaseType_t xLowestPriority; + BaseType_t xTaskPriority; + BaseType_t xLowestPriorityCore = -1; + BaseType_t xYieldCount = 0; + BaseType_t x; + TaskRunning_t xTaskRunState; + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION */ + + configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + { + /* No task should yield for this one if it is a lower priority + * than priority level of currently ready tasks. */ + if( pxTCB->uxPriority < uxTopReadyPriority ) + { + return; + } + } + #endif + + xLowestPriority = ( BaseType_t ) pxTCB->uxPriority; + + if( xPreemptEqualPriority == pdFALSE ) + { + /* xLowestPriority will be decremented to -1 if the priority of pxTCB + * is 0. This is ok as we will give system idle tasks a priority of -1 below. 
*/ + --xLowestPriority; + } + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ) + { + /* System idle tasks are being assigned a priority of tskIDLE_PRIORITY - 1 here */ + xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ x ]->uxPriority - pxCurrentTCBs[ x ]->xIsIdle; + xTaskRunState = pxCurrentTCBs[ x ]->xTaskRunState; + + if( ( taskTASK_IS_RUNNING( xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ x ] == pdFALSE ) ) + { + if( xTaskPriority <= xLowestPriority ) + { + #if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << x ) ) != 0 ) + #endif + #endif + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = x; + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) && 1 + { + /* Yield all currently running non-idle tasks with a priority lower than + * the task that needs to run. 
*/ + if( ( ( BaseType_t ) tskIDLE_PRIORITY - 1 < xTaskPriority ) && ( xTaskPriority < ( BaseType_t ) pxTCB->uxPriority ) ) + { + prvYieldCore( x ); + xYieldCount++; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) && 1 */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( ( xYieldCount == 0 ) && taskVALID_CORE_ID( xLowestPriorityCore ) ) + { + prvYieldCore( xLowestPriorityCore ); + xYieldCount++; + } + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + /* Verify that the calling core always yields to higher priority tasks */ + if( !pxCurrentTCBs[ portGET_CORE_ID() ]->xIsIdle && ( pxTCB->uxPriority > pxCurrentTCBs[ portGET_CORE_ID() ]->uxPriority ) ) + { + configASSERT( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE || taskTASK_IS_RUNNING( pxCurrentTCBs[ portGET_CORE_ID() ]->xTaskRunState ) == pdFALSE ); + } + #endif +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + + static BaseType_t prvSelectHighestPriorityTask( const BaseType_t xCoreID ) + /*@ requires 0 <= xCoreID &*& xCoreID < configNUM_CORES &*& + xCoreID == coreID_f() &*& + // interrupts are disabled and locks acquired + interruptState_p(xCoreID, ?state) &*& + interruptsDisabled_f(state) == true &*& + taskLockInv_p() &*& + isrLockInv_p() &*& + taskISRLockInv_p() + &*& + // opened predicate `coreLocalInterruptInv_p()` + [1/2]pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB0) &*& + integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _); + @*/ + /*@ ensures 0 <= xCoreID &*& xCoreID < configNUM_CORES &*& + xCoreID == coreID_f() &*& + // interrupts are disabled and locks acquired + interruptState_p(xCoreID, state) &*& + interruptsDisabled_f(state) == true &*& + taskLockInv_p() &*& + isrLockInv_p() &*& + taskISRLockInv_p() + &*& + // opened predicate `coreLocalInterruptInv_p()` + 
[1/2]pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*& + integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _); + @*/ + { + //@ open taskISRLockInv_p(); + //@ open _taskISRLockInv_p(?gTopReadyPriority0); + //@ assert( integer_((void*) &uxTopReadyPriority, sizeof(UBaseType_t), false, gTopReadyPriority0) ); + //@ assert( gTopReadyPriority0 == uxTopReadyPriority); + UBaseType_t uxCurrentPriority = uxTopReadyPriority; + BaseType_t xTaskScheduled = pdFALSE; + BaseType_t xDecrementTopPriority = pdTRUE; + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + TCB_t * pxPreviousTCB = NULL; + #endif + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + BaseType_t xPriorityDropped = pdFALSE; + #endif + //@ close _taskISRLockInv_p(gTopReadyPriority0); + + while( xTaskScheduled == pdFALSE ) + /*@ invariant + // requires clause + 0 <= xCoreID &*& xCoreID < configNUM_CORES &*& + xCoreID == coreID_f() &*& + // interrupts are disabled and locks acquired + interruptState_p(xCoreID, state) &*& + interruptsDisabled_f(state) == true &*& + taskLockInv_p() &*& + isrLockInv_p() &*& + _taskISRLockInv_p(?gTopReadyPriority) + &*& + // opened predicate `coreLocalInterruptInv_p()` + [0.5]pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*& + integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) + &*& + // additional knowledge + (xTaskScheduled == 0 + ? (0 <= uxCurrentPriority &*& uxCurrentPriority <= gTopReadyPriority &*& + gTopReadyPriority < configMAX_PRIORITIES + ) : true + ); + @*/ + { + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + { + if( uxCurrentPriority < uxTopReadyPriority ) + { + /* We can't schedule any tasks, other than idle, that have a + * priority lower than the priority of a task currently running + * on another core. 
*/ + uxCurrentPriority = tskIDLE_PRIORITY; + } + } + #endif + + //@ open _taskISRLockInv_p(gTopReadyPriority); + //@ assert( exists_in_taskISRLockInv_p(?gTasks, ?gStates0) ); + //@ assert( integer_((void*) &uxTopReadyPriority, sizeof(UBaseType_t), false, gTopReadyPriority) ); + //@ assert( gTopReadyPriority == uxTopReadyPriority); + + //@ open readyLists_p(?gCellLists, ?gOwnerLists); + //@ assert( List_array_p(&pxReadyTasksLists, configMAX_PRIORITIES, gCellLists, gOwnerLists) ); + //@ List_array_p_index_within_limits(&pxReadyTasksLists, uxCurrentPriority); + //@ List_array_split(pxReadyTasksLists, uxCurrentPriority); + //@ assert( List_array_p(&pxReadyTasksLists, uxCurrentPriority, ?gPrefCellLists, ?gPrefOwnerLists) ); + /*@ assert( List_array_p(&pxReadyTasksLists + uxCurrentPriority + 1, + configMAX_PRIORITIES-uxCurrentPriority-1, ?gSufCellLists, ?gSufOwnerLists) ); + @*/ + //@ List_t* gReadyList = &pxReadyTasksLists[uxCurrentPriority]; + + //@ assert( xLIST(gReadyList, ?gSize, ?gIndex, ?gEnd, ?gCells, ?gVals, ?gOwners) ); + //@ assert( mem(gOwners, gOwnerLists) == true ); + + //@ open xLIST(gReadyList, _, _, _, _, _, _); + //@ assert( length(gCells) == gReadyList->uxNumberOfItems + 1 ); + if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurrentPriority ] ) ) == pdFALSE ) + { + List_t * const pxReadyList = &( pxReadyTasksLists[ uxCurrentPriority ] ); + //@ assert( pxReadyList->pxIndex |-> gIndex ); + /*@ assert( DLS(gEnd, ?gEndPrev, gEnd, gEndPrev, + gCells, gVals, gOwners, gReadyList) ); + @*/ + + + //@ DLS_open_2(pxReadyList->pxIndex); + //@ assert( xLIST_ITEM(gIndex, _, ?gIndexNext, ?gIndexPrev, _, gReadyList) ); + ListItem_t * pxLastTaskItem = pxReadyList->pxIndex->pxPrevious; + ListItem_t * pxTaskItem = pxLastTaskItem; + //@ close xLIST_ITEM(gIndex, _, gIndexNext, gIndexPrev, _, gReadyList); + //@ DLS_close_2(pxReadyList->pxIndex, gCells, gVals, gOwners); + + //@ assert( mem(pxTaskItem, gCells) == true); + + //@ open DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, 
gVals, gOwners, gReadyList); + //@ assert( xLIST_ITEM(&pxReadyList->xListEnd, _, _, _, _, gReadyList) ); + //@ open xLIST_ITEM(&pxReadyList->xListEnd, _, _, _, _, gReadyList); + // opening required to prove validity of `&( pxReadyList->xListEnd )` + ///@ assert( pointer_within_limits( &pxReadyList->xListEnd ) == true ); + //@ close xLIST_ITEM(&pxReadyList->xListEnd, _, _, _, _, gReadyList); + if( ( void * ) pxLastTaskItem == ( void * ) &( pxReadyList->xListEnd ) ) + { + //@ assert( gVals == cons(?gV, ?gRest) ); + //@ assert( xLIST_ITEM(?gOldLastTaskItem, gV, ?gO, gEndPrev, _, gReadyList) ); + pxLastTaskItem = pxLastTaskItem->pxPrevious; + //@ close xLIST_ITEM(gOldLastTaskItem, gV, gO, gEndPrev, _, gReadyList); + } + //@ close DLS(gEnd, gEndPrev, gEnd, gEndPrev, gCells, gVals, gOwners, gReadyList); + //@ close xLIST(gReadyList, _, gIndex, gEnd, gCells, gVals, gOwners); + + /* The ready task list for uxCurrentPriority is not empty, so uxTopReadyPriority + * must not be decremented any further */ + xDecrementTopPriority = pdFALSE; + + //@ mem_nth(uxCurrentPriority, gCellLists); + //@ assert( mem(gCells, gCellLists) == true); + + // Prove that `gTasks` contains all tasks in current ready + //@ forall_mem(gOwners, gOwnerLists, (superset)(gTasks)); + + //@ bool gInnerLoopBroken = false; + do + /*@ invariant + 0 <= xCoreID &*& xCoreID < configNUM_CORES &*& + xCoreID == coreID_f() &*& + pointer(&pxCurrentTCBs[coreID_f], gCurrentTCB) &*& + mem(pxTaskItem, gCells) == true &*& + xLIST(gReadyList, gSize, gIndex, gEnd, gCells, gVals, gOwners) &*& + gSize > 0 &*& + exists_in_taskISRLockInv_p(gTasks, ?gStates) + &*& + // Read permissions for every task + foreach(gTasks, readOnly_TCB_runState_p(gTasks, gStates)) + &*& + // Write permission for task scheduled on this core + [1/2]TCB_runState_p(gCurrentTCB, ?gCurrentTCB_state) &*& + (gCurrentTCB_state == coreID_f() || gCurrentTCB_state == taskTASK_YIELDING) &*& + nth(index_of(gCurrentTCB, gTasks), gStates) == gCurrentTCB_state + &*& 
+ // Write permissions for unscheduled tasks + foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates)) + &*& + subset(gOwners, gTasks) == true &*& + List_array_p(&pxReadyTasksLists, uxCurrentPriority, gPrefCellLists, + gPrefOwnerLists) &*& + List_array_p(&pxReadyTasksLists + uxCurrentPriority + 1, + configMAX_PRIORITIES-uxCurrentPriority-1, gSufCellLists, + gSufOwnerLists) &*& + !gInnerLoopBroken; + + @*/ + { + TCB_t * pxTCB; + + //@ open xLIST(gReadyList, gSize, gIndex, gEnd, gCells, gVals, gOwners); + //@ assert( DLS(gEnd, ?gEndPrev2, gEnd, gEndPrev2, gCells, gVals, gOwners, gReadyList) ); + + // Building an SSA for important variables helps us to + // refer to the right instances. + //@ struct xLIST_ITEM* gTaskItem_0 = pxTaskItem; + + //@ DLS_open_2(gTaskItem_0); + pxTaskItem = pxTaskItem->pxNext; + //@ struct xLIST_ITEM* gTaskItem_1 = pxTaskItem; + + //@ close xLIST_ITEM(gTaskItem_0, _, _, _, _, gReadyList); + //@ DLS_close_2(gTaskItem_0, gCells, gVals, gOwners); + + if( ( void * ) pxTaskItem == ( void * ) &( pxReadyList->xListEnd ) ) + { + // Prove that `gTaskItem_1->pxNext != gEnd` + //@ dls_distinct(gEnd, gEndPrev2, gEnd, gEndPrev2, gCells); + //@ open DLS(gEnd, gEndPrev2, gEnd, gEndPrev2, gCells, gVals, gOwners, gReadyList); + //@ open DLS(?gTaskItem_1_next, _, gEnd, gEndPrev2, _, _, _, gReadyList); + //@ assert( gTaskItem_1_next != gEnd ); + /*@ close DLS(gTaskItem_1_next, _, gEnd, gEndPrev2, + tail(gCells), tail(gVals), tail(gOwners), _); + @*/ + + pxTaskItem = pxTaskItem->pxNext; + //@ struct xLIST_ITEM* gTaskItem_2 = pxTaskItem; + + //@ close xLIST_ITEM(gTaskItem_1, _, _, _, _, gReadyList); + //@ close DLS(gEnd, gEndPrev2, gEnd, gEndPrev2, gCells, gVals, gOwners, gReadyList); + } + //@ struct xLIST_ITEM* gTaskItem_final = pxTaskItem; + + //@ DLS_open_2(gTaskItem_final); + pxTCB = pxTaskItem->pvOwner; + /*@ close xLIST_ITEM(gTaskItem_final, _, _, _, + pxTCB, gReadyList); + @*/ + //@ DLS_close_2(gTaskItem_final, gCells, gVals, gOwners); + 
+ // Getting read access to fields of `pxTCB` + // aka first half of write permission + //@ assert( subset(gOwners, gTasks) == true ); + //@ mem_subset(pxTCB, gOwners, gTasks); + //@ foreach_remove(pxTCB, gTasks); + //@ assert( foreach(remove(pxTCB, gTasks), readOnly_TCB_runState_p(gTasks, gStates)) ); + + /*debug_printf("Attempting to schedule %s on core %d\n", pxTCB->pcTaskName, portGET_CORE_ID() ); */ + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + { + /* When falling back to the idle priority because only one priority + * level is allowed to run at a time, we should ONLY schedule the true + * idle tasks, not user tasks at the idle priority. */ + if( uxCurrentPriority < uxTopReadyPriority ) + { + if( pxTCB->xIsIdle == pdFALSE ) + { + continue; + } + } + } + #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) */ + + //@ bool gPxTCB_not_running = (pxTCB->xTaskRunState == taskTASK_NOT_RUNNING); + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) + { + #if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + #endif + #endif + { + //@ open exists_in_taskISRLockInv_p(gTasks, gStates); + //@ assert( nth(index_of(pxTCB, gTasks), gStates) == taskTASK_NOT_RUNNING); + //@ assert( foreach(remove(pxTCB, gTasks), readOnly_TCB_runState_p(gTasks, gStates)) ); + //@ assert( gCurrentTCB == pxCurrentTCBs[ xCoreID ] ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates)) ); + + /* We could reuse the read permission to `pxTCB` we extracted before the if statement. + * But putting permissions back as soon as we no longer need them simplifies the + * proof state and eliminates case-splits in the proof. 
+ */ + + // Put read permission for `pxTCB` back + //@ close [1/2]TCB_runState_p(pxTCB, _); + //@ close readOnly_TCB_runState_p(gTasks, gStates)(pxTCB); + //@ foreach_unremove(pxTCB, gTasks); + //@ assert( foreach(gTasks, readOnly_TCB_runState_p(gTasks, gStates)) ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates)) ); + + // Get 2nd half of write permission for `gCurrentTCB` + //@ foreach_remove(gCurrentTCB, gTasks); + //@ assert( foreach(remove(gCurrentTCB, gTasks), readOnly_TCB_runState_p(gTasks, gStates)) ); + + /* If the task is not being executed by any core swap it in */ + pxCurrentTCBs[ xCoreID ]->xTaskRunState = taskTASK_NOT_RUNNING; + //@ assert( foreach(remove(gCurrentTCB, gTasks), readOnly_TCB_runState_p(gTasks, gStates)) ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates)) ); + + // New states list reflects state update above. + //@ list gStates1 = def_state1(gTasks, gStates, gCurrentTCB, pxTCB); + //@ assert( nth(index_of(pxTCB, gTasks), gStates1) == taskTASK_NOT_RUNNING); + + /*@ close_updated_foreach_readOnly_TCB_runState(gCurrentTCB, gTasks, gStates, + gStates1, taskTASK_NOT_RUNNING); + @*/ + //@ assert( foreach(gTasks, readOnly_TCB_runState_p(gTasks, gStates1)) ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates)) ); + /*@ stopUpdate_foreach_readOnly_TCB_runState_IF_not_running + (gCurrentTCB, gTasks, gTasks, gStates, gStates1); + @*/ + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates1)) ); + + + // Get write permission for `pxTCB` + //@ foreach_remove(pxTCB, gTasks); + //@ foreach_remove(pxTCB, gTasks); + //@ open readOnly_TCB_runState_IF_not_running_p(gTasks, gStates1)(pxTCB); + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + pxPreviousTCB = pxCurrentTCBs[ xCoreID ]; + #endif + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + //@ assert( foreach(remove(pxTCB, gTasks), 
readOnly_TCB_runState_p(gTasks, gStates1)) ); + //@ assert( foreach(remove(pxTCB, gTasks), readOnly_TCB_runState_IF_not_running_p(gTasks, gStates1)) ); + /*@ list gStates2 = + def_state2(gTasks, gStates, gCurrentTCB, pxTCB, xCoreID); + @*/ + + /*@ close_updated_foreach_readOnly_TCB_runState(pxTCB, gTasks, gStates1, + gStates2, xCoreID); + @*/ + /*@ startUpdate_foreach_readOnly_TCB_runState_IF_not_running + (pxTCB, gTasks, gStates1, gStates2, xCoreID); + @*/ + //@ assert( foreach(gTasks, readOnly_TCB_runState_p(gTasks, gStates2)) ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates2)) ); + + + pxCurrentTCBs[ xCoreID ] = pxTCB; + xTaskScheduled = pdTRUE; + + //@ assert( foreach(gTasks, readOnly_TCB_runState_p(gTasks, gStates2)) ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates2)) ); + //@ close exists_in_taskISRLockInv_p(gTasks, gStates2); + + // Putting back first half of write permission to `pxTCB` + //@ close [1/2]TCB_runState_p(pxTCB, _); + } + } + else if( pxTCB == pxCurrentTCBs[ xCoreID ] ) + { + configASSERT( ( pxTCB->xTaskRunState == xCoreID ) || ( pxTCB->xTaskRunState == taskTASK_YIELDING ) ); + #if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxTCB->uxCoreAffinityMask & ( 1 << xCoreID ) ) != 0 ) + #endif + #endif + { + //@ assert( pxTCB->xTaskRunState != taskTASK_NOT_RUNNING ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStates)) ); + //@ assert( nth(index_of(pxTCB, gTasks), gStates) != taskTASK_NOT_RUNNING); + //@ assert( foreach(remove(pxTCB, gTasks), readOnly_TCB_runState_p(gTasks, gStates)) ); + + /* The task is already running on this core, mark it as scheduled */ + pxTCB->xTaskRunState = ( TaskRunning_t ) xCoreID; + xTaskScheduled = pdTRUE; + + /*@ list gEquivStates + = update(index_of(pxTCB, gTasks), xCoreID, gStates); + @*/ + //@ open exists_in_taskISRLockInv_p(gTasks, gStates); + /*@ 
scheduleRunning_in_foreach_readOnly_TCB_runState_IF_not_running + (pxTCB, gTasks, gStates, gEquivStates, xCoreID); + @*/ + + //@ distinct_mem_remove(pxTCB, gTasks); + //@ remove_result_subset(pxTCB, gTasks); + /*@ update_foreach_readOnly_TCB_runState + (pxTCB, gTasks, remove(pxTCB, gTasks), + gStates, gEquivStates, xCoreID); + @*/ + + //@ close exists_in_taskISRLockInv_p(gTasks, gEquivStates); + + // Put read permission for `pxTCB` back + //@ foreach_unremove(pxTCB, gTasks); + + //@ assert( foreach(gTasks, readOnly_TCB_runState_p(gTasks, gEquivStates)) ); + //@ close [1/2]TCB_runState_p(pxTCB, _); + } + } + /*@ + if( !gPxTCB_not_running && pxTCB != gCurrentTCB ) { + assert( exists_in_taskISRLockInv_p(gTasks, gStates) ); + // Put read permission for `pxTCB` back + close [1/2]TCB_runState_p(pxTCB, _); + close readOnly_TCB_runState_p(gTasks, gStates)(pxTCB); + foreach_unremove(pxTCB, gTasks); + } + @*/ + + //@ close xLIST(gReadyList, gSize, gIndex, gEnd, gCells, gVals, gOwners); + + if( xTaskScheduled != pdFALSE ) + { + //@ close exists(gReadyList); + + //@ assert( xLIST(gReadyList, gSize, gIndex, gEnd, gCells, gVals, gOwners) ); + + /* Once a task has been selected to run on this core, + * move it to the end of the ready task list. */ +#ifdef VERIFAST + /* Reasons for rewrite: + * - Linearization of subproof for performance reasons: + * The contracts of `uxListRemove` and `vListInserEnd` introduce case distinctions, i.e., + * branch splits in the proof tree. This increases the size of the proof tree exponentially + * and checking the proof with VeriFast takes very long. + * The contract of lemma `VF_reordeReadyList` does not expose these case distinctions. + * Hence, wrapping the function calls inside the lemma linearizes the subproof and + * improves the performance of VeriFast exponentially. + * - Reasoning about the function calls requires us introduce many temporary new facts + * about the cell and owner lists by calling list lemmas. 
Introducing such facts can + * easily lead to an infinite loop of auto lemma calls. Encapsulating the subproof in a + * lemma allows us to ignore facts necessary for different parts of the proof. + * That is, makes it easier to ensure that we don't run into an infinite auto lemma call + * loop. + */ + /*@ close VF_reordeReadyList__ghost_args + (gTasks, gCellLists, gOwnerLists, uxCurrentPriority); + @*/ + VF_reordeReadyList( pxReadyList, pxTaskItem); +#else + uxListRemove( pxTaskItem ); + vListInsertEnd( pxReadyList, pxTaskItem ); +#endif /* VERIFAST */ + //@ assert( readyLists_p(?gReorderedCellLists, ?gReorderedOwnerLists) ); + //@ assert( forall(gReorderedOwnerLists, (superset)(gTasks)) == true ); + //@ gInnerLoopBroken = true; + break; + } + + //@ assert( exists_in_taskISRLockInv_p(gTasks, ?gStatesEnd) ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_p(gTasks, gStatesEnd)) ); + //@ assert( foreach(gTasks, readOnly_TCB_runState_IF_not_running_p(gTasks, gStatesEnd)) ); + } while( pxTaskItem != pxLastTaskItem ); + + /* - If the loop above terminated via the break-branch, + * the heap already contains a `readyLists_p` predicate. + * - If the loop terminated normally, the heap matches + * the loop invariant (plus all chunks not touched by the + * loop). In this case, we still have to close the + * `readyLists_p` predicate. + */ + /*@ + if( !gInnerLoopBroken ) { + closeUnchanged_readyLists(gCellLists, gOwnerLists); + + assert( readyLists_p(gCellLists, gOwnerLists) ); + assert( forall(gOwnerLists, (superset)(gTasks)) == true ); + } + @*/ + + + //@ assert( readyLists_p(?gCellLists3, ?gOwnerLists3) ); + //@ assert( forall(gOwnerLists3, (superset)(gTasks)) == true ); + } + else + { + if( xDecrementTopPriority != pdFALSE ) + { +#if VERIFAST + /* Reason for rewrite: Code not memory safe. 
+ */ + if(uxTopReadyPriority > 0) { + uxTopReadyPriority--; + } +#else + uxTopReadyPriority--; +#endif /* VERIFAST */ + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + { + xPriorityDropped = pdTRUE; + } + #endif + } + + //@ close xLIST(gReadyList, gSize, gIndex, gEnd, gCells, gVals, gOwners); + + //@ closeUnchanged_readyLists(gCellLists, gOwnerLists); + } + + /* This function can get called by vTaskSuspend() before the scheduler is started. + * In that case, since the idle tasks have not yet been created it is possible that we + * won't find a new task to schedule. Return pdFALSE in this case. */ + if( ( xSchedulerRunning == pdFALSE ) && ( uxCurrentPriority == tskIDLE_PRIORITY ) && ( xTaskScheduled == pdFALSE ) ) + { + // @ assert( xLIST(gReadyList, ?gReadyListSize, _, _, gCells, gVals, gOwners) ); + // @ assert( gReadyListSize == gSize ); + // @ List_array_join(&pxReadyTasksLists); + // @ assert( List_array_p(&pxReadyTasksLists, ?gSize2, ?gCellLists2, ?gOwnerLists2) ); + // @ assert( gPrefCellLists == take(uxCurrentPriority, gCellLists) ); + // @ assert( gSufCellLists == drop(uxCurrentPriority + 1, gCellLists) ); + // @ assert( gCells == nth(uxCurrentPriority, gCellLists) ); + // @ assert( gCellLists2 == append(gPrefCellLists, cons(gCells, gSufCellLists)) ); + // @ append_take_nth_drop(uxCurrentPriority, gCellLists); + // @ append_take_nth_drop(uxCurrentPriority, gOwnerLists); + + // @ close readyLists_p(gCellLists2, gOwnerLists2); + //@ close _taskISRLockInv_p(_); + //@ close taskISRLockInv_p(); + return pdFALSE; + } + +#ifndef VERIFAST + configASSERT( ( uxCurrentPriority > tskIDLE_PRIORITY ) || ( xTaskScheduled == pdTRUE ) ); +#endif /* VERIFAST */ + + +#if VERIFAST + /* Reason for rewrite: Code not memory safe. 
+ */ + if(uxCurrentPriority > 0) { + uxCurrentPriority--; + } +#else + uxCurrentPriority--; +#endif /* VERIFAST */ + + // @ close xLIST(gReadyList, gSize, gIndex, gEnd, gCells, gVals, gOwners); + // @ assert( xLIST(gReadyList, ?gReadyListSize, _, _, gCells, gVals, gOwners) ); + // @ assert( gReadyListSize == gSize ); + // @ List_array_join(&pxReadyTasksLists); + // @ assert( List_array_p(&pxReadyTasksLists, ?gSize2, ?gCellLists2, ?gOwnerLists2) ); + // @ assert( gPrefCellLists == take(uxCurrentPriority, gCellLists) ); + // @ assert( gSufCellLists == drop(uxCurrentPriority + 1, gCellLists) ); + // @ assert( gCells == nth(uxCurrentPriority, gCellLists) ); + // @ assert( gCellLists2 == append(gPrefCellLists, cons(gCells, gSufCellLists)) ); + // @ append_take_nth_drop(uxCurrentPriority, gCellLists); + // @ append_take_nth_drop(uxCurrentPriority, gOwnerLists); + + //@ assert( exists_in_taskISRLockInv_p(gTasks, ?gStates) ); + //@ close _taskISRLockInv_p(uxTopReadyPriority); + } // outer loop end + +#ifndef VERIFAST + configASSERT( taskTASK_IS_RUNNING( pxCurrentTCBs[ xCoreID ]->xTaskRunState ) ); +#endif /* VERIFAST */ + + #if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) + if( xPriorityDropped != pdFALSE ) + { + /* There may be several ready tasks that were being prevented from running because there was + * a higher priority task running. Now that the last of the higher priority tasks is no longer + * running, make sure all the other idle tasks yield. 
*/ + UBaseType_t x; + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configNUM_CORES; x++ ) + { + if( pxCurrentTCBs[ x ]->xIsIdle != pdFALSE ) + { + prvYieldCore( x ); + } + } + } + #endif /* if ( ( configRUN_MULTIPLE_PRIORITIES == 0 ) && ( configNUM_CORES > 1 ) ) */ + + #if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + if( ( pxPreviousTCB != NULL ) && ( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxPreviousTCB->uxPriority ] ), &( pxPreviousTCB->xStateListItem ) ) != pdFALSE ) ) + { + /* A ready task was just bumped off this core. Look at the cores it can run from + * to see if it is able to run on any of them */ + UBaseType_t uxCoreMap = pxPreviousTCB->uxCoreAffinityMask; + BaseType_t xLowestPriority = pxPreviousTCB->uxPriority - pxPreviousTCB->xIsIdle; + BaseType_t xLowestPriorityCore = -1; + + if( ( uxCoreMap & ( 1 << xCoreID ) ) != 0 ) + { + /* The ready task that was removed from this core is not excluded from it. + * Only look at the intersection of the cores the removed task is allowed to run + * on with the cores that the new task is excluded from. It is possible that the + * new task was only placed onto this core because it is excluded from another. + * Check to see if the previous task could run on one of those cores. */ + uxCoreMap &= ~( pxCurrentTCBs[ xCoreID ]->uxCoreAffinityMask ); + } + else + { + /* The ready task that was removed from this core is excluded from it. 
*/ + } + + uxCoreMap &= ( ( 1 << configNUM_CORES ) - 1 ); + + while( uxCoreMap != 0 ) + { + int uxCore = 31UL - ( uint32_t ) __builtin_clz( uxCoreMap ); + + configASSERT( taskVALID_CORE_ID( uxCore ) ); + + uxCoreMap &= ~( 1 << uxCore ); + + BaseType_t xTaskPriority = ( BaseType_t ) pxCurrentTCBs[ uxCore ]->uxPriority - pxCurrentTCBs[ uxCore ]->xIsIdle; + + if( ( xTaskPriority < xLowestPriority ) && ( taskTASK_IS_RUNNING( pxCurrentTCBs[ uxCore ]->xTaskRunState ) != pdFALSE ) && ( xYieldPendings[ uxCore ] == pdFALSE ) ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ uxCore ]->xPreemptionDisable == pdFALSE ) + #endif + { + xLowestPriority = xTaskPriority; + xLowestPriorityCore = uxCore; + } + } + } + + if( taskVALID_CORE_ID( xLowestPriorityCore ) ) + { + prvYieldCore( xLowestPriorityCore ); + } + } + #endif /* if ( configUSE_CORE_AFFINITY == 1 ) */ + #endif /* if ( configNUM_CORES > 1 ) */ + + //@ close taskISRLockInv_p(); + return pdTRUE; + } + +#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ + + static void prvSelectHighestPriorityTask( BaseType_t xCoreID ) + { + UBaseType_t uxTopPriority; + + /* Find the highest priority list that contains ready tasks. */ + portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); + configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); + listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); + } + +#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */ +/*-----------------------------------------------------------*/ + +#if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + + TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateStaticAffinitySet(pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, puxStackBuffer, pxTaskBuffer, tskNO_AFFINITY); + } + + TaskHandle_t xTaskCreateStaticAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + StackType_t * const puxStackBuffer, + StaticTask_t * const pxTaskBuffer, + UBaseType_t uxCoreAffinityMask ) + #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + { + TCB_t * pxNewTCB; + TaskHandle_t xReturn; + + configASSERT( puxStackBuffer != NULL ); + configASSERT( pxTaskBuffer != NULL ); + + #if ( configASSERT_DEFINED == 1 ) + { + /* Sanity check that the size of the structure used to declare a + * variable of type StaticTask_t equals the size of the real task + * structure. */ + volatile size_t xSize = sizeof( StaticTask_t ); + configASSERT( xSize == sizeof( TCB_t ) ); + ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */ + } + #endif /* configASSERT_DEFINED */ + + if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) ) + { + /* The memory used for the task's TCB and stack are passed into this + * function - use them. */ + pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */ + pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer; + + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. 
*/ + { + /* Tasks can be created statically or dynamically, so note this + * task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL ); + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + + prvAddNewTaskToReadyList( pxNewTCB ); + } + else + { + xReturn = NULL; + } + + return xReturn; + } + +#endif /* SUPPORT_STATIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateRestrictedStaticAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); + } + + BaseType_t xTaskCreateRestrictedStaticAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) + #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + { + TCB_t * pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer != NULL ); + configASSERT( pxTaskDefinition->pxTaskBuffer != NULL ); + + if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) ) + { + /* Allocate space for the TCB. Where the memory comes from depends + * on the implementation of the port malloc function and whether or + * not static allocation is being used. 
*/ + pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer; + + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note this + * task was created statically in case the task is later deleted. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + + return xReturn; + } + +#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, + TaskHandle_t * pxCreatedTask ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateRestrictedAffinitySet( pxTaskDefinition, tskNO_AFFINITY, pxCreatedTask ); + } + + BaseType_t xTaskCreateRestrictedAffinitySet( const TaskParameters_t * const pxTaskDefinition, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * pxCreatedTask ) + #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + { + TCB_t * pxNewTCB; + BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + + configASSERT( pxTaskDefinition->puxStackBuffer ); + + if( pxTaskDefinition->puxStackBuffer != 
NULL ) + { + /* Allocate space for the TCB. Where the memory comes from depends + * on the implementation of the port malloc function and whether or + * not static allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer; + + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) + { + /* Tasks can be created statically or dynamically, so note + * this task had a statically allocated stack in case it is + * later deleted. The TCB was allocated dynamically. */ + pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskDefinition->pvTaskCode, + pxTaskDefinition->pcName, + ( uint32_t ) pxTaskDefinition->usStackDepth, + pxTaskDefinition->pvParameters, + pxTaskDefinition->uxPriority, + pxCreatedTask, pxNewTCB, + pxTaskDefinition->xRegions ); + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + } + + return xReturn; + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) + + BaseType_t xTaskCreate( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. 
*/ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask ) + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + return xTaskCreateAffinitySet(pxTaskCode, pcName, usStackDepth, pvParameters, uxPriority, tskNO_AFFINITY, pxCreatedTask); + } + + BaseType_t xTaskCreateAffinitySet( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const configSTACK_DEPTH_TYPE usStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + UBaseType_t uxCoreAffinityMask, + TaskHandle_t * const pxCreatedTask ) + #endif /* ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) */ + { + TCB_t * pxNewTCB; + BaseType_t xReturn; + + /* If the stack grows down then allocate the stack then the TCB so the stack + * does not grow into the TCB. Likewise if the stack grows up then allocate + * the TCB then the stack. */ + #if ( portSTACK_GROWTH > 0 ) + { + /* Allocate space for the TCB. Where the memory comes from depends on + * the implementation of the port malloc function and whether or not static + * allocation is being used. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); + + if( pxNewTCB != NULL ) + { + /* Allocate space for the stack used by the task being created. + * The base of the stack memory stored in the TCB so the task can + * be deleted later if required. */ + pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + if( pxNewTCB->pxStack == NULL ) + { + /* Could not allocate the stack. Delete the allocated TCB. */ + vPortFree( pxNewTCB ); + pxNewTCB = NULL; + } + } + } + #else /* portSTACK_GROWTH */ + { + StackType_t * pxStack; + + /* Allocate space for the stack used by the task being created. 
*/ + pxStack = pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */ + + if( pxStack != NULL ) + { + /* Allocate space for the TCB. */ + pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */ + + if( pxNewTCB != NULL ) + { + /* Store the stack location in the TCB. */ + pxNewTCB->pxStack = pxStack; + } + else + { + /* The stack cannot be used as the TCB was not created. Free + * it again. */ + vPortFreeStack( pxStack ); + } + } + else + { + pxNewTCB = NULL; + } + } + #endif /* portSTACK_GROWTH */ + + if( pxNewTCB != NULL ) + { + #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */ + { + /* Tasks can be created statically or dynamically, so note this + * task was created dynamically in case it is later deleted. 
*/ + pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB; + } + #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */ + + prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL ); + + #if ( ( configNUM_CORES > 1 ) && ( configUSE_CORE_AFFINITY == 1 ) ) + { + /* Set the task's affinity before scheduling it */ + pxNewTCB->uxCoreAffinityMask = uxCoreAffinityMask; + } + #endif + + prvAddNewTaskToReadyList( pxNewTCB ); + xReturn = pdPASS; + } + else + { + xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY; + } + + return xReturn; + } + +#endif /* configSUPPORT_DYNAMIC_ALLOCATION */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseNewTask( TaskFunction_t pxTaskCode, + const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + const uint32_t ulStackDepth, + void * const pvParameters, + UBaseType_t uxPriority, + TaskHandle_t * const pxCreatedTask, + TCB_t * pxNewTCB, + const MemoryRegion_t * const xRegions ) +{ + StackType_t * pxTopOfStack; + UBaseType_t x; + + #if ( portUSING_MPU_WRAPPERS == 1 ) + /* Should the task be created in privileged mode? */ + BaseType_t xRunPrivileged; + + if( ( uxPriority & portPRIVILEGE_BIT ) != 0U ) + { + xRunPrivileged = pdTRUE; + } + else + { + xRunPrivileged = pdFALSE; + } + uxPriority &= ~portPRIVILEGE_BIT; + #endif /* portUSING_MPU_WRAPPERS == 1 */ + + /* Avoid dependency on memset() if it is not required. */ + #if ( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 ) + { + /* Fill the stack with a known value to assist debugging. */ + ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) ); + } + #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */ + + /* Calculate the top of stack address. This depends on whether the stack + * grows from high memory to low (as per the 80x86) or vice versa. 
+ * portSTACK_GROWTH is used to make the result positive or negative as required + * by the port. */ + #if ( portSTACK_GROWTH < 0 ) + { + pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] ); + pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */ + + /* Check the alignment of the calculated top of stack is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + #if ( configRECORD_STACK_HIGH_ADDRESS == 1 ) + { + /* Also record the stack's high address, which may assist + * debugging. */ + pxNewTCB->pxEndOfStack = pxTopOfStack; + } + #endif /* configRECORD_STACK_HIGH_ADDRESS */ + } + #else /* portSTACK_GROWTH */ + { + pxTopOfStack = pxNewTCB->pxStack; + + /* Check the alignment of the stack buffer is correct. */ + configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) ); + + /* The other extreme of the stack space is required if stack checking is + * performed. */ + pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 ); + } + #endif /* portSTACK_GROWTH */ + + /* Store the task name in the TCB. */ + if( pcName != NULL ) + { + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + pxNewTCB->pcTaskName[ x ] = pcName[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + * configMAX_TASK_NAME_LEN characters just in case the memory after the + * string is not accessible (extremely unlikely). 
*/ + if( pcName[ x ] == ( char ) 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Ensure the name string is terminated in the case that the string length + * was greater or equal to configMAX_TASK_NAME_LEN. */ + pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0'; + } + else + { + /* The task has not been given a name, so just ensure there is a NULL + * terminator when it is read out. */ + pxNewTCB->pcTaskName[ 0 ] = 0x00; + } + + /* This is used as an array index so must ensure it's not too large. First + * remove the privilege bit if one is present. */ + if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxNewTCB->uxPriority = uxPriority; + #if ( configUSE_MUTEXES == 1 ) + { + pxNewTCB->uxBasePriority = uxPriority; + pxNewTCB->uxMutexesHeld = 0; + } + #endif /* configUSE_MUTEXES */ + + vListInitialiseItem( &( pxNewTCB->xStateListItem ) ); + vListInitialiseItem( &( pxNewTCB->xEventListItem ) ); + + + /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get + * back to the containing TCB from a generic item in a list. */ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB ); + + /* Event lists are always in priority order. */ + listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB ); + + #if ( portCRITICAL_NESTING_IN_TCB == 1 ) + { + pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U; + } + #endif /* portCRITICAL_NESTING_IN_TCB */ + + #if ( configUSE_APPLICATION_TASK_TAG == 1 ) + { + pxNewTCB->pxTaskTag = NULL; + } + #endif /* configUSE_APPLICATION_TASK_TAG */ + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxNewTCB->ulRunTimeCounter = 0UL; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + #if ( portUSING_MPU_WRAPPERS == 1 ) + { + vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth ); + } + #else + { + /* Avoid compiler warning about unreferenced parameter. */ + ( void ) xRegions; + } + #endif + + #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + { + memset( ( void * ) &( pxNewTCB->pvThreadLocalStoragePointers[ 0 ] ), 0x00, sizeof( pxNewTCB->pvThreadLocalStoragePointers ) ); + } + #endif + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + memset( ( void * ) &( pxNewTCB->ulNotifiedValue[ 0 ] ), 0x00, sizeof( pxNewTCB->ulNotifiedValue ) ); + memset( ( void * ) &( pxNewTCB->ucNotifyState[ 0 ] ), 0x00, sizeof( pxNewTCB->ucNotifyState ) ); + } + #endif + + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + /* Initialise this task's Newlib reent structure. + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. */ + _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) ); + } + #endif + + #if ( INCLUDE_xTaskAbortDelay == 1 ) + { + pxNewTCB->ucDelayAborted = pdFALSE; + } + #endif + + #if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + { + pxNewTCB->uxCoreAffinityMask = tskNO_AFFINITY; + } + #endif + #endif + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + { + pxNewTCB->xPreemptionDisable = 0; + } + #endif + + /* Initialize the TCB stack to look as if the task was already running, + * but had been interrupted by the scheduler. 
The return address is set + * to the start of the task function. Once the stack has been initialised + * the top of stack variable is updated. */ + #if ( portUSING_MPU_WRAPPERS == 1 ) + { + /* If the port has capability to detect stack overflow, + * pass the stack end address to the stack initialization + * function as well. */ + #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if ( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ + } + #else /* portUSING_MPU_WRAPPERS */ + { + /* If the port has capability to detect stack overflow, + * pass the stack end address to the stack initialization + * function as well. */ + #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 ) + { + #if ( portSTACK_GROWTH < 0 ) + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters ); + } + #else /* portSTACK_GROWTH */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters ); + } + #endif /* portSTACK_GROWTH */ + } + #else /* portHAS_STACK_OVERFLOW_CHECKING */ + { + pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters ); + } + #endif /* portHAS_STACK_OVERFLOW_CHECKING */ + } + #endif /* portUSING_MPU_WRAPPERS */ + + /* Initialize to not running */ + pxNewTCB->xTaskRunState = taskTASK_NOT_RUNNING; + + /* Is this an idle task? 
*/ + if( pxTaskCode == prvIdleTask ) + { + pxNewTCB->xIsIdle = pdTRUE; + } + + #if ( configNUM_CORES > 1 ) + else if( pxTaskCode == prvMinimalIdleTask ) + { + pxNewTCB->xIsIdle = pdTRUE; + } + #endif + else + { + pxNewTCB->xIsIdle = pdFALSE; + } + + if( pxCreatedTask != NULL ) + { + /* Pass the handle out in an anonymous way. The handle can be used to + * change the created task's priority, delete the created task, etc.*/ + *pxCreatedTask = ( TaskHandle_t ) pxNewTCB; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*-----------------------------------------------------------*/ + +static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) +{ + /* Ensure interrupts don't access the task lists while the lists are being + * updated. */ + taskENTER_CRITICAL(); + { + uxCurrentNumberOfTasks++; + + if( xSchedulerRunning == pdFALSE ) + { + if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 ) + { + /* This is the first task to be created so do the preliminary + * initialisation required. We will not recover if this call + * fails, but we will report the failure. */ + prvInitialiseTaskLists(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( pxNewTCB->xIsIdle != pdFALSE ) + { + BaseType_t xCoreID; + + /* Check if a core is free. */ + for( xCoreID = ( UBaseType_t ) 0; xCoreID < ( UBaseType_t ) configNUM_CORES; xCoreID++ ) + { + if( pxCurrentTCBs[ xCoreID ] == NULL ) + { + pxNewTCB->xTaskRunState = xCoreID; + pxCurrentTCBs[ xCoreID ] = pxNewTCB; + break; + } + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + uxTaskNumber++; + + #if ( configUSE_TRACE_FACILITY == 1 ) + { + /* Add a counter into the TCB for tracing only. 
*/ + pxNewTCB->uxTCBNumber = uxTaskNumber; + } + #endif /* configUSE_TRACE_FACILITY */ + traceTASK_CREATE( pxNewTCB ); + + prvAddTaskToReadyList( pxNewTCB ); + + portSETUP_TCB( pxNewTCB ); + + if( xSchedulerRunning != pdFALSE ) + { + /* If the created task is of a higher priority than another + * currently running task and preemption is on then it should + * run now. */ + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxNewTCB, pdFALSE ); + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + void vTaskDelete( TaskHandle_t xTaskToDelete ) + { + TCB_t * pxTCB; + TaskRunning_t xTaskRunningOnCore; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the calling task that is + * being deleted. */ + pxTCB = prvGetTCBFromHandle( xTaskToDelete ); + + xTaskRunningOnCore = pxTCB->xTaskRunState; + + /* Remove task from the ready/delayed list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Increment the uxTaskNumber also so kernel aware debuggers can + * detect that the task lists need re-generating. This is done before + * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will + * not return. */ + uxTaskNumber++; + + /* If the task is running (or yielding), we must add it to the + * termination list so that an idle task can delete it when it is + * no longer running. */ + if( xTaskRunningOnCore != taskTASK_NOT_RUNNING ) + { + /* A running task is being deleted. 
This cannot complete within the + * task itself, as a context switch to another task is required. + * Place the task in the termination list. The idle task will + * check the termination list and free up any memory allocated by + * the scheduler for the TCB and stack of the deleted task. */ + vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) ); + + /* Increment the ucTasksDeleted variable so the idle task knows + * there is a task that has been deleted and that it should therefore + * check the xTasksWaitingTermination list. */ + ++uxDeletedTasksWaitingCleanUp; + + /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as + * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */ + traceTASK_DELETE( pxTCB ); + + /* The pre-delete hook is primarily for the Windows simulator, + * in which Windows specific clean up operations are performed, + * after which it is not possible to yield away from this task - + * hence xYieldPending is used to latch that a context switch is + * required. */ + portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPendings[ pxTCB->xTaskRunState ] ); + } + else + { + --uxCurrentNumberOfTasks; + traceTASK_DELETE( pxTCB ); + prvDeleteTCB( pxTCB ); + + /* Reset the next expected unblock time in case it referred to + * the task that has just been deleted. */ + prvResetNextTaskUnblockTime(); + } + + /* Force a reschedule if the task that has just been deleted was running. 
*/ + if( ( xSchedulerRunning != pdFALSE ) && ( taskTASK_IS_RUNNING( xTaskRunningOnCore ) ) ) + { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + if( xTaskRunningOnCore == xCoreID ) + { + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( xTaskRunningOnCore ); + } + } + } + taskEXIT_CRITICAL(); + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskDelayUntil == 1 ) + + BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime, + const TickType_t xTimeIncrement ) + { + TickType_t xTimeToWake; + BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE; + + configASSERT( pxPreviousWakeTime ); + configASSERT( ( xTimeIncrement > 0U ) ); + + vTaskSuspendAll(); + { + configASSERT( uxSchedulerSuspended == 1 ); + + /* Minor optimisation. The tick count cannot change in this + * block. */ + const TickType_t xConstTickCount = xTickCount; + + /* Generate the tick time at which the task wants to wake. */ + xTimeToWake = *pxPreviousWakeTime + xTimeIncrement; + + if( xConstTickCount < *pxPreviousWakeTime ) + { + /* The tick count has overflowed since this function was + * lasted called. In this case the only time we should ever + * actually delay is if the wake time has also overflowed, + * and the wake time is greater than the tick time. When this + * is the case it is as if neither time had overflowed. */ + if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) ) + { + xShouldDelay = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + /* The tick time has not overflowed. In this case we will + * delay if either the wake time has overflowed, and/or the + * tick time is less than the wake time. 
*/ + if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) ) + { + xShouldDelay = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Update the wake time ready for the next call. */ + *pxPreviousWakeTime = xTimeToWake; + + if( xShouldDelay != pdFALSE ) + { + traceTASK_DELAY_UNTIL( xTimeToWake ); + + /* prvAddCurrentTaskToDelayedList() needs the block time, not + * the time to wake, so subtract the current tick count. */ + prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + xAlreadyYielded = xTaskResumeAll(); + + /* Force a reschedule if xTaskResumeAll has not already done so, we may + * have put ourselves to sleep. */ + if( xAlreadyYielded == pdFALSE ) + { + vTaskYieldWithinAPI(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xShouldDelay; + } + +#endif /* INCLUDE_xTaskDelayUntil */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelay == 1 ) + + void vTaskDelay( const TickType_t xTicksToDelay ) + { + BaseType_t xAlreadyYielded = pdFALSE; + + /* A delay time of zero just forces a reschedule. */ + if( xTicksToDelay > ( TickType_t ) 0U ) + { + vTaskSuspendAll(); + { + configASSERT( uxSchedulerSuspended == 1 ); + traceTASK_DELAY(); + + /* A task that is removed from the event list while the + * scheduler is suspended will not get placed in the ready + * list or removed from the blocked list until the scheduler + * is resumed. + * + * This task cannot be in an event list as it is the currently + * executing task. */ + prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE ); + } + xAlreadyYielded = xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Force a reschedule if xTaskResumeAll has not already done so, we may + * have put ourselves to sleep. 
*/ + if( xAlreadyYielded == pdFALSE ) + { + vTaskYieldWithinAPI(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskDelay */ +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) ) + + eTaskState eTaskGetState( TaskHandle_t xTask ) + { + eTaskState eReturn; + #ifdef VERIFAST + /* Reason for rewrite: + * VeriFast does not support multiple pointer declarations to + * user-defined types in single statement (i.e., `A p1, p2;` is ok, + * `A *p1, *p2;` fails) + */ + List_t const * pxStateList; + List_t const * pxDelayedList; + List_t const * pxOverflowedDelayedList; + #else + List_t const * pxStateList, * pxDelayedList, * pxOverflowedDelayedList; + #endif /* VERIFAST */ + const TCB_t * const pxTCB = xTask; + + configASSERT( pxTCB ); + + taskENTER_CRITICAL(); + { + pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) ); + pxDelayedList = pxDelayedTaskList; + pxOverflowedDelayedList = pxOverflowDelayedTaskList; + } + taskEXIT_CRITICAL(); + + if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) ) + { + /* The task being queried is referenced from one of the Blocked + * lists. */ + eReturn = eBlocked; + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + else if( pxStateList == &xSuspendedTaskList ) + { + /* The task being queried is referenced from the suspended + * list. Is it genuinely suspended or is it blocked + * indefinitely? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ) + { + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + BaseType_t x; + + /* The task does not appear on the event list item of + * and of the RTOS objects, but could still be in the + * blocked state if it is waiting on its notification + * rather than waiting on an object. If not, is + * suspended. 
*/
                        /* Not waiting on any event object - tentatively report
                         * the task as suspended, unless a pending notification
                         * (checked below) shows it is really blocked. */
                        eReturn = eSuspended;

                        for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                        {
                            if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                            {
                                /* Waiting on a task notification, so the task
                                 * is blocked rather than suspended. */
                                eReturn = eBlocked;
                                break;
                            }
                        }
                    }
                    #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                    {
                        eReturn = eSuspended;
                    }
                    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                }
                else
                {
                    /* Still linked into an event list, so the task is blocked
                     * on that object with an infinite timeout. */
                    eReturn = eBlocked;
                }
            }
        #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */

        #if ( INCLUDE_vTaskDelete == 1 )
            else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
            {
                /* The task being queried is referenced from the deleted
                 * tasks list, or it is not referenced from any lists at
                 * all. */
                eReturn = eDeleted;
            }
        #endif

        else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
        {
            /* If the task is not in any other state, it must be in the
             * Ready (including pending ready) state. */
            if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) )
            {
                /* Is it actively running on a core? */
                eReturn = eRunning;
            }
            else
            {
                eReturn = eReady;
            }
        }

        return eReturn;
    } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */

#endif /* INCLUDE_eTaskGetState */
/*-----------------------------------------------------------*/

#if ( INCLUDE_uxTaskPriorityGet == 1 )

    /* Return the current (possibly inherited) priority of the task referenced
     * by xTask.  Must be called from task context; see
     * uxTaskPriorityGetFromISR() for the interrupt-safe variant. */
    UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
    {
        TCB_t const * pxTCB;
        UBaseType_t uxReturn;

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the priority of the task
             * that called uxTaskPriorityGet() that is being queried. */
            pxTCB = prvGetTCBFromHandle( xTask );
            uxReturn = pxTCB->uxPriority;
        }
        taskEXIT_CRITICAL();

        return uxReturn;
    }

#endif /* INCLUDE_uxTaskPriorityGet */
/*-----------------------------------------------------------*/

#if ( INCLUDE_uxTaskPriorityGet == 1 )

    UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
    {
        TCB_t const * pxTCB;
        UBaseType_t uxReturn, uxSavedInterruptState;

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are kept
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions. If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority. Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority. FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible. More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
        {
            /* If null is passed in here then it is the priority of the calling
             * task that is being queried. 
*/ + pxTCB = prvGetTCBFromHandle( xTask ); + uxReturn = pxTCB->uxPriority; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskPriorityGet */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskPrioritySet == 1 ) + + void vTaskPrioritySet( TaskHandle_t xTask, + UBaseType_t uxNewPriority ) + { + TCB_t * pxTCB; + UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry; + BaseType_t xYieldRequired = pdFALSE; + BaseType_t xYieldForTask = pdFALSE; + BaseType_t xCoreID; + + configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) ); + + /* Ensure the new priority is valid. */ + if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES ) + { + uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the priority of the calling + * task that is being changed. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + traceTASK_PRIORITY_SET( pxTCB, uxNewPriority ); + + #if ( configUSE_MUTEXES == 1 ) + { + uxCurrentBasePriority = pxTCB->uxBasePriority; + } + #else + { + uxCurrentBasePriority = pxTCB->uxPriority; + } + #endif + + if( uxCurrentBasePriority != uxNewPriority ) + { + /* The priority change may have readied a task of higher + * priority than a running task. */ + if( uxNewPriority > uxCurrentBasePriority ) + { + /* The priority of a task is being raised so + * perform a yield for this task later. */ + xYieldForTask = pdTRUE; + } + else if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) + { + /* Setting the priority of a running task down means + * there may now be another task of higher priority that + * is ready to execute. 
*/ + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxTCB->xPreemptionDisable == pdFALSE ) + #endif + { + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + xYieldRequired = pdTRUE; + } + } + else + { + /* Setting the priority of any other task down does not + * require a yield as the running task must be above the + * new priority of the task being modified. */ + } + + /* Remember the ready list the task might be referenced from + * before its uxPriority member is changed so the + * taskRESET_READY_PRIORITY() macro can function correctly. */ + uxPriorityUsedOnEntry = pxTCB->uxPriority; + + #if ( configUSE_MUTEXES == 1 ) + { + /* Only change the priority being used if the task is not + * currently using an inherited priority. */ + if( pxTCB->uxBasePriority == pxTCB->uxPriority ) + { + pxTCB->uxPriority = uxNewPriority; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* The base priority gets set whatever. */ + pxTCB->uxBasePriority = uxNewPriority; + } + #else /* if ( configUSE_MUTEXES == 1 ) */ + { + pxTCB->uxPriority = uxNewPriority; + } + #endif /* if ( configUSE_MUTEXES == 1 ) */ + + /* Only reset the event list item value if the value is not + * being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task is in the blocked or suspended list we need do + * nothing more than change its priority variable. However, if + * the task is in a ready list it needs to be removed and placed + * in the list appropriate to its new priority. 
*/ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + /* The task is currently in its ready list - remove before + * adding it to its new ready list. As we are in a critical + * section we can do this even if the scheduler is suspended. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. */ + portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* It's possible that xYieldForTask was already set to pdTRUE because + * its priority is being raised. However, since it is not in a ready list + * we don't actually need to yield for it. */ + xYieldForTask = pdFALSE; + } + + #if ( configUSE_PREEMPTION == 1 ) + if( xYieldRequired != pdFALSE ) + { + prvYieldCore( xCoreID ); + } + else if( xYieldForTask != pdFALSE ) + { + prvYieldForTask( pxTCB, pdTRUE ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + #endif /* if ( configUSE_PREEMPTION == 1 ) */ + + /* Remove compiler warning about unused variables when the port + * optimised task selection is not being used. 
*/ + ( void ) uxPriorityUsedOnEntry; + } + } + taskEXIT_CRITICAL(); + } + +#endif /* INCLUDE_vTaskPrioritySet */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + + void vTaskCoreAffinitySet( const TaskHandle_t xTask, + UBaseType_t uxCoreAffinityMask ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->uxCoreAffinityMask = uxCoreAffinityMask; + + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) + { + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + + if( ( uxCoreAffinityMask & ( 1 << xCoreID ) ) == 0 ) + { + prvYieldCore( xCoreID ); + } + } + } + } + taskEXIT_CRITICAL(); + } + + #endif /* configUSE_CORE_AFFINITY */ +#endif /* if ( configNUM_CORES > 1 ) */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_CORES > 1 ) + #if ( configUSE_CORE_AFFINITY == 1 ) + + UBaseType_t vTaskCoreAffinityGet( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + UBaseType_t uxCoreAffinityMask; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + uxCoreAffinityMask = pxTCB->uxCoreAffinityMask; + } + taskEXIT_CRITICAL(); + + return uxCoreAffinityMask; + } + + #endif /* configUSE_CORE_AFFINITY */ +#endif /* if ( configNUM_CORES > 1 ) */ + +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + void vTaskPreemptionDisable( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + + taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->xPreemptionDisable = pdTRUE; + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_TASK_PREEMPTION_DISABLE */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + + void vTaskPreemptionEnable( const TaskHandle_t xTask ) + { + TCB_t * pxTCB; + BaseType_t xCoreID; + + 
taskENTER_CRITICAL(); + { + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTCB->xPreemptionDisable = pdFALSE; + + if( xSchedulerRunning != pdFALSE ) + { + if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) + { + xCoreID = ( BaseType_t ) pxTCB->xTaskRunState; + prvYieldCore( xCoreID ); + } + } + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_TASK_PREEMPTION_DISABLE */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + void vTaskSuspend( TaskHandle_t xTaskToSuspend ) + { + TCB_t * pxTCB; + TaskRunning_t xTaskRunningOnCore; + + taskENTER_CRITICAL(); + { + /* If null is passed in here then it is the running task that is + * being suspended. */ + pxTCB = prvGetTCBFromHandle( xTaskToSuspend ); + + traceTASK_SUSPEND( pxTCB ); + + xTaskRunningOnCore = pxTCB->xTaskRunState; + + /* Remove task from the ready/delayed list and place in the + * suspended list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + taskRESET_READY_PRIORITY( pxTCB->uxPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Is the task waiting on an event also? */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ); + + #if ( configUSE_TASK_NOTIFICATIONS == 1 ) + { + BaseType_t x; + + for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ ) + { + if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION ) + { + /* The task was blocked to wait for a notification, but is + * now suspended, so no notification was received. */ + pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION; + } + } + } + #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */ + + if( xSchedulerRunning != pdFALSE ) + { + /* Reset the next expected unblock time in case it referred to the + * task that is now in the Suspended state. 
*/ + prvResetNextTaskUnblockTime(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( taskTASK_IS_RUNNING( xTaskRunningOnCore ) ) + { + if( xSchedulerRunning != pdFALSE ) + { + if( xTaskRunningOnCore == portGET_CORE_ID() ) + { + /* The current task has just been suspended. */ + configASSERT( uxSchedulerSuspended == 0 ); + vTaskYieldWithinAPI(); + } + else + { + prvYieldCore( xTaskRunningOnCore ); + } + + taskEXIT_CRITICAL(); + } + else + { + taskEXIT_CRITICAL(); + + configASSERT( pxTCB == pxCurrentTCBs[ xTaskRunningOnCore ] ); + + /* The scheduler is not running, but the task that was pointed + * to by pxCurrentTCB has just been suspended and pxCurrentTCB + * must be adjusted to point to a different task. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */ + { + /* No other tasks are ready, so set the core's TCB back to + * NULL so when the next task is created the core's TCB will + * be able to be set to point to it no matter what its relative + * priority is. */ + pxTCB->xTaskRunState = taskTASK_NOT_RUNNING; + pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; + } + else + { + /* Attempt to switch in a new task. This could fail since the idle tasks + * haven't been created yet. If it does then set the core's TCB back to + * NULL. 
*/ + if( prvSelectHighestPriorityTask( xTaskRunningOnCore ) == pdFALSE ) + { + pxTCB->xTaskRunState = taskTASK_NOT_RUNNING; + pxCurrentTCBs[ xTaskRunningOnCore ] = NULL; + } + } + } + } + else + { + taskEXIT_CRITICAL(); + } + } /* taskEXIT_CRITICAL() - already exited in one of three cases above */ + } + +#endif /* INCLUDE_vTaskSuspend */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) + { + BaseType_t xReturn = pdFALSE; + const TCB_t * const pxTCB = xTask; + + /* Accesses xPendingReadyList so must be called from a critical section. */ + + /* It does not make sense to check if the calling task is suspended. */ + configASSERT( xTask ); + + /* Is the task being resumed actually in the suspended list? */ + if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + /* Has the task already been resumed from within an ISR? */ + if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE ) + { + /* Is it in the suspended list because it is in the Suspended + * state, or because is is blocked with no timeout? */ + if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */ + { + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */ + +#endif /* INCLUDE_vTaskSuspend */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskSuspend == 1 ) + + void vTaskResume( TaskHandle_t xTaskToResume ) + { + TCB_t * const pxTCB = xTaskToResume; + + /* It does not make sense to resume the calling task. 
*/ + configASSERT( xTaskToResume ); + + /* The parameter cannot be NULL as it is impossible to resume the + * currently executing task. It is also impossible to resume a task + * that is actively running on another core but it is too dangerous + * to check their run state here. Safer to get into a critical section + * and check if it is actually suspended or not below. */ + if( pxTCB != NULL ) + { + taskENTER_CRITICAL(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME( pxTCB ); + + /* The ready list can be accessed even if the scheduler is + * suspended because this is inside a critical section. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* A higher priority task may have just been resumed. */ + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxTCB, pdTRUE ); + } + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* INCLUDE_vTaskSuspend */ + +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) + + BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume ) + { + BaseType_t xYieldRequired = pdFALSE; + TCB_t * const pxTCB = xTaskToResume; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToResume ); + + /* RTOS ports that support interrupt nesting have the concept of a + * maximum system call (or maximum API call) interrupt priority. + * Interrupts that are above the maximum system call priority are keep + * permanently enabled, even when the RTOS kernel is in a critical section, + * but cannot make any calls to FreeRTOS API functions. 
If configASSERT() + * is defined in FreeRTOSConfig.h then + * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + * failure if a FreeRTOS API function is called from an interrupt that has + * been assigned a priority above the configured maximum system call + * priority. Only FreeRTOS functions that end in FromISR can be called + * from interrupts that have been assigned a priority at or (logically) + * below the maximum system call interrupt priority. FreeRTOS maintains a + * separate interrupt safe API to ensure interrupt entry is as fast and as + * simple as possible. More information (albeit Cortex-M specific) is + * provided on the following link: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE ) + { + traceTASK_RESUME_FROM_ISR( pxTCB ); + + /* Check the ready lists can be accessed. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Ready lists can be accessed so move the task from the + * suspended list to the ready list directly. */ + + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed or ready lists cannot be accessed so the task + * is held in the pending ready list until the scheduler is + * unsuspended. 
*/ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxTCB, pdTRUE ); + + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + { + xYieldRequired = pdTRUE; + } + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xYieldRequired; + } + +#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */ +/*-----------------------------------------------------------*/ + +static BaseType_t prvCreateIdleTasks( void ) +{ + BaseType_t xReturn = pdPASS; + BaseType_t xCoreID; + char cIdleName[ configMAX_TASK_NAME_LEN ]; + + /* Add each idle task at the lowest priority. */ + for( xCoreID = ( BaseType_t ) 0; xCoreID < ( BaseType_t ) configNUM_CORES; xCoreID++ ) + { + BaseType_t x; + + if( xReturn == pdFAIL ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + for( x = ( BaseType_t ) 0; x < ( BaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + cIdleName[ x ] = configIDLE_TASK_NAME[ x ]; + + /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than + * configMAX_TASK_NAME_LEN characters just in case the memory after the + * string is not accessible (extremely unlikely). 
*/ + if( cIdleName[ x ] == ( char ) 0x00 ) + { + break; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Append the idle task number to the end of the name if there is space */ + if( x < configMAX_TASK_NAME_LEN ) + { + cIdleName[ x++ ] = xCoreID + '0'; + + /* And append a null character if there is space */ + if( x < configMAX_TASK_NAME_LEN ) + { + cIdleName[ x ] = '\0'; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( configSUPPORT_STATIC_ALLOCATION == 1 ) + { + if( xCoreID == 0 ) + { + StaticTask_t * pxIdleTaskTCBBuffer = NULL; + StackType_t * pxIdleTaskStackBuffer = NULL; + uint32_t ulIdleTaskStackSize; + + /* The Idle task is created using user provided RAM - obtain the + * address of the RAM then create the idle task. */ + vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize ); + xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvIdleTask, + cIdleName, + ulIdleTaskStackSize, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + pxIdleTaskStackBuffer, + pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + + #if ( configNUM_CORES > 1 ) + else + { + static StaticTask_t xIdleTCBBuffers[ configNUM_CORES - 1 ]; + static StackType_t xIdleTaskStackBuffers[ configNUM_CORES - 1 ][ configMINIMAL_STACK_SIZE ]; + + xIdleTaskHandle[ xCoreID ] = xTaskCreateStatic( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */ + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. 
*/ + xIdleTaskStackBuffers[ xCoreID - 1 ], + &xIdleTCBBuffers[ xCoreID - 1 ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif /* if ( configNUM_CORES > 1 ) */ + + if( xIdleTaskHandle[ xCoreID ] != NULL ) + { + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */ + { + if( xCoreID == 0 ) + { + /* The Idle task is being created using dynamically allocated RAM. */ + xReturn = xTaskCreate( prvIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + + #if ( configNUM_CORES > 1 ) + else + { + xReturn = xTaskCreate( prvMinimalIdleTask, + cIdleName, + configMINIMAL_STACK_SIZE, + ( void * ) NULL, + portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */ + &xIdleTaskHandle[ xCoreID ] ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */ + } + #endif + } + #endif /* configSUPPORT_STATIC_ALLOCATION */ + } + + return xReturn; +} + +void vTaskStartScheduler( void ) +{ + BaseType_t xReturn; + + #if ( configUSE_TIMERS == 1 ) + { + xReturn = xTimerCreateTimerTask(); + } + #endif /* configUSE_TIMERS */ + + xReturn = prvCreateIdleTasks(); + + if( xReturn == pdPASS ) + { + /* freertos_tasks_c_additions_init() should only be called if the user + * definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is + * the only macro called by the function. 
*/ + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + { + freertos_tasks_c_additions_init(); + } + #endif + + /* Interrupts are turned off here, to ensure a tick does not occur + * before or during the call to xPortStartScheduler(). The stacks of + * the created tasks contain a status word with interrupts switched on + * so interrupts will automatically get re-enabled when the first task + * starts to run. */ + portDISABLE_INTERRUPTS(); + + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) && ( configNEWLIB_REENTRANT_IS_DYNAMIC == 0 ) ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + * structure specific to the task that will run first. + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. + * + * Note: Updating the _impure_ptr is not required when Newlib is compiled with + * __DYNAMIC_REENT__ enabled. The port should provide __getreent() instead. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* ( configUSE_NEWLIB_REENTRANT == 1 ) && ( configNEWLIB_REENTRANT_IS_DYNAMIC == 0 ) */ + + xNextTaskUnblockTime = portMAX_DELAY; + xSchedulerRunning = pdTRUE; + xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; + + /* If configGENERATE_RUN_TIME_STATS is defined then the following + * macro must be defined to configure the timer/counter used to generate + * the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS + * is set to 0 and the following line fails to build then ensure you do not + * have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your + * FreeRTOSConfig.h file. */ + portCONFIGURE_TIMER_FOR_RUN_TIME_STATS(); + + traceTASK_SWITCHED_IN(); + + /* Setting up the timer tick is hardware specific and thus in the + * portable interface. */ + if( xPortStartScheduler() != pdFALSE ) + { + /* Should not reach here as if the scheduler is running the + * function will not return. */ + } + else + { + /* Should only reach here if a task calls xTaskEndScheduler(). 
*/ + } + } + else + { + /* This line will only be reached if the kernel could not be started, + * because there was not enough FreeRTOS heap to create the idle task + * or the timer task. */ + configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY ); + } + + /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0, + * meaning xIdleTaskHandle is not used anywhere else. */ + ( void ) xIdleTaskHandle; + + /* OpenOCD makes use of uxTopUsedPriority for thread debugging. Prevent uxTopUsedPriority + * from getting optimized out as it is no longer used by the kernel. */ + ( void ) uxTopUsedPriority; +} +/*-----------------------------------------------------------*/ + +void vTaskEndScheduler( void ) +{ + /* Stop the scheduler interrupts and call the portable scheduler end + * routine so the original ISRs can be restored if necessary. The port + * layer must ensure interrupts enable bit is left in the correct state. */ + portDISABLE_INTERRUPTS(); + xSchedulerRunning = pdFALSE; + vPortEndScheduler(); +} +/*----------------------------------------------------------*/ + +void vTaskSuspendAll( void ) +{ + UBaseType_t ulState; + + /* This must only be called from within a task */ + portASSERT_IF_IN_ISR(); + + if( xSchedulerRunning != pdFALSE ) + { + /* writes to uxSchedulerSuspended must be protected by both the task AND ISR locks. + * We must disable interrupts before we grab the locks in the event that this task is + * interrupted and switches context before incrementing uxSchedulerSuspended. + * It is safe to re-enable interrupts after releasing the ISR lock and incrementing + * uxSchedulerSuspended since that will prevent context switches. */ + ulState = portDISABLE_INTERRUPTS(); + + /* portSOFRWARE_BARRIER() is only implemented for emulated/simulated ports that + * do not otherwise exhibit real time behaviour. 
*/ + portSOFTWARE_BARRIER(); + + portGET_TASK_LOCK(); + portGET_ISR_LOCK(); + + /* The scheduler is suspended if uxSchedulerSuspended is non-zero. An increment + * is used to allow calls to vTaskSuspendAll() to nest. */ + ++uxSchedulerSuspended; + portRELEASE_ISR_LOCK(); + + if( ( uxSchedulerSuspended == 1U ) && ( pxCurrentTCB->uxCriticalNesting == 0U ) ) + { + prvCheckForRunStateChange(); + } + + portRESTORE_INTERRUPTS( ulState ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } +} +/*----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE != 0 ) + + static TickType_t prvGetExpectedIdleTime( void ) + { + TickType_t xReturn; + UBaseType_t uxHigherPriorityReadyTasks = pdFALSE; + + /* uxHigherPriorityReadyTasks takes care of the case where + * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority + * task that are in the Ready state, even though the idle task is + * running. */ + #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) + { + if( uxTopReadyPriority > tskIDLE_PRIORITY ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #else + { + const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01; + + /* When port optimised task selection is used the uxTopReadyPriority + * variable is used as a bit map. If bits other than the least + * significant bit are set then there are tasks that have a priority + * above the idle priority that are in the Ready state. This takes + * care of the case where the co-operative scheduler is in use. */ + if( uxTopReadyPriority > uxLeastSignificantBit ) + { + uxHigherPriorityReadyTasks = pdTRUE; + } + } + #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */ + + if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY ) + { + xReturn = 0; + } + else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 ) + { + /* There are other idle priority tasks in the ready state. 
If + * time slicing is used then the very next tick interrupt must be + * processed. */ + xReturn = 0; + } + else if( uxHigherPriorityReadyTasks != pdFALSE ) + { + /* There are tasks in the Ready state that have a priority above the + * idle priority. This path can only be reached if + * configUSE_PREEMPTION is 0. */ + xReturn = 0; + } + else + { + xReturn = xNextTaskUnblockTime - xTickCount; + } + + return xReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskResumeAll( void ) +{ + TCB_t * pxTCB = NULL; + BaseType_t xAlreadyYielded = pdFALSE; + + if( xSchedulerRunning != pdFALSE ) + { + /* It is possible that an ISR caused a task to be removed from an event + * list while the scheduler was suspended. If this was the case then the + * removed task will have been added to the xPendingReadyList. Once the + * scheduler has been resumed it is safe to move all the pending ready + * tasks from this list into their appropriate ready list. */ + taskENTER_CRITICAL(); + { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + /* If uxSchedulerSuspended is zero then this function does not match a + * previous call to vTaskSuspendAll(). */ + configASSERT( uxSchedulerSuspended ); + + --uxSchedulerSuspended; + portRELEASE_TASK_LOCK(); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U ) + { + /* Move any readied tasks from the pending list into the + * appropriate ready list. */ + while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE ) + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* All appropriate tasks yield at the moment a task is added to xPendingReadyList. + * If the current core yielded then vTaskSwitchContext() has already been called + * which sets xYieldPendings for the current core to pdTRUE. */ + } + + if( pxTCB != NULL ) + { + /* A task was unblocked while the scheduler was suspended, + * which may have prevented the next unblock time from being + * re-calculated, in which case re-calculate it now. Mainly + * important for low power tickless implementations, where + * this can prevent an unnecessary exit from low power + * state. */ + prvResetNextTaskUnblockTime(); + } + + /* If any ticks occurred while the scheduler was suspended then + * they should be processed now. This ensures the tick count does + * not slip, and that any delayed tasks are resumed at the correct + * time. + * + * It should be safe to call xTaskIncrementTick here from any core + * since we are in a critical section and xTaskIncrementTick itself + * protects itself within a critical section. Suspending the scheduler + * from any core causes xTaskIncrementTick to increment uxPendedCounts.*/ + { + TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */ + + if( xPendedCounts > ( TickType_t ) 0U ) + { + do + { + if( xTaskIncrementTick() != pdFALSE ) + { + /* other cores are interrupted from + * within xTaskIncrementTick(). */ + xYieldPendings[ xCoreID ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + --xPendedCounts; + } while( xPendedCounts > ( TickType_t ) 0U ); + + xPendedTicks = 0; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + if( xYieldPendings[ xCoreID ] != pdFALSE ) + { + /* If xYieldPendings is true then taskEXIT_CRITICAL() + * will yield, so make sure we return true to let the + * caller know a yield has already happened. 
*/ + xAlreadyYielded = pdTRUE; + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xAlreadyYielded; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCount( void ) +{ + TickType_t xTicks; + + /* Critical section required if running on a 16 bit processor. */ + portTICK_TYPE_ENTER_CRITICAL(); + { + xTicks = xTickCount; + } + portTICK_TYPE_EXIT_CRITICAL(); + + return xTicks; +} +/*-----------------------------------------------------------*/ + +TickType_t xTaskGetTickCountFromISR( void ) +{ + TickType_t xReturn; + UBaseType_t uxSavedInterruptStatus; + + /* RTOS ports that support interrupt nesting have the concept of a maximum + * system call (or maximum API call) interrupt priority. Interrupts that are + * above the maximum system call priority are kept permanently enabled, even + * when the RTOS kernel is in a critical section, but cannot make any calls to + * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h + * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + * failure if a FreeRTOS API function is called from an interrupt that has been + * assigned a priority above the configured maximum system call priority. + * Only FreeRTOS functions that end in FromISR can be called from interrupts + * that have been assigned a priority at or (logically) below the maximum + * system call interrupt priority. FreeRTOS maintains a separate interrupt + * safe API to ensure interrupt entry is as fast and as simple as possible. 
+ * More information (albeit Cortex-M specific) is provided on the following + * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = xTickCount; + } + portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +UBaseType_t uxTaskGetNumberOfTasks( void ) +{ + /* A critical section is not required because the variables are of type + * BaseType_t. */ + return uxCurrentNumberOfTasks; +} +/*-----------------------------------------------------------*/ + +char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ +{ + TCB_t * pxTCB; + + /* If null is passed in here then the name of the calling task is being + * queried. */ + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + configASSERT( pxTCB ); + return &( pxTCB->pcTaskName[ 0 ] ); +} +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList, + const char pcNameToQuery[] ) + { + #ifdef VERIFAST + /* Reason for rewrite: + * VeriFast does not support multiple pointer declarations to + * user-defined types in single statement + * (i.e., `A p1, p2;` is ok, `A *p1, *p2;` fails) + */ + TCB_t * pxNextTCB; + TCB_t * pxFirstTCB; + TCB_t * pxReturn = NULL; + #else + TCB_t * pxNextTCB, * pxFirstTCB, * pxReturn = NULL; + #endif /* VERIFAST */ + UBaseType_t x; + char cNextChar; + BaseType_t xBreakLoop; + + /* This function is called with the scheduler suspended. */ + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { + listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. 
Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Check each character in the name looking for a match or + * mismatch. */ + xBreakLoop = pdFALSE; + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ ) + { + cNextChar = pxNextTCB->pcTaskName[ x ]; + + if( cNextChar != pcNameToQuery[ x ] ) + { + /* Characters didn't match. */ + xBreakLoop = pdTRUE; + } + else if( cNextChar == ( char ) 0x00 ) + { + /* Both strings terminated, a match must have been + * found. */ + pxReturn = pxNextTCB; + xBreakLoop = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + if( xBreakLoop != pdFALSE ) + { + break; + } + } + + if( pxReturn != NULL ) + { + /* The handle has been found. */ + break; + } + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return pxReturn; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetHandle == 1 ) + + TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */ + { + UBaseType_t uxQueue = configMAX_PRIORITIES; + TCB_t * pxTCB; + + /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */ + configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN ); + + vTaskSuspendAll(); + { + /* Search the ready lists. */ + do + { + uxQueue--; + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery ); + + if( pxTCB != NULL ) + { + /* Found the handle. 
*/ + break; + } + } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Search the delayed lists. */ + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery ); + } + + if( pxTCB == NULL ) + { + pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery ); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the suspended list. */ + pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery ); + } + } + #endif + + #if ( INCLUDE_vTaskDelete == 1 ) + { + if( pxTCB == NULL ) + { + /* Search the deleted list. */ + pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery ); + } + } + #endif + } + ( void ) xTaskResumeAll(); + + return pxTCB; + } + +#endif /* INCLUDE_xTaskGetHandle */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, + const UBaseType_t uxArraySize, + uint32_t * const pulTotalRunTime ) + { + UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES; + + vTaskSuspendAll(); + { + /* Is there a space in the array for each task in the system? */ + if( uxArraySize >= uxCurrentNumberOfTasks ) + { + /* Fill in an TaskStatus_t structure with information on each + * task in the Ready state. */ + do + { + uxQueue--; + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady ); + } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + /* Fill in an TaskStatus_t structure with information on each + * task in the Blocked state. 
*/ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked ); + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked ); + + #if ( INCLUDE_vTaskDelete == 1 ) + { + /* Fill in an TaskStatus_t structure with information on + * each task that has been deleted but not yet cleaned up. */ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted ); + } + #endif + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* Fill in an TaskStatus_t structure with information on + * each task in the Suspended state. */ + uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended ); + } + #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + if( pulTotalRunTime != NULL ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) ); + #else + *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + } + } + #else /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */ + { + if( pulTotalRunTime != NULL ) + { + *pulTotalRunTime = 0; + } + } + #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + ( void ) xTaskResumeAll(); + + return uxTask; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) + + TaskHandle_t * xTaskGetIdleTaskHandle( void ) + { + /* If xTaskGetIdleTaskHandle() is called before the scheduler has been + * started, then xIdleTaskHandle will be NULL. */ + configASSERT( ( xIdleTaskHandle != NULL ) ); + return &( xIdleTaskHandle[ 0 ] ); + } + +#endif /* INCLUDE_xTaskGetIdleTaskHandle */ +/*----------------------------------------------------------*/ + +/* This conditional compilation should use inequality to 0, not equality to 1. 
+ * This is to ensure vTaskStepTick() is available when user defined low power mode + * implementations require configUSE_TICKLESS_IDLE to be set to a value other than + * 1. */ +#if ( configUSE_TICKLESS_IDLE != 0 ) + + void vTaskStepTick( const TickType_t xTicksToJump ) + { + /* Correct the tick count value after a period during which the tick + * was suppressed. Note this does *not* call the tick hook function for + * each stepped tick. */ + configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime ); + xTickCount += xTicksToJump; + traceINCREASE_TICK_COUNT( xTicksToJump ); + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp ) +{ + BaseType_t xYieldOccurred; + + /* Must not be called with the scheduler suspended as the implementation + * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */ + configASSERT( uxSchedulerSuspended == 0 ); + + /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when + * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */ + vTaskSuspendAll(); + xPendedTicks += xTicksToCatchUp; + xYieldOccurred = xTaskResumeAll(); + + return xYieldOccurred; +} +/*----------------------------------------------------------*/ + +#if ( INCLUDE_xTaskAbortDelay == 1 ) + + BaseType_t xTaskAbortDelay( TaskHandle_t xTask ) + { + TCB_t * pxTCB = xTask; + BaseType_t xReturn; + + configASSERT( pxTCB ); + + vTaskSuspendAll(); + { + /* A task can only be prematurely removed from the Blocked state if + * it is actually in the Blocked state. */ + if( eTaskGetState( xTask ) == eBlocked ) + { + xReturn = pdPASS; + + /* Remove the reference to the task from the blocked list. An + * interrupt won't touch the xStateListItem because the + * scheduler is suspended. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? 
If so remove it from + * the event list too. Interrupts can touch the event list item, + * even though the scheduler is suspended, so a critical section + * is used. */ + taskENTER_CRITICAL(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + + /* This lets the task know it was forcibly removed from the + * blocked state so it should not re-evaluate its block time and + * then block again. */ + pxTCB->ucDelayAborted = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + /* Place the unblocked task into the appropriate ready list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate context + * switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + taskENTER_CRITICAL(); + { + prvYieldForTask( pxTCB, pdFALSE ); + } + taskEXIT_CRITICAL(); + } + #endif /* configUSE_PREEMPTION */ + } + else + { + xReturn = pdFAIL; + } + } + ( void ) xTaskResumeAll(); + + return xReturn; + } + +#endif /* INCLUDE_xTaskAbortDelay */ +/*----------------------------------------------------------*/ + +BaseType_t xTaskIncrementTick( void ) +{ + TCB_t * pxTCB; + TickType_t xItemValue; + BaseType_t xSwitchRequired = pdFALSE; + + #if ( configUSE_PREEMPTION == 1 ) + UBaseType_t x; + BaseType_t xCoreYieldList[ configNUM_CORES ] = { pdFALSE }; + #endif /* configUSE_PREEMPTION */ + + taskENTER_CRITICAL(); + { + /* Called by the portable layer each time a tick interrupt occurs. + * Increments the tick then checks to see if the new tick value will cause any + * tasks to be unblocked. */ + traceTASK_INCREMENT_TICK( xTickCount ); + + /* Tick increment should occur on every kernel timer event. Core 0 has the + * responsibility to increment the tick, or increment the pended ticks if the + * scheduler is suspended. 
If pended ticks is greater than zero, the core that + * calls xTaskResumeAll has the responsibility to increment the tick. */ + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + /* Minor optimisation. The tick count cannot change in this + * block. */ + const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1; + + /* Increment the RTOS tick, switching the delayed and overflowed + * delayed lists if it wraps to 0. */ + xTickCount = xConstTickCount; + + if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */ + { + taskSWITCH_DELAYED_LISTS(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* See if this tick has made a timeout expire. Tasks are stored in + * the queue in the order of their wake time - meaning once one task + * has been found whose block time has not expired there is no need to + * look any further down the list. */ + if( xConstTickCount >= xNextTaskUnblockTime ) + { + for( ; ; ) + { + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The delayed list is empty. Set xNextTaskUnblockTime + * to the maximum possible value so it is extremely + * unlikely that the + * if( xTickCount >= xNextTaskUnblockTime ) test will pass + * next time through. */ + xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + break; + } + else + { + /* The delayed list is not empty, get the value of the + * item at the head of the delayed list. This is the time + * at which the task at the head of the delayed list must + * be removed from the Blocked state. */ + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ + xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) ); + + if( xConstTickCount < xItemValue ) + { + /* It is not time to unblock this item yet, but the + * item value is the time at which the task at the head + * of the blocked list must be removed from the Blocked + * state - so record the item value in + * xNextTaskUnblockTime. */ + xNextTaskUnblockTime = xItemValue; + break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* It is time to remove the item from the Blocked state. */ + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + + /* Is the task waiting on an event also? If so remove + * it from the event list. */ + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + ( void ) uxListRemove( &( pxTCB->xEventListItem ) ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Place the unblocked task into the appropriate ready + * list. */ + prvAddTaskToReadyList( pxTCB ); + + /* A task being unblocked cannot cause an immediate + * context switch if preemption is turned off. */ + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxTCB, pdTRUE ); + } + #endif /* configUSE_PREEMPTION */ + } + } + } + + /* Tasks of equal priority to the currently running task will share + * processing time (time slice) if preemption is on, and the application + * writer has not explicitly turned time slicing off. */ + #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) + { + /* TODO: If there are fewer "non-IDLE" READY tasks than cores, do not + * force a context switch that would just shuffle tasks around cores */ + /* TODO: There are certainly better ways of doing this that would reduce + * the number of interrupts and also potentially help prevent tasks from + * moving between cores as often. This, however, works for now. 
*/ + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCBs[ x ]->uxPriority ] ) ) > ( UBaseType_t ) 1 ) + { + xCoreYieldList[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */ + + #if ( configUSE_TICK_HOOK == 1 ) + { + /* Guard against the tick hook being called when the pended tick + * count is being unwound (when the scheduler is being unlocked). */ + if( xPendedTicks == ( TickType_t ) 0 ) + { + vApplicationTickHook(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICK_HOOK */ + + #if ( configUSE_PREEMPTION == 1 ) + { + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + if( xYieldPendings[ x ] != pdFALSE ) + { + xCoreYieldList[ x ] = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + #endif /* configUSE_PREEMPTION */ + + #if ( configUSE_PREEMPTION == 1 ) + { + BaseType_t xCoreID; + + xCoreID = portGET_CORE_ID(); + + for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configNUM_CORES; x++ ) + { + #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) + if( pxCurrentTCBs[ x ]->xPreemptionDisable == pdFALSE ) + #endif + { + if( xCoreYieldList[ x ] != pdFALSE ) + { + if( x == xCoreID ) + { + xSwitchRequired = pdTRUE; + } + else + { + prvYieldCore( x ); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + } + #endif /* configUSE_PREEMPTION */ + } + else + { + ++xPendedTicks; + + /* The tick hook gets called at regular intervals, even if the + * scheduler is locked. 
*/ + #if ( configUSE_TICK_HOOK == 1 ) + { + vApplicationTickHook(); + } + #endif + } + } + taskEXIT_CRITICAL(); + + return xSwitchRequired; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + void vTaskSetApplicationTaskTag( TaskHandle_t xTask, + TaskHookFunction_t pxHookFunction ) + { + TCB_t * xTCB; + + /* If xTask is NULL then it is the task hook of the calling task that is + * getting set. */ + if( xTask == NULL ) + { + xTCB = ( TCB_t * ) pxCurrentTCB; + } + else + { + xTCB = xTask; + } + + /* Save the hook function in the TCB. A critical section is required as + * the value can be accessed from an interrupt. */ + taskENTER_CRITICAL(); + { + xTCB->pxTaskTag = pxHookFunction; + } + taskEXIT_CRITICAL(); + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask ) + { + TCB_t * pxTCB; + TaskHookFunction_t xReturn; + + /* If xTask is NULL then set the calling task's hook. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + /* Save the hook function in the TCB. A critical section is required as + * the value can be accessed from an interrupt. */ + taskENTER_CRITICAL(); + { + xReturn = pxTCB->pxTaskTag; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask ) + { + TCB_t * pxTCB; + TaskHookFunction_t xReturn; + UBaseType_t uxSavedInterruptStatus; + + /* If xTask is NULL then set the calling task's hook. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + /* Save the hook function in the TCB. A critical section is required as + * the value can be accessed from an interrupt. 
*/ + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + xReturn = pxTCB->pxTaskTag; + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_APPLICATION_TASK_TAG == 1 ) + + BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, + void * pvParameter ) + { + TCB_t * xTCB; + BaseType_t xReturn; + + /* If xTask is NULL then we are calling our own task hook. */ + if( xTask == NULL ) + { + xTCB = pxCurrentTCB; + } + else + { + xTCB = xTask; + } + + if( xTCB->pxTaskTag != NULL ) + { + xReturn = xTCB->pxTaskTag( pvParameter ); + } + else + { + xReturn = pdFAIL; + } + + return xReturn; + } + +#endif /* configUSE_APPLICATION_TASK_TAG */ +/*-----------------------------------------------------------*/ + +void vTaskSwitchContext( BaseType_t xCoreID ) +/*@ requires 0 <= xCoreID &*& xCoreID < configNUM_CORES &*& + xCoreID == coreID_f() + &*& + // access to locks and disabled interrupts + locked_p(nil) &*& + [?f_ISR]isrLock_p() &*& + [?f_task]taskLock_p() &*& + interruptState_p(xCoreID, ?state) &*& + interruptsDisabled_f(state) == true + &*& + // opened predicate `coreLocalInterruptInv_p()` + pointer(&pxCurrentTCBs[coreID_f], ?gCurrentTCB) &*& + integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*& + TCB_criticalNesting_p(gCurrentTCB, 0) + &*& + // read access to current task's stack pointer, etc + TCB_stack_p(gCurrentTCB, ?ulFreeBytesOnStack); + +@*/ +/*@ ensures // all locks are released and interrupts remain disabled + locked_p(nil) &*& + [f_ISR]isrLock_p() &*& + [f_task]taskLock_p() &*& + interruptState_p(xCoreID, state) + &*& + // opened predicate `coreLocalInterruptInv_p()` + pointer(&pxCurrentTCBs[coreID_f], ?gNewCurrentTCB) &*& + integer_(&xYieldPendings[coreID_f], sizeof(BaseType_t), true, _) &*& + TCB_criticalNesting_p(gCurrentTCB, 0) + &*& + // read access to current 
task's stack pointer, etc + TCB_stack_p(gCurrentTCB, ulFreeBytesOnStack); +@*/ +{ + /* Acquire both locks: + * - The ISR lock protects the ready list from simultaneous access by + * both other ISRs and tasks. + * - We also take the task lock to pause here in case another core has + * suspended the scheduler. We don't want to simply set xYieldPending + * and move on if another core suspended the scheduler. We should only + * do that if the current core has suspended the scheduler. */ + + portGET_TASK_LOCK(); /* Must always acquire the task lock first */ + portGET_ISR_LOCK(); + //@ produce_taskISRLockInv(); + { + /* vTaskSwitchContext() must never be called from within a critical section. + * This is not necessarily true for vanilla FreeRTOS, but it is for this SMP port. */ + #ifdef VERIFAST + /* Reason for rewrite: VeriFast cannot handle non-pure assertions. */ + { + // PROBLEM: + // Line + // UBaseType_t nesting = pxCurrentTCB->uxCriticalNesting; + // leads to VF error + // "This potentially side-effecting expression is not supported in this position, because of C's unspecified evaluation order" + // + // TODO: Inspect reason. + TaskHandle_t currentHandle = pxCurrentTCB; + //@ assert( currentHandle == gCurrentTCB ); + //@ open TCB_criticalNesting_p(gCurrentTCB, 0); + UBaseType_t nesting = currentHandle->uxCriticalNesting; + configASSERT( nesting == 0 ); + //@ close TCB_criticalNesting_p(gCurrentTCB, 0); + } + #else + configASSERT( pxCurrentTCB->uxCriticalNesting == 0 ); + #endif /* VERIFAST */ + + //@ open taskISRLockInv_p(); + //@ open _taskISRLockInv_p(_); + if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE ) + { + /* The scheduler is currently suspended - do not allow a context + * switch. 
*/ + xYieldPendings[ xCoreID ] = pdTRUE; + //@ close _taskISRLockInv_p(_); + //@ close taskISRLockInv_p(); + } + else + { + xYieldPendings[ xCoreID ] = pdFALSE; + traceTASK_SWITCHED_OUT(); + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE + portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime ); + #else + ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE(); + #endif + + /* Add the amount of time the task has been running to the + * accumulated time so far. The time the task started running was + * stored in ulTaskSwitchedInTime. Note that there is no overflow + * protection here so count values are only valid until the timer + * overflows. The guard against negative values is to protect + * against suspect run time stat counter implementations - which + * are provided by the application, not the kernel. */ + if( ulTotalRunTime > ulTaskSwitchedInTime ) + { + pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + ulTaskSwitchedInTime = ulTotalRunTime; + } + #endif /* configGENERATE_RUN_TIME_STATS */ + + /* Check for stack overflow, if configured. */ + taskCHECK_FOR_STACK_OVERFLOW(); + + /* Before the currently running task is switched out, save its errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + pxCurrentTCB->iTaskErrno = FreeRTOS_errno; + } + #endif + + /* Select a new task to run using either the generic C or port + * optimised asm code. */ + //@ close _taskISRLockInv_p(_); + //@ close taskISRLockInv_p(); + ( void ) prvSelectHighestPriorityTask( xCoreID ); + traceTASK_SWITCHED_IN(); + + /* After the new task is switched in, update the global errno. */ + #if ( configUSE_POSIX_ERRNO == 1 ) + { + FreeRTOS_errno = pxCurrentTCB->iTaskErrno; + } + #endif + + #if ( ( configUSE_NEWLIB_REENTRANT == 1 ) && ( configNEWLIB_REENTRANT_IS_DYNAMIC == 0 ) ) + { + /* Switch Newlib's _impure_ptr variable to point to the _reent + * structure specific to this task. 
+ * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. + * + * Note: Updating the _impure_ptr is not required when Newlib is compiled with + * __DYNAMIC_REENT__ enabled. The the port should provide __getreent() instead. */ + _impure_ptr = &( pxCurrentTCB->xNewLib_reent ); + } + #endif /* ( configUSE_NEWLIB_REENTRANT == 1 ) && ( configNEWLIB_REENTRANT_IS_DYNAMIC == 0 ) */ + } + } + //@ consume_taskISRLockInv(); + portRELEASE_ISR_LOCK(); + portRELEASE_TASK_LOCK(); +} +/*-----------------------------------------------------------*/ + +void vTaskPlaceOnEventList( List_t * const pxEventList, + const TickType_t xTicksToWait ) +{ + configASSERT( pxEventList ); + + /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE + * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */ + + /* Place the event list item of the TCB in the appropriate event list. + * This is placed in the list in priority order so the highest priority task + * is the first to be woken by the event. The queue that contains the event + * list is locked, preventing simultaneous access from interrupts. */ + vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); +} +/*-----------------------------------------------------------*/ + +void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, + const TickType_t xItemValue, + const TickType_t xTicksToWait ) +{ + configASSERT( pxEventList ); + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + * the event groups implementation. */ + configASSERT( uxSchedulerSuspended != 0 ); + + /* Store the item value in the event list item. It is safe to access the + * event list item here as interrupts won't access the event list item of a + * task that is not in the Blocked state. 
*/ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); + + /* Place the event list item of the TCB at the end of the appropriate event + * list. It is safe to access the event list here because it is part of an + * event group implementation - and interrupts don't access event groups + * directly (instead they access them indirectly by pending function calls to + * the task level). */ + vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TIMERS == 1 ) + + void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, + TickType_t xTicksToWait, + const BaseType_t xWaitIndefinitely ) + { + configASSERT( pxEventList ); + + /* This function should not be called by application code hence the + * 'Restricted' in its name. It is not part of the public API. It is + * designed for use by kernel code, and has special calling requirements - + * it should be called with the scheduler suspended. */ + + + /* Place the event list item of the TCB in the appropriate event list. + * In this case it is assume that this is the only task that is going to + * be waiting on this event list, so the faster vListInsertEnd() function + * can be used in place of vListInsert. */ + vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) ); + + /* If the task should block indefinitely then set the block time to a + * value that will be recognised as an indefinite delay inside the + * prvAddCurrentTaskToDelayedList() function. 
*/ + if( xWaitIndefinitely != pdFALSE ) + { + xTicksToWait = portMAX_DELAY; + } + + traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) ); + prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely ); + } + +#endif /* configUSE_TIMERS */ +/*-----------------------------------------------------------*/ + +BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList ) +{ + TCB_t * pxUnblockedTCB; + BaseType_t xReturn; + + /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be + * called from a critical section within an ISR. */ + + /* The event list is sorted in priority order, so the first in the list can + * be removed as it is known to be the highest priority. Remove the TCB from + * the delayed list, and add it to the ready list. + * + * If an event is for a queue that is locked then this function will never + * get called - the lock count on the queue will get modified instead. This + * means exclusive access to the event list is guaranteed here. + * + * This function assumes that a check has already been made to ensure that + * pxEventList is not empty. */ + pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + configASSERT( pxUnblockedTCB ); + ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + * might be set to the blocked task's time out time. 
If the task is + * unblocked for a reason other than a timeout xNextTaskUnblockTime is + * normally left unchanged, because it is automatically reset to a new + * value when the tick count equals xNextTaskUnblockTime. However if + * tickless idling is used it might be more important to enter sleep mode + * at the earliest possible time - so reset xNextTaskUnblockTime here to + * ensure it is updated at the earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + } + else + { + /* The delayed and ready lists cannot be accessed, so hold this task + * pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) ); + } + + xReturn = pdFALSE; + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxUnblockedTCB, pdFALSE ); + + if( xYieldPendings[ portGET_CORE_ID() ] != pdFALSE ) + { + xReturn = pdTRUE; + } + #endif + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, + const TickType_t xItemValue ) +{ + TCB_t * pxUnblockedTCB; + + /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by + * the event flags implementation. */ + configASSERT( uxSchedulerSuspended != pdFALSE ); + + /* Store the new item value in the event list. */ + listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE ); + + /* Remove the event list form the event flag. Interrupts do not access + * event flags. */ + pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. 
*/ + configASSERT( pxUnblockedTCB ); + ( void ) uxListRemove( pxEventListItem ); + + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked on a kernel object then xNextTaskUnblockTime + * might be set to the blocked task's time out time. If the task is + * unblocked for a reason other than a timeout xNextTaskUnblockTime is + * normally left unchanged, because it is automatically reset to a new + * value when the tick count equals xNextTaskUnblockTime. However if + * tickless idling is used it might be more important to enter sleep mode + * at the earliest possible time - so reset xNextTaskUnblockTime here to + * ensure it is updated at the earliest possible time. */ + prvResetNextTaskUnblockTime(); + } + #endif + + /* Remove the task from the delayed list and add it to the ready list. The + * scheduler is suspended so interrupts will not be accessing the ready + * lists. */ + ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxUnblockedTCB ); + + #if ( configUSE_PREEMPTION == 1 ) + taskENTER_CRITICAL(); + { + prvYieldForTask( pxUnblockedTCB, pdFALSE ); + } + taskEXIT_CRITICAL(); + #endif +} +/*-----------------------------------------------------------*/ + +void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut ) +{ + configASSERT( pxTimeOut ); + taskENTER_CRITICAL(); + { + pxTimeOut->xOverflowCount = xNumOfOverflows; + pxTimeOut->xTimeOnEntering = xTickCount; + } + taskEXIT_CRITICAL(); +} +/*-----------------------------------------------------------*/ + +void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut ) +{ + /* For internal use only as it does not use a critical section. 
*/ + pxTimeOut->xOverflowCount = xNumOfOverflows; + pxTimeOut->xTimeOnEntering = xTickCount; +} +/*-----------------------------------------------------------*/ + +BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, + TickType_t * const pxTicksToWait ) +{ + BaseType_t xReturn; + + configASSERT( pxTimeOut ); + configASSERT( pxTicksToWait ); + + taskENTER_CRITICAL(); + { + /* Minor optimisation. The tick count cannot change in this block. */ + const TickType_t xConstTickCount = xTickCount; + const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering; + + #if ( INCLUDE_xTaskAbortDelay == 1 ) + if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE ) + { + /* The delay was aborted, which is not the same as a time out, + * but has the same result. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + xReturn = pdTRUE; + } + else + #endif + + #if ( INCLUDE_vTaskSuspend == 1 ) + if( *pxTicksToWait == portMAX_DELAY ) + { + /* If INCLUDE_vTaskSuspend is set to 1 and the block time + * specified is the maximum block time then the task should block + * indefinitely, and therefore never time out. */ + xReturn = pdFALSE; + } + else + #endif + + if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */ + { + /* The tick count is greater than the time at which + * vTaskSetTimeout() was called, but has also overflowed since + * vTaskSetTimeOut() was called. It must have wrapped all the way + * around and gone past again. This passed since vTaskSetTimeout() + * was called. */ + xReturn = pdTRUE; + *pxTicksToWait = ( TickType_t ) 0; + } + else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */ + { + /* Not a genuine timeout. Adjust parameters for time remaining. 
*/ + *pxTicksToWait -= xElapsedTime; + vTaskInternalSetTimeOutState( pxTimeOut ); + xReturn = pdFALSE; + } + else + { + *pxTicksToWait = ( TickType_t ) 0; + xReturn = pdTRUE; + } + } + taskEXIT_CRITICAL(); + + return xReturn; +} +/*-----------------------------------------------------------*/ + +void vTaskMissedYield( void ) +{ + /* Must be called from within a critical section */ + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask ) + { + UBaseType_t uxReturn; + TCB_t const * pxTCB; + + if( xTask != NULL ) + { + pxTCB = xTask; + uxReturn = pxTCB->uxTaskNumber; + } + else + { + uxReturn = 0U; + } + + return uxReturn; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTaskSetTaskNumber( TaskHandle_t xTask, + const UBaseType_t uxHandle ) + { + TCB_t * pxTCB; + + if( xTask != NULL ) + { + pxTCB = xTask; + pxTCB->uxTaskNumber = uxHandle; + } + } + +#endif /* configUSE_TRACE_FACILITY */ + +/* + * ----------------------------------------------------------- + * The MinimalIdle task. + * ---------------------------------------------------------- + * + * The minimal idle task is used for all the additional Cores in a SMP system. + * There must be only 1 idle task and the rest are minimal idle tasks. + * + * @todo additional conditional compiles to remove this function. + */ + +#if ( configNUM_CORES > 1 ) + static portTASK_FUNCTION( prvMinimalIdleTask, pvParameters ) + { + taskYIELD(); + + for( ; ; ) + { + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + * see if any other task has become available. If we are using + * preemption we don't need to do this as any task becoming available + * will automatically get the processor anyway. 
*/ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + * timesliced. If a task that is sharing the idle priority is ready + * to run then the idle task should yield before the end of the + * timeslice. + * + * A critical region is not required here as we are just reading from + * the list, and an occasional incorrect value will not matter. If + * the ready list at the idle priority contains one more task than the + * number of idle tasks, which is equal to the configured numbers of cores + * then a task other than the idle task is ready to execute. */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + + #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) + { + extern void vApplicationMinimalIdleHook( void ); + + /* Call the user defined function from within the idle task. This + * allows the application designer to add background functionality + * without the overhead of a separate task. + * + * This hook is intended to manage core activity such as disabling cores that go idle. + * + * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + * CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationMinimalIdleHook(); + } + #endif /* configUSE_MINIMAL_IDLE_HOOK */ + } + } +#endif /* if ( configNUM_CORES > 1 ) */ + +/* + * ----------------------------------------------------------- + * The Idle task. + * ---------------------------------------------------------- + * + * + */ +static portTASK_FUNCTION( prvIdleTask, pvParameters ) +{ + /* Stop warnings. */ + ( void ) pvParameters; + + /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE + * SCHEDULER IS STARTED. 
**/ + + /* In case a task that has a secure context deletes itself, in which case + * the idle task is responsible for deleting the task's secure context, if + * any. */ + portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE ); + + /* All cores start up in the idle task. This initial yield gets the application + * tasks started. */ + taskYIELD(); + + for( ; ; ) + { + /* See if any tasks have deleted themselves - if so then the idle task + * is responsible for freeing the deleted task's TCB and stack. */ + prvCheckTasksWaitingTermination(); + + #if ( configUSE_PREEMPTION == 0 ) + { + /* If we are not using preemption we keep forcing a task switch to + * see if any other task has become available. If we are using + * preemption we don't need to do this as any task becoming available + * will automatically get the processor anyway. */ + taskYIELD(); + } + #endif /* configUSE_PREEMPTION */ + + #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) + { + /* When using preemption tasks of equal priority will be + * timesliced. If a task that is sharing the idle priority is ready + * to run then the idle task should yield before the end of the + * timeslice. + * + * A critical region is not required here as we are just reading from + * the list, and an occasional incorrect value will not matter. If + * the ready list at the idle priority contains one more task than the + * number of idle tasks, which is equal to the configured numbers of cores + * then a task other than the idle task is ready to execute. */ + if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) configNUM_CORES ) + { + taskYIELD(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */ + + #if ( configUSE_IDLE_HOOK == 1 ) + { + extern void vApplicationIdleHook( void ); + + /* Call the user defined function from within the idle task. 
This + * allows the application designer to add background functionality + * without the overhead of a separate task. + * + * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + * CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationIdleHook(); + } + #endif /* configUSE_IDLE_HOOK */ + + /* This conditional compilation should use inequality to 0, not equality + * to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when + * user defined low power mode implementations require + * configUSE_TICKLESS_IDLE to be set to a value other than 1. */ + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + TickType_t xExpectedIdleTime; + + /* It is not desirable to suspend then resume the scheduler on + * each iteration of the idle task. Therefore, a preliminary + * test of the expected idle time is performed without the + * scheduler suspended. The result here is not necessarily + * valid. */ + xExpectedIdleTime = prvGetExpectedIdleTime(); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + vTaskSuspendAll(); + { + /* Now the scheduler is suspended, the expected idle + * time can be sampled again, and this time its value can + * be used. */ + configASSERT( xNextTaskUnblockTime >= xTickCount ); + xExpectedIdleTime = prvGetExpectedIdleTime(); + + /* Define the following macro to set xExpectedIdleTime to 0 + * if the application does not want + * portSUPPRESS_TICKS_AND_SLEEP() to be called. 
*/ + configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime ); + + if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP ) + { + traceLOW_POWER_IDLE_BEGIN(); + portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime ); + traceLOW_POWER_IDLE_END(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + ( void ) xTaskResumeAll(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configUSE_TICKLESS_IDLE */ + + #if ( configUSE_MINIMAL_IDLE_HOOK == 1 ) + { + extern void vApplicationMinimalIdleHook( void ); + + /* Call the user defined function from within the idle task. This + * allows the application designer to add background functionality + * without the overhead of a separate task. + * + * This hook is intended to manage core activity such as disabling cores that go idle. + * + * NOTE: vApplicationMinimalIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES, + * CALL A FUNCTION THAT MIGHT BLOCK. */ + vApplicationMinimalIdleHook(); + } + #endif /* configUSE_MINIMAL_IDLE_HOOK */ + } +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TICKLESS_IDLE != 0 ) + + eSleepModeStatus eTaskConfirmSleepModeStatus( void ) + { + /* The idle task exists in addition to the application tasks. */ + const UBaseType_t uxNonApplicationTasks = 1; + eSleepModeStatus eReturn = eStandardSleep; + + /* This function must be called from a critical section. */ + + if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 ) + { + /* A task was made ready while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else if( xYieldPending != pdFALSE ) + { + /* A yield was pended while the scheduler was suspended. */ + eReturn = eAbortSleep; + } + else if( xPendedTicks != 0 ) + { + /* A tick interrupt has already occurred but was held pending + * because the scheduler is suspended. 
*/ + eReturn = eAbortSleep; + } + else + { + /* If all the tasks are in the suspended list (which might mean they + * have an infinite block time rather than actually being suspended) + * then it is safe to turn all clocks off and just wait for external + * interrupts. */ + if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) ) + { + eReturn = eNoTasksWaitingTimeout; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + return eReturn; + } + +#endif /* configUSE_TICKLESS_IDLE */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, + BaseType_t xIndex, + void * pvValue ) + { + TCB_t * pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToSet ); + configASSERT( pxTCB != NULL ); + pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue; + } + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 ) + + void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, + BaseType_t xIndex ) + { + void * pvReturn = NULL; + TCB_t * pxTCB; + + if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) + { + pxTCB = prvGetTCBFromHandle( xTaskToQuery ); + pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ]; + } + else + { + pvReturn = NULL; + } + + return pvReturn; + } + +#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */ +/*-----------------------------------------------------------*/ + +#if ( portUSING_MPU_WRAPPERS == 1 ) + + void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, + const MemoryRegion_t * const xRegions ) + { + TCB_t * pxTCB; + + /* If null is passed in here then we are modifying the MPU settings of + * the calling task. 
*/ + pxTCB = prvGetTCBFromHandle( xTaskToModify ); + + vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 ); + } + +#endif /* portUSING_MPU_WRAPPERS */ +/*-----------------------------------------------------------*/ + +static void prvInitialiseTaskLists( void ) +{ + UBaseType_t uxPriority; + + for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ ) + { + vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) ); + } + + vListInitialise( &xDelayedTaskList1 ); + vListInitialise( &xDelayedTaskList2 ); + vListInitialise( &xPendingReadyList ); + + #if ( INCLUDE_vTaskDelete == 1 ) + { + vListInitialise( &xTasksWaitingTermination ); + } + #endif /* INCLUDE_vTaskDelete */ + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + vListInitialise( &xSuspendedTaskList ); + } + #endif /* INCLUDE_vTaskSuspend */ + + /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList + * using list2. */ + pxDelayedTaskList = &xDelayedTaskList1; + pxOverflowDelayedTaskList = &xDelayedTaskList2; +} +/*-----------------------------------------------------------*/ + +static void prvCheckTasksWaitingTermination( void ) +{ + /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/ + + #if ( INCLUDE_vTaskDelete == 1 ) + { + TCB_t * pxTCB; + + /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL() + * being called too often in the idle task. */ + while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + { + taskENTER_CRITICAL(); + { + /* Since we are SMP, multiple idles can be running simultaneously + * and we need to check that other idles did not cleanup while we were + * waiting to enter the critical section */ + if( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U ) + { + pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. 
Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + if( pxTCB->xTaskRunState == taskTASK_NOT_RUNNING ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + --uxCurrentNumberOfTasks; + --uxDeletedTasksWaitingCleanUp; + prvDeleteTCB( pxTCB ); + } + else + { + /* The TCB to be deleted still has not yet been switched out + * by the scheduler, so we will just exit this loop early and + * try again next time. */ + taskEXIT_CRITICAL(); + break; + } + } + } + taskEXIT_CRITICAL(); + } + } + #endif /* INCLUDE_vTaskDelete */ +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + void vTaskGetInfo( TaskHandle_t xTask, + TaskStatus_t * pxTaskStatus, + BaseType_t xGetFreeStackSpace, + eTaskState eState ) + { + TCB_t * pxTCB; + + /* xTask is NULL then get the state of the calling task. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB; + pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] ); + pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority; + pxTaskStatus->pxStackBase = pxTCB->pxStack; + pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber; + + #if ( configUSE_MUTEXES == 1 ) + { + pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority; + } + #else + { + pxTaskStatus->uxBasePriority = 0; + } + #endif + + #if ( configGENERATE_RUN_TIME_STATS == 1 ) + { + pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter; + } + #else + { + pxTaskStatus->ulRunTimeCounter = 0; + } + #endif + + /* Obtaining the task state is a little fiddly, so is only done if the + * value of eState passed into this function is eInvalid - otherwise the + * state is just set to whatever is passed in. 
*/ + if( eState != eInvalid ) + { + if( taskTASK_IS_RUNNING( pxTCB->xTaskRunState ) ) + { + pxTaskStatus->eCurrentState = eRunning; + } + else + { + pxTaskStatus->eCurrentState = eState; + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + /* If the task is in the suspended list then there is a + * chance it is actually just blocked indefinitely - so really + * it should be reported as being in the Blocked state. */ + if( eState == eSuspended ) + { + vTaskSuspendAll(); + { + if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL ) + { + pxTaskStatus->eCurrentState = eBlocked; + } + } + ( void ) xTaskResumeAll(); + } + } + #endif /* INCLUDE_vTaskSuspend */ + } + } + else + { + pxTaskStatus->eCurrentState = eTaskGetState( pxTCB ); + } + + /* Obtaining the stack space takes some time, so the xGetFreeStackSpace + * parameter is provided to allow it to be skipped. */ + if( xGetFreeStackSpace != pdFALSE ) + { + #if ( portSTACK_GROWTH > 0 ) + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack ); + } + #else + { + pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack ); + } + #endif + } + else + { + pxTaskStatus->usStackHighWaterMark = 0; + } + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TRACE_FACILITY == 1 ) + + static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray, + List_t * pxList, + eTaskState eState ) + { + #ifdef VERIFAST + /* Reason for rewrite: + * VeriFast does not support multiple pointer declarations to + * user-defined types in single statement + * (i.e., `A p1, p2;` is ok, `A *p1, *p2;` fails) + */ + configLIST_VOLATILE TCB_t * pxNextTCB; + configLIST_VOLATILE TCB_t * pxFirstTCB; + #else + configLIST_VOLATILE TCB_t * pxNextTCB, * pxFirstTCB; + #endif /* VERIFAST */ + UBaseType_t uxTask = 0; + + if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 ) + { 
+ listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + + /* Populate an TaskStatus_t structure within the + * pxTaskStatusArray array for each task that is referenced from + * pxList. See the definition of TaskStatus_t in task.h for the + * meaning of each TaskStatus_t structure member. */ + do + { + listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */ + vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState ); + uxTask++; + } while( pxNextTCB != pxFirstTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return uxTask; + } + +#endif /* configUSE_TRACE_FACILITY */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) + + static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) + { + uint32_t ulCount = 0U; + + while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE ) + { + pucStackByte -= portSTACK_GROWTH; + ulCount++; + } + + ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */ + + return ( configSTACK_DEPTH_TYPE ) ulCount; + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) + +/* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the + * same except for their return type. 
Using configSTACK_DEPTH_TYPE allows the + * user to determine the return type. It gets around the problem of the value + * overflowing on 8-bit types without breaking backward compatibility for + * applications that expect an 8-bit return type. */ + configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask ) + { + TCB_t * pxTCB; + uint8_t * pucEndOfStack; + configSTACK_DEPTH_TYPE uxReturn; + + /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are + * the same except for their return type. Using configSTACK_DEPTH_TYPE + * allows the user to determine the return type. It gets around the + * problem of the value overflowing on 8-bit types without breaking + * backward compatibility for applications that expect an 8-bit return + * type. */ + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) + + UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask ) + { + TCB_t * pxTCB; + uint8_t * pucEndOfStack; + UBaseType_t uxReturn; + + pxTCB = prvGetTCBFromHandle( xTask ); + + #if portSTACK_GROWTH < 0 + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxStack; + } + #else + { + pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack; + } + #endif + + uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack ); + + return uxReturn; + } + +#endif /* INCLUDE_uxTaskGetStackHighWaterMark */ +/*-----------------------------------------------------------*/ + +#if ( INCLUDE_vTaskDelete == 1 ) + + static void prvDeleteTCB( TCB_t * pxTCB ) + { + /* This call is required specifically for the TriCore port. It must be + * above the vPortFree() calls. 
The call is also used by ports/demos that + * want to allocate and clean RAM statically. */ + portCLEAN_UP_TCB( pxTCB ); + + /* Free up the memory allocated by the scheduler for the task. It is up + * to the task to free any memory allocated at the application level. + * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html + * for additional information. */ + #if ( configUSE_NEWLIB_REENTRANT == 1 ) + { + _reclaim_reent( &( pxTCB->xNewLib_reent ) ); + } + #endif /* configUSE_NEWLIB_REENTRANT */ + + #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) ) + { + /* The task can only have been allocated dynamically - free both + * the stack and TCB. */ + vPortFreeStack( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + #elif ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */ + { + /* The task could have been allocated statically or dynamically, so + * check what was statically allocated before trying to free the + * memory. */ + if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ) + { + /* Both the stack and TCB were allocated dynamically, so both + * must be freed. */ + vPortFreeStack( pxTCB->pxStack ); + vPortFree( pxTCB ); + } + else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY ) + { + /* Only the stack was statically allocated, so the TCB is the + * only memory that must be freed. */ + vPortFree( pxTCB ); + } + else + { + /* Neither the stack nor the TCB were allocated dynamically, so + * nothing needs to be freed. 
*/ + configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB ); + mtCOVERAGE_TEST_MARKER(); + } + } + #endif /* configSUPPORT_DYNAMIC_ALLOCATION */ + } + +#endif /* INCLUDE_vTaskDelete */ +/*-----------------------------------------------------------*/ + +static void prvResetNextTaskUnblockTime( void ) +{ + if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE ) + { + /* The new current delayed list is empty. Set xNextTaskUnblockTime to + * the maximum possible value so it is extremely unlikely that the + * if( xTickCount >= xNextTaskUnblockTime ) test will pass until + * there is an item in the delayed list. */ + xNextTaskUnblockTime = portMAX_DELAY; + } + else + { + /* The new current delayed list is not empty, get the value of + * the item at the head of the delayed list. This is the time at + * which the task at the head of the delayed list should be removed + * from the Blocked state. */ + xNextTaskUnblockTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxDelayedTaskList ); + } +} +/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) + + TaskHandle_t xTaskGetCurrentTaskHandle( void ) + /*@ requires interruptState_p(coreID_f(), ?state) &*& + pointer(&pxCurrentTCBs[coreID_f], ?taskHandle); + @*/ + /*@ ensures interruptState_p(coreID_f(), state) &*& + pointer(&pxCurrentTCBs[coreID_f], taskHandle) &*& + result == taskHandle; + @*/ + { + TaskHandle_t xReturn; + uint32_t ulState; + + ulState = portDISABLE_INTERRUPTS(); + xReturn = pxCurrentTCBs[ portGET_CORE_ID() ]; + portRESTORE_INTERRUPTS( ulState ); + + return xReturn; + } + + TaskHandle_t xTaskGetCurrentTaskHandleCPU( UBaseType_t xCoreID ) + { + TaskHandle_t xReturn = NULL; + + if( taskVALID_CORE_ID( xCoreID ) != pdFALSE ) + { + xReturn = pxCurrentTCBs[ xCoreID ]; + } + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */ 
+/*-----------------------------------------------------------*/ + +#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) + + BaseType_t xTaskGetSchedulerState( void ) + { + BaseType_t xReturn; + + if( xSchedulerRunning == pdFALSE ) + { + xReturn = taskSCHEDULER_NOT_STARTED; + } + else + { + taskENTER_CRITICAL(); + { + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + xReturn = taskSCHEDULER_RUNNING; + } + else + { + xReturn = taskSCHEDULER_SUSPENDED; + } + } + taskEXIT_CRITICAL(); + } + + return xReturn; + } + +#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxMutexHolderTCB = pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + /* If the mutex was given back by an interrupt while the queue was + * locked then the mutex holder might now be NULL. _RB_ Is this still + * needed as interrupts can no longer use mutexes? */ + if( pxMutexHolder != NULL ) + { + /* If the holder of the mutex has a priority below the priority of + * the task attempting to obtain the mutex then it will temporarily + * inherit the priority of the task attempting to obtain the mutex. */ + if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority ) + { + /* Adjust the mutex holder state to account for its new + * priority. Only reset the event list item value if the value is + * not being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. 
*/ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the task being modified is in the ready state it will need + * to be moved into a new list. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Inherit the priority before being moved into the new list. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + prvAddTaskToReadyList( pxMutexHolderTCB ); + } + else + { + /* Just inherit the priority. */ + pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority; + } + + traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority ); + + /* Inheritance occurred. */ + xReturn = pdTRUE; + } + else + { + if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority ) + { + /* The base priority of the mutex holder is lower than the + * priority of the task attempting to take the mutex, but the + * current priority of the mutex holder is not lower than the + * priority of the task attempting to take the mutex. + * Therefore the mutex holder must have already inherited a + * priority, but inheritance would have occurred if that had + * not been the case. 
*/ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder ) + { + TCB_t * const pxTCB = pxMutexHolder; + BaseType_t xReturn = pdFALSE; + + if( pxMutexHolder != NULL ) + { + /* A task can only have an inherited priority if it holds the mutex. + * If the mutex is held by a task then it cannot be given from an + * interrupt, and if a mutex is given by the holding task then it must + * be the running state task. */ + configASSERT( pxTCB == pxCurrentTCB ); + configASSERT( pxTCB->uxMutexesHeld ); + ( pxTCB->uxMutexesHeld )--; + + /* Has the holder of the mutex inherited the priority of another + * task? */ + if( pxTCB->uxPriority != pxTCB->uxBasePriority ) + { + /* Only disinherit if no other mutexes are held. */ + if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 ) + { + /* A task can only have an inherited priority if it holds + * the mutex. If the mutex is held by a task then it cannot be + * given from an interrupt, and if a mutex is given by the + * holding task then it must be the running state task. Remove + * the holding task from the ready list. */ + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Disinherit the priority before adding the task into the + * new ready list. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority ); + pxTCB->uxPriority = pxTCB->uxBasePriority; + + /* Reset the event list item value. It cannot be in use for + * any other purpose if this task is running, and it must be + * running to give back the mutex. 
*/ + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + prvAddTaskToReadyList( pxTCB ); + + /* Return true to indicate that a context switch is required. + * This is only actually required in the corner case whereby + * multiple mutexes were held and the mutexes were given back + * in an order different to that in which they were taken. + * If a context switch did not occur when the first mutex was + * returned, even if a task was waiting on it, then a context + * switch should occur when the last mutex is returned whether + * a task is waiting on it or not. */ + xReturn = pdTRUE; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + return xReturn; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, + UBaseType_t uxHighestPriorityWaitingTask ) + { + TCB_t * const pxTCB = pxMutexHolder; + UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse; + const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1; + + if( pxMutexHolder != NULL ) + { + /* If pxMutexHolder is not NULL then the holder must hold at least + * one mutex. */ + configASSERT( pxTCB->uxMutexesHeld ); + + /* Determine the priority to which the priority of the task that + * holds the mutex should be set. This will be the greater of the + * holding task's base priority and the priority of the highest + * priority task that is waiting to obtain the mutex. */ + if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask ) + { + uxPriorityToUse = uxHighestPriorityWaitingTask; + } + else + { + uxPriorityToUse = pxTCB->uxBasePriority; + } + + /* Does the priority need to change? 
*/ + if( pxTCB->uxPriority != uxPriorityToUse ) + { + /* Only disinherit if no other mutexes are held. This is a + * simplification in the priority inheritance implementation. If + * the task that holds the mutex is also holding other mutexes then + * the other mutexes may have caused the priority inheritance. */ + if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld ) + { + /* If a task has timed out because it already holds the + * mutex it was trying to obtain then it cannot of inherited + * its own priority. */ + configASSERT( pxTCB != pxCurrentTCB ); + + /* Disinherit the priority, remembering the previous + * priority to facilitate determining the subject task's + * state. */ + traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse ); + uxPriorityUsedOnEntry = pxTCB->uxPriority; + pxTCB->uxPriority = uxPriorityToUse; + + /* Only reset the event list item value if the value is not + * being used for anything else. */ + if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL ) + { + listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* If the running task is not the task that holds the mutex + * then the task that holds the mutex could be in either the + * Ready, Blocked or Suspended states. Only remove the task + * from its current state list if it is in the Ready state as + * the task's priority is going to change and there is one + * Ready list per priority. */ + if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE ) + { + if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* It is known that the task is in its ready list so + * there is no need to check again and the port level + * reset macro can be called directly. 
*/ + portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + prvAddTaskToReadyList( pxTCB ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +/* + * If not in a critical section then yield immediately. + * Otherwise set xYieldPending to true to wait to + * yield until exiting the critical section. + */ +void vTaskYieldWithinAPI( void ) +{ + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portYIELD(); + } + else + { + xYieldPendings[ portGET_CORE_ID() ] = pdTRUE; + } +} +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskEnterCritical( void ) + { + portDISABLE_INTERRUPTS(); + + if( xSchedulerRunning != pdFALSE ) + { + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + portGET_TASK_LOCK(); + } + + portGET_ISR_LOCK(); + } + + ( pxCurrentTCB->uxCriticalNesting )++; + + /* This should now be interrupt safe. The only time there would be + * a problem is if this is called before a context switch and + * vTaskExitCritical() is called after pxCurrentTCB changes. Therefore + * this should not be used within vTaskSwitchContext(). */ + + if( ( uxSchedulerSuspended == 0U ) && ( pxCurrentTCB->uxCriticalNesting == 1U ) ) + { + prvCheckForRunStateChange(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( portCRITICAL_NESTING_IN_TCB == 1 ) + + void vTaskExitCritical( void ) + { + if( xSchedulerRunning != pdFALSE ) + { + /* If pxCurrentTCB->uxCriticalNesting is zero then this function + * does not match a previous call to vTaskEnterCritical(). 
*/ + configASSERT( pxCurrentTCB->uxCriticalNesting > 0U ); + + if( pxCurrentTCB->uxCriticalNesting > 0U ) + { + ( pxCurrentTCB->uxCriticalNesting )--; + + if( pxCurrentTCB->uxCriticalNesting == 0U ) + { + portRELEASE_ISR_LOCK(); + + if( portCHECK_IF_IN_ISR() == pdFALSE ) + { + portRELEASE_TASK_LOCK(); + portENABLE_INTERRUPTS(); + + /* When a task yields in a critical section it just sets + * xYieldPending to true. So now that we have exited the + * critical section check if xYieldPending is true, and + * if so yield. */ + if( xYieldPending != pdFALSE ) + { + portYIELD(); + } + } + else + { + /* In an ISR we don't hold the task lock and don't + * need to yield. Yield will happen if necessary when + * the application ISR calls portEND_SWITCHING_ISR() */ + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* portCRITICAL_NESTING_IN_TCB */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) + + static char * prvWriteNameToBuffer( char * pcBuffer, + const char * pcTaskName ) + { + size_t x; + + /* Start by copying the entire string. */ + strcpy( pcBuffer, pcTaskName ); + + /* Pad the end of the string with spaces to ensure columns line up when + * printed out. */ + for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ ) + { + pcBuffer[ x ] = ' '; + } + + /* Terminate. */ + pcBuffer[ x ] = ( char ) 0x00; + + /* Return the new end of string. 
*/ + return &( pcBuffer[ x ] ); + } + +#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */ +/*-----------------------------------------------------------*/ + +#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskList( char * pcWriteBuffer ) + { + TaskStatus_t * pxTaskStatusArray; + UBaseType_t uxArraySize, x; + char cStatus; + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskList() calls uxTaskGetSystemState(), then formats part of the + * uxTaskGetSystemState() output into a human readable table that + * displays task: names, states, priority, stack usage and task number. + * Stack usage specified as the number of unused StackType_t words stack can hold + * on top of stack - not the number of bytes. + * + * vTaskList() has a dependency on the sprintf() C library function that + * might bloat the code size, use a lot of stack, and provide different + * results on different platforms. An alternative, tiny, third party, + * and limited functionality implementation of sprintf() is provided in + * many of the FreeRTOS/Demo sub-directories in a file called + * printf-stdarg.c (note printf-stdarg.c does not provide a full + * snprintf() implementation!). + * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskList(). + */ + + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = ( char ) 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + * function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! 
if + * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + * equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */ + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL ); + + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + switch( pxTaskStatusArray[ x ].eCurrentState ) + { + case eRunning: + cStatus = tskRUNNING_CHAR; + break; + + case eReady: + cStatus = tskREADY_CHAR; + break; + + case eBlocked: + cStatus = tskBLOCKED_CHAR; + break; + + case eSuspended: + cStatus = tskSUSPENDED_CHAR; + break; + + case eDeleted: + cStatus = tskDELETED_CHAR; + break; + + case eInvalid: /* Fall through. */ + default: /* Should not get here, but it is included + * to prevent static checking errors. */ + cStatus = ( char ) 0x00; + break; + } + + /* Write the task name to the string, padding with spaces so it + * can be printed in tabular form more easily. */ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + /* Write the rest of the string. */ + sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. 
*/ + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + * is 0 then vPortFree() will be #defined to nothing. */ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */ +/*----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) + + void vTaskGetRunTimeStats( char * pcWriteBuffer ) + { + TaskStatus_t * pxTaskStatusArray; + UBaseType_t uxArraySize, x; + uint32_t ulTotalTime, ulStatsAsPercentage; + + #if ( configUSE_TRACE_FACILITY != 1 ) + { + #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats(). + } + #endif + + /* + * PLEASE NOTE: + * + * This function is provided for convenience only, and is used by many + * of the demo applications. Do not consider it to be part of the + * scheduler. + * + * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part + * of the uxTaskGetSystemState() output into a human readable table that + * displays the amount of time each task has spent in the Running state + * in both absolute and percentage terms. + * + * vTaskGetRunTimeStats() has a dependency on the sprintf() C library + * function that might bloat the code size, use a lot of stack, and + * provide different results on different platforms. An alternative, + * tiny, third party, and limited functionality implementation of + * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in + * a file called printf-stdarg.c (note printf-stdarg.c does not provide + * a full snprintf() implementation!). 
+ * + * It is recommended that production systems call uxTaskGetSystemState() + * directly to get access to raw stats data, rather than indirectly + * through a call to vTaskGetRunTimeStats(). + */ + + /* Make sure the write buffer does not contain a string. */ + *pcWriteBuffer = ( char ) 0x00; + + /* Take a snapshot of the number of tasks in case it changes while this + * function is executing. */ + uxArraySize = uxCurrentNumberOfTasks; + + /* Allocate an array index for each task. NOTE! If + * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will + * equate to NULL. */ + pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */ + + if( pxTaskStatusArray != NULL ) + { + /* Generate the (binary) data. */ + uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime ); + + /* For percentage calculations. */ + ulTotalTime /= 100UL; + + /* Avoid divide by zero errors. */ + if( ulTotalTime > 0UL ) + { + /* Create a human readable table from the binary data. */ + for( x = 0; x < uxArraySize; x++ ) + { + /* What percentage of the total run time has the task used? + * This will always be rounded down to the nearest integer. + * ulTotalRunTimeDiv100 has already been divided by 100. */ + ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime; + + /* Write the task name to the string, padding with + * spaces so it can be printed in tabular form more + * easily. 
*/ + pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName ); + + if( ulStatsAsPercentage > 0UL ) + { + #ifdef portLU_PRINTF_SPECIFIER_REQUIRED + { + sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage ); + } + #else + { + /* sizeof( int ) == sizeof( long ) so a smaller + * printf() library can be used. */ + sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + } + #endif + } + else + { + /* If the percentage is zero here then the task has + * consumed less than 1% of the total run time. */ + #ifdef portLU_PRINTF_SPECIFIER_REQUIRED + { + sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter ); + } + #else + { + /* sizeof( int ) == sizeof( long ) so a smaller + * printf() library can be used. */ + sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */ + } + #endif + } + + pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */ + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION + * is 0 then vPortFree() will be #defined to nothing. 
*/ + vPortFree( pxTaskStatusArray ); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + +#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */ +/*-----------------------------------------------------------*/ + +TickType_t uxTaskResetEventItemValue( void ) +{ + TickType_t uxReturn; + + uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) ); + + /* Reset the event list item to its normal value - so it can be used with + * queues and semaphores. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */ + + return uxReturn; +} +/*-----------------------------------------------------------*/ + +#if ( configUSE_MUTEXES == 1 ) + + TaskHandle_t pvTaskIncrementMutexHeldCount( void ) + { + /* If xSemaphoreCreateMutex() is called before any tasks have been created + * then pxCurrentTCB will be NULL. */ + if( pxCurrentTCB != NULL ) + { + ( pxCurrentTCB->uxMutexesHeld )++; + } + + return pxCurrentTCB; + } + +#endif /* configUSE_MUTEXES */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWait, + BaseType_t xClearCountOnExit, + TickType_t xTicksToWait ) + { + uint32_t ulReturn; + + configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES ); + + taskENTER_CRITICAL(); + { + /* Only block if the notification count is not already non-zero. */ + if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] == 0UL ) + { + /* Mark this task as waiting for a notification. 
*/ + pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWait ); + + /* All ports are written to allow a yield in a critical + * section (some will yield immediately, others wait until the + * critical section exits) - but it is not something that + * application code should ever do. */ + vTaskYieldWithinAPI(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_TAKE( uxIndexToWait ); + ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ]; + + if( ulReturn != 0UL ) + { + if( xClearCountOnExit != pdFALSE ) + { + pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = 0UL; + } + else + { + pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1; + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return ulReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWait, + uint32_t ulBitsToClearOnEntry, + uint32_t ulBitsToClearOnExit, + uint32_t * pulNotificationValue, + TickType_t xTicksToWait ) + { + BaseType_t xReturn; + + configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES ); + + taskENTER_CRITICAL(); + { + /* Only block if a notification is not already pending. */ + if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED ) + { + /* Clear bits in the task's notification value as bits may get + * set by the notifying task or interrupt. This can be used to + * clear the value to zero. 
*/ + pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry; + + /* Mark this task as waiting for a notification. */ + pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION; + + if( xTicksToWait > ( TickType_t ) 0 ) + { + prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE ); + traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWait ); + + /* All ports are written to allow a yield in a critical + * section (some will yield immediately, others wait until the + * critical section exits) - but it is not something that + * application code should ever do. */ + vTaskYieldWithinAPI(); + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + taskENTER_CRITICAL(); + { + traceTASK_NOTIFY_WAIT( uxIndexToWait ); + + if( pulNotificationValue != NULL ) + { + /* Output the current notification value, which may or may not + * have changed. */ + *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ]; + } + + /* If ucNotifyValue is set then either the task never entered the + * blocked state (because a notification was already pending) or the + * task unblocked because of a notification. Otherwise the task + * unblocked because of a timeout. */ + if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED ) + { + /* A notification was not received. */ + xReturn = pdFALSE; + } + else + { + /* A notification was already pending or a notification was + * received while the task was waiting. 
*/ + pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit; + xReturn = pdTRUE; + } + + pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION; + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue ) + { + TCB_t * pxTCB; + BaseType_t xReturn = pdPASS; + uint8_t ucOriginalNotifyState; + + configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); + configASSERT( xTaskToNotify ); + pxTCB = xTaskToNotify; + + taskENTER_CRITICAL(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ]; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; + + pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits: + pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue; + break; + + case eIncrement: + ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++; + break; + + case eSetValueWithOverwrite: + pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue; + break; + + case eSetValueWithoutOverwrite: + + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + + break; + + case eNoAction: + + /* The task is being notified without its notify value being + * updated. */ + break; + + default: + + /* Should not get here if all enums are handled. + * Artificially force an assert by testing a value the + * compiler can't assume is const. 
*/ + configASSERT( xTickCount == ( TickType_t ) 0 ); + + break; + } + + traceTASK_NOTIFY( uxIndexToNotify ); + + /* If the task is in the blocked state specifically to wait for a + * notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + + /* The task should not have been on an event list. */ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + #if ( configUSE_TICKLESS_IDLE != 0 ) + { + /* If a task is blocked waiting for a notification then + * xNextTaskUnblockTime might be set to the blocked task's time + * out time. If the task is unblocked for a reason other than + * a timeout xNextTaskUnblockTime is normally left unchanged, + * because it will automatically get reset to a new value when + * the tick count equals xNextTaskUnblockTime. However if + * tickless idling is used it might be more important to enter + * sleep mode at the earliest possible time - so reset + * xNextTaskUnblockTime here to ensure it is updated at the + * earliest possible time. 
*/ + prvResetNextTaskUnblockTime(); + } + #endif + + #if ( configUSE_PREEMPTION == 1 ) + { + prvYieldForTask( pxTCB, pdFALSE ); + } + #endif + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + uint32_t ulValue, + eNotifyAction eAction, + uint32_t * pulPreviousNotificationValue, + BaseType_t * pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + BaseType_t xReturn = pdPASS; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); + + /* RTOS ports that support interrupt nesting have the concept of a + * maximum system call (or maximum API call) interrupt priority. + * Interrupts that are above the maximum system call priority are keep + * permanently enabled, even when the RTOS kernel is in a critical section, + * but cannot make any calls to FreeRTOS API functions. If configASSERT() + * is defined in FreeRTOSConfig.h then + * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + * failure if a FreeRTOS API function is called from an interrupt that has + * been assigned a priority above the configured maximum system call + * priority. Only FreeRTOS functions that end in FromISR can be called + * from interrupts that have been assigned a priority at or (logically) + * below the maximum system call interrupt priority. FreeRTOS maintains a + * separate interrupt safe API to ensure interrupt entry is as fast and as + * simple as possible. 
More information (albeit Cortex-M specific) is + * provided on the following link: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + if( pulPreviousNotificationValue != NULL ) + { + *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ]; + } + + ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; + pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; + + switch( eAction ) + { + case eSetBits: + pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue; + break; + + case eIncrement: + ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++; + break; + + case eSetValueWithOverwrite: + pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue; + break; + + case eSetValueWithoutOverwrite: + + if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED ) + { + pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue; + } + else + { + /* The value could not be written to the task. */ + xReturn = pdFAIL; + } + + break; + + case eNoAction: + + /* The task is being notified without its notify value being + * updated. */ + break; + + default: + + /* Should not get here if all enums are handled. + * Artificially force an assert by testing a value the + * compiler can't assume is const. */ + configASSERT( xTickCount == ( TickType_t ) 0 ); + break; + } + + traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify ); + + /* If the task is in the blocked state specifically to wait for a + * notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. 
*/ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + * this task pending until the scheduler is resumed. */ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxTCB, pdFALSE ); + + if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) + { + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + } + #endif + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify, + UBaseType_t uxIndexToNotify, + BaseType_t * pxHigherPriorityTaskWoken ) + { + TCB_t * pxTCB; + uint8_t ucOriginalNotifyState; + UBaseType_t uxSavedInterruptStatus; + + configASSERT( xTaskToNotify ); + configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES ); + + /* RTOS ports that support interrupt nesting have the concept of a + * maximum system call (or maximum API call) interrupt priority. + * Interrupts that are above the maximum system call priority are keep + * permanently enabled, even when the RTOS kernel is in a critical section, + * but cannot make any calls to FreeRTOS API functions. If configASSERT() + * is defined in FreeRTOSConfig.h then + * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion + * failure if a FreeRTOS API function is called from an interrupt that has + * been assigned a priority above the configured maximum system call + * priority. 
Only FreeRTOS functions that end in FromISR can be called + * from interrupts that have been assigned a priority at or (logically) + * below the maximum system call interrupt priority. FreeRTOS maintains a + * separate interrupt safe API to ensure interrupt entry is as fast and as + * simple as possible. More information (albeit Cortex-M specific) is + * provided on the following link: + * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */ + portASSERT_IF_INTERRUPT_PRIORITY_INVALID(); + + pxTCB = xTaskToNotify; + + uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR(); + { + ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ]; + pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED; + + /* 'Giving' is equivalent to incrementing a count in a counting + * semaphore. */ + ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++; + + traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify ); + + /* If the task is in the blocked state specifically to wait for a + * notification then unblock it now. */ + if( ucOriginalNotifyState == taskWAITING_NOTIFICATION ) + { + /* The task should not have been on an event list. */ + configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL ); + + if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE ) + { + ( void ) uxListRemove( &( pxTCB->xStateListItem ) ); + prvAddTaskToReadyList( pxTCB ); + } + else + { + /* The delayed and ready lists cannot be accessed, so hold + * this task pending until the scheduler is resumed. 
*/ + vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) ); + } + + #if ( configUSE_PREEMPTION == 1 ) + prvYieldForTask( pxTCB, pdFALSE ); + + if( xYieldPendings[ portGET_CORE_ID() ] == pdTRUE ) + { + if( pxHigherPriorityTaskWoken != NULL ) + { + *pxHigherPriorityTaskWoken = pdTRUE; + } + } + #endif + } + } + portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear ) + { + TCB_t * pxTCB; + BaseType_t xReturn; + + configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES ); + + /* If null is passed in here then it is the calling task that is having + * its notification state cleared. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + taskENTER_CRITICAL(); + { + if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED ) + { + pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION; + xReturn = pdPASS; + } + else + { + xReturn = pdFAIL; + } + } + taskEXIT_CRITICAL(); + + return xReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if ( configUSE_TASK_NOTIFICATIONS == 1 ) + + uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask, + UBaseType_t uxIndexToClear, + uint32_t ulBitsToClear ) + { + TCB_t * pxTCB; + uint32_t ulReturn; + + /* If null is passed in here then it is the calling task that is having + * its notification state cleared. */ + pxTCB = prvGetTCBFromHandle( xTask ); + + taskENTER_CRITICAL(); + { + /* Return the notification as it was before the bits were cleared, + * then clear the bit mask. 
*/ + ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ]; + pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear; + } + taskEXIT_CRITICAL(); + + return ulReturn; + } + +#endif /* configUSE_TASK_NOTIFICATIONS */ +/*-----------------------------------------------------------*/ + +#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) + + uint32_t ulTaskGetIdleRunTimeCounter( void ) + { + uint32_t ulReturn = 0; + + for( BaseType_t i = 0; i < configNUM_CORES; i++ ) + { + ulReturn += xIdleTaskHandle[ i ]->ulRunTimeCounter; + } + + return ulReturn; + } + +#endif /* if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) ) */ +/*-----------------------------------------------------------*/ + +static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, + const BaseType_t xCanBlockIndefinitely ) +{ + TickType_t xTimeToWake; + const TickType_t xConstTickCount = xTickCount; + + #if ( INCLUDE_xTaskAbortDelay == 1 ) + { + /* About to enter a delayed list, so ensure the ucDelayAborted flag is + * reset to pdFALSE so it can be detected as having been set to pdTRUE + * when the task leaves the Blocked state. */ + pxCurrentTCB->ucDelayAborted = pdFALSE; + } + #endif + + /* Remove the task from the ready list before adding it to the blocked list + * as the same list item is used for both lists. */ + if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 ) + { + /* The current task must be in a ready list, so there is no need to + * check, and the port reset macro can be called directly. */ + portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. 
*/ + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + + #if ( INCLUDE_vTaskSuspend == 1 ) + { + if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) ) + { + /* Add the task to the suspended task list instead of a delayed task + * list to ensure it is not woken by a timing event. It will block + * indefinitely. */ + vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* Calculate the time at which the task should be woken if the event + * does not occur. This may overflow but this doesn't matter, the + * kernel will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow + * list. */ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list + * is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the + * head of the list of blocked tasks then xNextTaskUnblockTime + * needs to be updated too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + } + } + #else /* INCLUDE_vTaskSuspend */ + { + /* Calculate the time at which the task should be woken if the event + * does not occur. This may overflow but this doesn't matter, the kernel + * will manage it correctly. */ + xTimeToWake = xConstTickCount + xTicksToWait; + + /* The list item will be inserted in wake time order. */ + listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake ); + + if( xTimeToWake < xConstTickCount ) + { + /* Wake time has overflowed. Place this item in the overflow list. 
*/ + vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + } + else + { + /* The wake time has not overflowed, so the current block list is used. */ + vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) ); + + /* If the task entering the blocked state was placed at the head of the + * list of blocked tasks then xNextTaskUnblockTime needs to be updated + * too. */ + if( xTimeToWake < xNextTaskUnblockTime ) + { + xNextTaskUnblockTime = xTimeToWake; + } + else + { + mtCOVERAGE_TEST_MARKER(); + } + } + + /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */ + ( void ) xCanBlockIndefinitely; + } + #endif /* INCLUDE_vTaskSuspend */ +} + +/* Code below here allows additional code to be inserted into this source file, + * especially where access to file scope functions and data is needed (for example + * when performing module tests). */ + +#ifdef FREERTOS_MODULE_TEST + #include "tasks_test_access_functions.h" +#endif + + +#if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) + + #include "freertos_tasks_c_additions.h" + + #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT + static void freertos_tasks_c_additions_init( void ) + { + FREERTOS_TASKS_C_ADDITIONS_INIT(); + } + #endif + +#endif /* if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) */ \ No newline at end of file diff --git a/Test/VeriFast/tasks/vTaskSwitchContext/stats/stats.md b/Test/VeriFast/tasks/vTaskSwitchContext/stats/stats.md new file mode 100644 index 00000000000..f4bfbfe73db --- /dev/null +++ b/Test/VeriFast/tasks/vTaskSwitchContext/stats/stats.md @@ -0,0 +1,26 @@ +#lines of code verified +tasks.c: +vTaskSwitchContext: 22 +prvSelectHighestPriorityTask: 68 +xTaskGetCurrentTaskHandle: 9 +global definitions and variables: 103 + +stack_macros.h: +taskCHECK_FOR_STACK_OVERFLOW: 13 + +list.c: +vListInitialiseItem: 4 +vListInsertEnd: 11 +uxListRemove: 17 + +list.h: +global definitions: 23 + +FreeRTOSConfig.h: 65 + +total: 335 + + +#lines of annotation + +tasks_vf_pp.c: 
2328 diff --git a/include/FreeRTOS.h b/include/FreeRTOS.h index 60abd75b6ca..5ab71a39875 100644 --- a/include/FreeRTOS.h +++ b/include/FreeRTOS.h @@ -1274,6 +1274,10 @@ typedef struct xSTATIC_TCB * users will recognise that it would be unwise to make direct use of the * structure members. */ +#ifndef VERIFAST +/* Reason for rewrite: + * VeriFast does not support nested union definitions. + */ typedef struct xSTATIC_QUEUE { void * pvDummy1[ 3 ]; @@ -1302,6 +1306,7 @@ typedef struct xSTATIC_QUEUE #endif } StaticQueue_t; typedef StaticQueue_t StaticSemaphore_t; +#endif /* VERIFAST */ /* * In line with software engineering best practice, especially when supplying a diff --git a/include/timers.h b/include/timers.h index 2ef995114b3..344cbf39568 100644 --- a/include/timers.h +++ b/include/timers.h @@ -28,6 +28,20 @@ #ifndef TIMERS_H #define TIMERS_H + +#ifdef VERIFAST + /* Reason for rewrite: + * VeriFast bug: + * Both `#ifdef INC_FREERTOS_H` and its negation `#ifndef INC_FREERTOS_H` + * evaluate to true. See minimal example `define_name`. + */ + #define INC_FREERTOS_H + /* Remember that this header is included indirectly by `tasks.c` after it + * includes `FreeRTOS.h`. + */ + // TODO: Remove this work-around once VF has been fixed. +#endif /* VERIFAST */ + #ifndef INC_FREERTOS_H #error "include FreeRTOS.h must appear in source files before include timers.h" #endif diff --git a/portable/ThirdParty/GCC/RP2040/include/portmacro.h b/portable/ThirdParty/GCC/RP2040/include/portmacro.h index e2982370058..7bbf05ab02b 100644 --- a/portable/ThirdParty/GCC/RP2040/include/portmacro.h +++ b/portable/ThirdParty/GCC/RP2040/include/portmacro.h @@ -77,7 +77,13 @@ #define portSTACK_GROWTH ( -1 ) #define portTICK_PERIOD_MS ( ( TickType_t ) 1000 / configTICK_RATE_HZ ) #define portBYTE_ALIGNMENT 8 - #define portDONT_DISCARD __attribute__( ( used ) ) + #ifdef VERIFAST + /* Reason for rewrite: VeriFast does not support the attribute `used`. 
+ */ + #define portDONT_DISCARD + #else + #define portDONT_DISCARD __attribute__( ( used ) ) + #endif /* We have to use PICO_DIVIDER_DISABLE_INTERRUPTS as the source of truth rathern than our config, * as our FreeRTOSConfig.h header cannot be included by ASM code - which is what this affects in the SDK */ #define portUSE_DIVIDER_SAVE_RESTORE !PICO_DIVIDER_DISABLE_INTERRUPTS @@ -172,40 +178,45 @@ /* Note this is a single method with uxAcquire parameter since we have * static vars, the method is always called with a compile time constant for * uxAcquire, and the compiler should dothe right thing! */ - static inline void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) { - static uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ]; - static uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ]; - configASSERT(ulLockNum >= 0 && ulLockNum < portRTOS_SPINLOCK_COUNT ); - uint32_t ulCoreNum = get_core_num(); - uint32_t ulLockBit = 1u << ulLockNum; - configASSERT(ulLockBit < 256u ); - if( uxAcquire ) - { - if( __builtin_expect( !*pxSpinLock, 0 ) ) + #ifdef VERIFAST + /* Reason for rewrite: VeriFast does not support local static variables. 
+ */ + #else + static inline void vPortRecursiveLock(uint32_t ulLockNum, spin_lock_t *pxSpinLock, BaseType_t uxAcquire) { + static uint8_t ucOwnedByCore[ portMAX_CORE_COUNT ]; + static uint8_t ucRecursionCountByLock[ portRTOS_SPINLOCK_COUNT ]; + configASSERT(ulLockNum >= 0 && ulLockNum < portRTOS_SPINLOCK_COUNT ); + uint32_t ulCoreNum = get_core_num(); + uint32_t ulLockBit = 1u << ulLockNum; + configASSERT(ulLockBit < 256u ); + if( uxAcquire ) { - if( ucOwnedByCore[ulCoreNum] & ulLockBit ) + if( __builtin_expect( !*pxSpinLock, 0 ) ) { - configASSERT(ucRecursionCountByLock[ulLockNum] != 255u ); - ucRecursionCountByLock[ulLockNum]++; - return; + if( ucOwnedByCore[ulCoreNum] & ulLockBit ) + { + configASSERT(ucRecursionCountByLock[ulLockNum] != 255u ); + ucRecursionCountByLock[ulLockNum]++; + return; + } + while ( __builtin_expect( !*pxSpinLock, 0 ) ); + } + __mem_fence_acquire(); + configASSERT(ucRecursionCountByLock[ulLockNum] == 0 ); + ucRecursionCountByLock[ulLockNum] = 1; + ucOwnedByCore[ulCoreNum] |= ulLockBit; + } else { + configASSERT((ucOwnedByCore[ulCoreNum] & ulLockBit) != 0 ); + configASSERT(ucRecursionCountByLock[ulLockNum] != 0 ); + if( !--ucRecursionCountByLock[ulLockNum] ) + { + ucOwnedByCore[ulCoreNum] &= ~ulLockBit; + __mem_fence_release(); + *pxSpinLock = 1; } - while ( __builtin_expect( !*pxSpinLock, 0 ) ); - } - __mem_fence_acquire(); - configASSERT(ucRecursionCountByLock[ulLockNum] == 0 ); - ucRecursionCountByLock[ulLockNum] = 1; - ucOwnedByCore[ulCoreNum] |= ulLockBit; - } else { - configASSERT((ucOwnedByCore[ulCoreNum] & ulLockBit) != 0 ); - configASSERT(ucRecursionCountByLock[ulLockNum] != 0 ); - if( !--ucRecursionCountByLock[ulLockNum] ) - { - ucOwnedByCore[ulCoreNum] &= ~ulLockBit; - __mem_fence_release(); - *pxSpinLock = 1; } } - } + #endif /* VERIFAST */ #define portGET_ISR_LOCK() vPortRecursiveLock(0, spin_lock_instance(configSMP_SPINLOCK_0), pdTRUE) #define portRELEASE_ISR_LOCK() vPortRecursiveLock(0, 
spin_lock_instance(configSMP_SPINLOCK_0), pdFALSE) diff --git a/portable/ThirdParty/GCC/RP2040/port.c b/portable/ThirdParty/GCC/RP2040/port.c index bc77140e6e4..bc8f0ad4d01 100644 --- a/portable/ThirdParty/GCC/RP2040/port.c +++ b/portable/ThirdParty/GCC/RP2040/port.c @@ -170,6 +170,8 @@ static uint8_t ucPrimaryCoreNum = INVALID_PRIMARY_CORE_NUM; StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack, TaskFunction_t pxCode, void * pvParameters ) +//@ requires true; +//@ ensures true; { /* Simulate the stack frame as it would be created by a context switch * interrupt. */